code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package nl.malienkolders.htm.viewer.jmonkey.lib.util
import java.awt.Graphics2D
import java.awt.image.BufferedImage
import java.nio.ByteBuffer
import _root_.com.jme3.texture._
/**
 * A jME3 [[Image]] backed by an AWT [[BufferedImage]] that subclasses repaint
 * with Java2D. Call `refreshImage()` to run [[paint]] and copy the raster into
 * the image's backing direct byte buffer.
 *
 * @param size     (width, height) of the image in pixels
 * @param hasAlpha if true, a 4-byte ABGR backing image and RGBA8 format are
 *                 used; otherwise 3-byte BGR and RGB8
 */
abstract class PaintableImage(val size: (Int, Int), hasAlpha: Boolean) extends Image {

  private val backImg = new BufferedImage(size._1, size._2,
    if (hasAlpha) BufferedImage.TYPE_4BYTE_ABGR else BufferedImage.TYPE_3BYTE_BGR)

  setFormat(if (hasAlpha) Image.Format.RGBA8 else Image.Format.RGB8)
  setWidth(backImg.getWidth())
  setHeight(backImg.getHeight())

  // Direct buffer sized for the worst case (4 bytes per pixel); for RGB-only
  // images just 3 * width * height bytes of it are actually written.
  private val scratch = ByteBuffer.allocateDirect(4 * backImg.getWidth()
    * backImg.getHeight())

  /** Repaints via [[paint]] and publishes the pixel data to the jME3 image. */
  def refreshImage(): Unit = { // explicit result type instead of deprecated procedure syntax
    val g: Graphics2D = backImg.createGraphics()
    // Always release the graphics context, even if paint throws.
    try paint(g) finally g.dispose()
    /* get the image data */
    val lData: Array[Byte] = backImg.getRaster().getDataElements(
      0, 0, backImg.getWidth(), backImg.getHeight(), null).asInstanceOf[Array[Byte]]
    scratch.clear()
    scratch.put(lData, 0, lData.length)
    scratch.rewind()
    setData(scratch)
  }

  /** Subclasses draw the image content here. */
  def paint(g: Graphics2D): Unit
} | hema-tournament-manager/htm | htm-viewer-jme/src/main/scala/nl/malienkolders/htm/viewer/jmonkey/lib/util/PaintableImage.scala | Scala | apache-2.0 | 1,082 |
package com.twitter.finatra.kafkastreams.integration.config
import com.twitter.finatra.kafka.serde.{UnKeyed, UnKeyedSerde}
import com.twitter.finatra.kafkastreams.KafkaStreamsTwitterServer
import com.twitter.finatra.kafkastreams.config.FinatraRocksDBConfig._
import com.twitter.finatra.kafkastreams.config.{DefaultTopicConfig, FinatraRocksDBConfig, KafkaStreamsConfig, RocksDbFlags}
import com.twitter.finatra.kafkastreams.test.{FinatraTopologyTester, TopologyFeatureTest}
import com.twitter.finatra.kafkastreams.transformer.FinatraTransformer
import com.twitter.finatra.kafkastreams.transformer.domain.Time
import org.apache.kafka.common.serialization.Serdes
import org.apache.kafka.streams.StreamsBuilder
import org.apache.kafka.streams.kstream.{Consumed, Produced}
import org.apache.kafka.streams.state.Stores
import org.joda.time.DateTime
class FinatraRocksDBConfigFeatureTest extends TopologyFeatureTest {
// Application id used as both the Kafka Streams application id and server name.
private val appId = "no-op"
// Name of the persistent key/value store wired into the test topology.
private val stateStoreName = "test-state-store"
// Minimal server under test: a no-op transformer over a persistent store, with
// every RocksDB flag forwarded into the streams properties so the test below
// can verify flag -> property translation.
private val kafkaStreamsTwitterServer: KafkaStreamsTwitterServer = new KafkaStreamsTwitterServer with RocksDbFlags {
  override val name: String = appId

  override protected def configureKafkaStreams(builder: StreamsBuilder): Unit = {
    // Persistent store with the changelog config used by Finatra.
    builder.addStateStore(
      Stores
        .keyValueStoreBuilder(
          Stores.persistentKeyValueStore(stateStoreName),
          UnKeyedSerde,
          Serdes.String
        ).withLoggingEnabled(DefaultTopicConfig.FinatraChangelogConfig)
    )

    // Transformer with all callbacks left empty: only the store wiring matters here.
    val finatraTransformerSupplier = () =>
      new FinatraTransformer[UnKeyed, String, UnKeyed, String](statsReceiver = statsReceiver) {
        override def onInit(): Unit = { }
        override def onClose(): Unit = { }
        override def onMessage(messageTime: Time, unKeyed: UnKeyed, string: String): Unit = { }
      }

    // source -> transform (attached to the state store) -> sink
    builder.asScala
      .stream("source")(Consumed.`with`(UnKeyedSerde, Serdes.String))
      .transform(finatraTransformerSupplier, stateStoreName)
      .to("sink")(Produced.`with`(UnKeyedSerde, Serdes.String))
  }

  // Copies every RocksDB flag value into the corresponding streams property.
  // Note: "BoomFilter" is the identifier's actual (misspelled) name upstream.
  override def streamsProperties(config: KafkaStreamsConfig): KafkaStreamsConfig = {
    super
      .streamsProperties(config)
      .rocksDbConfigSetter[FinatraRocksDBConfig]
      .withConfig(RocksDbBlockCacheSizeConfig, rocksDbCountsStoreBlockCacheSize())
      .withConfig(RocksDbBlockCacheShardBitsConfig, rocksDbBlockCacheShardBitsConfig())
      .withConfig(RocksDbLZ4Config, rocksDbEnableLZ4().toString)
      .withConfig(RocksDbEnableStatistics, rocksDbEnableStatistics().toString)
      .withConfig(RocksDbStatCollectionPeriodMs, rocksDbStatCollectionPeriodMs())
      .withConfig(RocksDbInfoLogLevel, rocksDbInfoLogLevel())
      .withConfig(RocksDbMaxLogFileSize, rocksDbMaxLogFileSize())
      .withConfig(RocksDbKeepLogFileNum, rocksDbKeepLogFileNum())
      .withConfig(RocksDbCacheIndexAndFilterBlocks, rocksDbCacheIndexAndFilterBlocks())
      .withConfig(RocksDbCachePinL0IndexAndFilterBlocks, rocksDbCachePinL0IndexAndFilterBlocks())
      .withConfig(RocksDbTableConfigBlockSize, rocksDbTableConfigBlockSize())
      .withConfig(RocksDbTableConfigBoomFilterKeyBits, rocksDbTableConfigBoomFilterKeyBits())
      .withConfig(RocksDbTableConfigBoomFilterMode, rocksDbTableConfigBoomFilterMode())
      .withConfig(RocksDbDatabaseWriteBufferSize, rocksDbDatabaseWriteBufferSize())
      .withConfig(RocksDbWriteBufferSize, rocksDbWriteBufferSize())
      .withConfig(RocksDbManifestPreallocationSize, rocksDbManifestPreallocationSize())
      .withConfig(RocksDbMinWriteBufferNumberToMerge, rocksDbMinWriteBufferNumberToMerge())
      .withConfig(RocksDbMaxWriteBufferNumber, rocksDbMaxWriteBufferNumber())
      .withConfig(RocksDbBytesPerSync, rocksDbBytesPerSync())
      .withConfig(RocksDbMaxBackgroundCompactions, rocksDbMaxBackgroundCompactions())
      .withConfig(RocksDbMaxBackgroundFlushes, rocksDbMaxBackgroundFlushes())
      .withConfig(RocksDbIncreaseParallelism, rocksDbIncreaseParallelism())
      .withConfig(RocksDbInplaceUpdateSupport, rocksDbInplaceUpdateSupport())
      .withConfig(RocksDbAllowConcurrentMemtableWrite, rocksDbAllowConcurrentMemtableWrite())
      .withConfig(RocksDbEnableWriteThreadAdaptiveYield, rocksDbEnableWriteThreadAdaptiveYield())
      .withConfig(RocksDbCompactionStyle, rocksDbCompactionStyle())
      .withConfig(RocksDbCompactionStyleOptimize, rocksDbCompactionStyleOptimize())
      .withConfig(RocksDbMaxBytesForLevelBase, rocksDbMaxBytesForLevelBase())
      .withConfig(RocksDbLevelCompactionDynamicLevelBytes, rocksDbLevelCompactionDynamicLevelBytes())
      .withConfig(RocksDbCompactionStyleMemtableBudget, rocksDbCompactionStyleMemtableBudget())
  }
}
// Topology tester launched with one distinct, easily recognizable value per
// RocksDB flag; storage sizes use the flag DSL ("1.byte", "5.megabytes") and
// are expected to be normalized to raw byte counts in the properties.
private val _topologyTester = FinatraTopologyTester(
  kafkaApplicationId = appId,
  server = kafkaStreamsTwitterServer,
  startingWallClockTime = DateTime.now,
  flags = Map(
    "rocksdb.block.cache.size" -> "1.byte",
    "rocksdb.block.cache.shard.bits" -> "2",
    "rocksdb.lz4" -> "true",
    "rocksdb.statistics" -> "true",
    "rocksdb.statistics.collection.period.ms" -> "60001",
    "rocksdb.log.info.level" -> "INFO_LEVEL",
    "rocksdb.log.max.file.size" -> "2.bytes",
    "rocksdb.log.keep.file.num" -> "3",
    "rocksdb.cache.index.and.filter.blocks" -> "false",
    "rocksdb.cache.pin.l0.index.and.filter.blocks" -> "false",
    "rocksdb.tableconfig.block.size" -> "4.bytes",
    "rocksdb.tableconfig.bloomfilter.key.bits" -> "5",
    "rocksdb.tableconfig.bloomfilter.mode" -> "false",
    "rocksdb.db.write.buffer.size" -> "6.bytes",
    "rocksdb.write.buffer.size" -> "7.bytes",
    "rocksdb.manifest.preallocation.size" -> "5.megabytes",
    "rocksdb.min.write.buffer.num.merge" -> "8",
    "rocksdb.max.write.buffer.num" -> "9",
    "rocksdb.bytes.per.sync" -> "10.bytes",
    "rocksdb.max.background.compactions" -> "11",
    "rocksdb.max.background.flushes" -> "12",
    "rocksdb.parallelism" -> "2",
    "rocksdb.inplace.update.support" -> "false",
    "rocksdb.allow.concurrent.memtable.write" -> "true",
    "rocksdb.enable.write.thread.adaptive.yield" -> "true",
    "rocksdb.compaction.style" -> "UNIVERSAL",
    "rocksdb.compaction.style.optimize" -> "false",
    "rocksdb.max.bytes.for.level.base" -> "13.bytes",
    "rocksdb.level.compaction.dynamic.level.bytes" -> "false",
    "rocksdb.compaction.style.memtable.budget" -> "14.bytes"
  )
)
/** Exposes the shared tester instance to the TopologyFeatureTest harness. */
override protected def topologyTester: FinatraTopologyTester = _topologyTester
// Resets the tester and (re)declares the source and sink topics before each test.
override def beforeEach(): Unit = {
  super.beforeEach()
  topologyTester.reset()
  topologyTester.topic(
    "source",
    UnKeyedSerde,
    Serdes.String
  )
  topologyTester.topic(
    "sink",
    UnKeyedSerde,
    Serdes.String
  )
}
// Verifies that every flag set above is translated into the equivalent
// rocksdb.* property. Size-typed flags ("1.byte", "5.megabytes") must appear
// normalized to plain byte counts ("1", "5242880"); boolean/enum flags pass
// through as strings.
test("rocksdb properties") {
  val properties = topologyTester.properties
  // The config-setter class itself is registered by rocksDbConfigSetter[...].
  properties.getProperty("rocksdb.config.setter") should be(
    "com.twitter.finatra.kafkastreams.config.FinatraRocksDBConfig")
  properties.getProperty("rocksdb.block.cache.size") should be("1")
  properties.getProperty("rocksdb.block.cache.shard.bits") should be("2")
  properties.getProperty("rocksdb.lz4") should be("true")
  properties.getProperty("rocksdb.statistics") should be("true")
  properties.getProperty("rocksdb.statistics.collection.period.ms") should be("60001")
  properties.getProperty("rocksdb.log.info.level") should be("INFO_LEVEL")
  properties.getProperty("rocksdb.log.max.file.size") should be("2")
  properties.getProperty("rocksdb.log.keep.file.num") should be("3")
  properties.getProperty("rocksdb.cache.index.and.filter.blocks") should be("false")
  properties.getProperty("rocksdb.cache.pin.l0.index.and.filter.blocks") should be("false")
  properties.getProperty("rocksdb.tableconfig.block.size") should be("4")
  properties.getProperty("rocksdb.tableconfig.bloomfilter.key.bits") should be("5")
  properties.getProperty("rocksdb.tableconfig.bloomfilter.mode") should be("false")
  properties.getProperty("rocksdb.db.write.buffer.size") should be("6")
  properties.getProperty("rocksdb.write.buffer.size") should be("7")
  // 5.megabytes -> 5 * 1024 * 1024 bytes
  properties.getProperty("rocksdb.manifest.preallocation.size") should be("5242880")
  properties.getProperty("rocksdb.min.write.buffer.num.merge") should be("8")
  properties.getProperty("rocksdb.max.write.buffer.num") should be("9")
  properties.getProperty("rocksdb.bytes.per.sync") should be("10")
  properties.getProperty("rocksdb.max.background.compactions") should be("11")
  properties.getProperty("rocksdb.max.background.flushes") should be("12")
  properties.getProperty("rocksdb.parallelism") should be("2")
  properties.getProperty("rocksdb.inplace.update.support") should be("false")
  properties.getProperty("rocksdb.allow.concurrent.memtable.write") should be("true")
  properties.getProperty("rocksdb.enable.write.thread.adaptive.yield") should be("true")
  properties.getProperty("rocksdb.compaction.style") should be("UNIVERSAL")
  properties.getProperty("rocksdb.compaction.style.optimize") should be("false")
  properties.getProperty("rocksdb.max.bytes.for.level.base") should be("13")
  properties.getProperty("rocksdb.level.compaction.dynamic.level.bytes") should be("false")
  properties.getProperty("rocksdb.compaction.style.memtable.budget") should be("14")
  // The state store declared in configureKafkaStreams must exist in the driver.
  topologyTester.driver
    .getKeyValueStore[UnKeyed, String](stateStoreName) shouldNot be(null)
}
}
| twitter/finatra | kafka-streams/kafka-streams/src/test/scala/com/twitter/finatra/kafkastreams/integration/config/FinatraRocksDBConfigFeatureTest.scala | Scala | apache-2.0 | 9,573 |
package org.jetbrains.plugins.scala.codeInspection.collections
import com.intellij.codeInspection.ProblemHighlightType
import org.jetbrains.plugins.scala.codeInspection.InspectionBundle
import org.jetbrains.plugins.scala.extensions.{ChildOf, ExpressionType}
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScExpression, ScGenericCall}
import org.jetbrains.plugins.scala.lang.psi.types.ScTypeExt
/**
 * Simplification that removes a collection conversion call (e.g. `.toSeq`)
 * when the receiver's type already conforms to the type produced by the
 * conversion, making the call a no-op.
 *
 * @author Nikolay.Tropin
 */
object RedundantCollectionConversion extends SimplificationType {
  override def hint: String = InspectionBundle.message("redundant.collection.conversion")

  override def getSimplification(expr: ScExpression) = {
    implicit val typeSystem = expr.typeSystem
    // If the conversion is the callee of a generic call (explicit type args),
    // operate on the whole generic call so the replacement covers it too.
    val withGeneric = expr match {
      case ChildOf(gc: ScGenericCall) => gc
      case ref => ref
    }
    val typeAfterConversion = withGeneric.getType().getOrAny
    withGeneric match {
      // Redundant when the receiver already conforms to the post-conversion type:
      // replace the whole call with just the receiver's text.
      case (base@ExpressionType(baseType)) `.toCollection` () if baseType.conforms(typeAfterConversion) =>
        val simplification = replace(withGeneric).withText(base.getText).highlightFrom(base)
        Some(simplification)
      case _ => None
    }
  }
}
/** Inspection wiring: highlights redundant collection conversions as unused-like code. */
class RedundantCollectionConversionInspection extends OperationOnCollectionInspection {
  override def highlightType: ProblemHighlightType =
    ProblemHighlightType.LIKE_UNUSED_SYMBOL

  override def possibleSimplificationTypes: Array[SimplificationType] =
    Array(RedundantCollectionConversion)
}
| katejim/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/collections/RedundantCollectionConversionInspection.scala | Scala | apache-2.0 | 1,424 |
package monocle.function
import monocle.{Setter, Traversal}
import scala.annotation.implicitNotFound
import scalaz.std.stream._
import scalaz.std.anyVal._
import scalaz.syntax.monad._
import scalaz.{Applicative, Monad, State, Traverse}
/**
* [[Plated]] is a type-class for types which can extract their immediate
* self-similar children.
*
* @tparam A the parent and child type of a [[Plated]]
*/
@implicitNotFound("Could not find an instance of Plated[${A}], please check Monocle instance location policy to " +
"find out which import is necessary")
abstract class Plated[A] extends Serializable { self =>
  /** Traversal selecting the immediate self-similar children of a value of `A`. */
  def plate: Traversal[A, A]
}
trait PlatedFunctions {

  /** [[Traversal]] of immediate self-similar children */
  def plate[A](implicit P: Plated[A]): Traversal[A, A] = P.plate

  /** get the immediate self-similar children of a target */
  @inline def children[A: Plated](a: A): List[A] = plate[A].getAll(a)

  /**
   * get all transitive self-similar elements of a target, including itself;
   * the stream is built lazily, depth-first
   */
  def universe[A: Plated](a: A): Stream[A] = {
    val fold = plate[A].asFold
    def go(b: A): Stream[A] = b #:: fold.foldMap[Stream[A]](go)(b)
    go(a)
  }

  /**
   * rewrite a target by applying a rule as often as possible until it reaches
   * a fixpoint (this is an infinite loop if there is no fixpoint)
   */
  def rewrite[A: Plated](f: A => Option[A])(a: A): A =
    rewriteOf(plate[A].asSetter)(f)(a)

  /**
   * rewrite a target by applying a rule within a [[Setter]], as often as
   * possible until it reaches a fixpoint (this is an infinite loop if there is
   * no fixpoint)
   */
  def rewriteOf[A](l: Setter[A, A])(f: A => Option[A])(a: A): A = {
    def go(b: A): A = {
      // bottom-up: rewrite the children first, then retry the rule on the result
      val c = transformOf(l)(go)(b)
      f(c).fold(c)(go)
    }
    go(a)
  }

  /** transform every element */
  def transform[A: Plated](f: A => A)(a: A): A =
    transformOf(plate[A].asSetter)(f)(a)

  /** transform every element by applying a [[Setter]] (recursing after applying f) */
  def transformOf[A](l: Setter[A, A])(f: A => A)(a: A): A =
    l.modify(b => transformOf(l)(f)(f(b)))(a)

  /** transforming counting changes: returns (number of applied rewrites, result) */
  def transformCounting[A: Plated](f: A => Option[A])(a: A): (Int, A) = {
    // thread a counter through with State; each successful rewrite increments it
    transformM[A, State[Int, ?]] { b =>
      f(b).map(c => State((i: Int) => (i + 1, c)))
        .getOrElse(State.state(b))
    }(a).runZero
  }

  /** transforming every element using monadic transformation */
  def transformM[A: Plated, M[_]: Monad](f: A => M[A])(a: A): M[A] = {
    val l = plate[A]
    def go(c: A): M[A] =
      l.modifyF[M](b => f(b).flatMap(go))(c)
    go(a)
  }

}
object Plated extends PlatedFunctions {
/************************************************************************************************/
/** Std instances */
/************************************************************************************************/
/** Plated over the tail of a List: the single "child" of `x :: xs` is `xs`. */
implicit def listPlated[A]: Plated[List[A]] = new Plated[List[A]] {
  val plate: Traversal[List[A], List[A]] = new Traversal[List[A], List[A]] {
    def modifyF[F[_]: Applicative](f: List[A] => F[List[A]])(s: List[A]): F[List[A]] =
      if (s.isEmpty) Applicative[F].point(Nil)
      else Applicative[F].map(f(s.tail))(s.head :: _)
  }
}
/** Plated over the tail of a Stream; the tail is transformed lazily via `#::`. */
implicit def streamPlated[A]: Plated[Stream[A]] = new Plated[Stream[A]] {
  val plate: Traversal[Stream[A], Stream[A]] = new Traversal[Stream[A], Stream[A]] {
    def modifyF[F[_]: Applicative](f: Stream[A] => F[Stream[A]])(s: Stream[A]): F[Stream[A]] =
      if (s.isEmpty) Applicative[F].point(Stream.empty)
      else Applicative[F].map(f(s.tail))(s.head #:: _)
  }
}
/** Plated over the tail of a String: child of `"abc"` is `"bc"`. */
implicit val stringPlated: Plated[String] = new Plated[String] {
  val plate: Traversal[String, String] = new Traversal[String, String] {
    def modifyF[F[_]: Applicative](f: String => F[String])(s: String): F[String] =
      if (s.isEmpty) Applicative[F].point("")
      else Applicative[F].map(f(s.tail))(s.head.toString ++ _)
  }
}
/** Plated over the tail of a Vector. */
implicit def vectorPlated[A]: Plated[Vector[A]] = new Plated[Vector[A]] {
  val plate: Traversal[Vector[A], Vector[A]] = new Traversal[Vector[A], Vector[A]] {
    def modifyF[F[_]: Applicative](f: Vector[A] => F[Vector[A]])(s: Vector[A]): F[Vector[A]] =
      if (s.isEmpty) Applicative[F].point(Vector.empty)
      else Applicative[F].map(f(s.tail))(s.head +: _)
  }
}
/************************************************************************************************/
/** Scalaz instances */
/************************************************************************************************/
import scalaz.{Cofree, Free, IList, ICons, INil, Tree}
// Children of a Cofree are the immediate sub-Cofrees inside its S layer;
// the head label is preserved while the tail functor is traversed with f.
implicit def cofreePlated[S[_]: Traverse, A]: Plated[Cofree[S, A]] = new Plated[Cofree[S, A]] {
  val plate: Traversal[Cofree[S, A], Cofree[S, A]] = new Traversal[Cofree[S, A], Cofree[S, A]] {
    def modifyF[F[_]: Applicative](f: Cofree[S, A] => F[Cofree[S, A]])(s: Cofree[S, A]): F[Cofree[S, A]] =
      Applicative[F].map(Traverse[S].traverse(s.t.run)(f))(Cofree(s.head, _))
  }
}
// Children of a Free: for a suspended step (Left), traverse the immediate
// sub-Frees and re-roll; a pure value (Right) has no children.
implicit def freePlated[S[_]: Traverse, A]: Plated[Free[S, A]] = new Plated[Free[S, A]] {
  val plate: Traversal[Free[S, A], Free[S, A]] = new Traversal[Free[S, A], Free[S, A]] {
    def modifyF[F[_]: Applicative](f: Free[S, A] => F[Free[S, A]])(s: Free[S, A]): F[Free[S, A]] =
      s.resume.fold(
        as => Applicative[F].map(Traverse[S].traverse(as)(f))(Free.roll),
        x => Applicative[F].point(Free.point(x))
      )
  }
}
// Plated over the tail of a scalaz IList, mirroring the List instance above.
implicit def ilistPlated[A]: Plated[IList[A]] = new Plated[IList[A]] {
  val plate: Traversal[IList[A], IList[A]] = new Traversal[IList[A], IList[A]] {
    def modifyF[F[_]: Applicative](f: IList[A] => F[IList[A]])(s: IList[A]): F[IList[A]] =
      s match {
        case ICons(x, xs) => Applicative[F].map(f(xs))(x :: _)
        case INil() => Applicative[F].point(INil())
      }
  }
}
// Children of a Tree node are its immediate subtrees; the root label is kept.
implicit def treePlated[A]: Plated[Tree[A]] = new Plated[Tree[A]] {
  val plate: Traversal[Tree[A], Tree[A]] = new Traversal[Tree[A], Tree[A]] {
    def modifyF[F[_]: Applicative](f: Tree[A] => F[Tree[A]])(s: Tree[A]): F[Tree[A]] =
      Applicative[F].map(Traverse[Stream].traverse(s.subForest)(f))(Tree.Node(s.rootLabel, _))
  }
}
} | rperry/Monocle | core/shared/src/main/scala/monocle/function/Plated.scala | Scala | mit | 6,509 |
package com.github.wartman4404.gldraw
// Regex-based syntax highlighter definition for the app's Lua scripting dialect.
//
// NOTE(review): the backslash escaping throughout these patterns looks doubled
// (e.g. """\\b""" in a triple-quoted string yields two literal characters
// `\` `\` before `b`, and "\\"" is unbalanced) — this is likely an artifact of
// how this source was exported. Verify every pattern against the runtime regex
// engine before editing; bytes are intentionally left untouched here.
object LuaSyntaxHighlightProcessor {
  object Regex {
    // used as-is
    // Single-line (`--`) and block (`--[[ ... ]]`) comments.
    val comment = """(?m:--[^\\[].*$)|(?s:--\\[\\[.*?(?:\\]\\]|$))"""
    // Lua reserved words.
    val keywords = """\\b(?:do|end|while|repeat|until|if|then|else|elseif|end|for|in|function|local|and|or|not)\\b"""
    // Template for a quoted string body; '@' is substituted with the quote char below.
    val stringbody = """@(?:[^@\\\\]|\\\\.)*?@"""
    // Numbers, booleans, nil, plus double- and single-quoted strings.
    val literals = joinKeywords(Array(
      """\\d+\\.?""",
      "true|false|nil")) +
      "|" + stringbody.replace("@", "\\"") +
      "|" + stringbody.replace("@", "'")
    // Types provided by the host application.
    val types = """\\bShaderPaintPoint\\b"""
    // Whitelisted Lua global functions.
    val globalmethods = Array(
      "assert", "error", "ipairs", "next", "pairs", "pcall", "print", "select",
      "tonumber", "tostring", "type", "unpack")
    // Whitelisted string/table/math library members.
    val methods = Array(
      "string\\\\.byte", "string\\\\.char", "string\\\\.dump", "string\\\\.find", "string\\\\.format",
      "string\\\\.gsub", "string\\\\.len", "string\\\\.lower", "string\\\\.rep", "string\\\\.sub",
      "string\\\\.upper", "string\\\\.gmatch", "string\\\\.match", "string\\\\.reverse",
      "table\\\\.maxn", "table\\\\.concat", "table\\\\.sort", "table\\\\.insert", "table\\\\.remove",
      "math\\\\.abs", "math\\\\.acos", "math\\\\.asin", "math\\\\.atan", "math\\\\.atan2", "math\\\\.ceil",
      "math\\\\.sin", "math\\\\.cos", "math\\\\.tan", "math\\\\.deg", "math\\\\.exp", "math\\\\.floor",
      "math\\\\.log", "math\\\\.max", "math\\\\.min", "math\\\\.log10", "math\\\\.huge", "math\\\\.fmod",
      "math\\\\.modf", "math\\\\.cosh", "math\\\\.sinh", "math\\\\.tanh", "math\\\\.pow", "math\\\\.rad",
      "math\\\\.sqrt", "math\\\\.frexp", "math\\\\.ldexp", "math\\\\.random", "math\\\\.randomseed",
      "math\\\\.pi")
    // Host-app drawing/scripting API entry points.
    val apimethods = joinKeywords(Array(
      "pushpoint", "pushline", "pushcatmullrom", "pushcubicbezier",
      "loglua", "clearlayer", "savelayers", "saveundo"))
    // not actually used!
    val bannedmethods = joinKeywords(Array(
      "(?:coroutine|io|os|debug|package|jit|ffi)\\\\.\\\\w+", "getmetatable", "setmetatable",
      "xpcall", "_G", "loadfile", "rawequal", "require", "getfenv", "setfenv",
      "loadstring", "module"))
    // Joins alternatives into a single word-bounded non-capturing group.
    def joinKeywords(kw: Array[String]) = kw.mkString("""\\b(?:""", "|", """)\\b""")
  }
  // ARGB colors, one per capture group in the combined regex below (same order).
  val luaColors: Array[Int] = Array(
    0xff808080, // comment
    0xff399ed7, // types
    0xffd79e39, // builtin vars
    0xffd79e39, // builtin fns
    0xff399ed7, // keywords
    0xff7ba212 // literals
  )
  // Combined processor: one capture group per highlight category, ordered to
  // match luaColors above.
  val LuaProcessor = {
    val regex = Array(
      Regex.comment,
      Regex.types,
      Regex.apimethods,
      Regex.joinKeywords(Regex.globalmethods ++ Regex.methods),
      Regex.keywords,
      Regex.literals
    ).mkString("(", ")|(", ")")
    val values = new RegexSyntaxHighlightProcessor.RegexValues(regex, new LuaIndentCounter(), luaColors)
    new RegexSyntaxHighlightProcessor(values)
  }
}
| wartman4404/everybody-draw | src/main/scala/LuaSyntaxHighlightProcessor.scala | Scala | apache-2.0 | 2,730 |
/*******************************************************************************
* Copyright (c) 2019. Carl Minden
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package com.anathema_roguelike
package environment
import com.anathema_roguelike.main.Game
import com.anathema_roguelike.main.display.DungeonMap.DungeonLayer
import com.anathema_roguelike.main.display.VisualRepresentation
import com.anathema_roguelike.stats.effects.{Effect, HasEffect}
import com.anathema_roguelike.stats.locationstats.LocationStat
/**
 * A property attached to a dungeon [[Location]] (terrain features, walls,
 * doors, ...). Carries the visual representations for the normal and
 * fog-of-war layers plus physical parameters used elsewhere.
 *
 * @param visualRepresentation   how the property is drawn when visible
 * @param fogOfWarRepresentation how it is drawn in fog of war; when null,
 *                               falls back to `visualRepresentation`
 * @param foreground             whether it renders on the foreground layers
 * @param passable               whether this property can be moved through
 * @param opacity                opacity value exposed via getOpacity
 *                               (presumably 0..1 light/sight blocking — TODO confirm against lighting code)
 * @param damping                damping value exposed via getDamping
 *                               (semantics defined by callers — TODO confirm)
 */
abstract class LocationProperty(
    visualRepresentation: VisualRepresentation,
    fogOfWarRepresentation: VisualRepresentation,
    foreground: Boolean,
    passable: Boolean,
    opacity: Double,
    damping: Double) extends HasEffect[Effect[Location, LocationStat]] with HasLocation {

  // Unset until setLocation is called.
  private var location: Option[Location] = None
  private var brightness = 0.0

  // Layer selection depends on whether this is a foreground property.
  private val layer = if (foreground) DungeonLayer.FOREGROUND else DungeonLayer.FOG_OF_WAR_FOREGROUND
  private val fogOfWarLayer = if (foreground) DungeonLayer.FOG_OF_WAR_FOREGROUND else DungeonLayer.FOG_OF_WAR_BACKGROUND

  // NOTE(review): throws if accessed before setLocation — callers are assumed
  // to place the property first.
  override def getLocation: Location = location.get

  def setLocation(loc: Location): Unit = {
    // BUG FIX: the field is an Option[Location]; the original assigned the bare
    // Location (`location = loc`), which does not type-check. Wrap in Some.
    location = Some(loc)
  }

  def isPassable: Boolean = passable

  def getOpacity: Double = opacity

  def getDamping: Double = damping

  def getBrightness: Double = brightness

  protected def setBrightness(brightness: Double): Unit = {
    this.brightness = brightness
  }

  /** Draws the fog-of-war representation, but only once a location is set. */
  protected def renderToFogOfWar(): Unit = {
    location.foreach(l => {
      Game.getInstance.getMap.renderVisualRepresentation(fogOfWarLayer, l.getX, l.getY, getFogOfWarRepresentation)
    })
  }

  /** Draws both the visible representation and the fog-of-war copy. */
  def render(): Unit = {
    Game.getInstance.getMap.renderVisualRepresentation(layer, getX, getY, getRepresentation)
    renderToFogOfWar()
  }

  def getRepresentation: VisualRepresentation = visualRepresentation

  /** Fog-of-war representation, defaulting to the normal one when null was passed. */
  def getFogOfWarRepresentation: VisualRepresentation =
    if (fogOfWarRepresentation != null) fogOfWarRepresentation
    else getRepresentation
}
| carlminden/anathema-roguelike | src/com/anathema_roguelike/environment/LocationProperty.scala | Scala | gpl-3.0 | 2,720 |
package com.github.luzhuomi.regexphonetic
import com.github.luzhuomi.regexphonetic.SoundMap._
/** Vowel-related helpers built on top of [[SoundMap]]. */
object Vowel {
  // A vowel table is simply a sound map.
  type VowelTable = SoundMap

  // Vowels treated as "e"-class sounds.
  def eVowelsList: List[String] = "e" :: Nil
}
| luzhuomi/regexphonetic | src/main/scala/com/gitbhub/luzhuomi/regexphonetic/Vowel.scala | Scala | apache-2.0 | 174 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer (t3l@threelights.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze
package object initializers {

  /** Convenience syntax for scoping an [[InitializerBuilder]] to a module or reference. */
  final implicit class InitializerFunctions(ib: InitializerBuilder) {

    /** Restricts this initializer to the module with the given handle. */
    def forModule(moduleHandle: String): RestrictedInitializerBuilder = {
      val restricted = RestrictedInitializerBuilder(ib)
      restricted.setModuleHandle(moduleHandle)
    }

    /** Restricts this initializer to the reference with the given handle. */
    def forReference(referenceHandle: String): RestrictedInitializerBuilder = {
      val restricted = RestrictedInitializerBuilder(ib)
      restricted.setReferenceHandle(referenceHandle)
    }
  }

}
| bashimao/ltudl | blaze/src/main/scala/edu/latrobe/blaze/initializers/package.scala | Scala | apache-2.0 | 1,135 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.frontend.v2_3.ast
import org.neo4j.cypher.internal.frontend.v2_3.ast.Expression.SemanticContext
import org.neo4j.cypher.internal.frontend.v2_3.symbols._
import org.neo4j.cypher.internal.frontend.v2_3.test_helpers.CypherFunSuite
import org.neo4j.cypher.internal.frontend.v2_3.{DummyPosition, SemanticCheckResult, SemanticError, SemanticState}
class ListComprehensionTest extends CypherFunSuite {

  // Target expression whose possible types mix two collection types and a
  // non-collection type; only the collection types should survive filtering.
  val dummyExpression = DummyExpression(
    CTCollection(CTNode) | CTBoolean | CTCollection(CTString))

  // Without an extract expression, the comprehension keeps the collection
  // types of the inner expression (CTBoolean is dropped).
  test("withoutExtractExpressionShouldHaveCollectionTypesOfInnerExpression") {
    val filter = ListComprehension(Identifier("x")(DummyPosition(5)), dummyExpression, None, None)(DummyPosition(0))
    val result = filter.semanticCheck(Expression.SemanticContext.Simple)(SemanticState.clean)

    result.errors shouldBe empty
    filter.types(result.state) should equal(CTCollection(CTNode) | CTCollection(CTString))
  }

  // With an extract expression, result types are collections of the extract
  // expression's types instead.
  test("shouldHaveCollectionWithInnerTypesOfExtractExpression") {
    val extractExpression = DummyExpression(CTNode | CTNumber, DummyPosition(2))

    val filter = ListComprehension(Identifier("x")(DummyPosition(5)), dummyExpression, None, Some(extractExpression))(DummyPosition(0))
    val result = filter.semanticCheck(Expression.SemanticContext.Simple)(SemanticState.clean)

    result.errors shouldBe empty
    filter.types(result.state) should equal(CTCollection(CTNode) | CTCollection(CTNumber))
  }

  // The predicate must be checked in a scope where the iteration variable "x"
  // is bound with the inner types of the collection, and that binding must not
  // leak out of the comprehension.
  test("shouldSemanticCheckPredicateInStateContainingTypedIdentifier") {
    val error = SemanticError("dummy error", DummyPosition(8))
    val predicate = new DummyExpression(CTAny, DummyPosition(7)) {
      override def semanticCheck(ctx: SemanticContext) = s => {
        // inner types of CTCollection(CTNode) | CTCollection(CTString)
        s.symbolTypes("x") should equal(CTNode | CTString)
        SemanticCheckResult.error(s, error)
      }
    }

    val filter = ListComprehension(Identifier("x")(DummyPosition(2)), dummyExpression, Some(predicate), None)(DummyPosition(0))
    val result = filter.semanticCheck(Expression.SemanticContext.Simple)(SemanticState.clean)

    result.errors should equal(Seq(error))
    // "x" must not escape the comprehension's scope.
    result.state.symbol("x") should equal(None)
  }
}
| HuangLS/neo4j | community/cypher/frontend-2.3/src/test/scala/org/neo4j/cypher/internal/frontend/v2_3/ast/ListComprehensionTest.scala | Scala | apache-2.0 | 2,963 |
package cromwell.engine.backend
import com.google.api.services.genomics.model.Disk
import cromwell.engine.backend.runtimeattributes.{AttributeMap, CromwellRuntimeAttributes, ContinueOnReturnCodeFlag, ContinueOnReturnCodeSet}
import wdl4s.NamespaceWithWorkflow
import cromwell.engine.backend.CromwellRuntimeAttributeSpec._
import org.scalatest.{EitherValues, FlatSpec, Matchers}
object CromwellRuntimeAttributeSpec {
val WorkflowWithRuntime =
"""
|task ps {
| command {
| ps
| }
| output {
| File procs = stdout()
| }
| runtime {
| docker: "ubuntu:latest"
| }
|}
|
|task cgrep {
| String pattern
| File in_file
| command {
| grep '${pattern}' ${in_file} | wc -l
| }
| output {
| Int count = read_int(stdout())
| }
| runtime {
| docker: "ubuntu:latest"
| }
|}
|
|task wc {
| File in_file
| command {
| cat ${in_file} | wc -l
| }
| output {
| Int count = read_int(stdout())
| }
| runtime {
| docker: "ubuntu:latest"
| }
|}
|
|workflow three_step {
| call ps
| call cgrep {
| input: in_file=ps.procs
| }
| call wc {
| input: in_file=ps.procs
| }
|}
|
""".stripMargin
val WorkflowWithoutRuntime =
"""
|task hello {
| String addressee
| command {
| echo "Hello ${addressee}!"
| }
| output {
| String salutation = read_string(stdout())
| }
|}
|
|workflow hello {
| call hello
|}
""".stripMargin
val WorkflowWithFailOnStderr =
"""
|task echoWithFailOnStderr {
| command {
| echo 66555 >&2
| }
| runtime {
| failOnStderr: "true"
| }
|}
|task echoWithoutFailOnStderr {
| command {
| echo 66555 >&2
| }
| runtime {
| failOnStderr: "false"
| }
|}
|
|workflow echo_wf {
| call echoWithFailOnStderr
| call echoWithoutFailOnStderr
|}
""".stripMargin
val WorkflowWithContinueOnReturnCode =
"""
|task echoWithSingleContinueOnReturnCode {
| command {
| cat non_existent_file
| }
| runtime {
| continueOnReturnCode: 123
| }
|}
|task echoWithExpressionContinueOnReturnCode {
| command {
| cat non_existent_file
| }
| runtime {
| continueOnReturnCode: 123 + 321
| }
|}
|task echoWithListContinueOnReturnCode {
| command {
| cat non_existent_file
| }
| runtime {
| continueOnReturnCode: [0, 1, 2, 3]
| }
|}
|task echoWithTrueContinueOnReturnCode {
| command {
| cat non_existent_file
| }
| runtime {
| continueOnReturnCode: true
| }
|}
|task echoWithFalseContinueOnReturnCode {
| command {
| cat non_existent_file
| }
| runtime {
| continueOnReturnCode: false
| }
|}
|task echoWithTrueStringContinueOnReturnCode {
| command {
| cat non_existent_file
| }
| runtime {
| continueOnReturnCode: "true"
| }
|}
|task echoWithFalseStringContinueOnReturnCode {
| command {
| cat non_existent_file
| }
| runtime {
| continueOnReturnCode: "false"
| }
|}
|
|workflow echo_wf {
| call echoWithSingleContinueOnReturnCode
| call echoWithExpressionContinueOnReturnCode
| call echoWithListContinueOnReturnCode
| call echoWithTrueContinueOnReturnCode
| call echoWithFalseContinueOnReturnCode
| call echoWithTrueStringContinueOnReturnCode
| call echoWithFalseStringContinueOnReturnCode
|}
""".stripMargin
val WorkflowWithFullGooglyConfig =
"""
|task googly_task {
| command {
| echo "Hello JES!"
| }
| runtime {
| docker: "ubuntu:latest"
| memory: "4G"
| cpu: "3"
| defaultZones: "US_Metro US_Backwater"
| defaultDisks: "Disk1 3 SSD, Disk2 500 HDD"
| }
|}
|
|workflow googly_workflow {
| call googly_task
|}
""".stripMargin
val WorkflowWithoutGooglyConfig =
"""
|task googly_task {
| command {
| echo "Hello JES!"
| }
| runtime {
| docker: "ubuntu:latest"
| }
|}
|
|workflow googly_workflow {
| call googly_task
|}
""".stripMargin
val WorkflowWithLocalDiskGooglyConfig =
"""
|task googly_task {
| command {
| echo "Hello JES!"
| }
| runtime {
| docker: "ubuntu:latest"
| defaultDisks: "local-disk 123 HDD"
| }
|}
|
|workflow googly_workflow {
| call googly_task
|}
""".stripMargin
val WorkflowWithMessedUpMemory =
"""
|task messed_up_memory {
| command {
| echo "YO"
| }
| runtime {
| memory: "HI TY"
| }
|}
|
|workflow great_googly_moogly {
| call messed_up_memory
|}
""".stripMargin
val WorkflowWithMessedUpMemoryUnit =
"""
|task messed_up_memory {
| command {
| echo "YO"
| }
| runtime {
| memory: "5 TY"
| }
|}
|
|workflow great_googly_moogly {
| call messed_up_memory
|}
""".stripMargin
val WorkflowWithMessedUpLocalDisk =
"""
|task messed_up_disk {
| command {
| echo "YO"
| }
| runtime {
| docker: "ubuntu:latest"
| defaultDisks: "Disk1 123 LOCAL"
| }
|}
|
|workflow great_googly_moogly {
| call messed_up_disk
|}
""".stripMargin
val WorkflowWithMessedUpDiskSize =
"""
|task messed_up_disk {
| command {
| echo "YO"
| }
| runtime {
| docker: "ubuntu:latest"
| defaultDisks: "Disk1 123.0 SSD"
| }
|}
|
|workflow great_googly_moogly {
| call messed_up_disk
|}
""".stripMargin
val WorkflowWithMessedUpDiskType =
"""
|task messed_up_disk {
| command {
| echo "YO"
| }
| runtime {
| docker: "ubuntu:latest"
| defaultDisks: "Disk1 123 SDD"
| }
|}
|
|workflow great_googly_moogly {
| call messed_up_disk
|}
""".stripMargin
/** Test-only helpers for turning a workflow's calls into runtime attributes. */
implicit class EnhancedNamespace(val namespace: NamespaceWithWorkflow) extends AnyVal {
  // Attributes for the call at `index`, defaulting to the LOCAL backend.
  def toCromwellRuntimeAttribute(index: Int): CromwellRuntimeAttributes = toCromwellRuntimeAttribute(index, BackendType.LOCAL)

  // Attributes for the call at `index` under the given backend.
  def toCromwellRuntimeAttribute(index: Int, backendType: BackendType): CromwellRuntimeAttributes = {
    CromwellRuntimeAttributes(namespace.workflow.calls(index).task.runtimeAttributes, backendType)
  }

  // Attributes for every call in the workflow under the given backend.
  def toCromwellRuntimeAttributes(backendType: BackendType) = {
    namespace.workflow.calls map { x => CromwellRuntimeAttributes(x.task.runtimeAttributes, backendType) }
  }
}
}
/**
 * Exercises CromwellRuntimeAttributes parsing against the WDL fixtures in the
 * companion object: docker / failOnStderr / continueOnReturnCode handling, the
 * JES ("Googly") disk/zone/memory attributes, and the error messages produced
 * for malformed runtime sections.
 *
 * Change vs. original: removed unused local `val` bindings inside
 * `intercept` blocks — only the thrown exception matters there.
 */
class CromwellRuntimeAttributeSpec extends FlatSpec with Matchers with EitherValues {
  val NamespaceWithRuntime = NamespaceWithWorkflow.load(WorkflowWithRuntime)

  it should "have docker information" in {
    assert(NamespaceWithRuntime.workflow.calls forall { x =>
      CromwellRuntimeAttributes(x.task.runtimeAttributes, BackendType.LOCAL).docker.get == "ubuntu:latest"
    })
  }

  "WDL file with failOnStderr runtime" should "identify failOnStderr for (and only for) appropriate tasks" in {
    val namespaceWithFailOnStderr = NamespaceWithWorkflow.load(WorkflowWithFailOnStderr)
    val echoWithFailOnStderrIndex = namespaceWithFailOnStderr.workflow.calls.indexWhere(call => call.unqualifiedName == "echoWithFailOnStderr")
    assert(echoWithFailOnStderrIndex >= 0)
    assert(namespaceWithFailOnStderr.toCromwellRuntimeAttribute(echoWithFailOnStderrIndex).failOnStderr)

    val echoWithoutFailOnStderrIndex = namespaceWithFailOnStderr.workflow.calls.indexWhere(call => call.unqualifiedName == "echoWithoutFailOnStderr")
    assert(echoWithoutFailOnStderrIndex >= 0)
    assert(!namespaceWithFailOnStderr.toCromwellRuntimeAttribute(echoWithoutFailOnStderrIndex).failOnStderr)
  }

  "WDL file with continueOnReturnCode runtime" should "identify continueOnReturnCode for (and only for) appropriate tasks" in {
    val namespaceWithContinueOnReturnCode = NamespaceWithWorkflow.load(WorkflowWithContinueOnReturnCode)

    // Single integer literal -> set with one code.
    val echoWithSingleContinueOnReturnCodeIndex =
      namespaceWithContinueOnReturnCode.workflow.calls indexWhere { call =>
        call.unqualifiedName == "echoWithSingleContinueOnReturnCode"
      }
    echoWithSingleContinueOnReturnCodeIndex should be >= 0
    namespaceWithContinueOnReturnCode.toCromwellRuntimeAttribute(echoWithSingleContinueOnReturnCodeIndex).continueOnReturnCode should be (ContinueOnReturnCodeSet(Set(123)))

    // Expression evaluated at load time -> set with the computed code.
    val echoWithExpressionContinueOnReturnCodeIndex =
      namespaceWithContinueOnReturnCode.workflow.calls indexWhere { call =>
        call.unqualifiedName == "echoWithExpressionContinueOnReturnCode"
      }
    echoWithExpressionContinueOnReturnCodeIndex should be >= 0
    namespaceWithContinueOnReturnCode.toCromwellRuntimeAttribute(echoWithExpressionContinueOnReturnCodeIndex).continueOnReturnCode should be(ContinueOnReturnCodeSet(Set(444)))

    // Array literal -> set with all listed codes.
    val echoWithListContinueOnReturnCodeIndex =
      namespaceWithContinueOnReturnCode.workflow.calls indexWhere { call =>
        call.unqualifiedName == "echoWithListContinueOnReturnCode"
      }
    echoWithListContinueOnReturnCodeIndex should be >= 0
    namespaceWithContinueOnReturnCode.toCromwellRuntimeAttribute(echoWithListContinueOnReturnCodeIndex).continueOnReturnCode should be(ContinueOnReturnCodeSet(Set(0, 1, 2, 3)))

    // Boolean literals -> flag form.
    val echoWithTrueContinueOnReturnCodeIndex =
      namespaceWithContinueOnReturnCode.workflow.calls indexWhere { call =>
        call.unqualifiedName == "echoWithTrueContinueOnReturnCode"
      }
    echoWithTrueContinueOnReturnCodeIndex should be >= 0
    namespaceWithContinueOnReturnCode.toCromwellRuntimeAttribute(echoWithTrueContinueOnReturnCodeIndex).continueOnReturnCode should be(ContinueOnReturnCodeFlag(true))

    val echoWithFalseContinueOnReturnCodeIndex =
      namespaceWithContinueOnReturnCode.workflow.calls indexWhere { call =>
        call.unqualifiedName == "echoWithFalseContinueOnReturnCode"
      }
    echoWithFalseContinueOnReturnCodeIndex should be >= 0
    namespaceWithContinueOnReturnCode.toCromwellRuntimeAttribute(echoWithFalseContinueOnReturnCodeIndex).continueOnReturnCode should be(ContinueOnReturnCodeFlag(false))

    // Boolean-looking strings are coerced to the flag form.
    val echoWithTrueStringContinueOnReturnCodeIndex =
      namespaceWithContinueOnReturnCode.workflow.calls indexWhere { call =>
        call.unqualifiedName == "echoWithTrueStringContinueOnReturnCode"
      }
    echoWithTrueStringContinueOnReturnCodeIndex should be >= 0
    namespaceWithContinueOnReturnCode.toCromwellRuntimeAttribute(echoWithTrueStringContinueOnReturnCodeIndex).continueOnReturnCode should be(ContinueOnReturnCodeFlag(true))

    val echoWithFalseStringContinueOnReturnCodeIndex =
      namespaceWithContinueOnReturnCode.workflow.calls indexWhere { call =>
        call.unqualifiedName == "echoWithFalseStringContinueOnReturnCode"
      }
    echoWithFalseStringContinueOnReturnCodeIndex should be >= 0
    namespaceWithContinueOnReturnCode.toCromwellRuntimeAttribute(echoWithFalseStringContinueOnReturnCodeIndex).continueOnReturnCode should be(ContinueOnReturnCodeFlag(false))
  }

  "WDL file with Googly config" should "parse up properly" in {
    val namespaceWithGooglyConfig = NamespaceWithWorkflow.load(WorkflowWithFullGooglyConfig)
    val calls = namespaceWithGooglyConfig.workflow.calls
    val callIndex = calls.indexWhere(call => call.unqualifiedName == "googly_task")
    callIndex should be >= 0

    val googlyCall = calls(callIndex)
    val attributes = CromwellRuntimeAttributes(googlyCall.task.runtimeAttributes, BackendType.LOCAL)
    attributes.cpu shouldBe 3
    val firstDisk = new Disk().setName("Disk1").setSizeGb(3L).setType("PERSISTENT_SSD").setAutoDelete(true)
    val secondDisk = new Disk().setName("Disk2").setSizeGb(500L).setType("PERSISTENT_HDD").setAutoDelete(true)
    // The localization disk is always appended to user-declared disks.
    val expectedDisks = Vector(firstDisk, secondDisk, CromwellRuntimeAttributes.LocalizationDisk)
    attributes.defaultDisks should contain theSameElementsAs expectedDisks
    val expectedZones = Vector("US_Metro", "US_Backwater")
    attributes.defaultZones foreach { z => expectedZones should contain (z) }
    attributes.memoryGB shouldBe 4
  }

  "WDL file with no Googly config" should "also parse up properly to defaults" in {
    val NamespaceWithoutGooglyConfig = NamespaceWithWorkflow.load(WorkflowWithoutGooglyConfig)
    val calls = NamespaceWithoutGooglyConfig.workflow.calls
    val callIndex = calls.indexWhere(call => call.unqualifiedName == "googly_task")
    callIndex should be >= 0

    val googlyCall = calls(callIndex)
    val attributes = CromwellRuntimeAttributes(googlyCall.task.runtimeAttributes, BackendType.LOCAL)
    attributes.cpu shouldBe CromwellRuntimeAttributes.Defaults.Cpu
    attributes.defaultDisks foreach { d => CromwellRuntimeAttributes.Defaults.Disk should contain (d) }
    attributes.defaultZones foreach { z => CromwellRuntimeAttributes.Defaults.Zones should contain (z) }
    attributes.memoryGB shouldBe CromwellRuntimeAttributes.Defaults.Memory
  }

  "WDL file with local disk Googly config" should "parse up properly" in {
    val NamespaceWithoutGooglyConfig = NamespaceWithWorkflow.load(WorkflowWithLocalDiskGooglyConfig)
    val calls = NamespaceWithoutGooglyConfig.workflow.calls
    val callIndex = calls.indexWhere(call => call.unqualifiedName == "googly_task")
    callIndex should be >= 0

    val googlyCall = calls(callIndex)
    val attributes = CromwellRuntimeAttributes(googlyCall.task.runtimeAttributes, BackendType.LOCAL)
    attributes.cpu shouldBe CromwellRuntimeAttributes.Defaults.Cpu
    val localHddDisk = new Disk().setName("local-disk").setSizeGb(123L).setType("PERSISTENT_HDD").setAutoDelete(true)
    attributes.defaultDisks should contain theSameElementsAs Vector(localHddDisk)
    attributes.defaultZones should contain theSameElementsAs CromwellRuntimeAttributes.Defaults.Zones
    attributes.memoryGB shouldBe CromwellRuntimeAttributes.Defaults.Memory
  }

  "WDL file with Googly config" should "issue warnings on the local backend" in {
    val namespace = NamespaceWithWorkflow.load(WorkflowWithFullGooglyConfig)
    val attributeMap = AttributeMap(namespace.workflow.calls.head.task.runtimeAttributes.attrs)
    val expectedString = "Found unsupported keys for backend 'LOCAL': cpu, defaultDisks, defaultZones, memory"
    attributeMap.unsupportedKeys(BackendType.LOCAL).head shouldBe expectedString
  }

  "WDL file without runtime section" should "not be accepted on JES backend as it has no docker" in {
    val ex = intercept[IllegalArgumentException] {
      NamespaceWithWorkflow.load(WorkflowWithoutRuntime).toCromwellRuntimeAttributes(BackendType.JES)
    }
    ex.getMessage should include ("Missing required keys in runtime configuration for backend 'JES': docker")
  }

  "WDL file with runtime section but no docker" should "not be accepted on JES backend" in {
    val ex = intercept[IllegalArgumentException] {
      NamespaceWithWorkflow.load(WorkflowWithFailOnStderr).toCromwellRuntimeAttribute(0, BackendType.JES)
    }
    ex.getMessage should include ("Missing required keys in runtime configuration for backend 'JES': docker")
  }

  "WDL file with a seriously screwed up memory runtime" should "not parse" in {
    val ex = intercept[IllegalArgumentException] {
      NamespaceWithWorkflow.load(WorkflowWithMessedUpMemory).toCromwellRuntimeAttributes(BackendType.LOCAL)
    }
    ex.getMessage should include ("should be of the form X Unit")
  }

  "WDL file with an invalid memory unit" should "say so" in {
    val ex = intercept[IllegalArgumentException] {
      NamespaceWithWorkflow.load(WorkflowWithMessedUpMemoryUnit).toCromwellRuntimeAttributes(BackendType.LOCAL)
    }
    ex.getMessage should include ("is an invalid memory unit")
  }

  "WDL file with an invalid local disk" should "say so" in {
    val ex = intercept[IllegalArgumentException] {
      NamespaceWithWorkflow.load(WorkflowWithMessedUpLocalDisk).toCromwellRuntimeAttributes(BackendType.JES)
    }
    ex.getMessage should include(
      "'Disk1 123 LOCAL' should be in form 'NAME SIZE TYPE', with SIZE blank for LOCAL, otherwise SIZE in GB")
  }

  "WDL file with an invalid disk size" should "say so" in {
    val ex = intercept[IllegalArgumentException] {
      NamespaceWithWorkflow.load(WorkflowWithMessedUpDiskSize).toCromwellRuntimeAttributes(BackendType.JES)
    }
    ex.getMessage should include("123.0 not convertible to a Long")
  }

  "WDL file with an invalid disk type" should "say so" in {
    val ex = intercept[IllegalArgumentException] {
      NamespaceWithWorkflow.load(WorkflowWithMessedUpDiskType).toCromwellRuntimeAttributes(BackendType.JES)
    }
    ex.getMessage should include("Disk TYPE SDD should be one of LOCAL, SSD, HDD")
  }
}
| dgtester/cromwell | src/test/scala/cromwell/engine/backend/CromwellRuntimeAttributeSpec.scala | Scala | bsd-3-clause | 17,667 |
package persistence.property;
import org.hibernate.mapping._
import org.hibernate.tuple.entity._
import org.hibernate.property.access.spi.{Getter, Setter}
// The way this class overrides the property-accessors is supremely
// hacky. Keep an eye on the relevant issue at:
// https://hibernate.atlassian.net/browse/HCANN-48. If that issue is
// resolved, we can remove this class.
//
// Also, in here we are assuming that all property access will be through the
// scala getter/setters. So, this doesn't allow for java-bean style
// getters and setters. If we need to allow that, modify the code to read
// appropriate Access annotations and behave accordingly.
//
// Also, we still need to put the org.hibernate.annotations.Type
// annotations everywhere the Option types are used. Try to find a way to
// do away with that as well.
/**
 * Entity tuplizer that forces every mapped property through our Scala-aware
 * PropertyAccessStrategy before delegating to Hibernate's default pojo
 * tuplizer. See the file-header comment for the rationale (HCANN-48).
 *
 * Change vs. original: the duplicated accessor-name mutation is factored
 * into a private helper and redundant semicolons were dropped.
 */
class CustomPojoEntityTuplizer(emm: EntityMetamodel, pc: PersistentClass) extends PojoEntityTuplizer(emm, pc) {

  // Point the property at our access strategy; mutates the mapping in place.
  private def useScalaAccessStrategy(mappedProperty: Property): Unit =
    mappedProperty.setPropertyAccessorName("persistence.property.ScalaPropertyAccessStrategy")

  override def buildPropertyGetter(mappedProperty: Property, mappedEntity: PersistentClass) = {
    useScalaAccessStrategy(mappedProperty)
    super.buildPropertyGetter(mappedProperty, mappedEntity)
  }

  override def buildPropertySetter(mappedProperty: Property, mappedEntity: PersistentClass) = {
    useScalaAccessStrategy(mappedProperty)
    super.buildPropertySetter(mappedProperty, mappedEntity)
  }
}
| Bhashit/better-hibernate-with-scala | hibernate-property-access/app/persistence/property/CustomPojoEntityTuplizer.scala | Scala | unlicense | 1,464 |
package com.softwaremill.codebrag
import com.typesafe.config.Config
import com.softwaremill.codebrag.common.config.ConfigWithDefault
/**
 * Web-server settings read from Typesafe config with fallback defaults.
 * Mix into a component that can supply [[rootConfig]].
 */
trait WebServerConfig extends ConfigWithDefault {
  // Underlying config object the getters below read from.
  def rootConfig: Config

  // Interface the HTTP server binds to; "0.0.0.0" means all interfaces.
  lazy val webServerHost: String = getString("codebrag.web-server-host", "0.0.0.0")
  // TCP port the HTTP server listens on.
  lazy val webServerPort: Int = getInt("codebrag.web-server-port", 8080)
}
| softwaremill/codebrag | codebrag-dist/src/main/scala/com/softwaremill/codebrag/WebServerConfig.scala | Scala | agpl-3.0 | 370 |
import types.{MalList, _list, _list_Q, MalVector, MalHashMap,
Func, MalFunction}
import env.Env
/**
 * Step 8 of the "make-a-lisp" interpreter: adds macros (defmacro!,
 * macroexpand) on top of the quoting/TCO machinery from earlier steps.
 * NOTE: EVAL's while-loop implements tail-call optimization by mutating
 * `ast`/`env` and looping instead of recursing — statement order matters.
 */
object step8_macros {
  // read
  def READ(str: String): Any = {
    reader.read_str(str)
  }

  // eval
  // True for a non-empty list/vector — the only forms quasiquote recurses into.
  def is_pair(x: Any): Boolean = {
    types._sequential_Q(x) && x.asInstanceOf[MalList].value.length > 0
  }

  // Rewrites a quasiquoted form into plain quote/cons/concat calls.
  def quasiquote(ast: Any): Any = {
    if (!is_pair(ast)) {
      return _list(Symbol("quote"), ast)
    } else {
      val a0 = ast.asInstanceOf[MalList](0)
      if (types._symbol_Q(a0) &&
          a0.asInstanceOf[Symbol].name == "unquote") {
        return ast.asInstanceOf[MalList](1)
      } else if (is_pair(a0)) {
        val a00 = a0.asInstanceOf[MalList](0)
        if (types._symbol_Q(a00) &&
            a00.asInstanceOf[Symbol].name == "splice-unquote") {
          return _list(Symbol("concat"),
                       a0.asInstanceOf[MalList](1),
                       quasiquote(ast.asInstanceOf[MalList].drop(1)))
        }
      }
      return _list(Symbol("cons"),
                   quasiquote(a0),
                   quasiquote(ast.asInstanceOf[MalList].drop(1)))
    }
  }

  // True when ast is a list whose head resolves to a function flagged ismacro.
  def is_macro_call(ast: Any, env: Env): Boolean = {
    ast match {
      case ml: MalList => {
        if (ml.value.length > 0 &&
            types._symbol_Q(ml(0)) &&
            env.find(ml(0).asInstanceOf[Symbol]) != null) {
          env.get(ml(0).asInstanceOf[Symbol]) match {
            case f: MalFunction => return f.ismacro
            case _ => return false
          }
        }
        return false
      }
      case _ => return false
    }
  }

  // Repeatedly applies macros to ast until the head is no longer a macro call.
  def macroexpand(orig_ast: Any, env: Env): Any = {
    var ast = orig_ast;
    while (is_macro_call(ast, env)) {
      ast.asInstanceOf[MalList].value match {
        case f :: args => {
          val mac = env.get(f.asInstanceOf[Symbol])
          ast = mac.asInstanceOf[MalFunction](args)
        }
        case _ => throw new Exception("macroexpand: invalid call")
      }
    }
    ast
  }

  // Evaluates non-apply forms: symbol lookup and element-wise eval of collections.
  def eval_ast(ast: Any, env: Env): Any = {
    ast match {
      case s : Symbol => env.get(s)
      case v: MalVector => v.map(EVAL(_, env))
      case l: MalList => l.map(EVAL(_, env))
      case m: MalHashMap => {
        m.map{case (k,v) => (k, EVAL(v, env))}
      }
      case _ => ast
    }
  }

  // Core evaluator. Cases that "continue loop (TCO)" reassign ast/env and
  // fall through to the next while iteration instead of recursing.
  def EVAL(orig_ast: Any, orig_env: Env): Any = {
   var ast = orig_ast; var env = orig_env;
   while (true) {

    //println("EVAL: " + printer._pr_str(ast,true))
    if (!_list_Q(ast))
      return eval_ast(ast, env)

    // apply list
    ast = macroexpand(ast, env)
    if (!_list_Q(ast))
      return eval_ast(ast, env)

    ast.asInstanceOf[MalList].value match {
      case Nil => {
        return ast
      }
      case Symbol("def!") :: a1 :: a2 :: Nil => {
        return env.set(a1.asInstanceOf[Symbol], EVAL(a2, env))
      }
      case Symbol("let*") :: a1 :: a2 :: Nil => {
        val let_env = new Env(env)
        // Bindings come in (name value) pairs.
        for (g <- a1.asInstanceOf[MalList].value.grouped(2)) {
          let_env.set(g(0).asInstanceOf[Symbol],EVAL(g(1),let_env))
        }
        env = let_env
        ast = a2   // continue loop (TCO)
      }
      case Symbol("quote") :: a1 :: Nil => {
        return a1
      }
      case Symbol("quasiquote") :: a1 :: Nil => {
        ast = quasiquote(a1)  // continue loop (TCO)
      }
      case Symbol("defmacro!") :: a1 :: a2 :: Nil => {
        val f = EVAL(a2, env)
        f.asInstanceOf[MalFunction].ismacro = true
        return env.set(a1.asInstanceOf[Symbol], f)
      }
      case Symbol("macroexpand") :: a1 :: Nil => {
        return macroexpand(a1, env)
      }
      case Symbol("do") :: rest => {
        // Evaluate all but the last form for effect; TCO into the last.
        eval_ast(_list(rest.slice(0,rest.length-1):_*), env)
        ast = ast.asInstanceOf[MalList].value.last  // continue loop (TCO)
      }
      case Symbol("if") :: a1 :: a2 :: rest => {
        val cond = EVAL(a1, env)
        if (cond == null || cond == false) {
          // No else branch -> nil.
          if (rest.length == 0) return null
          ast = rest(0)  // continue loop (TCO)
        } else {
          ast = a2  // continue loop (TCO)
        }
      }
      case Symbol("fn*") :: a1 :: a2 :: Nil => {
        return new MalFunction(a2, env, a1.asInstanceOf[MalList],
          (args: List[Any]) => {
            EVAL(a2, new Env(env, types._toIter(a1), args.iterator))
          }
        )
      }
      case _ => {
        // function call
        eval_ast(ast, env).asInstanceOf[MalList].value match {
          case f :: el => {
            f match {
              case fn: MalFunction => {
                env = fn.gen_env(el)
                ast = fn.ast  // continue loop (TCO)
              }
              case fn: Func => {
                return fn(el)
              }
              case _ => {
                throw new Exception("attempt to call non-function: " + f)
              }
            }
          }
          case _ => throw new Exception("invalid apply")
        }
      }
    }
   }
  }

  // print
  def PRINT(exp: Any): String = {
    printer._pr_str(exp, true)
  }

  // repl
  // Builds the root environment, installs core + bootstrap definitions,
  // then either runs a file (argv(0)) or enters the interactive loop.
  def main(args: Array[String]) = {
    val repl_env: Env = new Env()
    val REP = (str: String) => PRINT(EVAL(READ(str), repl_env))

    // core.scala: defined using scala
    core.ns.map{case (k: String,v: Any) => {
      repl_env.set(Symbol(k), new Func(v))
    }}
    repl_env.set(Symbol("eval"), new Func((a: List[Any]) => EVAL(a(0), repl_env)))
    repl_env.set(Symbol("*ARGV*"), _list(args.slice(1,args.length):_*))

    // core.mal: defined using the language itself
    REP("(def! not (fn* (a) (if a false true)))")
    REP("(def! load-file (fn* (f) (eval (read-string (str \\"(do \\" (slurp f) \\")\\")))))")
    REP("(defmacro! cond (fn* (& xs) (if (> (count xs) 0) (list 'if (first xs) (if (> (count xs) 1) (nth xs 1) (throw \\"odd number of forms to cond\\")) (cons 'cond (rest (rest xs)))))))")
    REP("(defmacro! or (fn* (& xs) (if (empty? xs) nil (if (= 1 (count xs)) (first xs) `(let* (or_FIXME ~(first xs)) (if or_FIXME or_FIXME (or ~@(rest xs))))))))")

    // Batch mode: run the file given on the command line and exit.
    if (args.length > 0) {
      REP("(load-file \\"" + args(0) + "\\")")
      System.exit(0)
    }

    // repl loop
    var line:String = null
    while ({line = readLine("user> "); line != null}) {
      try {
        println(REP(line))
      } catch {
        // NOTE(review): catches all Throwables, including fatal errors —
        // intentional for a REPL, but worth confirming.
        case e : Throwable => {
          println("Error: " + e.getMessage)
          println("    " + e.getStackTrace.mkString("\\n    "))
        }
      }
    }
  }
}
// vim: ts=2:sw=2
| jwalsh/mal | scala/step8_macros.scala | Scala | mpl-2.0 | 6,458 |
package models
import com.mohiva.play.silhouette.core.{ LoginInfo, Identity }
import java.util.UUID
import com.conekta.Customer
import models.daos.SubscriptionDAO
import models.daos.ChargeDAO
/**
* The user object.
*
* @param userID The unique ID of the user.
* @param loginInfo The linked login info.
* @param firstName Maybe the first name of the authenticated user.
* @param lastName Maybe the last name of the authenticated user.
* @param fullName Maybe the full name of the authenticated user.
* @param email Maybe the email of the authenticated provider.
* @param avatarURL Maybe the avatar URL of the authenticated provider.
*/
case class User(
  userID: UUID,
  loginInfo: LoginInfo,
  firstName: Option[String],
  lastName: Option[String],
  fullName: Option[String],
  email: Option[String],
  avatarURL: Option[String],
  conektaUserId: Option[String]) extends Identity {

  /** The user's current subscription, if one exists. */
  def currentSubscription(): Option[Subscription] = {
    SubscriptionDAO.findByUserId(userID.toString())
  }

  /** All charges recorded for this user. */
  def charges(): List[Charge] = {
    ChargeDAO.findAllForUserId(userID.toString)
  }

  /**
   * Stores `cardToken` as the user's card on the Conekta customer: creates
   * a card when none exists, otherwise replaces the first stored card.
   * Throws if the user has no Conekta customer id.
   */
  def saveCard(cardToken: String) = {
    val customerId = conektaUserId.getOrElse(throw new RuntimeException("Can't."))
    val customer = Customer.find(customerId)
    if (customer.cards.isEmpty) {
      customer.createCard(cardToken)
    } else {
      customer.cards.head.update(Map("token" -> cardToken))
    }
  }

  /**
   * True when the user has a Conekta customer with at least one stored card.
   *
   * Bug fix: the original wrote `if (!conektaUserId.isDefined) { false }`,
   * whose result was discarded, and then unconditionally called
   * `conektaUserId.get` — throwing for users with no Conekta id instead of
   * returning false.
   */
  def hasCard(): Boolean =
    conektaUserId.exists(id => !Customer.find(id).cards.isEmpty)
}
} | Wirwing/hello-conekta-play-framework | app/models/User.scala | Scala | mit | 1,594 |
package lib
import play.api.libs.json.Json
import io.apibuilder.spec.v0.models._
import io.apibuilder.api.v0.models.{Diff, DiffBreaking, DiffNonBreaking}
/**
* Takes two service descriptions. Returns a list of changes from
* service a to service b. The list of changes is intended to be
* legible by a human.
*/
case class ServiceDiff(
a: Service,
b: Service
) {
  // All detected differences between service `a` and service `b`, in a
  // stable, human-legible order: top-level metadata first, then headers and
  // imports, then type declarations, then resources and annotations.
  val differences: Seq[Diff] = Seq(
    diffApidoc(),
    diffInfo(),
    diffName(),
    diffOrganization(),
    diffApplication(),
    diffNamespace(),
    diffVersion(),
    diffBaseUrl(),
    diffDescription(),
    diffAttributes(),
    diffHeaders(),
    diffImports(),
    diffEnums(),
    diffUnions(),
    diffModels(),
    diffResources(),
    diffAnnotations()
  ).flatten
  // Apidoc generator version change — informational only (non-breaking).
  private[this] def diffApidoc(): Seq[Diff] = {
    Helpers.diffStringNonBreaking("apidoc/version", a.apidoc.version, b.apidoc.version)
  }

  // Contact and license metadata changes.
  private[this] def diffInfo(): Seq[Diff] = {
    diffContact() ++ diffLicense()
  }

  private[this] def diffContact(): Seq[Diff] = {
    Helpers.diffOptionalStringNonBreaking("contact/name", a.info.contact.flatMap(_.name), b.info.contact.flatMap(_.name)) ++
    Helpers.diffOptionalStringNonBreaking("contact/url", a.info.contact.flatMap(_.url), b.info.contact.flatMap(_.url)) ++
    Helpers.diffOptionalStringNonBreaking("contact/email", a.info.contact.flatMap(_.email), b.info.contact.flatMap(_.email))
  }

  private[this] def diffLicense(): Seq[Diff] = {
    Helpers.diffOptionalStringNonBreaking("license/name", a.info.license.map(_.name), b.info.license.map(_.name)) ++
    Helpers.diffOptionalStringNonBreaking("license/url", a.info.license.flatMap(_.url), b.info.license.flatMap(_.url))
  }

  private[this] def diffName(): Seq[Diff] = {
    Helpers.diffStringNonBreaking("name", a.name, b.name)
  }

  private[this] def diffOrganization(): Seq[Diff] = {
    Helpers.diffStringNonBreaking("organization/key", a.organization.key, b.organization.key)
  }

  private[this] def diffApplication(): Seq[Diff] = {
    Helpers.diffStringNonBreaking("application/key", a.application.key, b.application.key)
  }

  // Namespace changes break generated-client package names, hence breaking.
  private[this] def diffNamespace(): Seq[Diff] = {
    Helpers.diffStringBreaking("namespace", a.namespace, b.namespace)
  }

  private[this] def diffVersion(): Seq[Diff] = {
    Helpers.diffStringNonBreaking("version", a.version, b.version)
  }

  private[this] def diffBaseUrl(): Seq[Diff] = {
    Helpers.diffOptionalStringNonBreaking("base_url", a.baseUrl, b.baseUrl)
  }

  private[this] def diffDescription(): Seq[Diff] = {
    Helpers.diffOptionalStringNonBreaking("description", a.description, b.description)
  }

  private[this] def diffAttributes(): Seq[Diff] = {
    Helpers.diffAttributes("attributes", a.attributes, b.attributes)
  }
private[this] def diffHeaders(): Seq[Diff] = {
val added = b.headers.map(_.name).filter(h => !a.headers.exists(_.name == h))
a.headers.flatMap { headerA =>
b.headers.find(_.name == headerA.name) match {
case None => Some(DiffNonBreaking(Helpers.removed("header", headerA.name)))
case Some(headerB) => diffHeader(headerA, headerB)
}
} ++ b.headers.find( h => added.contains(h.name) ).map { h =>
h.required match {
case false => DiffNonBreaking(Helpers.added("optional header", h.name))
case true => DiffBreaking(Helpers.added("required header", h.name))
}
}
}
  // Field-by-field diff of one header; a type change is breaking.
  private[this] def diffHeader(a: Header, b: Header): Seq[Diff] = {
    assert(a.name == b.name, "Header names must be the same")
    val prefix = s"header ${a.name}"

    Helpers.diffStringBreaking(s"$prefix type", a.`type`, b.`type`) ++
    Helpers.diffOptionalStringNonBreaking(s"$prefix description", a.description, b.description) ++
    Helpers.diffDeprecation(prefix, a.deprecation, b.deprecation) ++
    Helpers.diffRequired(prefix, a.required, b.required) ++
    Helpers.diffDefault(prefix, a.default, b.default) ++
    Helpers.diffAttributes(prefix, a.attributes, b.attributes)
  }

  // Imports are matched by URI; additions and removals are non-breaking.
  private[this] def diffImports(): Seq[Diff] = {
    a.imports.flatMap { importA =>
      b.imports.find(_.uri == importA.uri) match {
        case None => Some(DiffNonBreaking(Helpers.removed("import", importA.uri)))
        case Some(importB) => diffImport(importA, importB)
      }
    } ++ Helpers.findNew("import", a.imports.map(_.uri), b.imports.map(_.uri))
  }

  // Field-by-field diff of one import (all informational).
  private[this] def diffImport(a: Import, b: Import): Seq[Diff] = {
    assert(a.uri == b.uri, "Import uri's must be the same")
    val prefix = s"import ${a.uri}"

    Helpers.diffStringNonBreaking(s"$prefix namespace", a.namespace, b.namespace) ++
    Helpers.diffStringNonBreaking(s"$prefix organization/key", a.organization.key, b.organization.key) ++
    Helpers.diffStringNonBreaking(s"$prefix application/key", a.application.key, b.application.key) ++
    Helpers.diffStringNonBreaking(s"$prefix version", a.version, b.version) ++
    Helpers.diffArrayNonBreaking(s"$prefix enums", a.enums, b.enums) ++
    Helpers.diffArrayNonBreaking(s"$prefix unions", a.unions, b.unions) ++
    Helpers.diffArrayNonBreaking(s"$prefix models", a.models, b.models)
  }
  // Enums matched by name; removing an enum is breaking.
  private[this] def diffEnums(): Seq[Diff] = {
    a.enums.flatMap { enumA =>
      b.enums.find(_.name == enumA.name) match {
        case None => Some(DiffBreaking(Helpers.removed("enum", enumA.name)))
        case Some(enumB) => diffEnum(enumA, enumB)
      }
    } ++ Helpers.findNew("enum", a.enums.map(_.name), b.enums.map(_.name))
  }

  private[this] def diffEnum(a: Enum, b: Enum): Seq[Diff] = {
    assert(a.name == b.name, "Enum name's must be the same")
    val prefix = s"enum ${a.name}"

    Helpers.diffStringNonBreaking(s"$prefix plural", a.plural, b.plural) ++
    Helpers.diffOptionalStringNonBreaking(s"$prefix description", a.description, b.description) ++
    Helpers.diffAttributes(prefix, a.attributes, b.attributes) ++
    Helpers.diffDeprecation(prefix, a.deprecation, b.deprecation) ++
    diffEnumValues(a.name, a.values, b.values)
  }

  // Removing an enum value is breaking (clients may send/expect it).
  private[this] def diffEnumValues(enumName: String, a: Seq[EnumValue], b: Seq[EnumValue]): Seq[Diff] = {
    val prefix = s"enum $enumName value"

    a.flatMap { valueA =>
      b.find(_.name == valueA.name) match {
        case None => Some(DiffBreaking(Helpers.removed(prefix, valueA.name)))
        case Some(valueB) => diffEnumValue(enumName, valueA, valueB)
      }
    } ++ Helpers.findNew(prefix, a.map(_.name), b.map(_.name))
  }

  private[this] def diffEnumValue(enumName: String, a: EnumValue, b: EnumValue): Seq[Diff] = {
    assert(a.name == b.name, "Enum value name's must be the same")
    val prefix = s"enum $enumName value ${a.name}"

    Helpers.diffOptionalStringNonBreaking(s"$prefix description", a.description, b.description) ++
    Helpers.diffAttributes(prefix, a.attributes, b.attributes) ++
    Helpers.diffDeprecation(prefix, a.deprecation, b.deprecation)
  }
  // Unions matched by name; removing a union is breaking.
  private[this] def diffUnions(): Seq[Diff] = {
    a.unions.flatMap { unionA =>
      b.unions.find(_.name == unionA.name) match {
        case None => Some(DiffBreaking(Helpers.removed("union", unionA.name)))
        case Some(unionB) => diffUnion(unionA, unionB)
      }
    } ++ Helpers.findNew("union", a.unions.map(_.name), b.unions.map(_.name))
  }

  private[this] def diffUnion(a: Union, b: Union): Seq[Diff] = {
    assert(a.name == b.name, "Union name's must be the same")
    val prefix = s"union ${a.name}"

    // Discriminator changes alter the JSON wire format -> breaking.
    Helpers.diffOptionalStringBreaking(s"$prefix discriminator", a.discriminator, b.discriminator) ++
    Helpers.diffStringNonBreaking(s"$prefix plural", a.plural, b.plural) ++
    Helpers.diffOptionalStringNonBreaking(s"$prefix description", a.description, b.description) ++
    Helpers.diffAttributes(prefix, a.attributes, b.attributes) ++
    Helpers.diffDeprecation(prefix, a.deprecation, b.deprecation) ++
    diffUnionTypeDefault(a, b) ++
    diffUnionTypes(a.name, a.types, b.types)
  }

  // Compares which member type (if any) is flagged as the union's default.
  // Introducing a default is non-breaking; changing or removing it is breaking.
  private[this] def diffUnionTypeDefault(a: Union, b: Union): Seq[Diff] = {
    val prefix = s"union ${a.name} default type"
    val defaultTypeA: Option[String] = a.types.find(_.default.getOrElse(false)).map(_.`type`)
    val defaultTypeB: Option[String] = b.types.find(_.default.getOrElse(false)).map(_.`type`)
    (defaultTypeA, defaultTypeB) match {
      case (None, None) => Nil
      case (None, Some(typeNameB)) => Seq(DiffNonBreaking(Helpers.added(prefix, typeNameB)))
      case (Some(typeNameA), Some(typeNameB)) if typeNameA == typeNameB => Nil
      case (Some(typeNameA), Some(typeNameB)) => Seq(DiffBreaking(Helpers.changed(prefix, typeNameA, typeNameB)))
      case (Some(typeNameA), None) => Seq(DiffBreaking(Helpers.removed(prefix, typeNameA)))
    }
  }

  // Removing a member type is breaking; additions are reported by findNew.
  private[this] def diffUnionTypes(unionName: String, a: Seq[UnionType], b: Seq[UnionType]): Seq[Diff] = {
    val prefix = s"union $unionName type"

    a.flatMap { typeA =>
      b.find(_.`type` == typeA.`type`) match {
        case None => Some(DiffBreaking(Helpers.removed(prefix, typeA.`type`)))
        case Some(typeB) => diffUnionType(unionName, typeA, typeB)
      }
    } ++ Helpers.findNew(prefix, a.map(_.`type`), b.map(_.`type`))
  }

  private[this] def diffUnionType(unionName: String, a: UnionType, b: UnionType): Seq[Diff] = {
    assert(a.`type` == b.`type`, "Union type name's must be the same")
    val prefix = s"union $unionName type ${a.`type`}"

    Helpers.diffOptionalStringNonBreaking(s"$prefix description", a.description, b.description) ++
    Helpers.diffAttributes(prefix, a.attributes, b.attributes) ++
    Helpers.diffDeprecation(prefix, a.deprecation, b.deprecation)
  }
  // Models matched by name; removing a model is breaking.
  private[this] def diffModels(): Seq[Diff] = {
    a.models.flatMap { modelA =>
      b.models.find(_.name == modelA.name) match {
        case None => Some(DiffBreaking(Helpers.removed("model", modelA.name)))
        case Some(modelB) => diffModel(modelA, modelB)
      }
    } ++ Helpers.findNew("model", a.models.map(_.name), b.models.map(_.name))
  }

  private[this] def diffModel(a: Model, b: Model): Seq[Diff] = {
    assert(a.name == b.name, "Model name's must be the same")
    val prefix = s"model ${a.name}"

    Helpers.diffStringNonBreaking(s"$prefix plural", a.plural, b.plural) ++
    Helpers.diffOptionalStringNonBreaking(s"$prefix description", a.description, b.description) ++
    Helpers.diffAttributes(prefix, a.attributes, b.attributes) ++
    Helpers.diffDeprecation(prefix, a.deprecation, b.deprecation) ++
    diffFields(a.name, a.fields, b.fields)
  }

  // Removed fields are breaking. Added fields are breaking only when
  // required AND lacking a default (clients can't supply them).
  private[this] def diffFields(modelName: String, a: Seq[Field], b: Seq[Field]): Seq[Diff] = {
    val added = b.map(_.name).filter(h => a.find(_.name == h).isEmpty)
    val prefix = s"model $modelName"

    a.flatMap { fieldA =>
      b.find(_.name == fieldA.name) match {
        case None => Some(DiffBreaking(Helpers.removed(s"$prefix field", fieldA.name)))
        case Some(fieldB) => diffField(modelName, fieldA, fieldB)
      }
    } ++ b.filter( f => added.contains(f.name) ).map { f =>
      (f.required, f.default) match {
        case (false, None) => DiffNonBreaking(Helpers.added(s"$prefix optional field", f.name))
        case (false, Some(default)) => DiffNonBreaking(Helpers.added(s"$prefix optional field", s"${f.name}, defaults to ${Text.truncate(default)}"))
        case (true, None) => DiffBreaking(Helpers.added(s"$prefix required field", f.name))
        case (true, Some(default)) => DiffNonBreaking(Helpers.added(s"$prefix required field", s"${f.name}, defaults to ${Text.truncate(default)}"))
      }
    }
  }

  // Field-by-field diff; a type change is breaking.
  private[this] def diffField(modelName: String, a: Field, b: Field): Seq[Diff] = {
    assert(a.name == b.name, "Model field name's must be the same")
    val prefix = s"model $modelName field ${a.name}"

    Helpers.diffStringBreaking(s"$prefix type", a.`type`, b.`type`) ++
    Helpers.diffOptionalStringNonBreaking(s"$prefix description", a.description, b.description) ++
    Helpers.diffDeprecation(prefix, a.deprecation, b.deprecation) ++
    Helpers.diffDefault(prefix, a.default, b.default) ++
    Helpers.diffRequired(prefix, a.required, b.required) ++
    Helpers.diffAttributes(prefix, a.attributes, b.attributes) ++
    Helpers.diffMinimum(prefix, a.minimum, b.minimum) ++
    Helpers.diffMaximum(prefix, a.maximum, b.maximum) ++
    Helpers.diffOptionalStringNonBreaking(s"$prefix example", a.example, b.example)
  }
  // Resources matched by type; removing a resource is breaking.
  private[this] def diffResources(): Seq[Diff] = {
    a.resources.flatMap { resourceA =>
      b.resources.find(_.`type` == resourceA.`type`) match {
        case None => Some(DiffBreaking(Helpers.removed("resource", resourceA.`type`)))
        case Some(resourceB) => diffResource(resourceA, resourceB)
      }
    } ++ Helpers.findNew("resource", a.resources.map(_.`type`), b.resources.map(_.`type`))
  }

  private[this] def diffResource(a: Resource, b: Resource): Seq[Diff] = {
    assert(a.`type` == b.`type`, "Resource types must be the same")
    val prefix = s"resource ${a.`type`}"

    Helpers.diffStringNonBreaking(s"$prefix plural", a.plural, b.plural) ++
    Helpers.diffOptionalStringNonBreaking(s"$prefix description", a.description, b.description) ++
    Helpers.diffAttributes(prefix, a.attributes, b.attributes) ++
    Helpers.diffDeprecation(prefix, a.deprecation, b.deprecation) ++
    diffOperations(a.`type`, a.operations, b.operations)
  }

  // Annotations matched by name; additions/removals are non-breaking.
  private[this] def diffAnnotations(): Seq[Diff] = {
    a.annotations.flatMap { annotA =>
      b.annotations.find(_.name == annotA.name) match {
        case None => Some(DiffNonBreaking(Helpers.removed("annotation", annotA.name)))
        case Some(annotB) => diffAnnotation(annotA, annotB)
      }
    } ++ Helpers.findNew("annotation", a.annotations.map(_.name), b.annotations.map(_.name))
  }

  private[this] def diffAnnotation(a: Annotation, b: Annotation): Seq[Diff] = {
    assert(a.name == b.name, "Annotation names must be the same")
    val prefix = s"annotation ${a.name}"
    Helpers.diffOptionalStringNonBreaking(s"$prefix description", a.description, b.description) ++
    Helpers.diffDeprecation(prefix, a.deprecation, b.deprecation)
  }

  // Identity key for an operation, e.g. "GET /users/:id".
  private[this] def operationKey(op: Operation): String = {
    s"${op.method.toString.toUpperCase} ${op.path}".trim
  }

  // Operations matched by method+path; removal is breaking, addition is not.
  private[this] def diffOperations(resourceType: String, a: Seq[Operation], b: Seq[Operation]): Seq[Diff] = {
    val added = b.filter(opB => !a.exists(opA => operationKey(opB) == operationKey(opA)))
    val prefix = s"resource $resourceType"

    a.flatMap { opA =>
      b.find(opB => operationKey(opB) == operationKey(opA)) match {
        case None => Some(DiffBreaking(Helpers.removed(s"$prefix operation", operationKey(opA))))
        case Some(opB) => diffOperation(resourceType, opA, opB)
      }
    } ++ added.map { op =>
      DiffNonBreaking(Helpers.added(s"$prefix operation", operationKey(op)))
    }
  }

  private[this] def diffOperation(resourceType: String, a: Operation, b: Operation): Seq[Diff] = {
    assert(a.method == b.method, "Operation methods must be the same")
    assert(a.path == b.path, "Operation paths must be the same")
    val prefix = s"resource $resourceType operation " + operationKey(a)

    Helpers.diffOptionalStringNonBreaking(s"$prefix description", a.description, b.description) ++
    Helpers.diffAttributes(prefix, a.attributes, b.attributes) ++
    Helpers.diffDeprecation(prefix, a.deprecation, b.deprecation) ++
    diffBody(prefix, a.body, b.body) ++
    diffParameters(prefix, a.parameters, b.parameters) ++
    diffResponses(prefix, a.responses, b.responses)
  }
private[this] def diffBody(prefix: String, a: Option[Body], b: Option[Body]): Seq[Diff] = {
(a, b) match {
case (None, None) => Nil
case (None, Some(bodyB)) => Seq(DiffBreaking(Helpers.added(prefix, "body")))
case (Some(bodyB), None) => Seq(DiffBreaking(Helpers.removed(prefix, "body")))
case (Some(bodyA), Some(bodyB)) => {
Helpers.diffStringBreaking(s"$prefix body type", bodyA.`type`, bodyB.`type`) ++
Helpers.diffOptionalStringNonBreaking(s"$prefix body description", bodyA.description, bodyB.description) ++
Helpers.diffAttributes(prefix, bodyA.attributes, bodyB.attributes) ++
Helpers.diffDeprecation(s"$prefix body", bodyA.deprecation, bodyB.deprecation)
}
}
}
private[this] def diffParameters(prefix: String, a: Seq[Parameter], b: Seq[Parameter]): Seq[Diff] = {
val added = b.map(_.name).filter(h => a.find(_.name == h).isEmpty)
a.flatMap { parameterA =>
b.find(_.name == parameterA.name) match {
case None => Some(DiffBreaking(Helpers.removed(s"$prefix parameter", parameterA.name)))
case Some(parameterB) => diffParameter(prefix, parameterA, parameterB)
}
} ++ b.filter( p => added.contains(p.name) ).map { p =>
(p.required, p.default) match {
case (false, None) => DiffNonBreaking(Helpers.added(s"$prefix optional parameter", p.name))
case (false, Some(default)) => DiffNonBreaking(Helpers.added(s"$prefix optional parameter", s"${p.name}, defaults to ${Text.truncate(default)}"))
case (true, None) => DiffBreaking(Helpers.added(s"$prefix required parameter", p.name))
case (true, Some(default)) => DiffNonBreaking(Helpers.added(s"$prefix required parameter", s"${p.name}, defaults to ${Text.truncate(default)}"))
}
}
}
private[this] def diffParameter(prefix: String, a: Parameter, b: Parameter): Seq[Diff] = {
assert(a.name == b.name, "Parameter name's must be the same")
val thisPrefix = s"$prefix parameter ${a.name}"
Helpers.diffStringBreaking(s"$thisPrefix type", a.`type`, b.`type`) ++
Helpers.diffStringBreaking(s"$thisPrefix location", a.location.toString, b.location.toString) ++
Helpers.diffOptionalStringNonBreaking(s"$thisPrefix description", a.description, b.description) ++
Helpers.diffDeprecation(thisPrefix, a.deprecation, b.deprecation) ++
Helpers.diffOptionAttributes(prefix, a.attributes, b.attributes) ++
Helpers.diffDefault(thisPrefix, a.default, b.default) ++
Helpers.diffRequired(thisPrefix, a.required, b.required) ++
Helpers.diffMinimum(thisPrefix, a.minimum, b.minimum) ++
Helpers.diffMaximum(thisPrefix, a.maximum, b.maximum) ++
Helpers.diffOptionalStringNonBreaking(s"$thisPrefix example", a.example, b.example)
}
  /**
   * Returns the response code as a string: the integer literal for numeric codes,
   * the raw description for undefined types, and the option's name otherwise.
   */
  private[this] def responseCode(r: Response): String = {
    r.code match {
      case ResponseCodeInt(code) => code.toString
      case ResponseCodeUndefinedType(desc) => desc
      // "Default" catch-all response; rendered via its own toString.
      case ResponseCodeOption.Default => ResponseCodeOption.Default.toString
      // Forward-compatibility case: an option value this client does not know about.
      case ResponseCodeOption.UNDEFINED(value) => value
    }
  }
private[this] def diffResponses(prefix: String, a: Seq[Response], b: Seq[Response]): Seq[Diff] = {
val added = b.map(_.code).filter(code => a.find(_.code == code).isEmpty)
a.flatMap { responseA =>
b.find(_.code == responseA.code) match {
case None => Some(DiffBreaking(Helpers.removed(s"$prefix response", responseCode(responseA))))
case Some(responseB) => diffResponse(prefix, responseA, responseB)
}
} ++ b.filter( r => added.contains(r.code) ).map { r =>
DiffNonBreaking(Helpers.added(s"$prefix response", responseCode(r)))
}
}
private[this] def diffResponse(prefix: String, a: Response, b: Response): Seq[Diff] = {
assert(responseCode(a) == responseCode(b), "Response codes must be the same")
val thisPrefix = s"$prefix response ${responseCode(a)}"
Helpers.diffStringBreaking(s"$thisPrefix type", a.`type`, b.`type`) ++
Helpers.diffOptionalStringNonBreaking(s"$thisPrefix description", a.description, b.description) ++
Helpers.diffDeprecation(thisPrefix, a.deprecation, b.deprecation) ++
Helpers.diffOptionAttributes(prefix, a.attributes, b.attributes)
}
  /** Shared message builders and field-level diff helpers used by the diff methods above. */
  private object Helpers {
    // Message builders; long values are truncated so defaults/examples stay readable.
    def removed(label: String, value: String) = s"$label removed: ${Text.truncate(value)}"
    def added(label: String, value: String) = s"$label added: ${Text.truncate(value)}"
    def changed(label: String, from: String, to: String) = s"$label changed from ${Text.truncate(from)} to ${Text.truncate(to)}"
    // Reports every name in b that is absent from a as a non-breaking addition.
    def findNew(prefix: String, a: Seq[String], b: Seq[String]): Seq[Diff] = {
      b.filter(n => a.find(_ == n).isEmpty).map { name =>
        DiffNonBreaking(Helpers.added(prefix, name))
      }
    }
    // Making a field required is breaking; relaxing it to optional is not.
    def diffRequired(label: String, a: Boolean, b: Boolean): Seq[Diff] = {
      (a, b) match {
        case (true, true) => Nil
        case (false, false) => Nil
        case (true, false) => Seq(DiffNonBreaking(s"$label is no longer required"))
        case (false, true) => Seq(DiffBreaking(s"$label is now required"))
      }
    }
    /**
     * We consider a breaking change if a minimum is added or raised
     * (previously valid values could become invalid).
     */
    def diffMinimum(label: String, a: Option[Long], b: Option[Long]): Seq[Diff] = {
      (a, b) match {
        case (None, None) => Nil
        case (Some(from), Some(to)) => {
          if (from == to) {
            Nil
          } else {
            val desc = s"$label minimum changed from $from to $to"
            if (from < to) {
              Seq(DiffBreaking(desc))
            } else {
              Seq(DiffNonBreaking(desc))
            }
          }
        }
        case (None, Some(min)) => {
          Seq(DiffBreaking(s"$label minimum added: $min"))
        }
        case (Some(min), None) => {
          Seq(DiffNonBreaking(s"$label minimum removed: $min"))
        }
      }
    }
    /**
     * We consider a breaking change if a maximum is added or lowered
     * (previously valid values could become invalid).
     */
    def diffMaximum(label: String, a: Option[Long], b: Option[Long]): Seq[Diff] = {
      (a, b) match {
        case (None, None) => Nil
        case (Some(from), Some(to)) => {
          if (from == to) {
            Nil
          } else {
            val desc = s"$label maximum changed from $from to $to"
            if (from < to) {
              Seq(DiffNonBreaking(desc))
            } else {
              Seq(DiffBreaking(desc))
            }
          }
        }
        case (None, Some(max)) => {
          Seq(DiffBreaking(s"$label maximum added: $max"))
        }
        case (Some(max), None) => {
          Seq(DiffNonBreaking(s"$label maximum removed: $max"))
        }
      }
    }
    // Removing a default is breaking (clients relied on it); adding/changing is not.
    def diffDefault(label: String, a: Option[String], b: Option[String]): Seq[Diff] = {
      (a, b) match {
        case (None, None) => Nil
        case (Some(from), Some(to)) => {
          if (from == to) {
            Nil
          } else {
            Seq(DiffNonBreaking(s"$label default changed from ${Text.truncate(from)} to ${Text.truncate(to)}"))
          }
        }
        case (None, Some(default)) => {
          Seq(DiffNonBreaking(s"$label default added: ${Text.truncate(default)}"))
        }
        case (Some(default), None) => {
          Seq(DiffBreaking(s"$label default removed: ${Text.truncate(default)}"))
        }
      }
    }
    // Raw string comparison; severity is attached by the *Breaking wrappers below.
    def diffString(
      label: String,
      a: String,
      b: String
    ): Seq[String] = {
      diffOptionalString(label, Some(a), Some(b))
    }
    def diffStringBreaking(
      label: String,
      a: String,
      b: String
    ): Seq[Diff] = {
      diffString(label, a, b).map { DiffBreaking(_) }
    }
    def diffStringNonBreaking(
      label: String,
      a: String,
      b: String
    ): Seq[Diff] = {
      diffString(label, a, b).map { DiffNonBreaking(_) }
    }
    // Compares two sequences by their bracketed, comma-separated rendering.
    def diffArrayNonBreaking(
      label: String,
      a: Seq[String],
      b: Seq[String]
    ): Seq[Diff] = {
      diffString(label, "[" + a.mkString(", ") + "]", "[" + b.mkString(", ") + "]").map { DiffNonBreaking(_) }
    }
    // Produces at most one human-readable message describing the change, if any.
    def diffOptionalString(
      label: String,
      a: Option[String],
      b: Option[String]
    ): Seq[String] = {
      (a, b) match {
        case (None, None) => Nil
        case (Some(value), None) => Seq(Helpers.removed(label, value))
        case (None, Some(value)) => Seq(Helpers.added(label, value))
        case (Some(valueA), Some(valueB)) => {
          if (valueA == valueB) {
            Nil
          } else {
            Seq(Helpers.changed(label, valueA, valueB))
          }
        }
      }
    }
    def diffOptionalStringBreaking(
      label: String,
      a: Option[String],
      b: Option[String]
    ): Seq[Diff] = {
      diffOptionalString(label, a, b).map { DiffBreaking(_) }
    }
    def diffOptionalStringNonBreaking(
      label: String,
      a: Option[String],
      b: Option[String]
    ): Seq[Diff] = {
      diffOptionalString(label, a, b).map { DiffNonBreaking(_) }
    }
    // Deprecation changes are always non-breaking; only transitions are reported
    // (a description change on an existing deprecation is ignored).
    def diffDeprecation(prefix: String, a: Option[Deprecation], b: Option[Deprecation]): Seq[Diff] = {
      (a, b) match {
        case (None, None) => Nil
        case (Some(_), Some(_)) => Nil
        case (Some(_), None) => Seq(DiffNonBreaking(Helpers.removed(prefix, "deprecation")))
        case (None, Some(d)) => {
          d.description match {
            case None => Seq(DiffNonBreaking(s"$prefix deprecated"))
            case Some(desc) => Seq(DiffNonBreaking(s"$prefix deprecated: $desc"))
          }
        }
      }
    }
    def diffOptionAttributes(prefix: String, a: Option[Seq[Attribute]], b: Option[Seq[Attribute]]): Seq[Diff] = {
      diffAttributes(prefix, a.getOrElse(Nil), b.getOrElse(Nil))
    }
    // Attribute changes are informational only: all diffs here are non-breaking.
    def diffAttributes(prefix: String, a: Seq[Attribute], b: Seq[Attribute]): Seq[Diff] = {
      val aMap = Map(a map ( attr => attr.name -> attr ): _*)
      val bMap = Map(b map ( attr => attr.name -> attr ): _*)
      val aNames = aMap.keys.toSeq
      val bNames = bMap.keys.toSeq
      // Removed
      val removedNames = aNames diff bNames
      val removedDiffs = removedNames map (name => DiffNonBreaking(s"$prefix attribute removed: $name"))
      // Added
      val addedNames = bNames diff aNames
      val addedDiffs = addedNames map (name => DiffNonBreaking(s"$prefix attribute added: $name"))
      // Changed
      val namesInBoth = aNames intersect bNames
      val changedDiffs:Seq[Diff] = namesInBoth flatMap {name =>
        val aAttr = aMap(name)
        val bAttr = bMap(name)
        diffStringNonBreaking(s"$prefix attribute '$name' value", Json.stringify(aAttr.value), Json.stringify(bAttr.value)) ++
          diffOptionalStringNonBreaking(s"$prefix attribute '$name' description", aAttr.description, bAttr.description) ++
          diffDeprecation(s"$prefix attribute '$name'", aAttr.deprecation, bAttr.deprecation)
      }
      changedDiffs ++ removedDiffs ++ addedDiffs
    }
  }
}
| mbryzek/apidoc | api/app/lib/ServiceDiff.scala | Scala | mit | 26,510 |
package templemore.onx.version6
import templemore.onx.version5. {Grid, Cross, Nought}
/**
* @author Chris Turner
*/
/** Entry point wiring up the actor-based noughts-and-crosses game (version 6). */
object OandXVersion6 {

  def main(args: Array[String]) {
    // Enable random move selection before any actor touches the grid.
    Grid.randomness = true
    val gridActor = new GridActor().start
    val noughtPlayer = new PlayerActor(Nought).start
    val crossPlayer = new PlayerActor(Cross).start
    // The game actor coordinates the two players against the shared grid;
    // sending StartGame kicks off play.
    val gameActor = new GameActor(gridActor, noughtPlayer, crossPlayer).start
    gameActor ! StartGame
  }
}
} | skipoleschris/OandX | src/main/scala/templemore/onx/version6/OandXVersion6.scala | Scala | apache-2.0 | 424 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import scala.collection.mutable.ArrayBuffer
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll}
import org.apache.spark.{SparkConf, SparkContext, SparkFunSuite}
import org.apache.spark.rdd.{RDD, RDDOperationScope}
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.ui.UIUtils
import org.apache.spark.util.ManualClock
/**
* Tests whether scope information is passed from DStream operations to RDDs correctly.
*/
class DStreamScopeSuite
  extends SparkFunSuite
  with LocalStreamingContext {
  override def beforeEach(): Unit = {
    super.beforeEach()
    val conf = new SparkConf().setMaster("local").setAppName("test")
    // ManualClock lets tests advance batch time deterministically.
    conf.set("spark.streaming.clock", classOf[ManualClock].getName())
    val batchDuration: Duration = Seconds(1)
    ssc = new StreamingContext(new SparkContext(conf), batchDuration)
    // Sanity check: no leftover scope properties from a previous test.
    assertPropertiesNotSet()
  }
  override def afterEach(): Unit = {
    try {
      assertPropertiesNotSet()
    } finally {
      super.afterEach()
    }
  }
  test("dstream without scope") {
    val dummyStream = new DummyDStream(ssc)
    dummyStream.initialize(Time(0))
    // This DStream is not instantiated in any scope, so all RDDs
    // created by this stream should similarly not have a scope
    assert(dummyStream.baseScope === None)
    assert(dummyStream.getOrCompute(Time(1000)).get.scope === None)
    assert(dummyStream.getOrCompute(Time(2000)).get.scope === None)
    assert(dummyStream.getOrCompute(Time(3000)).get.scope === None)
  }
  test("input dstream without scope") {
    val inputStream = new DummyInputDStream(ssc)
    inputStream.initialize(Time(0))
    val baseScope = inputStream.baseScope.map(RDDOperationScope.fromJson)
    val scope1 = inputStream.getOrCompute(Time(1000)).get.scope
    val scope2 = inputStream.getOrCompute(Time(2000)).get.scope
    val scope3 = inputStream.getOrCompute(Time(3000)).get.scope
    // Even without an enclosing named scope, the input stream gets a base scope
    // (asserted below), and each batch's RDD derives its scope from it.
    assertDefined(baseScope, scope1, scope2, scope3)
    assert(baseScope.get.name.startsWith("dummy stream"))
    assertScopeCorrect(baseScope.get, scope1.get, 1000)
    assertScopeCorrect(baseScope.get, scope2.get, 2000)
    assertScopeCorrect(baseScope.get, scope3.get, 3000)
  }
  test("scoping simple operations") {
    val inputStream = new DummyInputDStream(ssc)
    val mappedStream = inputStream.map { i => i + 1 }
    val filteredStream = mappedStream.filter { i => i % 2 == 0 }
    filteredStream.initialize(Time(0))
    val mappedScopeBase = mappedStream.baseScope.map(RDDOperationScope.fromJson)
    val mappedScope1 = mappedStream.getOrCompute(Time(1000)).get.scope
    val mappedScope2 = mappedStream.getOrCompute(Time(2000)).get.scope
    val mappedScope3 = mappedStream.getOrCompute(Time(3000)).get.scope
    val filteredScopeBase = filteredStream.baseScope.map(RDDOperationScope.fromJson)
    val filteredScope1 = filteredStream.getOrCompute(Time(1000)).get.scope
    val filteredScope2 = filteredStream.getOrCompute(Time(2000)).get.scope
    val filteredScope3 = filteredStream.getOrCompute(Time(3000)).get.scope
    // These streams are defined in their respective scopes "map" and "filter", so all
    // RDDs created by these streams should inherit the IDs and names of their parent
    // DStream's base scopes
    assertDefined(mappedScopeBase, mappedScope1, mappedScope2, mappedScope3)
    assertDefined(filteredScopeBase, filteredScope1, filteredScope2, filteredScope3)
    assert(mappedScopeBase.get.name === "map")
    assert(filteredScopeBase.get.name === "filter")
    assertScopeCorrect(mappedScopeBase.get, mappedScope1.get, 1000)
    assertScopeCorrect(mappedScopeBase.get, mappedScope2.get, 2000)
    assertScopeCorrect(mappedScopeBase.get, mappedScope3.get, 3000)
    assertScopeCorrect(filteredScopeBase.get, filteredScope1.get, 1000)
    assertScopeCorrect(filteredScopeBase.get, filteredScope2.get, 2000)
    assertScopeCorrect(filteredScopeBase.get, filteredScope3.get, 3000)
  }
  test("scoping nested operations") {
    val inputStream = new DummyInputDStream(ssc)
    // countByKeyAndWindow internally uses reduceByKeyAndWindow, but only countByKeyAndWindow
    // should appear in scope
    val countStream = inputStream.countByWindow(Seconds(10), Seconds(1))
    countStream.initialize(Time(0))
    val countScopeBase = countStream.baseScope.map(RDDOperationScope.fromJson)
    val countScope1 = countStream.getOrCompute(Time(1000)).get.scope
    val countScope2 = countStream.getOrCompute(Time(2000)).get.scope
    val countScope3 = countStream.getOrCompute(Time(3000)).get.scope
    // Assert that all children RDDs inherit the DStream operation name correctly
    assertDefined(countScopeBase, countScope1, countScope2, countScope3)
    assert(countScopeBase.get.name === "countByWindow")
    assertScopeCorrect(countScopeBase.get, countScope1.get, 1000)
    assertScopeCorrect(countScopeBase.get, countScope2.get, 2000)
    assertScopeCorrect(countScopeBase.get, countScope3.get, 3000)
    // All streams except the input stream should share the same scopes as `countStream`
    def testStream(stream: DStream[_]): Unit = {
      if (stream != inputStream) {
        val myScopeBase = stream.baseScope.map(RDDOperationScope.fromJson)
        val myScope1 = stream.getOrCompute(Time(1000)).get.scope
        val myScope2 = stream.getOrCompute(Time(2000)).get.scope
        val myScope3 = stream.getOrCompute(Time(3000)).get.scope
        assertDefined(myScopeBase, myScope1, myScope2, myScope3)
        assert(myScopeBase === countScopeBase)
        assert(myScope1 === countScope1)
        assert(myScope2 === countScope2)
        assert(myScope3 === countScope3)
        // Climb upwards to test the parent streams
        stream.dependencies.foreach(testStream)
      }
    }
    testStream(countStream)
  }
  test("transform should allow RDD operations to be captured in scopes") {
    val inputStream = new DummyInputDStream(ssc)
    val transformedStream = inputStream.transform { _.map { _ -> 1}.reduceByKey(_ + _) }
    transformedStream.initialize(Time(0))
    val transformScopeBase = transformedStream.baseScope.map(RDDOperationScope.fromJson)
    val transformScope1 = transformedStream.getOrCompute(Time(1000)).get.scope
    val transformScope2 = transformedStream.getOrCompute(Time(2000)).get.scope
    val transformScope3 = transformedStream.getOrCompute(Time(3000)).get.scope
    // Assert that all children RDDs inherit the DStream operation name correctly
    assertDefined(transformScopeBase, transformScope1, transformScope2, transformScope3)
    assert(transformScopeBase.get.name === "transform")
    assertNestedScopeCorrect(transformScope1.get, 1000)
    assertNestedScopeCorrect(transformScope2.get, 2000)
    assertNestedScopeCorrect(transformScope3.get, 3000)
    // The RDD's own scope is the inner "reduceByKey"; its parent must be the
    // "transform" scope for the correct batch time.
    def assertNestedScopeCorrect(rddScope: RDDOperationScope, batchTime: Long): Unit = {
      assert(rddScope.name === "reduceByKey")
      assert(rddScope.parent.isDefined)
      assertScopeCorrect(transformScopeBase.get, rddScope.parent.get, batchTime)
    }
  }
  test("foreachRDD should allow RDD operations to be captured in scope") {
    val inputStream = new DummyInputDStream(ssc)
    val generatedRDDs = new ArrayBuffer[RDD[(Int, Int)]]
    inputStream.foreachRDD { rdd =>
      generatedRDDs += rdd.map { _ -> 1}.reduceByKey(_ + _)
    }
    val batchCounter = new BatchCounter(ssc)
    ssc.start()
    // Advance the manual clock by three batch intervals and wait for completion.
    val clock = ssc.scheduler.clock.asInstanceOf[ManualClock]
    clock.advance(3000)
    batchCounter.waitUntilBatchesCompleted(3, 10000)
    assert(generatedRDDs.size === 3)
    val foreachBaseScope =
      ssc.graph.getOutputStreams().head.baseScope.map(RDDOperationScope.fromJson)
    assertDefined(foreachBaseScope)
    assert(foreachBaseScope.get.name === "foreachRDD")
    val rddScopes = generatedRDDs.map { _.scope }
    assertDefined(rddScopes: _*)
    rddScopes.zipWithIndex.foreach { case (rddScope, idx) =>
      assert(rddScope.get.name === "reduceByKey")
      assert(rddScope.get.parent.isDefined)
      assertScopeCorrect(foreachBaseScope.get, rddScope.get.parent.get, (idx + 1) * 1000)
    }
  }
  /** Assert that the RDD operation scope properties are not set in our SparkContext. */
  private def assertPropertiesNotSet(): Unit = {
    assert(ssc != null)
    assert(ssc.sc.getLocalProperty(SparkContext.RDD_SCOPE_KEY) == null)
    assert(ssc.sc.getLocalProperty(SparkContext.RDD_SCOPE_NO_OVERRIDE_KEY) == null)
  }
  /** Assert that the given RDD scope inherits the name and ID of the base scope correctly. */
  private def assertScopeCorrect(
      baseScope: RDDOperationScope,
      rddScope: RDDOperationScope,
      batchTime: Long): Unit = {
    val (baseScopeId, baseScopeName) = (baseScope.id, baseScope.name)
    val formattedBatchTime = UIUtils.formatBatchTime(
      batchTime, ssc.graph.batchDuration.milliseconds, showYYYYMMSS = false)
    assert(rddScope.id === s"${baseScopeId}_$batchTime")
    assert(rddScope.name.replaceAll("\\\\n", " ") === s"$baseScopeName @ $formattedBatchTime")
    assert(rddScope.parent.isEmpty) // There should not be any higher scope
  }
  /** Assert that all the specified options are defined. */
  private def assertDefined[T](options: Option[T]*): Unit = {
    options.zipWithIndex.foreach { case (o, i) => assert(o.isDefined, s"Option $i was empty!") }
  }
}
| bdrillard/spark | streaming/src/test/scala/org/apache/spark/streaming/DStreamScopeSuite.scala | Scala | apache-2.0 | 10,196 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.sql.jdbc
import java.sql.{Connection, Driver}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.jdbc.JdbcConnectionProvider
/**
 * Example [[JdbcConnectionProvider]] that opts out of everything: `canHandle`
 * always returns false, so presumably the framework never asks it for a
 * connection (`getConnection` returns null if invoked directly) — confirm
 * against the provider-selection logic.
 */
class ExampleJdbcConnectionProvider extends JdbcConnectionProvider with Logging {
  // Logged at construction so service-loader registration is visible in the logs.
  logInfo("ExampleJdbcConnectionProvider instantiated")
  override val name: String = "ExampleJdbcConnectionProvider"
  // Declines every (driver, options) pair.
  override def canHandle(driver: Driver, options: Map[String, String]): Boolean = false
  override def getConnection(driver: Driver, options: Map[String, String]): Connection = null
  // This provider never alters the JVM security context.
  override def modifiesSecurityContext(
    driver: Driver,
    options: Map[String, String]
  ): Boolean = false
}
| ueshin/apache-spark | examples/src/main/scala/org/apache/spark/examples/sql/jdbc/ExampleJdbcConnectionProvider.scala | Scala | apache-2.0 | 1,484 |
/*
* Copyright 2014 - 2015 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package slamdata.engine
import slamdata.Predef._
import scala.Any
import scalaz._
import Scalaz._
import org.threeten.bp.{Instant, LocalDate, LocalTime, Duration}
import slamdata.engine.analysis.fixplate._
import slamdata.engine.fp._
import slamdata.engine.javascript.Js
import slamdata.engine.javascript.JsCore
/**
 * Root of the runtime data ADT. Every value reports its [[Type]] and knows how
 * to render itself as a JavaScript expression term.
 */
sealed trait Data {
  // Static type of this value in the engine's type lattice.
  def dataType: Type
  // JavaScript representation of this value.
  def toJs: Term[JsCore]
}
/** Concrete constructors of the [[Data]] ADT, one per supported runtime value kind. */
object Data {
  final case object Null extends Data {
    def dataType = Type.Null
    def toJs = JsCore.Literal(Js.Null).fix
  }
  final case class Str(value: String) extends Data {
    def dataType = Type.Str
    def toJs = JsCore.Literal(Js.Str(value)).fix
  }
  final case class Bool(value: Boolean) extends Data {
    def dataType = Type.Bool
    def toJs = JsCore.Literal(Js.Bool(value)).fix
  }
  val True = Bool(true)
  val False = Bool(false)
  // Common supertype of Int and Dec. Its custom equals makes Int(1) == Dec(1.0)
  // by comparing both sides as BigDecimal via the Number extractor.
  sealed trait Number extends Data {
    override def equals(other: Any) = (this, other) match {
      case (Int(v1), Number(v2)) => BigDecimal(v1) == v2
      case (Dec(v1), Number(v2)) => v1 == v2
      case _ => false
    }
  }
  object Number {
    // Widens either numeric constructor to BigDecimal.
    def unapply(value: Data): Option[BigDecimal] = value match {
      case Int(value) => Some(BigDecimal(value))
      case Dec(value) => Some(value)
      case _ => None
    }
  }
  final case class Dec(value: BigDecimal) extends Number {
    def dataType = Type.Dec
    def toJs = JsCore.Literal(Js.Num(value.doubleValue, true)).fix
  }
  final case class Int(value: BigInt) extends Number {
    def dataType = Type.Int
    def toJs = JsCore.Literal(Js.Num(value.doubleValue, false)).fix
  }
  final case class Obj(value: Map[String, Data]) extends Data {
    // Each field's type is the constant type of its value.
    def dataType = Type.Obj(value ∘ (Type.Const(_)), None)
    def toJs =
      JsCore.Obj(value.toList.map { case (k, v) => k -> v.toJs }.toListMap).fix
  }
  final case class Arr(value: List[Data]) extends Data {
    def dataType = Type.Arr(value ∘ (Type.Const(_)))
    def toJs = JsCore.Arr(value.map(_.toJs)).fix
  }
  final case class Set(value: List[Data]) extends Data {
    // Least upper bound of the element types; empty sets get Bottom.
    def dataType = (value.headOption.map { head =>
      value.drop(1).map(_.dataType).foldLeft(head.dataType)(Type.lub _)
    }).getOrElse(Type.Bottom) // TODO: ???
    def toJs = JsCore.Arr(value.map(_.toJs)).fix
  }
  final case class Timestamp(value: Instant) extends Data {
    def dataType = Type.Timestamp
    def toJs = JsCore.Call(JsCore.Ident("ISODate").fix, List(JsCore.Literal(Js.Str(value.toString)).fix)).fix
  }
  final case class Date(value: LocalDate) extends Data {
    def dataType = Type.Date
    def toJs = JsCore.Call(JsCore.Ident("ISODate").fix, List(JsCore.Literal(Js.Str(value.toString)).fix)).fix
  }
  final case class Time(value: LocalTime) extends Data {
    def dataType = Type.Time
    def toJs = JsCore.Literal(Js.Str(value.toString)).fix
  }
  final case class Interval(value: Duration) extends Data {
    def dataType = Type.Interval
    // Rendered as total milliseconds (seconds*1000 + nanos/1e6).
    def toJs = JsCore.Literal(Js.Num(value.getSeconds*1000 + value.getNano*1e-6, true)).fix
  }
  final case class Binary(value: ImmutableArray[Byte]) extends Data {
    def dataType = Type.Binary
    def toJs = JsCore.Call(JsCore.Ident("BinData").fix, List(
      JsCore.Literal(Js.Num(0, false)).fix,
      JsCore.Literal(Js.Str(base64)).fix)).fix
    // NOTE(review): sun.misc.BASE64Encoder is a JDK-internal API (removed in
    // modern JDKs) and inserts line breaks in its output; migrating to
    // java.util.Base64 would change the encoded string — confirm before swapping.
    def base64: String = new sun.misc.BASE64Encoder().encode(value.toArray)
    override def toString = "Binary(Array[Byte](" + value.mkString(", ") + "))"
    // Custom equality/hashCode: compare array *contents*, not references.
    override def equals(that: Any): Boolean = that match {
      case Binary(value2) => value === value2
      case _ => false
    }
    override def hashCode = java.util.Arrays.hashCode(value.toArray[Byte])
  }
  object Binary {
    def apply(array: Array[Byte]): Binary = Binary(ImmutableArray.fromArray(array))
  }
  final case class Id(value: String) extends Data {
    def dataType = Type.Id
    def toJs = JsCore.Call(JsCore.Ident("ObjectId").fix, List(JsCore.Literal(Js.Str(value)).fix)).fix
  }
  /**
   An object to represent any value that might come from a backend, but that
   we either don't know about or can't represent in this ADT. We represent it
   with JS's `undefined`, just because no other value will ever be translated
   that way.
  */
  final case object NA extends Data {
    def dataType = Type.Bottom
    def toJs = JsCore.Ident(Js.Undefined.ident).fix
  }
}
| wemrysi/quasar | core/src/main/scala/slamdata/engine/data.scala | Scala | apache-2.0 | 4,931 |
package twatcher.controllers
import twatcher.controllers.forms.SettingForm
import twatcher.globals.{db, twitter}
import twatcher.models.{Accounts, Configs, Scripts}
import twatcher.logics.{FileLogic, TwitterLogic}
import play.api.mvc._
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.libs.json.Json
import scala.concurrent.Future
// class SettingController extends Controller {
/**
 * Controller backing the settings screen: polling period, user scripts,
 * Twitter accounts, and tweet-archive upload. Successful mutations redirect
 * back to the settings page; failed DB actions answer 500 "db error".
 *
 * NOTE(review): the bind/fold + db.run + recover pattern is repeated in every
 * action and could be factored into a shared private helper.
 */
object SettingController extends Controller {
  /** Persists a new check period submitted from the settings form. */
  def updatePeriod = Action.async { implicit request =>
    SettingForm.periodForm.bindFromRequest.fold(
      formWithError => Future.successful(BadRequest)
    , period => {
        db.run(Configs.update(period)).map{ _ =>
          Redirect(routes.AppController.showSetting)
        } recover {
          case e: Exception => InternalServerError("db error")
        }
      }
    )
  }
  /** Inserts a new script row. */
  def createScript = Action.async { implicit request =>
    SettingForm.scriptForm.bindFromRequest.fold(
      formWithError => Future.successful(BadRequest)
    , script => {
        db.run(Scripts.insert(script)).map { _ =>
          Redirect(routes.AppController.showSetting)
        } recover {
          case e: Exception => InternalServerError("db error")
        }
      }
    )
  }
  /** Updates an existing script row. */
  def updateScript = Action.async { implicit request =>
    SettingForm.scriptForm.bindFromRequest.fold(
      formWithError => Future.successful(BadRequest)
    , script => {
        db.run(Scripts.update(script)).map { _ =>
          Redirect(routes.AppController.showSetting)
        } recover {
          case e: Exception => InternalServerError("db error")
        }
      }
    )
  }
  /** Deletes the script identified by the submitted form. */
  def deleteScript = Action.async { implicit request =>
    SettingForm.scriptForm.bindFromRequest.fold(
      formWithError => Future.successful(BadRequest)
    , script => {
        db.run(Scripts.delete(script)).map { _ =>
          Redirect(routes.AppController.showSetting)
        } recover {
          case e: Exception => InternalServerError("db error")
        }
      }
    )
  }
  /** Updates the editable (detail) fields of a Twitter account. */
  def updateAccount = Action.async { implicit request =>
    SettingForm.accountDetailForm.bindFromRequest.fold(
      formWithError => Future.successful(BadRequest)
    , detailOnlyAccount => {
        db.run(Accounts.updateDetail(detailOnlyAccount)).map { _ =>
          Redirect(routes.AppController.showSetting)
        } recover {
          case e: Exception => InternalServerError("db error")
        }
      }
    )
  }
  /** Deletes the account identified by the submitted user id. */
  def deleteAccount = Action.async { implicit request =>
    SettingForm.accountForm.bindFromRequest.fold(
      formWithError => Future.successful(BadRequest)
    , userId => {
        db.run(Accounts.delete(userId)).map { _ =>
          Redirect(routes.AppController.showSetting)
        } recover {
          case e: Exception => InternalServerError("db error")
        }
      }
    )
  }
  /**
   * Check Twitter Authentication and update Twitter info
   */
  def checkAccount(userId: Long) = Action.async { implicit request =>
    db.run(Accounts.findByUserId(userId)).flatMap { accountOp =>
      // Unknown user id -> plain 400; known account -> refresh profile via Twitter API.
      accountOp.fold[Future[Result]](Future.successful(BadRequest))(account =>
        TwitterLogic.upsertUserProfile(twitter, account.token).map { newAccount =>
          Ok(Json.obj(
            "userId" -> newAccount.userId
          , "screenName" -> newAccount.screenName
          , "imageUrl" -> newAccount.imageUrl
          ))
        } recover {
          // Auth failure is reported as 400 with a JSON error payload.
          case e: Exception => BadRequest(Json.obj(
            "error" -> "authentication failed"
          , "userId" -> userId
          ))
        }
      )
    }
  }
  /**
   * Upload zip file, parse it, and insert tweet ids into DB
   */
  def insertTweetZip = Action.async(parse.multipartFormData) { implicit request =>
    // Missing "zip" part -> 400; otherwise parse the temp file and load tweet ids.
    request.body.file("zip").fold[Future[Result]](Future.successful(BadRequest)) { zip =>
      FileLogic.insertTweetZip(zip.ref.file).map { _ =>
        Redirect(routes.AppController.showSetting)
      } recover {
        case e: Exception => InternalServerError("db error")
      }
    }
  }
}
| srd7/twatcher | app/controllers/SettingController.scala | Scala | mit | 4,034 |
package argonaut
import scalaz._, syntax.equal._
/** Importable entry point mixing in [[CodecJsonScalazs]]; currently contributes nothing. */
object CodecJsonScalaz extends CodecJsonScalazs {
}
/** Placeholder trait for scalaz-related CodecJson instances (currently empty). */
trait CodecJsonScalazs {
}
| jedws/argonaut | argonaut-scalaz/src/main/scala/argonaut/CodecJsonScalaz.scala | Scala | bsd-3-clause | 131 |
package a
import scala.quoted.*
/** Scala 3 macro example: wraps a method application in `A.pure`. */
object A:
  // Compile-time entry point; the body of `expr` is rewritten by transformImplExpr.
  inline def transform[A](inline expr: A): A = ${
    transformImplExpr('expr)
  }
  // Unimplemented at runtime: invoking it raises NotImplementedError.
  def pure[A](a:A):A = ???
  def transformImplExpr[A:Type](using Quotes)(expr: Expr[A]): Expr[A] = {
    import quotes.reflect.*
    expr.asTerm match {
      // Strip the Inlined wrapper and recurse on the underlying term.
      case Inlined(x,y,z) => transformImplExpr(z.asExpr.asInstanceOf[Expr[A]])
      // A method application f(args) becomes A.pure(f(args)).
      case Apply(fun,args) => '{ A.pure(${Apply(fun,args).asExpr.asInstanceOf[Expr[A]]}) }
      // Anything else is returned unchanged.
      case other => expr
    }
  }
| lampepfl/dotty | tests/pos-macros/i8325/Macro_1.scala | Scala | apache-2.0 | 503 |
package org.jetbrains.plugins.scala
package codeInspection
package scaladoc
import com.intellij.codeInspection.{LocalInspectionTool, ProblemHighlightType, ProblemsHolder}
import com.intellij.psi.PsiElementVisitor
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor
import org.jetbrains.plugins.scala.lang.scaladoc.lexer.ScalaDocTokenType
import org.jetbrains.plugins.scala.lang.scaladoc.psi.api.ScDocTag
/**
 * Inspection that reports Scaladoc @param/@throws/@tparam tags that name a
 * parameter but carry no textual description.
 *
 * User: Dmitry Naydanov
 * Date: 12/17/11
 */
class ScalaDocMissingParameterDescriptionInspection extends LocalInspectionTool {
  override def getDisplayName: String = "Missing Parameter Description"

  override def buildVisitor(holder: ProblemsHolder, isOnTheFly: Boolean): PsiElementVisitor = {
    new ScalaElementVisitor {
      override def visitTag(s: ScDocTag) {
        // Only the tags in OurTags that actually have a value element are checked.
        if (!ScalaDocMissingParameterDescriptionInspection.OurTags.contains(s.name) || s.getValueElement == null) {
          return
        }
        val children = s.findChildrenByType(ScalaDocTokenType.DOC_COMMENT_DATA)

        // Any data token longer than one character counts as a description,
        // so no problem is reported.
        // NOTE(review): `split(" ").nonEmpty` is true for every non-empty
        // string, so the effective condition is just `length > 1` — confirm intent.
        for (child <- children) {
          if (child.getText.length() > 1 && child.getText.split(" ").nonEmpty) {
            return
          }
        }

        // No description found: highlight the tag's value element (or the tag itself).
        holder.registerProblem(holder.getManager.createProblemDescriptor(
          if (s.getValueElement != null) s.getValueElement else s, getDisplayName, true,
          ProblemHighlightType.GENERIC_ERROR_OR_WARNING, isOnTheFly))
      }
    }
  }
}

object ScalaDocMissingParameterDescriptionInspection {
  import org.jetbrains.plugins.scala.lang.scaladoc.parser.parsing.MyScaladocParsing._

  // Scaladoc tags that are expected to carry a description after the value element.
  val OurTags = Set(PARAM_TAG, THROWS_TAG, TYPE_PARAM_TAG)
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/scaladoc/ScalaDocMissingParameterDescriptionInspection.scala | Scala | apache-2.0 | 1,687 |
package scala.pickling
package internal
import java.util.concurrent.locks.ReentrantLock
import scala.collection.mutable
import scala.pickling.spi._
import scala.reflect.runtime.universe.Mirror
/**
 * The default implementation of a pickling runtime.
 *
 * Notes:
 * - This supports circular reference handling via TLS buffers during pickling/unpickling
 * - This supports runtime pickler/unpickler generation via scala reflection.
 * - This uses an actual lock to keep reflective usages safe.
 */
class DefaultRuntime extends spi.PicklingRuntime {

  // Global reflection lock: serializes reflective usages (see class note above).
  override val GRL = new ReentrantLock()

  /** Gives access to the current refRegistry. */
  override val refRegistry: RefRegistry = new DefaultRefRegistry

  /** Creates a new fastTypeTag with the given tagKey.
    *
    * NOTE; this only assumes the T lines up.
    */
  override def makeFastTag[T](tagKey: String): FastTypeTag[T] = FastTypeTag.apply(tagKey).asInstanceOf[FastTypeTag[T]]

  /** The current reflection mirror to use when doing runtime unpickling/pickling. */
  override def currentMirror: Mirror = _root_.scala.reflect.runtime.currentMirror

  /** A registry of picklers for runtime lookup/usage. */
  override val picklers: PicklerRegistry = new DefaultPicklerRegistry(new DefaultRuntimePicklerGenerator(GRL))
}
| scala/pickling | core/src/main/scala/scala/pickling/internal/DefaultRuntime.scala | Scala | bsd-3-clause | 1,287 |
package org.littlewings.infinispan.interceptor
import scala.beans.BeanProperty
import org.infinispan.commands.write.PutKeyValueCommand
import org.infinispan.context.InvocationContext
import org.infinispan.interceptors.base.BaseCustomInterceptor
/**
 * Infinispan custom interceptor that multiplies Integer values by the
 * configured factor before they are stored; non-Integer values pass through
 * unchanged.
 */
class IntegerMultiplyInterceptor extends BaseCustomInterceptor {

  // Multiplication factor, injected as a String by the cache configuration.
  // NOTE(review): parsed with toInt on every put — a non-numeric value would
  // throw NumberFormatException at runtime; confirm configuration is validated.
  @BeanProperty
  var num: String = "1"

  override def visitPutKeyValueCommand(ctx: InvocationContext, command: PutKeyValueCommand): AnyRef = {
    val newValue: AnyRef = command.getValue match {
      case n: Integer => Integer.valueOf(n * num.toInt)
      case n => n
    }

    // Replace the value on the command before passing it down the chain.
    command.setValue(newValue)

    super.visitPutKeyValueCommand(ctx, command)
    // Alternatively:
    // invokeNextInterceptor(ctx, command)
  }
}
| kazuhira-r/infinispan-getting-started | embedded-custom-interceptor/src/main/scala/org/littlewings/infinispan/interceptor/IntegerMultiplyInterceptor.scala | Scala | mit | 741 |
package io.protoless.error
import com.google.protobuf.InvalidProtocolBufferException
import com.google.protobuf.WireFormat.FieldType
/**
 * An exception representing a decoding failure associated with a possible cause
 */
sealed class DecodingFailure(message: String, cause: Option[Throwable] = None)
  extends Exception(message, cause.orNull) {

  /** Returns a copy of this failure carrying `msg` instead; the cause is preserved. */
  def withMessage(msg: String): DecodingFailure = new DecodingFailure(msg, cause)
}

// Raised when the reader encountered a different field index/wire type than
// requested, i.e. the expected field is absent from the message.
case class MissingField(index: Int, fieldType: FieldType, wireType: Int, fieldNumber: Int) extends DecodingFailure(
  "Field not present in protobuff message.\\n" +
    s"Expected to read field at index $index with type ${fieldType.getJavaType.name()}, " +
    s"but next index is $fieldNumber with wire type ${DecodingFailure.wireTypeDetail(wireType)}")

// Raised when the field exists but its wire type does not match the expected field type.
case class WrongFieldType(expectedType: FieldType, fieldNumber: Int, wireType: Int) extends DecodingFailure(
  s"Field read at index $fieldNumber doesn't meet field type requirements. " +
    s"Expected type ${expectedType.getJavaType.name()} (wire: ${expectedType.getWireType}) " +
    s"but wire type read is ${DecodingFailure.wireTypeDetail(wireType)}"
)

// Wraps low-level protobuf runtime errors, preserving the original cause.
case class InternalProtobufError(message: String, cause: Throwable) extends DecodingFailure(message, Some(cause))
object DecodingFailure {

  /**
   * Transform a generic Exception into a [[DecodingFailure]] associated with the `field number` of the failure.
   */
  def fromThrowable(ex: Throwable, index: Int): InternalProtobufError = ex match {
    case err: InvalidProtocolBufferException =>
      // Include the partially-read message bytes (when present) to ease debugging.
      val unfinishedMessage = Option(err.getUnfinishedMessage).map(msg => s"\\nMessage read: ${msg.toByteArray}").getOrElse("")
      InternalProtobufError(s"Cannot read field at index $index\\n${err.getMessage}\\n$unfinishedMessage", ex)
    case _ => InternalProtobufError(ex.getMessage, ex)
  }

  /**
   * Build a [[DecodingFailure]] from a message.
   */
  def apply(message: String): DecodingFailure = new DecodingFailure(message)

  // Protobuf wire type ids with their names and the scalar types that encode
  // to each; used to render a readable description of an unexpected wire type.
  private val mappingWireTypeWithCompatibleType = Seq(
    (0, "Varint", "int32, int64, uint32, uint64, sint32, sint64, bool, enum"),
    (1, "64-bit", "fixed64, sfixed64, double"),
    (2, "Length-delimited", "string, bytes, embedded messages, packed repeated fields"),
    (3, "Start group", "groups (deprecated)"),
    (4, "End group", "groups (deprecated)"),
    (5, "32-bit", "fixed32, sfixed32, float")
  )

  // Formats a wire type as "id:name (compatible types)"; unknown ids become "id:Unknown".
  private[protoless] def wireTypeDetail(wireType: Int): String = {
    mappingWireTypeWithCompatibleType
      .find(_._1 == wireType)
      .map { case (index, name, compatible) => s"$index:$name ($compatible)" }
      .getOrElse(s"$wireType:Unknown")
  }
}
| julien-lafont/protoless | modules/core/src/main/scala/io/protoless/error/DecodingFailure.scala | Scala | apache-2.0 | 2,653 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.cg.monadic.transformer.spark
import org.cg.monadic.transformer.Transformer
import org.apache.spark.sql.SQLContext
/**
 * @author yanlin wang (yanlinw@yahoo.com)
 *
 * Base class for transformers producing Spark SQL side effects (result type
 * Unit); concrete subclasses must supply the SQLContext to operate on.
 */
abstract class DataFrameTransformer extends Transformer[Unit] {
  // SQLContext provided by the concrete transformer implementation.
  def ssc: SQLContext
} | CodeGerm/monadic-lib | src/main/scala/org/cg/monadic/transformer/spark/DataFrameTransformer.scala | Scala | apache-2.0 | 1,067 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.sql.Timestamp
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.spark.sql.Row
import org.apache.spark.sql.execution.datasources.parquet.ParquetCompatibilityTest
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.internal.SQLConf
/**
 * Verifies that Parquet files written by Hive (through its own SerDe) can be
 * read back correctly by Spark's native Parquet reader, across a range of
 * Hive column types.
 */
class ParquetHiveCompatibilitySuite extends ParquetCompatibilityTest with TestHiveSingleton {
  /**
   * Set the staging directory (and hence path to ignore Parquet files under)
   * to the default value of hive.exec.stagingdir.
   */
  private val stagingDir = ".hive-staging"

  // Logs the schema of the Parquet data files under `path`, skipping Hive's
  // metadata/staging files (names starting with "_" or the staging dir prefix).
  override protected def logParquetSchema(path: String): Unit = {
    val schema = readParquetSchema(path, { path =>
      !path.getName.startsWith("_") && !path.getName.startsWith(stagingDir)
    })

    logInfo(
      s"""Schema of the Parquet file written by parquet-avro:
         |$schema
       """.stripMargin)
  }

  // Round-trip check: creates a Hive Parquet table with the given column
  // types, lets Hive write `row` (plus an all-null row), then reads the files
  // back with Spark's Parquet reader and compares.
  private def testParquetHiveCompatibility(row: Row, hiveTypes: String*): Unit = {
    withTable("parquet_compat") {
      withTempPath { dir =>
        val path = dir.getCanonicalPath

        // Hive columns are always nullable, so here we append a all-null row.
        val rows = row :: Row(Seq.fill(row.length)(null): _*) :: Nil

        // Don't convert Hive metastore Parquet tables to let Hive write those Parquet files.
        withSQLConf(HiveUtils.CONVERT_METASTORE_PARQUET.key -> "false") {
          withTempView("data") {
            val fields = hiveTypes.zipWithIndex.map { case (typ, index) => s" col_$index $typ" }

            val ddl =
              s"""CREATE TABLE parquet_compat(
                 |${fields.mkString(",\\n")}
                 |)
                 |STORED AS PARQUET
                 |LOCATION '$path'
               """.stripMargin

            logInfo(
              s"""Creating testing Parquet table with the following DDL:
                 |$ddl
               """.stripMargin)

            spark.sql(ddl)

            val schema = spark.table("parquet_compat").schema
            val rowRDD = spark.sparkContext.parallelize(rows).coalesce(1)
            spark.createDataFrame(rowRDD, schema).createOrReplaceTempView("data")
            spark.sql("INSERT INTO TABLE parquet_compat SELECT * FROM data")
          }
        }

        logParquetSchema(path)

        // Unfortunately parquet-hive doesn't add `UTF8` annotation to BINARY when writing strings.
        // Have to assume all BINARY values are strings here.
        withSQLConf(SQLConf.PARQUET_BINARY_AS_STRING.key -> "true") {
          checkAnswer(spark.read.parquet(path), rows)
        }
      }
    }
  }

  test("simple primitives") {
    testParquetHiveCompatibility(
      Row(true, 1.toByte, 2.toShort, 3, 4.toLong, 5.1f, 6.1d, "foo"),
      "BOOLEAN", "TINYINT", "SMALLINT", "INT", "BIGINT", "FLOAT", "DOUBLE", "STRING")
  }

  test("SPARK-10177 timestamp") {
    testParquetHiveCompatibility(Row(Timestamp.valueOf("2015-08-24 00:31:00")), "TIMESTAMP")
  }

  test("array") {
    testParquetHiveCompatibility(
      Row(
        Seq[Integer](1: Integer, null, 2: Integer, null),
        Seq[String]("foo", null, "bar", null),
        Seq[Seq[Integer]](
          Seq[Integer](1: Integer, null),
          Seq[Integer](2: Integer, null))),
      "ARRAY<INT>",
      "ARRAY<STRING>",
      "ARRAY<ARRAY<INT>>")
  }

  test("map") {
    testParquetHiveCompatibility(
      Row(
        Map[Integer, String](
          (1: Integer) -> "foo",
          (2: Integer) -> null)),
      "MAP<INT, STRING>")
  }

  // HIVE-11625: Parquet map entries with null keys are dropped by Hive
  ignore("map entries with null keys") {
    testParquetHiveCompatibility(
      Row(
        Map[Integer, String](
          null.asInstanceOf[Integer] -> "bar",
          null.asInstanceOf[Integer] -> null)),
      "MAP<INT, STRING>")
  }

  test("struct") {
    testParquetHiveCompatibility(
      Row(Row(1, Seq("foo", "bar", null))),
      "STRUCT<f0: INT, f1: ARRAY<STRING>>")
  }
}
| gioenn/xSpark | sql/hive/src/test/scala/org/apache/spark/sql/hive/ParquetHiveCompatibilitySuite.scala | Scala | apache-2.0 | 4,805 |
package rpn_fancy
import annotation.tailrec
object RpnFancy {

  def main(args: Array[String]) {
    val tree = rpn("12 8comment- morecomments7 2*+") // gives the tree structure of the operation
    val res = tree.get // gives the result of the operation
    println(res) // prints 18
  }

  // Supported binary integer operators; any other character maps to None via
  // the default value and is simply skipped by the parser (this is how
  // interleaved comment text gets ignored).
  val oprs = Map[Char, Option[(Int, Int) => Int]](
    '%' -> Some(_%_),
    '*' -> Some(_*_),
    '+' -> Some(_+_),
    '-' -> Some(_-_),
    '/' -> Some(_/_)
  ).withDefaultValue(None)

  /**
   * Parses a reverse-Polish-notation string into an expression tree (`Elt`).
   * Tokens are runs of digits (operands) or single characters; characters
   * that are not known operators are ignored.
   *
   * NOTE(review): an input with no operands makes `acc(0)` throw
   * IndexOutOfBoundsException, and an operator with fewer than two operands
   * on the stack fails the `a :: b :: as` match — confirm whether malformed
   * input should be handled more gracefully.
   */
  def rpn(arg: String) = {
    @tailrec
    def rpn_rec(args: List[String], acc: List[Elt]): Elt = {
      args match {
        case List() => acc(0)
        case s :: ss =>
          if (isNumber(s))
            rpn_rec(ss, Num(s.toInt) :: acc)
          else oprs(s(0)) match {
            case None => rpn_rec(ss, acc)
            case Some(op) => {
              // Pop two operands; `b` was pushed first, hence `Opr(op, b, a)`.
              val (a :: b :: as) = acc
              rpn_rec(ss, Opr(op, b, a) :: as)
            }
          }
      }
    }
    // Tokenize into digit runs or single characters.
    val ss = "(\\d+|.)".r.findAllIn(arg)
    rpn_rec(ss.toList, List())
  }

  // True when the token starts with an ASCII digit ('0'..'9', codes 48..57).
  def isNumber(arg: String) = {
    val n = arg(0).toInt
    (n >= 48 && n < 58)
  }
}
| HiinoFW/stuff | scala/rpn_fancy/RpnFancy.scala | Scala | mit | 1,128 |
package org.jmespike.controls
import com.jme3.renderer.{ViewPort, RenderManager}
import com.jme3.scene.control.{Control, AbstractControl}
import org.jmespike.entity.EntityConf
import org.jmespike.conf.Conf
import org.scalaprops.{Property, Bean, BeanListener}
import java.lang.reflect.Constructor
import org.jmespike.utils.ClassUtils
import com.jme3.scene.{Node, Spatial}
/**
 * Convenience base class for Controls that are used to control entity nodes.
 * If a conf is supplied, will listen to changes to it and call confUpdated during an update after it is changed.
 * NOTE: Should only be used on spatials of type Node.
 */
// TODO: Should this class handle input actions also?
abstract class EntityControl[T <: ControlConf](conf: T) extends AbstractControl {

  private var enabledChanged = true // Call onEnabled during first update.
  private var confChanged = false
  private var confListenerRegistered = false

  // Sets confChanged on any (deep) change to the conf bean; the flag is
  // consumed in update() so onConfChanged always runs on the update pass.
  private val confListener = new BeanListener {
    def onPropertyChanged(bean: Bean, property: Property[_]) {confChanged = true}
    def onPropertyRemoved(bean: Bean, property: Property[_]) {confChanged = true}
    def onPropertyAdded(bean: Bean, property: Property[_]) {confChanged = true}
  }

  /**
   * Called when the config has been changed.
   */
  def onConfChanged(conf: T) {}

  /**
   * Called if the control is enabled, during the update pass.
   */
  def controlUpdate(tpf: Float) {}

  /**
   * Called if the control is enabled, during the render pass.
   */
  def controlRender(rm: RenderManager, vp: ViewPort) {}

  /**
   * Called during update, before controlUpdate, if the control was enabled since the last update.
   * Also called during the first update of the control, after it was created.
   */
  def onEnabled() {}

  /**
   * Called during update, if the control was disabled since the last update.
   */
  def onDisabled() {}

  /**
   * A convenience method that returns the random seed for the spatial.
   */
  def randomSeed: Int = SeedCalculator.randomSeedOf(spatial)

  /**
   * Returns the node that this control is controlling.
   */
  def entity: Node = spatial.asInstanceOf[Node]

  /**
   * Returns the first entity control of the specified type for the entity that the current control is attached to,
   * if it was available, otherwise None.
   */
  def entityControl[E <: EntityControl[_]](implicit m: Manifest[E]): Option[E] = {
    val control = spatial.getControl(m.erasure.asInstanceOf[Class[E]])
    if (control == null) None
    else Some(control)
  }

  /**
   * Applies the specified code on the first entity control of the specified type, if found in this entity.
   */
  def withEntityControl[T <: EntityControl[_]](block: T => Unit)(implicit m: Manifest[T]) {
    entityControl[T](m) foreach {ec => block(ec)}
  }

  /**
   * Returns the EntityConf that is used to configure the entity that this control belongs to,
   * or null if this control is not controlling an entity.
   */
  def entityConf: EntityConf = spatial.getUserData("EntityConf").asInstanceOf[EntityConf]

  /**
   * Should create a copy of the control.
   * By default creates a new instance of the descendant by invoking a constructor with one conf parameter if found,
   * otherwise using a no-parameter constructor.
   */
  def createCopy: EntityControl[T] = {
    ClassUtils.newInstance[ControlConf](getClass,
      conf,
      getClass.newInstance.asInstanceOf[AnyRef],
      classOf[ControlConf]).asInstanceOf[EntityControl[T]]
  }

  // jME Control contract: attach a fresh copy of this control to the given spatial.
  final def cloneForSpatial(spatial: Spatial): Control = {
    val copy = createCopy
    spatial.addControl(copy)
    return copy
  }

  final override def setEnabled(enabled: Boolean) {
    if (isEnabled != enabled) {
      super.setEnabled(enabled)
      // Defer the onEnabled/onDisabled callback to the next update pass.
      enabledChanged = true
    }
  }

  final override def update(tpf: Float) {
    // Handle change of enabled state
    if (enabledChanged) {
      enabledChanged = false

      // Register or unregister listener to the conf when the control is enabled or disabled
      if (conf != null) {
        if (isEnabled && !confListenerRegistered) {
          conf.addDeepListener(confListener)
          confListenerRegistered = true
        }
        else if (!isEnabled && confListenerRegistered) {
          conf.removeDeepListener(confListener)
          confListenerRegistered = false
        }
      }

      // Notify descendant about enable state change
      if (isEnabled) onEnabled()
      else onDisabled()
    }

    // Notify configuration changes
    if (isEnabled && conf != null && confChanged) {
      confChanged = false
      onConfChanged(conf)
    }

    // Invoke controlUpdate if this control is enabled
    super.update(tpf)
  }
} | zzorn/skycastle | src/main/scala/org/jmespike/controls/EntityControl.scala | Scala | gpl-2.0 | 4,789 |
package com.blogspot.etude.test.scala
/**
 * Demonstrates passing a function value as a callback: prints a message to
 * stdout once per second, forever.
 */
object Timer {

  /** Invokes `callback` every second in an endless loop (never returns). */
  def oncePerSecond(callback: () => Unit): Unit = {
    while (true) {
      callback()
      Thread.sleep(1000)
    }
  }

  /** The callback used by main: prints a fixed message. */
  def timeFlies(): Unit = {
    println("Time flies like an arrow...")
  }

  def main(args: Array[String]): Unit = {
    oncePerSecond(timeFlies)
  }
}
| TaronLiu/island | scala/Timer.scala | Scala | gpl-2.0 | 382 |
package test
import org.specs2.mutable._
import play.api.test._
import play.api.test.Helpers._
/**
 * add your integration spec here.
 * An integration test will fire up a whole play application in a real (or headless) browser
 */
class IntegrationSpec extends Specification {

  "Application" should {

    "work from within a browser" in {
      // Boots a full Play app on port 3333 and drives it with HtmlUnit.
      running(TestServer(3333), HTMLUNIT) { browser =>
        browser.goTo("http://localhost:3333/communities/count")
        browser.pageSource must contain("Found 150 communities")
      }
    }
  }
} | dwhjames/play-datomisca | samples/1-play-datomic-getting-started/test/IntegrationSpec.scala | Scala | apache-2.0 | 572 |
package com.codingkapoor.codingbat
/**
 * Solutions to CodingBat's Array-1/Array-2 style exercises.
 *
 * Fixes in this revision:
 *  - `unlucky1` no longer throws on length-2 arrays.
 *  - `fix23` and `front11` no longer corrupt multi-digit or negative
 *    elements (they previously round-tripped through a digit string).
 *  - `maxTriple` drops a discarded (dead) statement.
 */
object ArrayI {

  /** True if the first or last element is 6 (false for an empty array). */
  def firstLast6(arr: Array[Int]): Boolean = {
    if (arr.length < 1) false else arr.head == 6 || arr.last == 6
  }

  /** True if the first and last elements are equal (false for an empty array). */
  def sameFirstLast(arr: Array[Int]): Boolean = {
    if (arr.length < 1) false else arr.head == arr.last
  }

  /** The first three digits of pi, as Array(3, 1, 4). */
  def makePi(): Array[Int] = {
    Math.PI.toString.take(4).replace(".", "").toArray.map(_.toString).map(_.toInt)
  }

  /** True if the arrays share a first element or share a last element. */
  def commonEnd(a: Array[Int], b: Array[Int]): Boolean = {
    if (a.length < 1 || b.length < 1) false else a.head == b.head || a.last == b.last
  }

  /** Sum of all elements. */
  def sum3(arr: Array[Int]): Int = {
    arr.sum
  }

  /** The array rotated left by one position. */
  def rotateLeft3(arr: Array[Int]): Array[Int] = {
    arr.tail :+ arr.head
  }

  /** The array in reverse order. */
  def reverse3(arr: Array[Int]): Array[Int] = {
    arr.reverse
  }

  /** Array of the same length filled with the larger of the first/last element. */
  def maxEnd3(arr: Array[Int]): Array[Int] = {
    val res = if (arr.head > arr.last) arr.head else arr.last
    Array.fill(arr.length) { res }
  }

  /** Sum of the first two elements; take(2) already copes with 0/1-element arrays. */
  def sum2(arr: Array[Int]): Int = {
    arr.take(2).sum
  }

  /** The middle elements of two 3-element arrays. */
  def middleWay(a: Array[Int], b: Array[Int]): Array[Int] = {
    Array(a(1), b(1))
  }

  /** The first and last elements; arrays of length <= 1 are returned as-is. */
  def makeEnds(arr: Array[Int]): Array[Int] = {
    if (arr.length <= 1) arr else Array(arr.head, arr.last)
  }

  /** True if the array contains a 2 or a 3. */
  def has23(arr: Array[Int]): Boolean = {
    arr.exists(x => x == 2 || x == 3)
  }

  /** True if the array contains neither 2 nor 3. */
  def no23(arr: Array[Int]): Boolean = {
    !arr.contains(2) && !arr.contains(3)
  }

  /** A zero array of twice the length, with the original's last element at the end. */
  def makeLast(arr: Array[Int]): Array[Int] = {
    val res = Array.fill(arr.length * 2)(0)
    res(res.length - 1) = arr.last
    res
  }

  /** True if the array contains exactly two 2s or exactly two 3s. */
  def double23(arr: Array[Int]): Boolean = {
    arr.count(_ == 2) == 2 || arr.count(_ == 3) == 2
  }

  /**
   * Returns a copy where every 3 that immediately follows a 2 (in the
   * original array) is replaced by 0.
   *
   * Fixed: the previous digit-string implementation corrupted results for
   * multi-digit or negative elements (e.g. Array(12, 3) became Array(1, 2, 0)).
   */
  def fix23(arr: Array[Int]): Array[Int] = {
    arr.indices.map(i => if (i > 0 && arr(i - 1) == 2 && arr(i) == 3) 0 else arr(i)).toArray
  }

  /** Count (0, 1 or 2) of the input arrays whose first element is 1. */
  def start1(a: Array[Int], b: Array[Int]): Int = {
    Array(a.take(1), b.take(1)).flatten.count(_ == 1)
  }

  /** The array with the larger sum; `a` wins ties. */
  def biggerTwo(a: Array[Int], b: Array[Int]): Array[Int] = {
    if (b.sum > a.sum) b else a
  }

  /** The middle two elements of an even-length array. */
  def makeMiddle(nums: Array[Int]): Array[Int] = {
    Array(nums((nums.length / 2) - 1), nums(nums.length / 2))
  }

  /** Concatenation of the two arrays. */
  def plusTwo(a: Array[Int], b: Array[Int]): Array[Int] = {
    a ++ b
  }

  /**
   * Swaps the first and last elements IN PLACE and returns the same array.
   * Arrays with fewer than two elements are returned unchanged.
   */
  def swapEnds(arr: Array[Int]): Array[Int] = {
    if (arr.length < 2) arr else {
      val first = arr(0); val last = arr(arr.length - 1)
      arr(0) = last; arr(arr.length - 1) = first
      arr
    }
  }

  /** The three elements centered on the middle of an odd-length array. */
  def midThree(arr: Array[Int]): Array[Int] = {
    val mid = arr.length / 2
    Array(arr(mid - 1), arr(mid), arr(mid + 1))
  }

  /**
   * Maximum of the first, middle and last element.
   * (The previous version carried a discarded `if (arr.length == 1) arr(0)`
   * statement; for a 1-element array all three indices are 0, so the max
   * expression alone suffices.)
   */
  def maxTriple(arr: Array[Int]): Int = {
    Array(arr(0), arr(arr.length / 2), arr(arr.length - 1)).max
  }

  /** The first two elements (fewer if the array is shorter). */
  def frontPiece(arr: Array[Int]): Array[Int] = {
    if (arr.length < 2) arr else Array(arr(0), arr(1))
  }

  /**
   * True if a 1 directly followed by a 3 appears within the first two or the
   * last two positions of the array.
   *
   * Fixed: the previous string-pairing version unconditionally inspected the
   * second adjacent pair and therefore threw NoSuchElementException on
   * length-2 arrays.
   */
  def unlucky1(arr: Array[Int]): Boolean = {
    // True when a valid (1, 3) pair starts at index i.
    def pairAt(i: Int): Boolean =
      i >= 0 && i + 1 < arr.length && arr(i) == 1 && arr(i + 1) == 3
    pairAt(0) || pairAt(1) || pairAt(arr.length - 2)
  }

  /** The first two elements of `a` followed by `b`, truncated to length 2. */
  def make2(a: Array[Int], b: Array[Int]): Array[Int] = {
    (a ++ b).take(2)
  }

  /**
   * The first element of `a` followed by the first element of `b`, omitting
   * whichever array is empty.
   *
   * Fixed: the previous digit-string version corrupted multi-digit elements
   * (e.g. front11(Array(12), Array(3)) returned Array(1, 2, 3)).
   */
  def front11(a: Array[Int], b: Array[Int]): Array[Int] = {
    a.take(1) ++ b.take(1)
  }
}
| codingkapoor/scala-coding-bat | src/main/scala/com/codingkapoor/codingbat/ArrayI.scala | Scala | mit | 3,345 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.utils.stats
import org.opengis.feature.simple.SimpleFeature
/**
 * Counts features
 */
class CountStat() extends Stat {

  override type S = CountStat

  // Running feature count; package-visible for serialization/testing.
  private [stats] var counter: Long = 0L

  def count: Long = counter

  override def observe(sf: SimpleFeature): Unit = counter += 1

  override def unobserve(sf: SimpleFeature): Unit = counter -= 1

  // Combination is additive; both operands are left untouched.
  override def +(other: CountStat): CountStat = {
    val plus = new CountStat()
    plus.counter = this.counter + other.counter
    plus
  }

  override def +=(other: CountStat): Unit = counter += other.counter

  override def toJsonObject = Map("count" -> counter)

  override def isEmpty: Boolean = counter == 0

  override def clear(): Unit = counter = 0

  override def isEquivalent(other: Stat): Boolean = other match {
    case that: CountStat => counter == that.counter
    case _ => false
  }
}
| MutahirKazmi/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/stats/CountStat.scala | Scala | apache-2.0 | 1,355 |
package com.geeksville.ftdi
import net.java.dev.sna.SNA
import com.sun.jna.ptr._
import com.sun.jna._
import java.nio.ByteBuffer
import scala.reflect.Manifest
/** Raised for libftdi error conditions. */
class FtdiException(msg: String) extends Exception(msg)

/**
 * Native glue for the libftdi library
 */
object LibFtdi {
  // Type aliases mirroring the C API's parameter types.
  type ENUM = Int
  type INT = Int
  type BYTE = Byte
  type UINT = Int
  type ftdi_context = Pointer

  // SIO_* constants from the FTDI vendor protocol: flow-control selectors and
  // DTR/RTS modem-line control values (mask in the high byte, state in the low).
  val SIO_DISABLE_FLOW_CTRL = 0x0
  val SIO_RTS_CTS_HS = (0x1 << 8)
  val SIO_DTR_DSR_HS = (0x2 << 8)
  val SIO_XON_XOFF_HS = (0x4 << 8)

  val SIO_SET_DTR_MASK = 0x1
  val SIO_SET_DTR_HIGH = (1 | (SIO_SET_DTR_MASK << 8))
  val SIO_SET_DTR_LOW = (0 | (SIO_SET_DTR_MASK << 8))
  val SIO_SET_RTS_MASK = 0x2
  val SIO_SET_RTS_HIGH = (2 | (SIO_SET_RTS_MASK << 8))
  val SIO_SET_RTS_LOW = (0 | (SIO_SET_RTS_MASK << 8))

  // The shared library name differs per platform ("libftdi" on Windows, "ftdi" elsewhere).
  val snaLibrary = if (!Platform.isWindows) "ftdi" else "libftdi"
  Native.register(snaLibrary)

  // Direct JNA bindings to the libftdi C API; names and signatures mirror the
  // native functions one-to-one.
  @native
  def ftdi_new(): ftdi_context;

  @native
  def ftdi_free(ftdi: ftdi_context): Unit

  @native
  def ftdi_usb_open(ftdi: ftdi_context, vendor: INT, product: INT): INT

  @native
  def ftdi_usb_open_desc(ftdi: ftdi_context, vendor: INT, product: INT, desc: String, serial: String): INT

  @native
  def ftdi_usb_close(ftdi: ftdi_context): INT

  @native
  def ftdi_setflowctrl(ftdi: ftdi_context, ctrl: INT): INT

  @native
  def ftdi_setdtr(ftdi: ftdi_context, ctrl: INT): INT

  @native
  def ftdi_setrts(ftdi: ftdi_context, ctrl: INT): INT

  @native
  def ftdi_read_data(ftdi: ftdi_context, buf: ByteBuffer, size: INT): INT

  @native
  def ftdi_write_data(ftdi: ftdi_context, buf: Array[Byte], size: INT): INT

  @native
  def ftdi_set_latency_timer(ftdi: ftdi_context, latency: BYTE): INT

  @native
  def ftdi_read_data_set_chunksize(ftdi: ftdi_context, size: UINT): INT

  @native
  def ftdi_write_data_set_chunksize(ftdi: ftdi_context, size: UINT): INT

  @native
  def ftdi_set_baudrate(ftdi: ftdi_context, baud: INT): INT

  @native
  def ftdi_usb_purge_buffers(ftdi: ftdi_context): INT

  @native
  def ftdi_get_error_string(ftdi: ftdi_context): String

  /** Opens the FTDI device with the given USB vendor/product id (serial optional). */
  def open(vendor: Int, product: Int, serial: String = null) = {
    new FtdiDevice(vendor, product, serial)
  }

  /// For testing
  def main(args: Array[String]) {
    println("Starting")

    val context = open(0x0403, 0x6001)

    context.close()
  }
}
| geeksville/arduleader | posixpilot/src/main/scala/com/geeksville/ftdi/LibFtdi.scala | Scala | gpl-3.0 | 2,319 |
package com.gilt.gfc.aws.kinesis.akka
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration
import com.gilt.gfc.aws.kinesis.client.{KCLConfiguration, KCLWorkerRunner, KinesisRecordReader}
/**
 * Consumes a Kinesis stream (or a DynamoDB stream through the Kinesis
 * adapter) by running a KCL worker that forwards each record to `handler`.
 */
class KinesisStreamConsumer[T](
  streamConfig: KinesisStreamConsumerConfig[T],
  handler: KinesisStreamHandler[T]
) (
  implicit private val evReader: KinesisRecordReader[T]
) {

  // Records per batch: explicit configuration wins; when the DynamoDB
  // adapter client is present the fallback is 1000, otherwise the KCL
  // library default applies.
  private val maxRecords: Int = streamConfig.maxRecordsPerBatch.orElse(
    streamConfig.dynamoDBKinesisAdapterClient.map(_ => 1000)
  ).getOrElse(KinesisClientLibConfiguration.DEFAULT_MAX_RECORDS)

  // KCL configuration assembled from the stream-level settings above.
  private val kclConfig = KCLConfiguration(
    streamConfig.applicationName,
    streamConfig.streamName,
    streamConfig.kinesisCredentialsProvider,
    streamConfig.dynamoCredentialsProvider,
    streamConfig.cloudWatchCredentialsProvider,
    streamConfig.regionName,
    streamConfig.initialPositionInStream,
    streamConfig.kinesisClientEndpoints,
    streamConfig.failoverTimeoutMillis,
    maxRecords,
    streamConfig.idleTimeBetweenReads
  )

  // Builds a worker wired to the handler's lifecycle callbacks and the
  // configured checkpoint/retry policy. NOTE(review): each call creates a
  // fresh worker — confirm run() is intended to be invoked at most once.
  private def createWorker = KCLWorkerRunner(
    kclConfig,
    dynamoDBKinesisAdapter = streamConfig.dynamoDBKinesisAdapterClient,
    metricsFactory = Some(streamConfig.metricsFactory),
    checkpointInterval = streamConfig.checkPointInterval,
    initialize = handler.onInit,
    shutdown = handler.onShutdown,
    initialRetryDelay = streamConfig.retryConfig.initialDelay,
    maxRetryDelay = streamConfig.retryConfig.retryDelay,
    numRetries = streamConfig.retryConfig.maxRetries
  )

  /***
   * Creates the worker and runs it, dispatching each record to the handler.
   */
  def run() = {
    val worker = createWorker
    worker.runSingleRecordProcessor(handler.onRecord)
  }
}
| gilt/gfc-aws-kinesis | akka/src/main/scala/com/gilt/gfc/aws/kinesis/akka/KinesisStreamConsumer.scala | Scala | apache-2.0 | 1,714 |
package dk.gp.hgpc
import breeze.linalg.DenseMatrix
import scala.util.Random
import dk.bayes.math.accuracy.loglik
import dk.bayes.math.accuracy.binaryAcc
import breeze.linalg.DenseVector
import breeze.numerics._
import breeze.stats._
import dk.gp.hgpc.util.calcHGPCLoglik
import dk.gp.hgpc.util.calcHGPCAcc
object accuracy {

  /**
   * Builds a multi-line accuracy report for a hierarchical GP classification
   * model: metrics over the full data set, leave-one-out CV metrics, and
   * metrics on a seeded random ~70/30 train/test split.
   */
  def apply(model: HgpcModel): String = {

    val (allLoglik, allAvgLoglik, allAcc,allTpr,allTnr) = calcLoglikAndAcc(model.x, model.y, model)

    val looCVLoglik = calcHGPCLoglik(model)
    val (looAcc,looTpr,looTnr) = calcHGPCAcc(model)

    // Labels are appended as the last column so rows can be split together.
    val (trainingSet, testSet) = split(DenseMatrix.horzcat(model.x, model.y.toDenseMatrix.t))

    val trainingSetX = trainingSet(::, (0 until trainingSet.cols - 1))
    val trainingSetY = trainingSet(::, trainingSet.cols - 1)
    val trainingSetModel = model.copy(x = trainingSetX, y = trainingSetY)
    val (trainLoglik, trainAvgLoglik, trainAcc,trainTpr,trainTnr) = calcLoglikAndAcc(trainingSetX, trainingSetY, trainingSetModel)

    val testSetX = testSet(::, (0 until testSet.cols - 1))
    val testSetY = testSet(::, testSet.cols - 1)
    // Test metrics are computed with the model fitted on the training rows.
    val (testLoglik, testAvgLoglik, testAcc,testTpr,testTnr) = calcLoglikAndAcc(testSetX, testSetY, trainingSetModel)

    val allReport = "All: n=%2d, loglik=%.2f, avgLoglik=%.2f, acc=%.3f, tpr=%.3f, tnr=%.3f".format(model.y.size, allLoglik, allAvgLoglik, allAcc,allTpr,allTnr)
    val looCVReport = "LooCV: loglik=%.2f, avgLoglik=%.2f,acc=%.3f, tpr=%.3f, tnr=%.3f".format(looCVLoglik, looCVLoglik / model.y.size, looAcc,looTpr,looTnr)
    val trainReport = "Train: n=%2d, loglik=%.2f, avgLoglik=%.2f, acc=%.3f, tpr=%.3f, tnr=%.3f".format(trainingSetY.size, trainLoglik, trainAvgLoglik, trainAcc,trainTpr,trainTnr)
    val testReport = "Test: n=%2d, loglik=%.2f, avgLoglik=%.2f, acc=%.3f, tpr=%.3f, tnr=%.3f".format(testSetY.size, testLoglik, testAvgLoglik, testAcc,testTpr,testTnr)

    allReport + "\n" + looCVReport + "\n" + trainReport + "\n" + testReport
  }

  /**
   * @return (loglik,avgLoglik,acc,tpr,tnr)
   */
  private def calcLoglikAndAcc(x: DenseMatrix[Double], y: DenseVector[Double], model: HgpcModel): Tuple5[Double, Double, Double, Double, Double] = {
    val predicted = hgpcPredict(x, model)
    val modelLoglik = loglik(predicted, y)
    val avgLoglik = modelLoglik / y.size
    val acc = binaryAcc(predicted, y)
    // TPR/TNR: accuracy restricted to the positive (y==1) and negative (y==0) rows.
    val tpr = binaryAcc(predicted(y :== 1d).toDenseVector, y(y :== 1d).toDenseVector)
    val tnr = binaryAcc(predicted(y :== 0d).toDenseVector, y(y :== 0d).toDenseVector)
    (modelLoglik, avgLoglik, acc, tpr, tnr)
  }

  // Seeded (hence reproducible) random ~70/30 row split of the data matrix.
  private def split(data: DenseMatrix[Double]): (DenseMatrix[Double], DenseMatrix[Double]) = {
    val random = new Random(54354)
    val (trainingSetIdx, testSetIdx) = (0 until data.rows).partition { x => random.nextDouble() < 0.7 }
    val trainingSet = data(trainingSetIdx, ::).toDenseMatrix
    val testSet = data(testSetIdx, ::).toDenseMatrix
    (trainingSet, testSet)
  }
} | danielkorzekwa/bayes-scala-gp | src/main/scala/dk/gp/hgpc/accuracy.scala | Scala | bsd-2-clause | 2,958 |
/*
* Copyright 2016 Groupon, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.groupon.sparklint.events
import java.io.{File, FileNotFoundException}
import org.scalatest.{FlatSpec, Matchers}
/**
 * Created by Roboxue on 2017/4/2.
 */
class FolderEventSourceGroupManagerTest extends FlatSpec with Matchers {

  it should "get name correctly" in {
    // The manager is expected to take its name from the wrapped folder.
    val folder = new File(getClass.getClassLoader.getResource("directory_source").getFile)
    val esgm = new FolderEventSourceGroupManager(folder)
    esgm.name shouldBe "directory_source"
  }

  it should "throw up if directory is a file" in {
    // Constructing against a regular file (not a directory) must fail fast.
    intercept[FileNotFoundException] {
      val folder = new File(getClass.getClassLoader.getResource("spark_event_log_example").getFile)
      new FolderEventSourceGroupManager(folder)
    }
  }
}
| groupon/sparklint | src/test/scala/com/groupon/sparklint/events/FolderEventSourceGroupManagerTest.scala | Scala | apache-2.0 | 1,326 |
package com.digdeep.util
package concurrent
import java.util.concurrent.atomic.AtomicBoolean
/**
 * Runs a block of code at most once per thread, until `reset()` is invoked on
 * that same thread.
 *
 * Each thread owns an independent flag, so "once" is tracked per thread.
 */
class ThreadOnce {

  // Per-thread "already invoked" flag, created lazily on first access.
  private val invoked = new ThreadLocal[AtomicBoolean]() {
    override def initialValue(): AtomicBoolean = new AtomicBoolean()
  }

  /** Executes `callable` only if nothing has run on the current thread since the last reset. */
  def callOnce(callable: => Unit): Unit = {
    val alreadyRan = invoked.get().getAndSet(true)
    if (!alreadyRan) {
      callable
    }
  }

  /** Re-arms the current thread so its next `callOnce` runs again. */
  def reset() = invoked.get().set(false)
}
| jramos/kinesis-redshift-sink | src/main/scala/com.digdeep.util/concurrent/ThreadOnce.scala | Scala | apache-2.0 | 451 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.parser
import java.sql.{Date, Timestamp}
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import org.antlr.v4.runtime.{ParserRuleContext, Token}
import org.antlr.v4.runtime.tree.{ParseTree, RuleNode, TerminalNode}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier}
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.CalendarInterval
import org.apache.spark.util.random.RandomSampler
/**
* The AstBuilder converts an ANTLR4 ParseTree into a catalyst Expression, LogicalPlan or
* TableIdentifier.
*/
class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with Logging {
import ParserUtils._
/** Visit `ctx` with this visitor and cast the result to the expected type `T`. */
protected def typedVisit[T](ctx: ParseTree): T = {
  ctx.accept(this).asInstanceOf[T]
}
/**
* Override the default behavior for all visit methods. This will only return a non-null result
* when the context has only one child. This is done because there is no generic method to
* combine the results of the context children. In all other cases null is returned.
*/
override def visitChildren(node: RuleNode): AnyRef = {
  if (node.getChildCount == 1) {
    // Single child: pass straight through so wrapper grammar rules are transparent.
    node.getChild(0).accept(this)
  } else {
    // Multiple children cannot be combined generically; specific visit methods must handle them.
    null
  }
}
/** Entry point: parse a complete SQL statement into a [[LogicalPlan]]. */
override def visitSingleStatement(ctx: SingleStatementContext): LogicalPlan = withOrigin(ctx) {
  typedVisit[LogicalPlan](ctx.statement)
}
/** Entry point: parse a standalone (possibly aliased) expression. */
override def visitSingleExpression(ctx: SingleExpressionContext): Expression = withOrigin(ctx) {
  visitNamedExpression(ctx.namedExpression)
}
/** Entry point: parse a standalone table identifier. */
override def visitSingleTableIdentifier(
    ctx: SingleTableIdentifierContext): TableIdentifier = withOrigin(ctx) {
  visitTableIdentifier(ctx.tableIdentifier)
}
/** Entry point: parse a standalone data type definition. */
override def visitSingleDataType(ctx: SingleDataTypeContext): DataType = withOrigin(ctx) {
  typedVisit[DataType](ctx.dataType)
}
/* ********************************************************************************************
* Plan parsing
* ******************************************************************************************** */
protected def plan(tree: ParserRuleContext): LogicalPlan = typedVisit(tree)
/**
* Create a top-level plan with Common Table Expressions.
*/
override def visitQuery(ctx: QueryContext): LogicalPlan = withOrigin(ctx) {
  val query = plan(ctx.queryNoWith)
  // Apply CTEs; `optional` returns `query` unchanged when there is no WITH clause.
  query.optional(ctx.ctes) {
    val ctes = ctx.ctes.namedQuery.asScala.map { nCtx =>
      val namedQuery = visitNamedQuery(nCtx)
      (namedQuery.alias, namedQuery)
    }
    // Check for duplicate CTE names before collapsing the pairs into a map.
    checkDuplicateKeys(ctes, ctx)
    With(query, ctes.toMap)
  }
}
/**
* Create a named logical plan.
*
* This is only used for Common Table Expressions.
*/
// A named query (CTE entry) becomes a SubqueryAlias keyed by its declared name.
override def visitNamedQuery(ctx: NamedQueryContext): SubqueryAlias = withOrigin(ctx) {
  SubqueryAlias(ctx.name.getText, plan(ctx.queryNoWith))
}
/**
* Create a logical plan which allows for multiple inserts using one 'from' statement. These
* queries have the following SQL form:
* {{{
* [WITH cte...]?
* FROM src
* [INSERT INTO tbl1 SELECT *]+
* }}}
* For example:
* {{{
* FROM db.tbl1 A
* INSERT INTO dbo.tbl1 SELECT * WHERE A.value = 10 LIMIT 5
* INSERT INTO dbo.tbl2 SELECT * WHERE A.value = 12
* }}}
* This (Hive) feature cannot be combined with set-operators.
*/
override def visitMultiInsertQuery(ctx: MultiInsertQueryContext): LogicalPlan = withOrigin(ctx) {
  // The shared FROM clause is the source relation for every INSERT branch.
  val from = visitFromClause(ctx.fromClause)
  // Build the insert clauses.
  val inserts = ctx.multiInsertQueryBody.asScala.map {
    body =>
      validate(body.querySpecification.fromClause == null,
        "Multi-Insert queries cannot have a FROM clause in their individual SELECT statements",
        body)
      withQuerySpecification(body.querySpecification, from).
        // Add organization statements.
        optionalMap(body.queryOrganization)(withQueryResultClauses).
        // Add insert.
        optionalMap(body.insertInto())(withInsertInto)
  }
  // If there are multiple INSERTS just UNION them together into one query.
  inserts match {
    case Seq(query) => query
    case queries => Union(queries)
  }
}
/**
* Create a logical plan for a regular (single-insert) query.
*/
override def visitSingleInsertQuery(
    ctx: SingleInsertQueryContext): LogicalPlan = withOrigin(ctx) {
  // Wrap the core query term with optional ORDER/SORT/... clauses and an optional INSERT.
  plan(ctx.queryTerm).
    // Add organization statements.
    optionalMap(ctx.queryOrganization)(withQueryResultClauses).
    // Add insert.
    optionalMap(ctx.insertInto())(withInsertInto)
}
/**
* Add an INSERT INTO [TABLE]/INSERT OVERWRITE TABLE operation to the logical plan.
*/
private def withInsertInto(
    ctx: InsertIntoContext,
    query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
  val tableIdent = visitTableIdentifier(ctx.tableIdentifier)
  val partitionKeys = Option(ctx.partitionSpec).map(visitPartitionSpec).getOrElse(Map.empty)
  // A partition key without a value (None) is a dynamic partition column.
  val dynamicPartitionKeys = partitionKeys.filter(_._2.isEmpty)
  if (ctx.EXISTS != null && dynamicPartitionKeys.nonEmpty) {
    throw new ParseException(s"Dynamic partitions do not support IF NOT EXISTS. Specified " +
      "partitions with value: " + dynamicPartitionKeys.keys.mkString("[", ",", "]"), ctx)
  }
  InsertIntoTable(
    UnresolvedRelation(tableIdent, None),
    partitionKeys,
    query,
    ctx.OVERWRITE != null,
    ctx.EXISTS != null)
}
/**
* Create a partition specification map.
*/
override def visitPartitionSpec(
    ctx: PartitionSpecContext): Map[String, Option[String]] = withOrigin(ctx) {
  val parts = ctx.partitionVal.asScala.map { pVal =>
    // Partition column names are normalized to lower case; a missing constant means
    // the column is a dynamic partition (value None).
    val name = pVal.identifier.getText.toLowerCase
    val value = Option(pVal.constant).map(visitStringConstant)
    name -> value
  }
  // Check for duplicate partition columns in one spec.
  checkDuplicateKeys(parts, ctx)
  parts.toMap
}
/**
* Create a partition specification map without optional values.
*/
protected def visitNonOptionalPartitionSpec(
    ctx: PartitionSpecContext): Map[String, String] = withOrigin(ctx) {
  // Unwrap the Option values (dynamic partitions become null); the trailing
  // map(identity) materializes the lazy mapValues view into a real Map.
  visitPartitionSpec(ctx).mapValues(_.orNull).map(identity)
}
/**
* Convert a constant of any type into a string. This is typically used in DDL commands, and its
* main purpose is to prevent slight differences due to back to back conversions i.e.:
* String -> Literal -> String.
*/
protected def visitStringConstant(ctx: ConstantContext): String = withOrigin(ctx) {
  ctx match {
    // String literals are unquoted/unescaped; all other constants use their raw text.
    case s: StringLiteralContext => createString(s)
    case o => o.getText
  }
}
/**
* Add ORDER BY/SORT BY/CLUSTER BY/DISTRIBUTE BY/LIMIT/WINDOWS clauses to the logical plan. These
* clauses determine the shape (ordering/partitioning/rows) of the query result.
*/
private def withQueryResultClauses(
    ctx: QueryOrganizationContext,
    query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
  import ctx._
  // Handle ORDER BY, SORT BY, DISTRIBUTE BY, and CLUSTER BY clause.
  // Exactly one of the combinations below is allowed; anything else is rejected.
  val withOrder = if (
    !order.isEmpty && sort.isEmpty && distributeBy.isEmpty && clusterBy.isEmpty) {
    // ORDER BY ... (global sort)
    Sort(order.asScala.map(visitSortItem), global = true, query)
  } else if (order.isEmpty && !sort.isEmpty && distributeBy.isEmpty && clusterBy.isEmpty) {
    // SORT BY ... (per-partition sort only)
    Sort(sort.asScala.map(visitSortItem), global = false, query)
  } else if (order.isEmpty && sort.isEmpty && !distributeBy.isEmpty && clusterBy.isEmpty) {
    // DISTRIBUTE BY ...
    RepartitionByExpression(expressionList(distributeBy), query)
  } else if (order.isEmpty && !sort.isEmpty && !distributeBy.isEmpty && clusterBy.isEmpty) {
    // SORT BY ... DISTRIBUTE BY ...
    Sort(
      sort.asScala.map(visitSortItem),
      global = false,
      RepartitionByExpression(expressionList(distributeBy), query))
  } else if (order.isEmpty && sort.isEmpty && distributeBy.isEmpty && !clusterBy.isEmpty) {
    // CLUSTER BY ... (shorthand for DISTRIBUTE BY e SORT BY e ASC)
    val expressions = expressionList(clusterBy)
    Sort(
      expressions.map(SortOrder(_, Ascending)),
      global = false,
      RepartitionByExpression(expressions, query))
  } else if (order.isEmpty && sort.isEmpty && distributeBy.isEmpty && clusterBy.isEmpty) {
    // [EMPTY]
    query
  } else {
    throw new ParseException(
      "Combination of ORDER BY/SORT BY/DISTRIBUTE BY/CLUSTER BY is not supported", ctx)
  }
  // WINDOWS
  val withWindow = withOrder.optionalMap(windows)(withWindows)
  // LIMIT is applied last, on top of ordering and window definitions.
  withWindow.optional(limit) {
    Limit(typedVisit(limit), withWindow)
  }
}
/**
* Create a logical plan using a query specification.
*/
override def visitQuerySpecification(
    ctx: QuerySpecificationContext): LogicalPlan = withOrigin(ctx) {
  // A SELECT without a FROM clause reads from the single-row OneRowRelation.
  val from = OneRowRelation.optional(ctx.fromClause) {
    visitFromClause(ctx.fromClause)
  }
  withQuerySpecification(ctx, from)
}
/**
* Add a query specification to a logical plan. The query specification is the core of the logical
* plan, this is where sourcing (FROM clause), transforming (SELECT TRANSFORM/MAP/REDUCE),
* projection (SELECT), aggregation (GROUP BY ... HAVING ...) and filtering (WHERE) takes place.
*
* Note that query hints are ignored (both by the parser and the builder).
*/
private def withQuerySpecification(
    ctx: QuerySpecificationContext,
    relation: LogicalPlan): LogicalPlan = withOrigin(ctx) {
  import ctx._
  // WHERE: wraps a plan in a Filter built from the boolean expression.
  def filter(ctx: BooleanExpressionContext, plan: LogicalPlan): LogicalPlan = {
    Filter(expression(ctx), plan)
  }
  // Expressions in the select list (empty for `SELECT *`-less transforms).
  val expressions = Option(namedExpressionSeq).toSeq
    .flatMap(_.namedExpression.asScala)
    .map(typedVisit[Expression])
  // Create either a transform or a regular query; absence of a kind token means SELECT.
  val specType = Option(kind).map(_.getType).getOrElse(SqlBaseParser.SELECT)
  specType match {
    case SqlBaseParser.MAP | SqlBaseParser.REDUCE | SqlBaseParser.TRANSFORM =>
      // Transform
      // Add where.
      val withFilter = relation.optionalMap(where)(filter)
      // Create the output attributes: typed columns, untyped (string) columns, or the
      // schema-less default of a key/value string pair.
      val (attributes, schemaLess) = if (colTypeList != null) {
        // Typed return columns.
        (createStructType(colTypeList).toAttributes, false)
      } else if (identifierSeq != null) {
        // Untyped return columns.
        val attrs = visitIdentifierSeq(identifierSeq).map { name =>
          AttributeReference(name, StringType, nullable = true)()
        }
        (attrs, false)
      } else {
        (Seq(AttributeReference("key", StringType)(),
          AttributeReference("value", StringType)()), true)
      }
      // Create the transform.
      ScriptTransformation(
        expressions,
        string(script),
        attributes,
        withFilter,
        withScriptIOSchema(
          ctx, inRowFormat, recordWriter, outRowFormat, recordReader, schemaLess))
    case SqlBaseParser.SELECT =>
      // Regular select
      // Add lateral views.
      val withLateralView = ctx.lateralView.asScala.foldLeft(relation)(withGenerate)
      // Add where.
      val withFilter = withLateralView.optionalMap(where)(filter)
      // Add aggregation or a project; bare expressions get an UnresolvedAlias.
      val namedExpressions = expressions.map {
        case e: NamedExpression => e
        case e: Expression => UnresolvedAlias(e)
      }
      val withProject = if (aggregation != null) {
        withAggregation(aggregation, namedExpressions, withFilter)
      } else if (namedExpressions.nonEmpty) {
        Project(namedExpressions, withFilter)
      } else {
        withFilter
      }
      // Having
      val withHaving = withProject.optional(having) {
        // Note that we add a cast to non-predicate expressions. If the expression itself is
        // already boolean, the optimizer will get rid of the unnecessary cast.
        val predicate = expression(having) match {
          case p: Predicate => p
          case e => Cast(e, BooleanType)
        }
        Filter(predicate, withProject)
      }
      // Distinct
      val withDistinct = if (setQuantifier() != null && setQuantifier().DISTINCT() != null) {
        Distinct(withHaving)
      } else {
        withHaving
      }
      // Window definitions are attached last, on top of the distinct/having result.
      withDistinct.optionalMap(windows)(withWindows)
  }
}
/**
* Create a (Hive based) [[ScriptInputOutputSchema]].
*/
// Always throws here; NOTE(review): presumably overridden by a Hive-aware subclass that
// actually supports script transforms — confirm against the concrete builder in use.
protected def withScriptIOSchema(
    ctx: QuerySpecificationContext,
    inRowFormat: RowFormatContext,
    recordWriter: Token,
    outRowFormat: RowFormatContext,
    recordReader: Token,
    schemaLess: Boolean): ScriptInputOutputSchema = {
  throw new ParseException("Script Transform is not supported", ctx)
}
/**
* Create a logical plan for a given 'FROM' clause. Note that we support multiple (comma
* separated) relations here, these get converted into a single plan by condition-less inner join.
*/
override def visitFromClause(ctx: FromClauseContext): LogicalPlan = withOrigin(ctx) {
  // Comma-separated relations are folded into condition-less inner joins; for the first
  // relation `left` is null and optionalMap keeps `right` as-is.
  val from = ctx.relation.asScala.foldLeft(null: LogicalPlan) { (left, relation) =>
    val right = plan(relation.relationPrimary)
    val join = right.optionalMap(left)(Join(_, _, Inner, None))
    withJoinRelations(join, relation)
  }
  ctx.lateralView.asScala.foldLeft(from)(withGenerate)
}
/**
* Connect two queries by a Set operator.
*
* Supported Set operators are:
* - UNION [DISTINCT]
* - UNION ALL
* - EXCEPT [DISTINCT]
* - INTERSECT [DISTINCT]
*/
override def visitSetOperation(ctx: SetOperationContext): LogicalPlan = withOrigin(ctx) {
  val left = plan(ctx.left)
  val right = plan(ctx.right)
  // ALL keeps duplicates; the default (or DISTINCT) removes them.
  val all = Option(ctx.setQuantifier()).exists(_.ALL != null)
  ctx.operator.getType match {
    case SqlBaseParser.UNION if all =>
      Union(left, right)
    case SqlBaseParser.UNION =>
      // UNION [DISTINCT] is modeled as Distinct over Union.
      Distinct(Union(left, right))
    case SqlBaseParser.INTERSECT if all =>
      throw new ParseException("INTERSECT ALL is not supported.", ctx)
    case SqlBaseParser.INTERSECT =>
      Intersect(left, right)
    case SqlBaseParser.EXCEPT if all =>
      throw new ParseException("EXCEPT ALL is not supported.", ctx)
    case SqlBaseParser.EXCEPT =>
      Except(left, right)
  }
}
/**
* Add a [[WithWindowDefinition]] operator to a logical plan.
*/
private def withWindows(
    ctx: WindowsContext,
    query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
  // Collect all window specifications defined in the WINDOW clause.
  val baseWindowMap = ctx.namedWindow.asScala.map {
    wCtx =>
      (wCtx.identifier.getText, typedVisit[WindowSpec](wCtx.windowSpec))
  }.toMap
  // Handle cases like
  // window w1 as (partition by p_mfgr order by p_name
  //               range between 2 preceding and 2 following),
  //        w2 as w1
  // i.e. resolve one level of window references to their concrete definitions.
  val windowMapView = baseWindowMap.mapValues {
    case WindowSpecReference(name) =>
      baseWindowMap.get(name) match {
        case Some(spec: WindowSpecDefinition) =>
          spec
        case Some(ref) =>
          // A reference to another reference (chained aliases) is not allowed.
          throw new ParseException(s"Window reference '$name' is not a window specification", ctx)
        case None =>
          throw new ParseException(s"Cannot resolve window reference '$name'", ctx)
      }
    case spec: WindowSpecDefinition => spec
  }
  // Note that mapValues creates a view instead of materialized map. We force materialization by
  // mapping over identity.
  WithWindowDefinition(windowMapView.map(identity), query)
}
/**
* Add an [[Aggregate]] to a logical plan.
*/
private def withAggregation(
    ctx: AggregationContext,
    selectExpressions: Seq[NamedExpression],
    query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
  import ctx._
  val groupByExpressions = expressionList(groupingExpressions)
  if (GROUPING != null) {
    // GROUP BY .... GROUPING SETS (...)
    // Each grouping set is encoded as a bitmask over the GROUP BY expressions.
    val expressionMap = groupByExpressions.zipWithIndex.toMap
    val numExpressions = expressionMap.size
    val mask = (1 << numExpressions) - 1
    val masks = ctx.groupingSet.asScala.map {
      _.expression.asScala.foldLeft(mask) {
        case (bitmap, eCtx) =>
          // Find the index of the expression; it must appear in the GROUP BY list.
          val e = typedVisit[Expression](eCtx)
          val index = expressionMap.find(_._1.semanticEquals(e)).map(_._2).getOrElse(
            throw new ParseException(
              s"$e doesn't show up in the GROUP BY list", ctx))
          // 0 means that the column at the given index is a grouping column, 1 means it is not,
          // so we unset the bit in bitmap.
          bitmap & ~(1 << (numExpressions - 1 - index))
      }
    }
    GroupingSets(masks, groupByExpressions, query, selectExpressions)
  } else {
    // GROUP BY .... (WITH CUBE | WITH ROLLUP)?
    val mappedGroupByExpressions = if (CUBE != null) {
      Seq(Cube(groupByExpressions))
    } else if (ROLLUP != null) {
      Seq(Rollup(groupByExpressions))
    } else {
      groupByExpressions
    }
    Aggregate(mappedGroupByExpressions, selectExpressions, query)
  }
}
/**
* Add a [[Generate]] (Lateral View) to a logical plan.
*/
private def withGenerate(
    query: LogicalPlan,
    ctx: LateralViewContext): LogicalPlan = withOrigin(ctx) {
  val expressions = expressionList(ctx.expression)
  Generate(
    // The generator function stays unresolved until analysis.
    UnresolvedGenerator(visitFunctionName(ctx.qualifiedName), expressions),
    join = true,
    outer = ctx.OUTER != null,
    // The lateral view's table alias is normalized to lower case.
    Some(ctx.tblName.getText.toLowerCase),
    ctx.colName.asScala.map(_.getText).map(UnresolvedAttribute.apply),
    query)
}
/**
* Create a single relation referenced in a FROM claused. This method is used when a part of the
* join condition is nested, for example:
* {{{
* select * from t1 join (t2 cross join t3) on col1 = col2
* }}}
*/
// Parse a (possibly nested) relation and attach any trailing join clauses.
override def visitRelation(ctx: RelationContext): LogicalPlan = withOrigin(ctx) {
  withJoinRelations(plan(ctx.relationPrimary), ctx)
}
/**
* Join one more [[LogicalPlan]]s to the current logical plan.
*/
private def withJoinRelations(base: LogicalPlan, ctx: RelationContext): LogicalPlan = {
  // Left-fold the join clauses onto the base plan, so joins associate to the left.
  ctx.joinRelation.asScala.foldLeft(base) { (left, join) =>
    withOrigin(join) {
      val baseJoinType = join.joinType match {
        case null => Inner
        case jt if jt.FULL != null => FullOuter
        case jt if jt.SEMI != null => LeftSemi
        case jt if jt.ANTI != null => LeftAnti
        case jt if jt.LEFT != null => LeftOuter
        case jt if jt.RIGHT != null => RightOuter
        case _ => Inner
      }
      // Resolve the join type and join condition (USING, ON, NATURAL, or none).
      val (joinType, condition) = Option(join.joinCriteria) match {
        case Some(c) if c.USING != null =>
          val columns = c.identifier.asScala.map { column =>
            UnresolvedAttribute.quoted(column.getText)
          }
          (UsingJoin(baseJoinType, columns), None)
        case Some(c) if c.booleanExpression != null =>
          (baseJoinType, Option(expression(c.booleanExpression)))
        case None if join.NATURAL != null =>
          (NaturalJoin(baseJoinType), None)
        case None =>
          (baseJoinType, None)
      }
      Join(left, plan(join.right), joinType, condition)
    }
  }
}
/**
* Add a [[Sample]] to a logical plan.
*
* This currently supports the following sampling methods:
* - TABLESAMPLE(x ROWS): Sample the table down to the given number of rows.
* - TABLESAMPLE(x PERCENT): Sample the table down to the given percentage. Note that percentages
* are defined as a number between 0 and 100.
* - TABLESAMPLE(BUCKET x OUT OF y): Sample the table down to a 'x' divided by 'y' fraction.
*/
private def withSample(ctx: SampleContext, query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
  // Create a sampled plan if we need one.
  def sample(fraction: Double): Sample = {
    // The range of fraction accepted by Sample is [0, 1]. Because Hive's block sampling
    // function takes X PERCENT as the input and the range of X is [0, 100], we need to
    // adjust the fraction.
    val eps = RandomSampler.roundingEpsilon
    validate(fraction >= 0.0 - eps && fraction <= 1.0 + eps,
      s"Sampling fraction ($fraction) must be on interval [0, 1]",
      ctx)
    // NOTE(review): the seed is drawn from math.random, so repeated parses of the same
    // query sample differently — confirm whether deterministic seeding is desired here.
    Sample(0.0, fraction, withReplacement = false, (math.random * 1000).toInt, query)(true)
  }
  ctx.sampleType.getType match {
    case SqlBaseParser.ROWS =>
      // TABLESAMPLE(x ROWS) degenerates to a plain LIMIT.
      Limit(expression(ctx.expression), query)
    case SqlBaseParser.PERCENTLIT =>
      val fraction = ctx.percentage.getText.toDouble
      sample(fraction / 100.0d)
    case SqlBaseParser.BYTELENGTH_LITERAL =>
      throw new ParseException(
        "TABLESAMPLE(byteLengthLiteral) is not supported", ctx)
    case SqlBaseParser.BUCKET if ctx.ON != null =>
      if (ctx.identifier != null) {
        throw new ParseException(
          "TABLESAMPLE(BUCKET x OUT OF y ON colname) is not supported", ctx)
      } else {
        throw new ParseException(
          "TABLESAMPLE(BUCKET x OUT OF y ON function) is not supported", ctx)
      }
    case SqlBaseParser.BUCKET =>
      sample(ctx.numerator.getText.toDouble / ctx.denominator.getText.toDouble)
  }
}
/**
* Create a logical plan for a sub-query.
*/
/** Parse a parenthesized sub-query into its logical plan. */
override def visitSubquery(ctx: SubqueryContext): LogicalPlan = withOrigin(ctx) {
  typedVisit[LogicalPlan](ctx.queryNoWith)
}
/**
* Create an un-aliased table reference. This is typically used for top-level table references,
* for example:
* {{{
* INSERT INTO db.tbl2
* TABLE db.tbl1
* }}}
*/
// TABLE db.tbl form: an un-aliased, unresolved table reference.
override def visitTable(ctx: TableContext): LogicalPlan = withOrigin(ctx) {
  UnresolvedRelation(visitTableIdentifier(ctx.tableIdentifier), None)
}
/**
* Create an aliased table reference. This is typically used in FROM clauses.
*/
override def visitTableName(ctx: TableNameContext): LogicalPlan = withOrigin(ctx) {
  val table = UnresolvedRelation(
    visitTableIdentifier(ctx.tableIdentifier),
    // Optional table alias.
    Option(ctx.strictIdentifier).map(_.getText))
  // Optional TABLESAMPLE clause on top of the relation.
  table.optionalMap(ctx.sample)(withSample)
}
/**
* Create a table-valued function call with arguments, e.g. range(1000)
*/
/** Create a table-valued function call with arguments, e.g. range(1000). */
override def visitTableValuedFunction(ctx: TableValuedFunctionContext)
  : LogicalPlan = withOrigin(ctx) {
  UnresolvedTableValuedFunction(ctx.identifier.getText, ctx.expression.asScala.map(expression))
}
/**
* Create an inline table (a virtual table in Hive parlance).
*/
override def visitInlineTable(ctx: InlineTableContext): LogicalPlan = withOrigin(ctx) {
  // Get the backing expressions.
  val rows = ctx.expression.asScala.map { e =>
    expression(e) match {
      // inline table comes in two styles:
      // style 1: values (1), (2), (3)  -- multiple columns are supported
      // style 2: values 1, 2, 3        -- only a single column is supported here
      case CreateStruct(children) => children  // style 1
      case child => Seq(child)                 // style 2
    }
  }
  // Column aliases: explicit list, or generated col1..colN from the first row's width.
  // NOTE(review): rows.head assumes the grammar guarantees at least one VALUES row — confirm.
  val aliases = if (ctx.identifierList != null) {
    visitIdentifierList(ctx.identifierList)
  } else {
    Seq.tabulate(rows.head.size)(i => s"col${i + 1}")
  }
  val table = UnresolvedInlineTable(aliases, rows)
  table.optionalMap(ctx.identifier)(aliasPlan)
}
/**
* Create an alias (SubqueryAlias) for a join relation. This is practically the same as
* visitAliasedQuery and visitNamedExpression, ANTLR4 however requires us to use 3 different
* hooks.
*/
override def visitAliasedRelation(ctx: AliasedRelationContext): LogicalPlan = withOrigin(ctx) {
  // Sampling is applied before the alias so the alias names the sampled relation.
  plan(ctx.relation)
    .optionalMap(ctx.sample)(withSample)
    .optionalMap(ctx.strictIdentifier)(aliasPlan)
}
/**
* Create an alias (SubqueryAlias) for a sub-query. This is practically the same as
* visitAliasedRelation and visitNamedExpression, ANTLR4 however requires us to use 3 different
* hooks.
*/
override def visitAliasedQuery(ctx: AliasedQueryContext): LogicalPlan = withOrigin(ctx) {
  // Same ordering as visitAliasedRelation: sample first, then alias.
  plan(ctx.queryNoWith)
    .optionalMap(ctx.sample)(withSample)
    .optionalMap(ctx.strictIdentifier)(aliasPlan)
}
/**
* Create an alias (SubqueryAlias) for a LogicalPlan.
*/
/** Wrap `plan` in a [[SubqueryAlias]] named after the given parser context's text. */
private def aliasPlan(alias: ParserRuleContext, plan: LogicalPlan): LogicalPlan = {
  SubqueryAlias(alias.getText, plan)
}
/**
* Create a Sequence of Strings for a parenthesis enclosed alias list.
*/
// A parenthesized identifier list delegates to the plain identifier-sequence visitor.
override def visitIdentifierList(ctx: IdentifierListContext): Seq[String] = withOrigin(ctx) {
  visitIdentifierSeq(ctx.identifierSeq)
}
/**
* Create a Sequence of Strings for an identifier list.
*/
// Extract the raw text of each identifier in the sequence.
override def visitIdentifierSeq(ctx: IdentifierSeqContext): Seq[String] = withOrigin(ctx) {
  ctx.identifier.asScala.map(_.getText)
}
/* ********************************************************************************************
* Table Identifier parsing
* ******************************************************************************************** */
/**
* Create a [[TableIdentifier]] from a 'tableName' or 'databaseName'.'tableName' pattern.
*/
override def visitTableIdentifier(
    ctx: TableIdentifierContext): TableIdentifier = withOrigin(ctx) {
  // The database part is optional ('tableName' vs 'db'.'tableName').
  TableIdentifier(ctx.table.getText, Option(ctx.db).map(_.getText))
}
/* ********************************************************************************************
* Expression parsing
* ******************************************************************************************** */
/**
* Create an expression from the given context. This method just passes the context on to the
* visitor and only takes care of typing (We assume that the visitor returns an Expression here).
*/
protected def expression(ctx: ParserRuleContext): Expression = typedVisit(ctx)
/**
* Create sequence of expressions from the given sequence of contexts.
*/
/** Convert a Java list of expression contexts into a Scala Seq of [[Expression]]s. */
private def expressionList(trees: java.util.List[ExpressionContext]): Seq[Expression] = {
  trees.asScala.map(expression)
}
/**
* Create a star (i.e. all) expression; this selects all elements (in the specified object).
* Both un-targeted (global) and targeted aliases are supported.
*/
// '*' or 'tbl.*': the optional qualifier restricts the star to one relation.
override def visitStar(ctx: StarContext): Expression = withOrigin(ctx) {
  UnresolvedStar(Option(ctx.qualifiedName()).map(_.identifier.asScala.map(_.getText)))
}
/**
* Create an aliased expression if an alias is specified. Both single and multi-aliases are
* supported.
*/
override def visitNamedExpression(ctx: NamedExpressionContext): Expression = withOrigin(ctx) {
  val e = expression(ctx.expression)
  if (ctx.identifier != null) {
    // expr AS name
    Alias(e, ctx.identifier.getText)()
  } else if (ctx.identifierList != null) {
    // expr AS (name1, name2, ...) — multi-alias, e.g. for generator output.
    MultiAlias(e, visitIdentifierList(ctx.identifierList))
  } else {
    e
  }
}
/**
* Combine a number of boolean expressions into a balanced expression tree. These expressions are
* either combined by a logical [[And]] or a logical [[Or]].
*
* A balanced binary tree is created because regular left recursive trees cause considerable
* performance degradations and can cause stack overflows.
*/
override def visitLogicalBinary(ctx: LogicalBinaryContext): Expression = withOrigin(ctx) {
  val expressionType = ctx.operator.getType
  val expressionCombiner = expressionType match {
    case SqlBaseParser.AND => And.apply _
    case SqlBaseParser.OR => Or.apply _
  }
  // Collect all similar left hand contexts (a left-leaning chain of the same operator).
  val contexts = ArrayBuffer(ctx.right)
  var current = ctx.left
  def collectContexts: Boolean = current match {
    case lbc: LogicalBinaryContext if lbc.operator.getType == expressionType =>
      contexts += lbc.right
      current = lbc.left
      true
    case _ =>
      contexts += current
      false
  }
  while (collectContexts) {
    // No body - all updates take place in the collectContexts.
  }
  // Reverse the contexts to have them in the same sequence as in the SQL statement & turn them
  // into expressions.
  val expressions = contexts.reverse.map(expression)
  // Create a balanced tree (avoids stack overflows on very long AND/OR chains).
  def reduceToExpressionTree(low: Int, high: Int): Expression = high - low match {
    case 0 =>
      expressions(low)
    case 1 =>
      expressionCombiner(expressions(low), expressions(high))
    case x =>
      val mid = low + x / 2
      expressionCombiner(
        reduceToExpressionTree(low, mid),
        reduceToExpressionTree(mid + 1, high))
  }
  reduceToExpressionTree(0, expressions.size - 1)
}
/**
* Invert a boolean expression.
*/
/** Build the logical negation (NOT) of a boolean expression. */
override def visitLogicalNot(ctx: LogicalNotContext): Expression = withOrigin(ctx) {
  val child = expression(ctx.booleanExpression())
  Not(child)
}
/**
* Create a filtering correlated sub-query (EXISTS).
*/
// EXISTS (subquery) predicate.
// NOTE(review): unlike sibling visit methods this one is not wrapped in withOrigin —
// confirm whether position tracking was intentionally omitted here.
override def visitExists(ctx: ExistsContext): Expression = {
  Exists(plan(ctx.query))
}
/**
* Create a comparison expression. This compares two expressions. The following comparison
* operators are supported:
* - Equal: '=' or '=='
* - Null-safe Equal: '<=>'
* - Not Equal: '<>' or '!='
* - Less than: '<'
* - Less then or Equal: '<='
* - Greater than: '>'
* - Greater then or Equal: '>='
*/
override def visitComparison(ctx: ComparisonContext): Expression = withOrigin(ctx) {
  val left = expression(ctx.left)
  val right = expression(ctx.right)
  // The operator is the single terminal child of the comparisonOperator rule.
  val operator = ctx.comparisonOperator().getChild(0).asInstanceOf[TerminalNode]
  operator.getSymbol.getType match {
    case SqlBaseParser.EQ =>
      EqualTo(left, right)
    case SqlBaseParser.NSEQ =>
      EqualNullSafe(left, right)
    case SqlBaseParser.NEQ | SqlBaseParser.NEQJ =>
      // Both '<>' and '!=' map to NOT(=).
      Not(EqualTo(left, right))
    case SqlBaseParser.LT =>
      LessThan(left, right)
    case SqlBaseParser.LTE =>
      LessThanOrEqual(left, right)
    case SqlBaseParser.GT =>
      GreaterThan(left, right)
    case SqlBaseParser.GTE =>
      GreaterThanOrEqual(left, right)
  }
}
/**
* Create a predicated expression. A predicated expression is a normal expression with a
* predicate attached to it, for example:
* {{{
* a + 1 IS NULL
* }}}
*/
override def visitPredicated(ctx: PredicatedContext): Expression = withOrigin(ctx) {
  val e = expression(ctx.valueExpression)
  if (ctx.predicate != null) {
    // A trailing predicate (IS NULL, BETWEEN, IN, ...) wraps the value expression.
    withPredicate(e, ctx.predicate)
  } else {
    e
  }
}
/**
* Add a predicate to the given expression. Supported expressions are:
* - (NOT) BETWEEN
* - (NOT) IN
* - (NOT) LIKE
* - (NOT) RLIKE
* - IS (NOT) NULL.
*/
private def withPredicate(e: Expression, ctx: PredicateContext): Expression = withOrigin(ctx) {
  // Invert a predicate if it has a valid NOT clause.
  def invertIfNotDefined(e: Expression): Expression = ctx.NOT match {
    case null => e
    case not => Not(e)
  }
  // Create the predicate.
  ctx.kind.getType match {
    case SqlBaseParser.BETWEEN =>
      // BETWEEN is translated to lower <= e && e <= upper
      invertIfNotDefined(And(
        GreaterThanOrEqual(e, expression(ctx.lower)),
        LessThanOrEqual(e, expression(ctx.upper))))
    case SqlBaseParser.IN if ctx.query != null =>
      // IN (subquery) wraps the plan in a ListQuery expression.
      invertIfNotDefined(In(e, Seq(ListQuery(plan(ctx.query)))))
    case SqlBaseParser.IN =>
      invertIfNotDefined(In(e, ctx.expression.asScala.map(expression)))
    case SqlBaseParser.LIKE =>
      invertIfNotDefined(Like(e, expression(ctx.pattern)))
    case SqlBaseParser.RLIKE =>
      invertIfNotDefined(RLike(e, expression(ctx.pattern)))
    case SqlBaseParser.NULL if ctx.NOT != null =>
      // IS NOT NULL maps directly rather than via Not(IsNull(...)).
      IsNotNull(e)
    case SqlBaseParser.NULL =>
      IsNull(e)
  }
}
/**
* Create a binary arithmetic expression. The following arithmetic operators are supported:
* - Multiplication: '*'
* - Division: '/'
* - Hive Long Division: 'DIV'
* - Modulo: '%'
* - Addition: '+'
* - Subtraction: '-'
* - Binary AND: '&'
* - Binary XOR
* - Binary OR: '|'
*/
override def visitArithmeticBinary(ctx: ArithmeticBinaryContext): Expression = withOrigin(ctx) {
  val left = expression(ctx.left)
  val right = expression(ctx.right)
  ctx.operator.getType match {
    case SqlBaseParser.ASTERISK =>
      Multiply(left, right)
    case SqlBaseParser.SLASH =>
      Divide(left, right)
    case SqlBaseParser.PERCENT =>
      Remainder(left, right)
    case SqlBaseParser.DIV =>
      // Hive's DIV is integral division: divide then cast to long.
      Cast(Divide(left, right), LongType)
    case SqlBaseParser.PLUS =>
      Add(left, right)
    case SqlBaseParser.MINUS =>
      Subtract(left, right)
    case SqlBaseParser.AMPERSAND =>
      BitwiseAnd(left, right)
    case SqlBaseParser.HAT =>
      BitwiseXor(left, right)
    case SqlBaseParser.PIPE =>
      BitwiseOr(left, right)
  }
}
/**
* Create a unary arithmetic expression. The following arithmetic operators are supported:
* - Plus: '+'
* - Minus: '-'
* - Bitwise Not: '~'
*/
override def visitArithmeticUnary(ctx: ArithmeticUnaryContext): Expression = withOrigin(ctx) {
  val value = expression(ctx.valueExpression)
  ctx.operator.getType match {
    case SqlBaseParser.PLUS =>
      // Unary plus is a no-op: the operand is returned unchanged.
      value
    case SqlBaseParser.MINUS =>
      UnaryMinus(value)
    case SqlBaseParser.TILDE =>
      BitwiseNot(value)
  }
}
/**
* Create a [[Cast]] expression.
*/
/** Build a [[Cast]] of the parsed child expression to the parsed target data type. */
override def visitCast(ctx: CastContext): Expression = withOrigin(ctx) {
  val child = expression(ctx.expression)
  val targetType = typedVisit[DataType](ctx.dataType)
  Cast(child, targetType)
}
/**
* Create a (windowed) Function expression.
*/
override def visitFunctionCall(ctx: FunctionCallContext): Expression = withOrigin(ctx) {
  // Create the function call.
  val name = ctx.qualifiedName.getText
  val isDistinct = Option(ctx.setQuantifier()).exists(_.DISTINCT != null)
  val arguments = ctx.expression().asScala.map(expression) match {
    case Seq(UnresolvedStar(None)) if name.toLowerCase == "count" && !isDistinct =>
      // Transform COUNT(*) into COUNT(1).
      Seq(Literal(1))
    case expressions =>
      expressions
  }
  val function = UnresolvedFunction(visitFunctionName(ctx.qualifiedName), arguments, isDistinct)
  // Check if the function is evaluated in a windowed context (OVER clause).
  ctx.windowSpec match {
    case spec: WindowRefContext =>
      // OVER windowName: reference to a named window definition.
      UnresolvedWindowExpression(function, visitWindowRef(spec))
    case spec: WindowDefContext =>
      // OVER (...): inline window definition.
      WindowExpression(function, visitWindowDef(spec))
    case _ => function
  }
}
/**
* Create a current timestamp/date expression. These are different from regular function because
* they do not require the user to specify braces when calling them.
*/
// CURRENT_DATE / CURRENT_TIMESTAMP: niladic functions callable without parentheses.
override def visitTimeFunctionCall(ctx: TimeFunctionCallContext): Expression = withOrigin(ctx) {
  ctx.name.getType match {
    case SqlBaseParser.CURRENT_DATE =>
      CurrentDate()
    case SqlBaseParser.CURRENT_TIMESTAMP =>
      CurrentTimestamp()
  }
}
/**
* Create a function database (optional) and name pair.
*/
protected def visitFunctionName(ctx: QualifiedNameContext): FunctionIdentifier = {
  // Only 'fn' and 'db.fn' forms are valid; deeper qualification is rejected.
  ctx.identifier().asScala.map(_.getText) match {
    case Seq(db, fn) => FunctionIdentifier(fn, Option(db))
    case Seq(fn) => FunctionIdentifier(fn, None)
    case other => throw new ParseException(s"Unsupported function name '${ctx.getText}'", ctx)
  }
}
/**
* Create a reference to a window frame, i.e. [[WindowSpecReference]].
*/
// OVER windowName: a by-name reference to a window defined in the WINDOW clause.
override def visitWindowRef(ctx: WindowRefContext): WindowSpecReference = withOrigin(ctx) {
  WindowSpecReference(ctx.identifier.getText)
}
/**
* Create a window definition, i.e. [[WindowSpecDefinition]].
*/
override def visitWindowDef(ctx: WindowDefContext): WindowSpecDefinition = withOrigin(ctx) {
  // CLUSTER BY ... | PARTITION BY ... ORDER BY ...
  val partition = ctx.partition.asScala.map(expression)
  val order = ctx.sortItem.asScala.map(visitSortItem)
  // RANGE/ROWS BETWEEN ...
  val frameSpecOption = Option(ctx.windowFrame).map { frame =>
    val frameType = frame.frameType.getType match {
      case SqlBaseParser.RANGE => RangeFrame
      case SqlBaseParser.ROWS => RowFrame
    }
    SpecifiedWindowFrame(
      frameType,
      visitFrameBound(frame.start),
      // A missing end bound defaults to CURRENT ROW.
      Option(frame.end).map(visitFrameBound).getOrElse(CurrentRow))
  }
  WindowSpecDefinition(
    partition,
    order,
    frameSpecOption.getOrElse(UnspecifiedFrame))
}
/**
* Create or resolve a [[FrameBoundary]]. Simple math expressions are allowed for Value
* Preceding/Following boundaries. These expressions must be constant (foldable) and return an
* integer value.
*/
  override def visitFrameBound(ctx: FrameBoundContext): FrameBoundary = withOrigin(ctx) {
    // We currently only allow foldable integers.
    // `value` is a def, so the bound expression is only evaluated (and validated) for the
    // value-based PRECEDING/FOLLOWING cases below.
    def value: Int = {
      val e = expression(ctx.expression)
      validate(e.resolved && e.foldable && e.dataType == IntegerType,
        "Frame bound value must be a constant integer.",
        ctx)
      e.eval().asInstanceOf[Int]
    }

    // Create the FrameBoundary
    ctx.boundType.getType match {
      case SqlBaseParser.PRECEDING if ctx.UNBOUNDED != null =>
        UnboundedPreceding
      case SqlBaseParser.PRECEDING =>
        ValuePreceding(value)
      case SqlBaseParser.CURRENT =>
        CurrentRow
      case SqlBaseParser.FOLLOWING if ctx.UNBOUNDED != null =>
        UnboundedFollowing
      case SqlBaseParser.FOLLOWING =>
        ValueFollowing(value)
    }
  }
/**
* Create a [[CreateStruct]] expression.
*/
  override def visitRowConstructor(ctx: RowConstructorContext): Expression = withOrigin(ctx) {
    // ROW(e1, e2, ...) becomes a struct of the evaluated expressions.
    CreateStruct(ctx.expression.asScala.map(expression))
  }
/**
* Create a [[ScalarSubquery]] expression.
*/
  override def visitSubqueryExpression(
      ctx: SubqueryExpressionContext): Expression = withOrigin(ctx) {
    // Wrap the parsed subquery plan as a scalar (single-value) subquery expression.
    ScalarSubquery(plan(ctx.query))
  }
/**
* Create a value based [[CaseWhen]] expression. This has the following SQL form:
* {{{
* CASE [expression]
* WHEN [value] THEN [expression]
* ...
* ELSE [expression]
* END
* }}}
*/
  override def visitSimpleCase(ctx: SimpleCaseContext): Expression = withOrigin(ctx) {
    // CASE <value> WHEN <w> ... desugars each WHEN into an equality test against <value>.
    val e = expression(ctx.value)
    val branches = ctx.whenClause.asScala.map { wCtx =>
      (EqualTo(e, expression(wCtx.condition)), expression(wCtx.result))
    }
    // A missing ELSE becomes None (NULL result when no branch matches).
    CaseWhen(branches, Option(ctx.elseExpression).map(expression))
  }
/**
* Create a condition based [[CaseWhen]] expression. This has the following SQL syntax:
* {{{
* CASE
* WHEN [predicate] THEN [expression]
* ...
* ELSE [expression]
* END
* }}}
*
* @param ctx the parse tree
* */
  override def visitSearchedCase(ctx: SearchedCaseContext): Expression = withOrigin(ctx) {
    // Each WHEN already carries its own predicate; no desugaring needed.
    val branches = ctx.whenClause.asScala.map { wCtx =>
      (expression(wCtx.condition), expression(wCtx.result))
    }
    CaseWhen(branches, Option(ctx.elseExpression).map(expression))
  }
/**
* Create a dereference expression. The return type depends on the type of the parent, this can
* either be a [[UnresolvedAttribute]] (if the parent is an [[UnresolvedAttribute]]), or an
* [[UnresolvedExtractValue]] if the parent is some expression.
*/
  override def visitDereference(ctx: DereferenceContext): Expression = withOrigin(ctx) {
    val attr = ctx.fieldName.getText
    expression(ctx.base) match {
      // a.b.c stays a single multipart attribute so the analyzer can resolve it as a column.
      case UnresolvedAttribute(nameParts) =>
        UnresolvedAttribute(nameParts :+ attr)
      // (expr).field is a struct-field extraction on the evaluated expression.
      case e =>
        UnresolvedExtractValue(e, Literal(attr))
    }
  }
/**
* Create an [[UnresolvedAttribute]] expression.
*/
  override def visitColumnReference(ctx: ColumnReferenceContext): Expression = withOrigin(ctx) {
    // `quoted` treats the raw text as a single name part (no '.'-splitting).
    UnresolvedAttribute.quoted(ctx.getText)
  }
/**
* Create an [[UnresolvedExtractValue]] expression, this is used for subscript access to an array.
*/
  override def visitSubscript(ctx: SubscriptContext): Expression = withOrigin(ctx) {
    // value[index] — also used for map lookup, not only arrays.
    UnresolvedExtractValue(expression(ctx.value), expression(ctx.index))
  }
/**
* Create an expression for an expression between parentheses. This is need because the ANTLR
* visitor cannot automatically convert the nested context into an expression.
*/
  override def visitParenthesizedExpression(
      ctx: ParenthesizedExpressionContext): Expression = withOrigin(ctx) {
    // Parentheses only affect grouping; unwrap to the inner expression.
    expression(ctx.expression)
  }
/**
* Create a [[SortOrder]] expression.
*/
override def visitSortItem(ctx: SortItemContext): SortOrder = withOrigin(ctx) {
if (ctx.DESC != null) {
SortOrder(expression(ctx.expression), Descending)
} else {
SortOrder(expression(ctx.expression), Ascending)
}
}
/**
* Create a typed Literal expression. A typed literal has the following SQL syntax:
* {{{
* [TYPE] '[VALUE]'
* }}}
* Currently Date and Timestamp typed literals are supported.
*
* TODO what the added value of this over casting?
*/
override def visitTypeConstructor(ctx: TypeConstructorContext): Literal = withOrigin(ctx) {
val value = string(ctx.STRING)
ctx.identifier.getText.toUpperCase match {
case "DATE" =>
Literal(Date.valueOf(value))
case "TIMESTAMP" =>
Literal(Timestamp.valueOf(value))
case other =>
throw new ParseException(s"Literals of type '$other' are currently not supported.", ctx)
}
}
/**
* Create a NULL literal expression.
*/
  override def visitNullLiteral(ctx: NullLiteralContext): Literal = withOrigin(ctx) {
    // Represent SQL NULL as a Literal wrapping a null value.
    Literal(null)
  }
/**
* Create a Boolean literal expression.
*/
override def visitBooleanLiteral(ctx: BooleanLiteralContext): Literal = withOrigin(ctx) {
if (ctx.getText.toBoolean) {
Literal.TrueLiteral
} else {
Literal.FalseLiteral
}
}
/**
* Create an integral literal expression. The code selects the most narrow integral type
* possible, either a BigDecimal, a Long or an Integer is returned.
*/
override def visitIntegerLiteral(ctx: IntegerLiteralContext): Literal = withOrigin(ctx) {
BigDecimal(ctx.getText) match {
case v if v.isValidInt =>
Literal(v.intValue())
case v if v.isValidLong =>
Literal(v.longValue())
case v => Literal(v.underlying())
}
}
/**
* Create a double literal for a number denoted in scientific notation.
*/
  override def visitScientificDecimalLiteral(
      ctx: ScientificDecimalLiteralContext): Literal = withOrigin(ctx) {
    // Scientific notation (e.g. 1.5E3) is always treated as a Double.
    Literal(ctx.getText.toDouble)
  }
/**
* Create a decimal literal for a regular decimal number.
*/
  override def visitDecimalLiteral(ctx: DecimalLiteralContext): Literal = withOrigin(ctx) {
    // `underlying` unwraps to java.math.BigDecimal, which Literal knows how to handle.
    Literal(BigDecimal(ctx.getText).underlying())
  }
/** Create a numeric literal expression. */
  private def numericLiteral
      (ctx: NumberContext, minValue: BigDecimal, maxValue: BigDecimal, typeName: String)
      (converter: String => Any): Literal = withOrigin(ctx) {
    // Drop the single trailing type-suffix character from the raw literal text.
    val rawStrippedQualifier = ctx.getText.substring(0, ctx.getText.length - 1)
    try {
      // Range-check via BigDecimal before handing off to the narrowing converter.
      val rawBigDecimal = BigDecimal(rawStrippedQualifier)
      if (rawBigDecimal < minValue || rawBigDecimal > maxValue) {
        throw new ParseException(s"Numeric literal ${rawStrippedQualifier} does not " +
          s"fit in range [${minValue}, ${maxValue}] for type ${typeName}", ctx)
      }
      Literal(converter(rawStrippedQualifier))
    } catch {
      // Malformed number text surfaces as a parse error at this literal's position.
      case e: NumberFormatException =>
        throw new ParseException(e.getMessage, ctx)
    }
  }
/**
* Create a Byte Literal expression.
*/
  override def visitTinyIntLiteral(ctx: TinyIntLiteralContext): Literal = {
    // Range-checked against [Byte.MinValue, Byte.MaxValue] before narrowing to Byte.
    numericLiteral(ctx, Byte.MinValue, Byte.MaxValue, ByteType.simpleString)(_.toByte)
  }
/**
* Create a Short Literal expression.
*/
  override def visitSmallIntLiteral(ctx: SmallIntLiteralContext): Literal = {
    // Range-checked against [Short.MinValue, Short.MaxValue] before narrowing to Short.
    numericLiteral(ctx, Short.MinValue, Short.MaxValue, ShortType.simpleString)(_.toShort)
  }
/**
* Create a Long Literal expression.
*/
  override def visitBigIntLiteral(ctx: BigIntLiteralContext): Literal = {
    // Range-checked against [Long.MinValue, Long.MaxValue] before converting to Long.
    numericLiteral(ctx, Long.MinValue, Long.MaxValue, LongType.simpleString)(_.toLong)
  }
/**
* Create a Double Literal expression.
*/
  override def visitDoubleLiteral(ctx: DoubleLiteralContext): Literal = {
    // Range-checked against [Double.MinValue, Double.MaxValue] before converting to Double.
    numericLiteral(ctx, Double.MinValue, Double.MaxValue, DoubleType.simpleString)(_.toDouble)
  }
/**
* Create a BigDecimal Literal expression.
*/
  override def visitBigDecimalLiteral(ctx: BigDecimalLiteralContext): Literal = {
    // Drop the two-character type suffix (presumably "BD") before parsing.
    val raw = ctx.getText.substring(0, ctx.getText.length - 2)
    try {
      Literal(BigDecimal(raw).underlying())
    } catch {
      // NOTE(review): BigDecimal parse failures raise NumberFormatException, which this
      // catch does NOT cover — confirm the AnalysisException originates from Literal
      // construction, otherwise malformed input escapes as a raw runtime exception.
      case e: AnalysisException =>
        throw new ParseException(e.message, ctx)
    }
  }
/**
* Create a String literal expression.
*/
  override def visitStringLiteral(ctx: StringLiteralContext): Literal = withOrigin(ctx) {
    // Concatenation of adjacent string literals is handled by createString.
    Literal(createString(ctx))
  }
/**
* Create a String from a string literal context. This supports multiple consecutive string
* literals, these are concatenated, for example this expression "'hello' 'world'" will be
* converted into "helloworld".
*
* Special characters can be escaped by using Hive/C-style escaping.
*/
  private def createString(ctx: StringLiteralContext): String = {
    // Unescape each STRING token (via `string`) and join adjacent literals.
    ctx.STRING().asScala.map(string).mkString
  }
/**
* Create a [[CalendarInterval]] literal expression. An interval expression can contain multiple
* unit value pairs, for instance: interval 2 months 2 days.
*/
  override def visitInterval(ctx: IntervalContext): Literal = withOrigin(ctx) {
    val intervals = ctx.intervalField.asScala.map(visitIntervalField)
    validate(intervals.nonEmpty, "at least one time unit should be given for interval literal", ctx)
    // Sum all unit-value pairs into a single CalendarInterval.
    Literal(intervals.reduce(_.add(_)))
  }
/**
* Create a [[CalendarInterval]] for a unit value pair. Two unit configuration types are
* supported:
* - Single unit.
* - From-To unit (only 'YEAR TO MONTH' and 'DAY TO SECOND' are supported).
*/
override def visitIntervalField(ctx: IntervalFieldContext): CalendarInterval = withOrigin(ctx) {
import ctx._
val s = value.getText
try {
val interval = (unit.getText.toLowerCase, Option(to).map(_.getText.toLowerCase)) match {
case (u, None) if u.endsWith("s") =>
// Handle plural forms, e.g: yearS/monthS/weekS/dayS/hourS/minuteS/hourS/...
CalendarInterval.fromSingleUnitString(u.substring(0, u.length - 1), s)
case (u, None) =>
CalendarInterval.fromSingleUnitString(u, s)
case ("year", Some("month")) =>
CalendarInterval.fromYearMonthString(s)
case ("day", Some("second")) =>
CalendarInterval.fromDayTimeString(s)
case (from, Some(t)) =>
throw new ParseException(s"Intervals FROM $from TO $t are not supported.", ctx)
}
validate(interval != null, "No interval can be constructed", ctx)
interval
} catch {
// Handle Exceptions thrown by CalendarInterval
case e: IllegalArgumentException =>
val pe = new ParseException(e.getMessage, ctx)
pe.setStackTrace(e.getStackTrace)
throw pe
}
}
/* ********************************************************************************************
* DataType parsing
* ******************************************************************************************** */
/**
* Resolve/create a primitive type.
*/
override def visitPrimitiveDataType(ctx: PrimitiveDataTypeContext): DataType = withOrigin(ctx) {
(ctx.identifier.getText.toLowerCase, ctx.INTEGER_VALUE().asScala.toList) match {
case ("boolean", Nil) => BooleanType
case ("tinyint" | "byte", Nil) => ByteType
case ("smallint" | "short", Nil) => ShortType
case ("int" | "integer", Nil) => IntegerType
case ("bigint" | "long", Nil) => LongType
case ("float", Nil) => FloatType
case ("double", Nil) => DoubleType
case ("date", Nil) => DateType
case ("timestamp", Nil) => TimestampType
case ("char" | "varchar" | "string", Nil) => StringType
case ("char" | "varchar", _ :: Nil) => StringType
case ("binary", Nil) => BinaryType
case ("decimal", Nil) => DecimalType.USER_DEFAULT
case ("decimal", precision :: Nil) => DecimalType(precision.getText.toInt, 0)
case ("decimal", precision :: scale :: Nil) =>
DecimalType(precision.getText.toInt, scale.getText.toInt)
case (dt, params) =>
throw new ParseException(
s"DataType $dt${params.mkString("(", ",", ")")} is not supported.", ctx)
}
}
/**
* Create a complex DataType. Arrays, Maps and Structures are supported.
*/
  override def visitComplexDataType(ctx: ComplexDataTypeContext): DataType = withOrigin(ctx) {
    ctx.complex.getType match {
      case SqlBaseParser.ARRAY =>
        // ARRAY<t>: single nested type.
        ArrayType(typedVisit(ctx.dataType(0)))
      case SqlBaseParser.MAP =>
        // MAP<k, v>: key type first, value type second.
        MapType(typedVisit(ctx.dataType(0)), typedVisit(ctx.dataType(1)))
      case SqlBaseParser.STRUCT =>
        createStructType(ctx.colTypeList())
    }
  }
/**
* Create a [[StructType]] from a sequence of [[StructField]]s.
*/
  protected def createStructType(ctx: ColTypeListContext): StructType = {
    // Option(ctx) guards against a null context (absent column list) -> empty struct.
    StructType(Option(ctx).toSeq.flatMap(visitColTypeList))
  }
/**
* Create a [[StructType]] from a number of column definitions.
*/
  override def visitColTypeList(ctx: ColTypeListContext): Seq[StructField] = withOrigin(ctx) {
    // One StructField per parsed column definition, in declaration order.
    ctx.colType().asScala.map(visitColType)
  }
/**
* Create a [[StructField]] from a column definition.
*/
  override def visitColType(ctx: ColTypeContext): StructField = withOrigin(ctx) {
    import ctx._

    // Add the comment to the metadata.
    val builder = new MetadataBuilder
    if (STRING != null) {
      builder.putString("comment", string(STRING))
    }

    // Columns created through this path are always nullable.
    StructField(identifier.getText, typedVisit(dataType), nullable = true, builder.build())
  }
}
| gioenn/xSpark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala | Scala | apache-2.0 | 51,800 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.codegen.calls
import org.apache.flink.table.planner.codegen.CodeGenUtils.{getEnum, primitiveTypeTermForType, qualifyMethod, TIMESTAMP_DATA}
import org.apache.flink.table.planner.codegen.GenerateUtils.generateCallIfArgsNotNull
import org.apache.flink.table.planner.codegen.{CodeGeneratorContext, GeneratedExpression}
import org.apache.flink.table.types.logical.{LogicalType, LogicalTypeRoot}
import org.apache.calcite.avatica.util.TimeUnitRange
import org.apache.calcite.avatica.util.TimeUnitRange._
import java.lang.reflect.Method
import java.util.TimeZone
/**
* Generates floor/ceil function calls.
*/
class FloorCeilCallGen(
    arithmeticMethod: Method,
    temporalMethod: Option[Method] = None)
  extends MethodCallGen(arithmeticMethod) {

  // One operand: plain arithmetic floor/ceil. Two operands: temporal floor/ceil to a
  // TimeUnitRange. Code is emitted as Java source strings.
  override def generate(
      ctx: CodeGeneratorContext,
      operands: Seq[GeneratedExpression],
      returnType: LogicalType): GeneratedExpression = operands.size match {
    // arithmetic
    case 1 =>
      operands.head.resultType.getTypeRoot match {
        case LogicalTypeRoot.FLOAT | LogicalTypeRoot.DOUBLE =>
          super.generate(ctx, operands, returnType)
        case LogicalTypeRoot.DECIMAL =>
          generateCallIfArgsNotNull(ctx, returnType, operands) {
            operandResultTerms =>
              s"${qualifyMethod(arithmeticMethod)}(${operandResultTerms.mkString(", ")})"
          }
        case _ =>
          operands.head // no floor/ceil necessary

      }

    // temporal
    case 2 =>
      val operand = operands.head
      val unit = getEnum(operands(1)).asInstanceOf[TimeUnitRange]
      val internalType = primitiveTypeTermForType(operand.resultType)
      // Fall back to the arithmetic method only for the parameter-count probe below.
      val method = temporalMethod.getOrElse(arithmeticMethod)

      generateCallIfArgsNotNull(ctx, operand.resultType, operands) {
        terms =>
          unit match {
            // for Timestamp with timezone info
            // Chosen when the target method takes one extra trailing TimeZone parameter.
            // NOTE(review): this branch and the ones below call temporalMethod.get — they
            // assume temporalMethod is defined whenever a 2-operand call is generated;
            // confirm callers never hit this path with temporalMethod = None.
            case YEAR | QUARTER | MONTH | DAY | HOUR
              if terms.length + 1 == method.getParameterCount &&
                method.getParameterTypes()(terms.length) == classOf[TimeZone] =>
              val timeZone = ctx.addReusableSessionTimeZone()
              val longTerm = s"${terms.head}.getMillisecond()"
              s"""
                 |$TIMESTAMP_DATA.fromEpochMillis(
                 |  ${qualifyMethod(temporalMethod.get)}(${terms(1)},
                 |  $longTerm,
                 |  $timeZone))
                 |""".stripMargin

            // for Unix Date / Unix Time
            case YEAR | MONTH =>
              operand.resultType.getTypeRoot match {
                case LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE =>
                  val longTerm = s"${terms.head}.getMillisecond()"
                  s"""
                     |$TIMESTAMP_DATA.fromEpochMillis(
                     |  ${qualifyMethod(temporalMethod.get)}(${terms(1)}, $longTerm))
                   """.stripMargin
                case _ =>
                  s"""
                     |($internalType) ${qualifyMethod(temporalMethod.get)}(
                     |  ${terms(1)}, ${terms.head})
                     |""".stripMargin
              }

            // All other units: divide/round via the unit's millisecond multiplier.
            case _ =>
              operand.resultType.getTypeRoot match {
                case LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE =>
                  val longTerm = s"${terms.head}.getMillisecond()"
                  s"""
                     |$TIMESTAMP_DATA.fromEpochMillis(${qualifyMethod(arithmeticMethod)}(
                     |  $longTerm,
                     |  (long) ${unit.startUnit.multiplier.intValue()}))
                   """.stripMargin
                case _ =>
                  s"""
                     |${qualifyMethod(arithmeticMethod)}(
                     |  ($internalType) ${terms.head},
                     |  ($internalType) ${unit.startUnit.multiplier.intValue()})
                     |""".stripMargin
              }
          }
      }
  }
}
| jinglining/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/codegen/calls/FloorCeilCallGen.scala | Scala | apache-2.0 | 4,756 |
package com.eharmony.aloha.models.exploration
import com.eharmony.aloha.audit.Auditor
import com.eharmony.aloha.factory._
import com.eharmony.aloha.id.ModelIdentity
import com.eharmony.aloha.models.{Submodel, SubmodelBase, Subvalue}
import com.eharmony.aloha.reflect.RefInfo
import com.eharmony.aloha.semantics.Semantics
import com.eharmony.aloha.semantics.func.GenAggFunc
import com.mwt.explorers.BootstrapExplorer
import com.mwt.policies.Policy
import spray.json.DefaultJsonProtocol.{jsonFormat3, immIndexedSeqFormat, JsValueFormat, StringJsonFormat}
import spray.json.{DeserializationException, JsValue, JsonFormat, JsonReader, RootJsonFormat}
import scala.annotation.tailrec
import scala.collection.JavaConversions.seqAsJavaList
import scala.collection.{immutable => sci}
/**
* Since explore-java has chosen to force the Policy to evaluate we can't just evaluate the model inside of the policy.
* We have to invert the control and evaluate the models first, and only upon all models successfully evaluating
* can we then do the policy evaluation. Because of this IOC the policy becomes a simple lookup into the model map.
* @param index the model to get the action for
*/
private[this] case class NumberedPolicy(index: Int) extends Policy[sci.IndexedSeq[Int]] {
  // The models were already evaluated upstream, so choosing an action is a plain lookup.
  override def chooseAction(actions: sci.IndexedSeq[Int]): Int = actions.apply(index)
}
/**
* A model for performing bootstrap style exploration. This makes use of a number of policies. The algorithm chooses
* one policy and then uses the other to calculate the appropriate probability of choosing that action. Note that the
* models MUST return a value between 1 and the number of actions, and if not an exception will be thrown.
* @param modelId a model identifier
* @param models a set of models that generate Int's. These models MUST be deterministic for the probability to be correct.
* Each model must return a value in the range 1 to `classLabels.size` (inclusive).
* @param salt a function that generates a salt for the randomization layer. This salt allows the random choice of which policy
* to follow to be repeatable.
* @param classLabels a list of class labels to output for the final type. Also note that the size of this controls the
* number of actions. If the submodel returns a score < 1 or > classLabels.size (note the 1 offset)
* then a RuntimeException will be thrown.
* @tparam A model input type
* @tparam B model output type
*/
case class BootstrapModel[U, N, A, B <: U](
  modelId: ModelIdentity,
  models: sci.IndexedSeq[Submodel[Int, A, U]],
  salt: GenAggFunc[A, Long],
  classLabels: sci.IndexedSeq[N],
  auditor: Auditor[U, N, B]
) extends SubmodelBase[U, N, A, B] {

  // @transient + lazy: not serialized; recomputed on first use after deserialization.
  @transient private[this] lazy val explorer = new BootstrapExplorer[sci.IndexedSeq[Int]](
    models.indices.map(i => NumberedPolicy(i): Policy[sci.IndexedSeq[Int]]),
    classLabels.size
  )

  override def subvalue(a: A): Subvalue[B, N] = combineScores(a, models)

  /**
   * We want to get either one failure or aggregate all the successes. This will do that and then
   * return the correct type.
   * @param a the item to score
   * @param models the list of policies to evaluate a for
   * @param subvalues all the scores generated so far
   * @param successfulActions all the successfully generated submodel actions
   * @return a final success if all the models evaluated properly or else a failure as soon as
   *         one fails
   */
  @tailrec private[this] def combineScores(
    a: A,
    models: sci.IndexedSeq[Submodel[Int, A, U]],
    subvalues: sci.IndexedSeq[U] = Vector.empty,
    successfulActions: sci.IndexedSeq[Int] = Vector.empty): Subvalue[B, N] = {

    // If models is empty then all models returned success.
    if (models.isEmpty) {
      val decision = explorer.chooseAction(salt(a), successfulActions)
      val action = decision.getAction

      // We want to return only those subscores that contributed to the chosen action. Hence
      // we're going to filter out those successes (in this case actions) that are not the same
      // as the action chosen by the explorer.
      success(
        // Submodel actions are 1-based; classLabels is 0-based.
        naturalValue = classLabels(action - 1),
        subvalues = subvalues.zip(successfulActions).collect{ case (ss, sa) if sa == action => ss },
        prob = Option(decision.getProbability)
      )
    }
    else {
      val s = models.head.subvalue(a)
      s.natural match {
        case Some(act) => combineScores(a, models.tail, subvalues :+ s.audited, successfulActions :+ act)
        case None =>
          // Short-circuit on submodels that can't produce a score.
          // (Fixed typo in the user-visible message: "becauase" -> "because".)
          failure(
            Seq(s"Bootstrap model failed because ${models.head.modelId} failed."),
            Set.empty,
            subvalues)
      }
    }
  }

  override def close(): Unit = models.foreach(_.close())
}
object BootstrapModel extends ParserProviderCompanion {

  // JSON parsing plugin: builds a BootstrapModel from a spec of the form
  // { "policies": [...], "salt": "...", "classLabels": [...] }.
  object Parser extends ModelSubmodelParsingPlugin {
    val modelType = "BootstrapExploration"

    protected[this] case class Ast[N: JsonReader](
      policies: sci.IndexedSeq[JsValue],
      salt: String,
      classLabels: sci.IndexedSeq[N])

    protected[this] implicit def astJsonFormat[N: JsonFormat]: RootJsonFormat[Ast[N]] =
      jsonFormat3(Ast.apply[N]) // , "policies", "salt", "classLabels")

    override def commonJsonReader[U, N, A, B <: U](
      factory: SubmodelFactory[U, A],
      semantics: Semantics[A],
      auditor: Auditor[U, N, B])
      (implicit r: RefInfo[N], jf: JsonFormat[N]): Option[JsonReader[BootstrapModel[U, N, A, B]]] = {

      Some(new JsonReader[BootstrapModel[U, N, A, B]] {
        override def read(json: JsValue): BootstrapModel[U, N, A, B] = {
          val mId = getModelId(json).get
          val ast = json.convertTo[Ast[N]]

          // TODO: Determine if these should these be handled more gracefully. See ModelDecisionTree.
          // TODO: Create common code: def submodels[N: RefInfo](s: Seq[JsValue]): Try[Seq[Submodel[N, A, U]]]
          // NOTE(review): both `.get` calls below throw on malformed specs rather than
          // returning a parse failure — see the TODOs above.
          val models = ast.policies.map(p => factory.submodel[Int](p).get)

          val saltFn = semantics.createFunction[Long](ast.salt).
            fold(l => throw new DeserializationException(l.mkString("\\n")), identity)

          BootstrapModel(mId, models, saltFn, ast.classLabels, auditor)
        }
      })
    }
  }

  override def parser: ModelParser = Parser

//
//  object Parser extends ModelParser {
//    val modelType = "BootstrapExploration"
//
//    import spray.json._
//    import DefaultJsonProtocol._
//
//    protected[this] case class Ast[B: JsonReader: ScoreConverter](policies: sci.IndexedSeq[JsValue], salt: String, classLabels: sci.IndexedSeq[B]) {
//      def createModel[A, B](factory: ModelFactory, semantics: Semantics[A], modelId: ModelIdentity) = {
//        val models = policies.map(factory.getModel(_, Option(semantics))(semantics.refInfoA, IntScoreConverter.ri, IntJsonFormat, IntScoreConverter).get)
//        val saltFunc = semantics.createFunction[Long](salt).fold(l => throw new DeserializationException(l.mkString("\\n")), identity)
//        BootstrapModel(modelId, models, saltFunc, classLabels)
//      }
//    }
//
//    protected[this] def astJsonFormat[B: JsonFormat: ScoreConverter]: RootJsonFormat[Ast[B]] = jsonFormat(Ast.apply[B], "policies", "salt", "classLabels")
//
//    /**
//     * @param factory ModelFactory[Model[_, _] ]
//     * @tparam A model input type
//     * @tparam B model input type
//     * @return
//     */
//    def modelJsonReader[A, B](factory: ModelFactory, semantics: Option[Semantics[A]])
//                             (implicit jr: JsonReader[B], sc: ScoreConverter[B]) = new JsonReader[BootstrapModel[A, B]] {
//      def read(json: JsValue): BootstrapModel[A, B] = {
//        import com.eharmony.aloha.factory.ScalaJsonFormats.lift
//
//        val mId = getModelId(json).get
//        val ast = json.convertTo[Ast[B]](astJsonFormat(lift(jr), sc))
//
//        val model = ast.createModel[A, B](factory, semantics.get, mId)
//
//        model
//      }
//    }
//  }
}
| eHarmony/aloha | aloha-core/src/main/scala/com/eharmony/aloha/models/exploration/BootstrapModel.scala | Scala | mit | 8,147 |
package net.walend.disentangle.examples
import org.scalatest.{Matchers, FlatSpec}
import net.walend.disentangle.graph.IndexedLabelDigraph
import net.walend.disentangle.graph.mutable.MatrixLabelDigraph
import net.walend.disentangle.graph.semiring.{FirstStepsTrait, FloydWarshall}
import net.walend.disentangle.graph.SomeGraph._
/**
*
*
* @author dwalend
* @since v0.2.0
*/
class FloydWarshallExampleTest extends FlatSpec with Matchers {

  val support = FloydWarshall.defaultSupport[String]

  "The Floyd-Warshall example" should "produce expected results" in {

    // Expected all-pairs shortest-path digraph over SomeGraph's nodes. Each label holds
    // the path length and the set of possible first steps toward the target.
    val expectedShortPathGraph = MatrixLabelDigraph(
      edges = Vector((A,A,Some(support.FirstSteps(0,Set()))),
        (A,B,Some(support.FirstSteps(1,Set(B)))),
        (A,C,Some(support.FirstSteps(2,Set(B)))),
        (A,D,Some(support.FirstSteps(3,Set(B)))),
        (A,E,Some(support.FirstSteps(4,Set(B)))),
        (A,H,Some(support.FirstSteps(5,Set(B)))),
        (A,F,Some(support.FirstSteps(5,Set(B)))),
        (B,B,Some(support.FirstSteps(0,Set()))),
        (B,C,Some(support.FirstSteps(1,Set(C)))),
        (B,D,Some(support.FirstSteps(2,Set(C)))),
        (B,E,Some(support.FirstSteps(3,Set(C)))),
        (B,H,Some(support.FirstSteps(4,Set(C)))),
        (B,F,Some(support.FirstSteps(4,Set(C)))),
        (C,B,Some(support.FirstSteps(3,Set(D)))),
        (C,C,Some(support.FirstSteps(0,Set()))),
        (C,D,Some(support.FirstSteps(1,Set(D)))),
        (C,E,Some(support.FirstSteps(2,Set(D)))),
        (C,H,Some(support.FirstSteps(3,Set(D)))),
        (C,F,Some(support.FirstSteps(3,Set(D)))),
        (D,B,Some(support.FirstSteps(2,Set(E)))),
        (D,C,Some(support.FirstSteps(3,Set(E)))),
        (D,D,Some(support.FirstSteps(0,Set()))),
        (D,E,Some(support.FirstSteps(1,Set(E)))),
        (D,H,Some(support.FirstSteps(2,Set(E)))),
        (D,F,Some(support.FirstSteps(2,Set(E)))),
        (E,B,Some(support.FirstSteps(1,Set(B)))),
        (E,C,Some(support.FirstSteps(2,Set(B, H)))),
        (E,D,Some(support.FirstSteps(3,Set(B, H)))),
        (E,E,Some(support.FirstSteps(0,Set()))),
        (E,H,Some(support.FirstSteps(1,Set(H)))),
        (E,F,Some(support.FirstSteps(1,Set(F)))),
        (H,B,Some(support.FirstSteps(4,Set(C)))),
        (H,C,Some(support.FirstSteps(1,Set(C)))),
        (H,D,Some(support.FirstSteps(2,Set(C)))),
        (H,E,Some(support.FirstSteps(3,Set(C)))),
        (H,H,Some(support.FirstSteps(0,Set()))),
        (H,F,Some(support.FirstSteps(4,Set(C)))),
        (F,F,Some(support.FirstSteps(0,Set())))
      ),
      nodes = Seq(A, B, C, D, E, H, F),
      noEdgeExistsValue = None
    )

    val shortPathGraph: IndexedLabelDigraph[String, Option[FirstStepsTrait[String, Int]]] = FloydWarshallExample.simpleShortPathGraph

    shortPathGraph should be (expectedShortPathGraph)

    // Subgraph check: edges are keyed by inner nodes of the computed graph.
    val expectedSubgraphEdges = Set(
      (shortPathGraph.innerNode(C).get,shortPathGraph.innerNode(D).get,Some(support.FirstSteps(1,Set(D)))),
      (shortPathGraph.innerNode(E).get,shortPathGraph.innerNode(H).get,Some(support.FirstSteps(3,Set(B, H)))),
      (shortPathGraph.innerNode(E).get,shortPathGraph.innerNode(B).get,Some(support.FirstSteps(3,Set(B, H)))),
      (shortPathGraph.innerNode(B).get,shortPathGraph.innerNode(C).get,Some(support.FirstSteps(2,Set(C)))),
      (shortPathGraph.innerNode(H).get,shortPathGraph.innerNode(C).get,Some(support.FirstSteps(2,Set(C))))
    )

    val subgraphEdges = FloydWarshallExample.subgraph
    subgraphEdges should be (expectedSubgraphEdges)

    // Both equal-cost E -> D paths must be enumerated.
    val expectedPaths = Vector(
      List(shortPathGraph.innerNode(E).get, shortPathGraph.innerNode(B).get, shortPathGraph.innerNode(C).get, shortPathGraph.innerNode(D).get),
      List(shortPathGraph.innerNode(E).get, shortPathGraph.innerNode(H).get, shortPathGraph.innerNode(C).get, shortPathGraph.innerNode(D).get)
    )

    val paths = FloydWarshallExample.paths
    paths should be (expectedPaths)
  }
}
| dwalend/ScalaGraphMinimizer | examples/src/test/scala/net/walend/disentangle/examples/FloydWarshallExampleTest.scala | Scala | mit | 4,449 |
/*
* ******************************************************************************
* Copyright 2012-2013 SpotRight
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ******************************************************************************
*/
package com.spotright.polidoro
package model
import scalaz._
import scalaz.Lens._
/**
* Cassandra Column
*/
case class Column[K: Manifest, N: Manifest, V: Manifest](colpath: ColumnPath[K,N], colval: V, ttl: Option[Int] = None)
  extends Columnish[K,N,V] {

  require(colval != null, "colval is null")

  /** Returns a copy of this column carrying the given time-to-live. */
  def !(ttl: Int): Column[K,N,V] = copy(ttl = Some(ttl))

  // Flattened coordinates of this column, extracted from the nested path for convenience.
  private[this] val rp = colpath.rowpath
  val keyspace = rp.colfam.keyspace
  val cfname = rp.colfam.cfname
  val rowkey = rp.rowkey
  val colname = colpath.colname
}
object Column {

  // scalaz lenses into Column, composed with the ColumnPath lenses via `<=<`.

  def colpathL[K: Manifest,N: Manifest,V: Manifest]: Lens[Column[K,N,V],ColumnPath[K,N]] =
    lensg(c => cp => c.copy(colpath = cp), _.colpath)

  def keyspaceL[K: Manifest,N: Manifest,V: Manifest] = ColumnPath.keyspaceL[K,N] <=< colpathL[K,N,V]
  def cfnameL[K: Manifest,N: Manifest,V: Manifest] = ColumnPath.cfnameL[K,N] <=< colpathL[K,N,V]
  // NOTE(review): `rowkey` and `colval` lack the `L` suffix used by the sibling lens
  // factories; renaming would break callers, so the inconsistency is only flagged here.
  def rowkey[K: Manifest,N: Manifest,V: Manifest] = ColumnPath.rowkeyL[K,N] <=< colpathL[K,N,V]
  def colnameL[K: Manifest,N: Manifest,V: Manifest] = ColumnPath.colnameL[K,N] <=< colpathL[K,N,V]

  def colval[K: Manifest,N: Manifest,V: Manifest]: Lens[Column[K,N,V],V] =
    lensg(c => v => c.copy(colval = v), _.colval)
}
| SpotRight/Polidoro | src/main/scala/com/spotright/polidoro/model/Column.scala | Scala | apache-2.0 | 2,013 |
// Jubatus: Online machine learning framework for distributed environment
// Copyright (C) 2014-2015 Preferred Networks and Nippon Telegraph and Telephone Corporation.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License version 2.1 as published by the Free Software Foundation.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
package us.jubat.yarn.test
import java.util
import java.util.{Random, Collections}
import us.jubat.classifier.{EstimateResult, ClassifierClient, LabeledDatum}
import us.jubat.common.{ClientBase, Datum}
import us.jubat.yarn.common.{Location, LearningMachineType}
import us.jubat.yarn.client.{Resource, JubatusYarnApplication}
import java.net.InetAddress
import org.apache.hadoop.fs.Path
import scala.util.{Success, Failure}
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext.Implicits.global
import scala.collection.JavaConverters._
// Test case: YARN container priority handling.
object Test14 extends App {

  /**
   * Starts a Jubatus classifier application on YARN, trains it with a huge
   * data set, saves and re-loads the model, and finally stops the
   * application again.
   *
   * @param priority YARN resource priority for the Jubatus server containers.
   */
  def testcase(priority: Int): Unit = {
    println("アプリケーションを起動します")
    val tApplicationFuture = JubatusYarnApplication.start(
      "shogun",
      LearningMachineType.Classifier,
      List(Location(InetAddress.getLocalHost, 2181)),
      new Path(s"hdfs:///jubatus-on-yarn/sample/shogun.json"),
      // Fix: forward the method parameter instead of hard-coding priority = 0,
      // which silently ignored the requested priority.
      Resource(priority = priority, memory = 512, virtualCores = 1),
      1
    ).andThen {
      case Failure(e) =>
        println(e.getMessage)
        e.printStackTrace()
      case Success(tApplication) =>
        println(
          "アプリケーションが起動しました\\n"
            + s"\\t${tApplication.jubatusProxy}\\n"
            + s"\\t${tApplication.jubatusServers}"
        )
        println("アプリケーションの状態を取得します")
        val tStatus = tApplication.status
        println(s"\\t${tStatus.jubatusProxy}")
        println(s"\\t${tStatus.jubatusServers}")
        println(s"\\t${tStatus.yarnApplication}")
        // Train the classifier through the Jubatus proxy.
        val tClient = new ClassifierClient(tApplication.jubatusProxy.hostAddress, tApplication.jubatusProxy.port, "shogun", 10)
        trainHugeData(tClient)
        println("モデルデータを保存します")
        tApplication.saveModel(new Path("hdfs:///tmp/"), "test").get
        Thread.sleep(1000)
        println("モデルデータを読み込みます")
        tApplication.loadModel(new Path("hdfs:///tmp/"), "test").get
        println("アプリケーションを停止します")
        Await.ready(
          tApplication.stop().andThen {
            case Failure(e) =>
              println(e.getMessage)
              e.printStackTrace()
            case Success(_) =>
          },
          Duration.Inf
        )
        println("アプリケーションを停止しました")
    }
    Await.ready(tApplicationFuture, Duration.Inf)
  }

  /** Trains the classifier with ten million shuffled batches of samurai names. */
  private def trainHugeData(aClient: ClassifierClient): Unit = {
    for (i <- 0 to 10000000) {
      train(aClient, i)
      if (i % 1000 == 0) println("train " + i)
    }
  }

  /**
   * Sends one shuffled training batch; `aSeq` is appended to every name so
   * each batch contains distinct feature strings.
   */
  private def train(aClient: ClassifierClient, aSeq: Int): Unit = {
    def makeDatum(name: String): Datum = new Datum().addString("name", name)
    def makeTrain(tag: String, name: String): LabeledDatum = new LabeledDatum(tag, makeDatum(name + aSeq))
    val trainData = scala.collection.mutable.ArrayBuffer(
      makeTrain("徳川", "家康"), makeTrain("徳川", "秀忠"), makeTrain("徳川", "家光"), makeTrain("徳川", "家綱"),
      makeTrain("徳川", "綱吉"), makeTrain("徳川", "家宣"), makeTrain("徳川", "家継"), makeTrain("徳川", "吉宗"),
      makeTrain("徳川", "家重"), makeTrain("徳川", "家治"), makeTrain("徳川", "家斉"), makeTrain("徳川", "家慶"),
      makeTrain("徳川", "家定"), makeTrain("徳川", "家茂"), //makeTrain("徳川", "慶喜"),
      makeTrain("足利", "尊氏"), makeTrain("足利", "義詮"), makeTrain("足利", "義満"), makeTrain("足利", "義持"),
      makeTrain("足利", "義量"), makeTrain("足利", "義教"), makeTrain("足利", "義勝"), makeTrain("足利", "義政"),
      makeTrain("足利", "義尚"), makeTrain("足利", "義稙"), makeTrain("足利", "義澄"), makeTrain("足利", "義稙"),
      makeTrain("足利", "義晴"), makeTrain("足利", "義輝"), makeTrain("足利", "義栄"), //makeTrain("足利", "義昭"),
      makeTrain("北条", "時政"), makeTrain("北条", "義時"), makeTrain("北条", "泰時"), makeTrain("北条", "経時"),
      makeTrain("北条", "時頼"), makeTrain("北条", "長時"), makeTrain("北条", "政村"), makeTrain("北条", "時宗"),
      makeTrain("北条", "貞時"), makeTrain("北条", "師時"), makeTrain("北条", "宗宣"), makeTrain("北条", "煕時"),
      makeTrain("北条", "基時"), makeTrain("北条", "高時"), makeTrain("北条", "貞顕") //, makeTrain("北条", "守時")
    )
    val t = trainData.asJava
    // Deterministic shuffle so runs are reproducible.
    Collections.shuffle(t, new Random(0))
    // Run the training call.
    aClient.train(t)
  }

  /**
   * Predicts the last shogun of each dynasty (the names that were commented
   * out of the training data). NOTE(review): currently unused by this test.
   */
  private def predict(aClient: ClassifierClient): Unit = {
    def makeDatum(name: String): Datum = new Datum().addString("name", name)
    def findBestResult(res: List[EstimateResult]): Option[EstimateResult] = {
      res match {
        case List() => None
        case _ => Some(res.maxBy(_.score))
      }
    }
    // Predict the last shogun of each dynasty.
    val data = Array[Datum](makeDatum("慶喜"), makeDatum("義昭"), makeDatum("守時"))
    for (datum <- data) {
      val res = aClient.classify(util.Arrays.asList(datum))
      // Print the predicted family name followed by the given name.
      println(findBestResult(res.get(0).asScala.toList).get.label + datum.stringValues.get(0).value)
    }
  }

  // The priority only seems to be handed to the nice command; YARN's
  // effective minimum/maximum values are unknown.
  println("==========================================================")
  println("No3")
  testcase(0)
  println("プログラムを終了します")
  System.exit(0)
}
| jubatus/jubatus-on-yarn | jubatusonyarn/jubatus-on-yarn-test/src/main/scala/us/jubat/yarn/test/Test14.scala | Scala | lgpl-2.1 | 6,441 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.serving.utils
import org.apache.logging.log4j.LogManager
import redis.clients.jedis.exceptions.JedisConnectionException
import redis.clients.jedis.{Jedis, JedisPool, Pipeline, StreamEntryID}
import scala.collection.JavaConverters._
/** Helpers for talking to Redis from Cluster Serving: stream groups, memory guards and writes. */
object RedisUtils {
  val logger = LogManager.getLogger(getClass)

  /**
   * Creates the consumer group "serving" on the given stream if it does not
   * exist yet. Redis raises an error when the group already exists; that is
   * logged and otherwise ignored (best effort).
   */
  def createRedisGroupIfNotExist(jedis: Jedis, streamName: String): Unit = {
    try {
      jedis.xgroupCreate(streamName,
        "serving", new StreamEntryID(0, 0), true)
    } catch {
      case e: Exception =>
        logger.info(s"xgroupCreate raise [$e], " +
          s"will not create new group.")
    }
  }

  /**
   * Guards Redis memory: when used memory exceeds `inputThreshold` of
   * maxmemory, trims the serving input stream down to `cutRatio` of its
   * length. If usage is still above the threshold afterwards, the result
   * records must be dequeued by consumers, so this blocks and re-checks
   * every 10 seconds.
   */
  def checkMemory(db: Jedis, inputThreshold: Double, cutRatio: Double): Unit = {
    var redisInfo = RedisUtils.getMapFromInfo(db.info())
    if (redisInfo("used_memory").toLong >=
      redisInfo("maxmemory").toLong * inputThreshold) {
      this.synchronized {
        // Re-read under the lock: another thread may already have trimmed.
        redisInfo = RedisUtils.getMapFromInfo(db.info())
        if (redisInfo("maxmemory").toLong > 0 && redisInfo("used_memory").toLong >=
          redisInfo("maxmemory").toLong * inputThreshold) {
          logger.warn(s"Used memory ${redisInfo("used_memory")}, " +
            s"Max memory ${redisInfo("maxmemory")}. Your input data length is " +
            s"${db.xlen(Conventions.SERVING_STREAM_DEFAULT_NAME)}. Removing old data...")
          db.xtrim(Conventions.SERVING_STREAM_DEFAULT_NAME,
            (db.xlen(Conventions.SERVING_STREAM_DEFAULT_NAME) * cutRatio).toLong, true)
          logger.warn(s"Trimmed stream, now your serving stream length is " +
            s"${db.xlen(Conventions.SERVING_STREAM_DEFAULT_NAME)}")
          var cuttedRedisInfo = RedisUtils.getMapFromInfo(db.info())
          while (cuttedRedisInfo("used_memory").toLong >=
            cuttedRedisInfo("maxmemory").toLong * inputThreshold) {
            logger.error(s"Used memory ${redisInfo("used_memory")}, " +
              s"Max memory ${redisInfo("maxmemory")}. " +
              s"Your result field has exceeded the limit, please dequeue. Will retry in 10 sec..")
            cuttedRedisInfo = RedisUtils.getMapFromInfo(db.info())
            Thread.sleep(10000)
          }
        }
      }
    }
  }

  /**
   * Parses the output of the Redis INFO command into a key -> value map.
   * Sections are separated by '#', entries by CRLF, and each entry has the
   * form "key:value"; lines that do not split into exactly two parts are
   * skipped. On duplicate keys the last occurrence wins, matching the
   * previous imperative implementation.
   */
  def getMapFromInfo(info: String): Map[String, String] = {
    info.split("#")
      .filter(_.nonEmpty)
      .flatMap(_.split("\\r\\n"))
      .map(_.split(":"))
      .collect { case Array(key, value) => key -> value }
      .toMap
  }

  /**
   * Obtains a client from the pool, retrying every 500 ms when the pool is
   * exhausted and giving up with an Error after 10 failed attempts.
   */
  def getRedisClient(redisPool: JedisPool): Jedis = {
    var jedis: Jedis = null
    var cnt: Int = 0
    while (jedis == null) {
      try {
        jedis = redisPool.getResource
      }
      catch {
        case e: JedisConnectionException =>
          logger.info(
            s"Redis client can not connect, maybe max number of clients is reached." +
              "Waiting, if you always receive this, please stop your service and report bug.")
          e.printStackTrace()
          cnt += 1
          if (cnt >= 10) {
            throw new Error("can not get redis from the pool")
          }
          Thread.sleep(500)
      }
      Thread.sleep(10)
    }
    jedis
  }

  /** Queues an HMSET of `{"value": value}` under `result:<name>:<key>` on the pipeline. */
  def writeHashMap(ppl: Pipeline, key: String, value: String, name: String): Unit = {
    val hKey = Conventions.RESULT_PREFIX + name + ":" + key
    val hValue = Map[String, String]("value" -> value).asJava
    ppl.hmset(hKey, hValue)
  }

  /** Queues an XADD of `{"value": value}` to stream `result:<name>:<key>` on the pipeline. */
  def writeXstream(ppl: Pipeline, key: String, value: String, name: String): Unit = {
    val streamKey = Conventions.RESULT_PREFIX + name + ":" + key
    val streamValue = Map[String, String]("value" -> value).asJava
    ppl.xadd(streamKey, StreamEntryID.NEW_ENTRY, streamValue)
  }
}
| intel-analytics/BigDL | scala/serving/src/main/scala/com/intel/analytics/bigdl/serving/utils/RedisUtils.scala | Scala | apache-2.0 | 4,460 |
package org.elasticsearch.spark.sql.streaming
import java.lang.Boolean.{FALSE, TRUE}
import java.{lang => jl}
import java.{util => ju}
import org.apache.spark.sql.SparkSession
import org.junit.Assert
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.runners.Parameterized.Parameters
import org.mockito.Mockito
object StructuredStreamingVersionLockTest {
  /**
   * JUnit parameter sets: pairs of (Spark version string, whether structured
   * streaming support is expected to be compatible with that version).
   */
  @Parameters
  def testParams(): ju.Collection[Array[jl.Object]] = {
    // Versions below 2.2.0 are expected to be rejected.
    val cases: Seq[(String, jl.Boolean)] = Seq(
      "1.6.3" -> FALSE,
      "2.0.0" -> FALSE,
      "2.0.3" -> FALSE,
      "2.1.0" -> FALSE,
      "2.1.3" -> FALSE,
      "2.2.0" -> TRUE,
      "2.2.1" -> TRUE,
      "2.2.3" -> TRUE,
      "2.3.0" -> TRUE,
      "2.3.1" -> TRUE,
      "2.3.3" -> TRUE,
      "2.5.0" -> TRUE,
      "2.5.1" -> TRUE,
      "3.0.0" -> TRUE
    )
    val list = new ju.ArrayList[Array[jl.Object]](cases.size)
    for ((version, expected) <- cases) {
      list.add(Array[jl.Object](version, expected))
    }
    list
  }
}
@RunWith(classOf[Parameterized])
class StructuredStreamingVersionLockTest(version: String, expectsPass: Boolean) {
@Test
@throws[Exception]
def checkCompatibility(): Unit = {
val mockSession = Mockito.mock(classOf[SparkSession])
Mockito.when(mockSession.version).thenReturn(version)
var exception: Option[Exception] = None
try {
StructuredStreamingVersionLock.checkCompatibility(mockSession)
} catch {
case e: Exception => exception = Some(e)
}
(expectsPass, exception) match {
case (true, Some(e)) => throw e
case (false, None) => Assert.fail(s"Expected failure but didn't fail [$version]")
case _ => // We good
}
}
} | wangcy6/storm_app | Elasticsearch/elasticsearch-hadoop-master/spark/sql-20/src/test/scala/org/elasticsearch/spark/sql/streaming/StructuredStreamingVersionLockTest.scala | Scala | apache-2.0 | 1,747 |
package org.orbroker
import adapt._
import exception._
import java.sql.ResultSet
/**
 * Query extractor. This is the unifying type for
 * [[org.orbroker.RowExtractor]] and [[org.orbroker.JoinExtractor]]. This
 * should not be implemented directly (the trait is sealed, so only the
 * subtypes in this file can extend it).
 * @author Nils Kilden-Pedersen
 */
sealed trait QueryExtractor[T] {
  // Drains the ResultSet, turning rows into T values, and hands the
  // resulting iterator to `receiver`, whose result is returned.
  private[orbroker] def mapResultSet[R](rs: ResultSet, receiver: Iterator[T] => R, adapter: BrokerAdapter): R
}
/**
 * Interface for extracting a user defined object
 * from a single row.
 * <p>Implement this row extractor if a query is a simple
 * non-JOIN query and the type extracted will not be
 * extracted as part of a JOIN from another query.
 * @see JoinExtractor
 * @author Nils Kilden-Pedersen
 */
trait RowExtractor[T] extends QueryExtractor[T] {
  /** Extracts one object from the current row. */
  def extract(row: Row): T
  // Each row maps independently to one T via `extract`.
  private[orbroker] override def mapResultSet[R](rs: ResultSet, receiver: Iterator[T] => R, adapter: BrokerAdapter): R = {
    receiver(new RowIterator(rs, adapter, extract))
  }
}
/**
 * Extractor for callable-statement OUT parameters. It cannot be used to
 * extract rows from a `ResultSet`, so `mapResultSet` always throws.
 */
trait OutParmExtractor[T] extends QueryExtractor[T] {
  /** Extracts one object from the statement's OUT parameters. */
  def extract(out: OutParms): T
  private[orbroker] override final def mapResultSet[R](rs: ResultSet, receiver: Iterator[T] => R, adapter: BrokerAdapter): R =
    throw new IllegalArgumentException("Cannot extract ResultSet using " + getClass)
}
/**
 * Interface for extracting a user defined object
 * from a group of rows.
 * Implement this join extractor if a query is a
 * JOIN query <em>or</em> if this type needs to
 * be extracted from another JOIN query.
 * <p>NOTICE: Extraction should be done in
 * the following sequence:
 * <ol>
 * <li>[[org.orbroker.Row]]</li>
 * <li>[[org.orbroker.Join.extractOne]]</li>
 * <li>[[org.orbroker.Join.extractGroup]] (or [[org.orbroker.Join.extractSeq]])</li>
 * </ol>
 * @see RowExtractor
 * @author Nils Kilden-Pedersen
 */
trait JoinExtractor[T] extends QueryExtractor[T] {
  /**
   * The set of columns that uniquely distinguishes
   * this object in a result set, typically the columns
   * that compose the primary key. The query should be
   * ordered by those columns.
   */
  def key: Set[String]
  /**
   * Extracts one object from the current row, using `join` to pull
   * associated objects from the following rows that share the same [[key]].
   */
  def extract(row: Row, join: Join): T
  // Consecutive rows with equal `key` values are grouped and fed to `extract`.
  private[orbroker] override final def mapResultSet[R](rs: ResultSet, receiver: Iterator[T] => R, adapter: BrokerAdapter): R = {
    receiver(new JoinIterable(key, rs, adapter, extract).iterator)
  }
}
/**
 * Fallback extractor used when no RowExtractor was registered for a
 * statement: returns a single value for one column, or a tuple for
 * two to five columns (positionally, NULLs become null).
 */
private[orbroker] final class DefaultExtractor(id: Symbol) extends RowExtractor[Any] {
  def extract(row: Row): Any = try {
    // Positional column access; NULL columns are surfaced as null.
    def col(n: Int): Any = row(n.toString).opt[Any].orNull
    row.columns.size match {
      case 1 => col(1)
      case 2 => (col(1), col(2))
      case 3 => (col(1), col(2), col(3))
      case 4 => (col(1), col(2), col(3), col(4))
      case 5 => (col(1), col(2), col(3), col(4), col(5))
      case x => throw new ConfigurationException(s"$x columns available for '$id', and no RowExtractor registered")
    }
  } catch {
    case _: NoSuchElementException => throw new ConfigurationException(s"Statement '$id' contains NULL values. Must register a RowExtractor")
  }
}
/**
 * Wrapper that normalizes the key columns of a [[JoinExtractor]] to upper
 * case and delegates extraction. Equality is by identity of the wrapped
 * delegate, so wrapping the same extractor twice yields equal wrappers.
 */
private[orbroker] final class SafeJoinExtractor[T](val delegate: JoinExtractor[T]) extends JoinExtractor[T] {
  require(!delegate.key.isEmpty, "No columns defined for key")
  val key = delegate.key.map(_.toUpperCase)
  def extract(row: Row, join: Join): T = delegate.extract(row, join)
  // Fix: the previous unchecked asInstanceOf threw ClassCastException (or NPE
  // for null) when compared to anything that was not a SafeJoinExtractor,
  // violating the equals contract. Pattern match and return false instead.
  override def equals(any: Any) = any match {
    case that: SafeJoinExtractor[_] => this.delegate eq that.delegate
    case _ => false
  }
  override def hashCode = this.delegate.hashCode
}
| nilskp/orbroker | src/main/scala/org/orbroker/QueryExtractor.scala | Scala | mit | 3,679 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.scala
import org.apache.flink.annotation.PublicEvolving
import org.apache.flink.api.common.JobExecutionResult
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.table.api.scala.internal.StreamTableEnvironmentImpl
import org.apache.flink.table.api.{TableEnvironment, _}
import org.apache.flink.table.descriptors.{ConnectorDescriptor, StreamTableDescriptor}
import org.apache.flink.table.expressions.Expression
import org.apache.flink.table.functions.{AggregateFunction, TableAggregateFunction, TableFunction}
import org.apache.flink.table.sinks.TableSink
/**
 * This table environment is the entry point and central context for creating Table and SQL
 * API programs that integrate with the Scala-specific [[DataStream]] API.
 *
 * It is unified for bounded and unbounded data processing.
 *
 * A stream table environment is responsible for:
 *
 * - Convert a [[DataStream]] into [[Table]] and vice-versa.
 * - Connecting to external systems.
 * - Registering and retrieving [[Table]]s and other meta objects from a catalog.
 * - Executing SQL statements.
 * - Offering further configuration options.
 *
 * Note: If you don't intend to use the [[DataStream]] API, [[TableEnvironment]] is meant for pure
 * table programs.
 */
@PublicEvolving
trait StreamTableEnvironment extends TableEnvironment {
  /**
   * Registers a [[TableFunction]] under a unique name in the TableEnvironment's catalog.
   * Registered functions can be referenced in SQL queries.
   *
   * @param name The name under which the function is registered.
   * @param tf The TableFunction to register
   * @tparam T The type of the rows produced by the table function.
   */
  def registerFunction[T: TypeInformation](name: String, tf: TableFunction[T]): Unit
  /**
   * Registers an [[AggregateFunction]] under a unique name in the TableEnvironment's catalog.
   * Registered functions can be referenced in Table API and SQL queries.
   *
   * @param name The name under which the function is registered.
   * @param f The AggregateFunction to register.
   * @tparam T The type of the output value.
   * @tparam ACC The type of aggregate accumulator.
   */
  def registerFunction[T: TypeInformation, ACC: TypeInformation](
      name: String,
      f: AggregateFunction[T, ACC]): Unit
  /**
   * Registers a [[TableAggregateFunction]] under a unique name in the TableEnvironment's catalog.
   * Registered functions can only be referenced in Table API.
   *
   * @param name The name under which the function is registered.
   * @param f The TableAggregateFunction to register.
   * @tparam T The type of the output value.
   * @tparam ACC The type of aggregate accumulator.
   */
  def registerFunction[T: TypeInformation, ACC: TypeInformation](
      name: String,
      f: TableAggregateFunction[T, ACC]): Unit
  /**
   * Converts the given [[DataStream]] into a [[Table]].
   *
   * The field names of the [[Table]] are automatically derived from the type of the
   * [[DataStream]].
   *
   * @param dataStream The [[DataStream]] to be converted.
   * @tparam T The type of the [[DataStream]].
   * @return The converted [[Table]].
   */
  def fromDataStream[T](dataStream: DataStream[T]): Table
  /**
   * Converts the given [[DataStream]] into a [[Table]] with specified field names.
   *
   * Example:
   *
   * {{{
   *   val stream: DataStream[(String, Long)] = ...
   *   val tab: Table = tableEnv.fromDataStream(stream, 'a, 'b)
   * }}}
   *
   * @param dataStream The [[DataStream]] to be converted.
   * @param fields The field names of the resulting [[Table]].
   * @tparam T The type of the [[DataStream]].
   * @return The converted [[Table]].
   */
  def fromDataStream[T](dataStream: DataStream[T], fields: Expression*): Table
  /**
   * Creates a view from the given [[DataStream]].
   * Registered views can be referenced in SQL queries.
   *
   * The field names of the [[Table]] are automatically derived
   * from the type of the [[DataStream]].
   *
   * The view is registered in the namespace of the current catalog and database. To register the
   * view in a different catalog use [[createTemporaryView]].
   *
   * Temporary objects can shadow permanent ones. If a permanent object in a given path exists,
   * it will be inaccessible in the current session. To make the permanent object available again
   * you can drop the corresponding temporary object.
   *
   * @param name The name under which the [[DataStream]] is registered in the catalog.
   * @param dataStream The [[DataStream]] to register.
   * @tparam T The type of the [[DataStream]] to register.
   * @deprecated use [[createTemporaryView]]
   */
  @deprecated
  def registerDataStream[T](name: String, dataStream: DataStream[T]): Unit
  /**
   * Creates a view from the given [[DataStream]] in a given path.
   * Registered tables can be referenced in SQL queries.
   *
   * The field names of the [[Table]] are automatically derived
   * from the type of the [[DataStream]].
   *
   * Temporary objects can shadow permanent ones. If a permanent object in a given path exists,
   * it will be inaccessible in the current session. To make the permanent object available again
   * you can drop the corresponding temporary object.
   *
   * @param path The path under which the [[DataStream]] is created.
   *             See also the [[TableEnvironment]] class description for the format of the path.
   * @param dataStream The [[DataStream]] out of which to create the view.
   * @tparam T The type of the [[DataStream]].
   */
  def createTemporaryView[T](path: String, dataStream: DataStream[T]): Unit
  /**
   * Creates a view from the given [[DataStream]] in a given path with specified field names.
   * Registered views can be referenced in SQL queries.
   *
   * Example:
   *
   * {{{
   *   val stream: DataStream[(String, Long)] = ...
   *   tableEnv.registerDataStream("myTable", stream, 'a, 'b)
   * }}}
   *
   * The view is registered in the namespace of the current catalog and database. To register the
   * view in a different catalog use [[createTemporaryView]].
   *
   * Temporary objects can shadow permanent ones. If a permanent object in a given path exists,
   * it will be inaccessible in the current session. To make the permanent object available again
   * you can drop the corresponding temporary object.
   *
   * @param name The name under which the [[DataStream]] is registered in the catalog.
   * @param dataStream The [[DataStream]] to register.
   * @param fields The field names of the registered view.
   * @tparam T The type of the [[DataStream]] to register.
   * @deprecated use [[createTemporaryView]]
   */
  @deprecated
  def registerDataStream[T](name: String, dataStream: DataStream[T], fields: Expression*): Unit
  /**
   * Creates a view from the given [[DataStream]] in a given path with specified field names.
   * Registered views can be referenced in SQL queries.
   *
   * Example:
   *
   * {{{
   *   val stream: DataStream[(String, Long)] = ...
   *   tableEnv.createTemporaryView("cat.db.myTable", stream, 'a, 'b)
   * }}}
   *
   * Temporary objects can shadow permanent ones. If a permanent object in a given path exists,
   * it will be inaccessible in the current session. To make the permanent object available again
   * you can drop the corresponding temporary object.
   *
   * @param path The path under which the [[DataStream]] is created.
   *             See also the [[TableEnvironment]] class description for the format of the path.
   * @param dataStream The [[DataStream]] out of which to create the view.
   * @param fields The field names of the created view.
   * @tparam T The type of the [[DataStream]].
   */
  def createTemporaryView[T](path: String, dataStream: DataStream[T], fields: Expression*): Unit
  /**
   * Converts the given [[Table]] into an append [[DataStream]] of a specified type.
   *
   * The [[Table]] must only have insert (append) changes. If the [[Table]] is also modified
   * by update or delete changes, the conversion will fail.
   *
   * The fields of the [[Table]] are mapped to [[DataStream]] fields as follows:
   * - [[org.apache.flink.types.Row]] and Scala Tuple types: Fields are mapped by position, field
   * types must match.
   * - POJO [[DataStream]] types: Fields are mapped by field name, field types must match.
   *
   * @param table The [[Table]] to convert.
   * @tparam T The type of the resulting [[DataStream]].
   * @return The converted [[DataStream]].
   */
  def toAppendStream[T: TypeInformation](table: Table): DataStream[T]
  /**
   * Converts the given [[Table]] into an append [[DataStream]] of a specified type.
   *
   * The [[Table]] must only have insert (append) changes. If the [[Table]] is also modified
   * by update or delete changes, the conversion will fail.
   *
   * The fields of the [[Table]] are mapped to [[DataStream]] fields as follows:
   * - [[org.apache.flink.types.Row]] and Scala Tuple types: Fields are mapped by position, field
   * types must match.
   * - POJO [[DataStream]] types: Fields are mapped by field name, field types must match.
   *
   * @param table The [[Table]] to convert.
   * @param queryConfig The configuration of the query to generate.
   * @tparam T The type of the resulting [[DataStream]].
   * @return The converted [[DataStream]].
   */
  def toAppendStream[T: TypeInformation](
      table: Table,
      queryConfig: StreamQueryConfig): DataStream[T]
  /**
   * Converts the given [[Table]] into a [[DataStream]] of add and retract messages.
   * The message will be encoded as [[Tuple2]]. The first field is a [[Boolean]] flag,
   * the second field holds the record of the specified type [[T]].
   *
   * A true [[Boolean]] flag indicates an add message, a false flag indicates a retract message.
   *
   * @param table The [[Table]] to convert.
   * @tparam T The type of the requested data type.
   * @return The converted [[DataStream]].
   */
  def toRetractStream[T: TypeInformation](table: Table): DataStream[(Boolean, T)]
  /**
   * Converts the given [[Table]] into a [[DataStream]] of add and retract messages.
   * The message will be encoded as [[Tuple2]]. The first field is a [[Boolean]] flag,
   * the second field holds the record of the specified type [[T]].
   *
   * A true [[Boolean]] flag indicates an add message, a false flag indicates a retract message.
   *
   * @param table The [[Table]] to convert.
   * @param queryConfig The configuration of the query to generate.
   * @tparam T The type of the requested data type.
   * @return The converted [[DataStream]].
   */
  def toRetractStream[T: TypeInformation](
      table: Table,
      queryConfig: StreamQueryConfig): DataStream[(Boolean, T)]
  /**
   * Evaluates a SQL statement such as INSERT, UPDATE or DELETE; or a DDL statement;
   * NOTE: Currently only SQL INSERT statements are supported.
   *
   * All tables referenced by the query must be registered in the TableEnvironment.
   * A [[Table]] is automatically registered when its [[Table#toString()]] method is
   * called, for example when it is embedded into a String.
   * Hence, SQL queries can directly reference a [[Table]] as follows:
   *
   * {{{
   *   // register the configured table sink into which the result is inserted.
   *   tEnv.registerTableSink("sinkTable", configuredSink);
   *   Table sourceTable = ...
   *   String tableName = sourceTable.toString();
   *   // sourceTable is not registered to the table environment
   *   tEnv.sqlUpdate(s"INSERT INTO sinkTable SELECT * FROM tableName", config);
   * }}}
   *
   * @param stmt The SQL statement to evaluate.
   * @param config The [[QueryConfig]] to use.
   */
  def sqlUpdate(stmt: String, config: StreamQueryConfig): Unit
  /**
   * Writes the [[Table]] to a [[TableSink]] that was registered under the specified name.
   *
   * See the documentation of TableEnvironment#useDatabase or
   * TableEnvironment.useCatalog(String) for the rules on the path resolution.
   *
   * @param table The Table to write to the sink.
   * @param queryConfig The [[StreamQueryConfig]] to use.
   * @param sinkPath The first part of the path of the registered [[TableSink]] to
   *                 which the [[Table]] is written. This is to ensure at least the name
   *                 of the [[TableSink]] is provided.
   * @param sinkPathContinued The remaining part of the path of the registered [[TableSink]] to
   *                          which the [[Table]] is written.
   * @deprecated use `TableEnvironment#insertInto(String, Table)`
   */
  @deprecated
  def insertInto(
      table: Table,
      queryConfig: StreamQueryConfig,
      sinkPath: String,
      sinkPathContinued: String*): Unit
  /**
   * Triggers the program execution. The environment will execute all parts of
   * the program.
   *
   * The program execution will be logged and displayed with the provided name
   *
   * It calls the StreamExecutionEnvironment#execute on the underlying
   * [[StreamExecutionEnvironment]]. In contrast to the [[TableEnvironment]] this
   * environment translates queries eagerly.
   *
   * @param jobName Desired name of the job
   * @return The result of the job execution, containing elapsed time and accumulators.
   * @throws Exception which occurs during job execution.
   */
  @throws[Exception]
  override def execute(jobName: String): JobExecutionResult
  /**
   * Creates a table source and/or table sink from a descriptor.
   *
   * Descriptors allow for declaring the communication to external systems in an
   * implementation-agnostic way. The classpath is scanned for suitable table factories that match
   * the desired configuration.
   *
   * The following example shows how to read from a Kafka connector using a JSON format and
   * registering a table source "MyTable" in append mode:
   *
   * {{{
   *
   * tableEnv
   *   .connect(
   *     new Kafka()
   *       .version("0.11")
   *       .topic("clicks")
   *       .property("zookeeper.connect", "localhost")
   *       .property("group.id", "click-group")
   *       .startFromEarliest())
   *   .withFormat(
   *     new Json()
   *       .jsonSchema("{...}")
   *       .failOnMissingField(false))
   *   .withSchema(
   *     new Schema()
   *       .field("user-name", "VARCHAR").from("u_name")
   *       .field("count", "DECIMAL")
   *       .field("proc-time", "TIMESTAMP").proctime())
   *   .inAppendMode()
   *   .createTemporaryTable("MyTable")
   * }}}
   *
   * @param connectorDescriptor connector descriptor describing the external system
   */
  override def connect(connectorDescriptor: ConnectorDescriptor): StreamTableDescriptor
}
/** Factory methods for creating Scala [[StreamTableEnvironment]] instances. */
object StreamTableEnvironment {
  /**
   * Creates a table environment that is the entry point and central context for creating Table and
   * SQL API programs that integrate with the Scala-specific [[DataStream]] API.
   *
   * It is unified for bounded and unbounded data processing.
   *
   * A stream table environment is responsible for:
   *
   * - Convert a [[DataStream]] into [[Table]] and vice-versa.
   * - Connecting to external systems.
   * - Registering and retrieving [[Table]]s and other meta objects from a catalog.
   * - Executing SQL statements.
   * - Offering further configuration options.
   *
   * Note: If you don't intend to use the [[DataStream]] API, [[TableEnvironment]] is meant for
   * pure table programs.
   *
   * This overload uses the default [[EnvironmentSettings]].
   *
   * @param executionEnvironment The Scala [[StreamExecutionEnvironment]] of the
   *                             [[TableEnvironment]].
   */
  def create(executionEnvironment: StreamExecutionEnvironment): StreamTableEnvironment = {
    create(
      executionEnvironment,
      EnvironmentSettings.newInstance().build())
  }
  /**
   * Creates a table environment that is the entry point and central context for creating Table and
   * SQL API programs that integrate with the Scala-specific [[DataStream]] API.
   *
   * It is unified for bounded and unbounded data processing.
   *
   * A stream table environment is responsible for:
   *
   * - Convert a [[DataStream]] into [[Table]] and vice-versa.
   * - Connecting to external systems.
   * - Registering and retrieving [[Table]]s and other meta objects from a catalog.
   * - Executing SQL statements.
   * - Offering further configuration options.
   *
   * Note: If you don't intend to use the [[DataStream]] API, [[TableEnvironment]] is meant for
   * pure table programs.
   *
   * @param executionEnvironment The Scala [[StreamExecutionEnvironment]] of the
   *                             [[TableEnvironment]].
   * @param settings The environment settings used to instantiate the [[TableEnvironment]].
   */
  def create(
      executionEnvironment: StreamExecutionEnvironment,
      settings: EnvironmentSettings)
    : StreamTableEnvironment = {
    StreamTableEnvironmentImpl.create(executionEnvironment, settings, new TableConfig)
  }
  /**
   * Creates a table environment that is the entry point and central context for creating Table and
   * SQL API programs that integrate with the Scala-specific [[DataStream]] API.
   *
   * It is unified for bounded and unbounded data processing.
   *
   * A stream table environment is responsible for:
   *
   * - Convert a [[DataStream]] into [[Table]] and vice-versa.
   * - Connecting to external systems.
   * - Registering and retrieving [[Table]]s and other meta objects from a catalog.
   * - Executing SQL statements.
   * - Offering further configuration options.
   *
   * Note: If you don't intend to use the [[DataStream]] API, [[TableEnvironment]] is meant for
   * pure table programs.
   *
   * @param executionEnvironment The Scala [[StreamExecutionEnvironment]] of the
   *                             [[TableEnvironment]].
   * @param tableConfig The configuration of the [[TableEnvironment]].
   * @deprecated Use [[create(StreamExecutionEnvironment)]] and
   *             [[StreamTableEnvironment#getConfig()]] for manipulating the [[TableConfig]].
   */
  @deprecated
  def create(executionEnvironment: StreamExecutionEnvironment, tableConfig: TableConfig)
    : StreamTableEnvironment = {
    StreamTableEnvironmentImpl
      .create(
        executionEnvironment,
        EnvironmentSettings.newInstance().build(),
        tableConfig)
  }
}
| gyfora/flink | flink-table/flink-table-api-scala-bridge/src/main/scala/org/apache/flink/table/api/scala/StreamTableEnvironment.scala | Scala | apache-2.0 | 19,502 |
/*
* Copyright (C) 2017 Vincibean <Andrea Bessi>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.vincibean.scala.impatient.chapter15.exercise7
import scala.annotation.tailrec
/**
 * Give an example to show that the tail recursion optimization is not valid
 * when a method can be overridden: `tailrecSum` must be `final` for
 * `@tailrec` to compile, while the overridable `sum` cannot carry the
 * annotation.
 */
class Tailrec {

  /** Tail-recursive sum of `xs` starting from `partial`; safe for long inputs. */
  @tailrec
  final def tailrecSum(xs: Seq[Int], partial: BigInt): BigInt = xs match {
    case Seq() => partial
    case _     => tailrecSum(xs.tail, xs.head + partial)
  }

  /** Same computation, but overridable and therefore not optimizable by the compiler. */
  def sum(xs: Seq[Int], partial: BigInt): BigInt =
    if (xs.nonEmpty) sum(xs.tail, xs.head + partial) else partial
}
| Vincibean/ScalaForTheImpatient-Solutions | src/main/scala/org/vincibean/scala/impatient/chapter15/exercise7/Tailrec.scala | Scala | gpl-3.0 | 1,214 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.eval.internal
import java.util.concurrent.RejectedExecutionException
import cats.effect.{CancelToken, IO}
import monix.eval.Task.{Async, Context}
import monix.eval.{Coeval, Task}
import monix.execution.atomic.AtomicInt
import monix.execution.exceptions.CallbackCalledMultipleTimesException
import monix.execution.internal.Platform
import monix.execution.schedulers.{StartAsyncBatchRunnable, TrampolinedRunnable}
import monix.execution.{Callback, Cancelable, Scheduler, UncaughtExceptionReporter}
import scala.util.control.NonFatal
/** Internal builders backing the public `Task.async*`, `Task.cancelable*`
  * and `Task.create` constructors of [[monix.eval.Task]].
  *
  * All builders wrap the user-supplied registration function so that:
  *  - the downstream callback is invoked at most once (contract protection),
  *  - errors thrown synchronously by the registration are routed to the
  *    callback (or the scheduler, if the callback already fired),
  *  - cancellation tokens get pushed onto the task's connection stack.
  */
private[eval] object TaskCreate {
  /**
   * Implementation for `cats.effect.Concurrent#cancelable`.
   */
  def cancelableEffect[A](k: (Either[Throwable, A] => Unit) => CancelToken[Task]): Task[A] =
    cancelable0((_, cb) => k(cb))
  /**
   * Implementation for `Task.cancelable`
   */
  def cancelable0[A](fn: (Scheduler, Callback[Throwable, A]) => CancelToken[Task]): Task[A] = {
    // `Cancelable0Start` does the callback protection; this subclass only
    // tells it how to store a `Task`-typed cancellation token
    val start = new Cancelable0Start[A, CancelToken[Task]](fn) {
      def setConnection(ref: TaskConnectionRef, token: CancelToken[Task])(implicit s: Scheduler): Unit =
        ref := token
    }
    Async(
      start,
      trampolineBefore = false,
      trampolineAfter = false
    )
  }
  /**
   * Implementation for `Task.create`, used via `TaskBuilder`.
   */
  def cancelableIO[A](start: (Scheduler, Callback[Throwable, A]) => CancelToken[IO]): Task[A] =
    cancelable0((sc, cb) => Task.from(start(sc, cb)))
  /**
   * Implementation for `Task.create`, used via `TaskBuilder`.
   */
  def cancelableCancelable[A](fn: (Scheduler, Callback[Throwable, A]) => Cancelable): Task[A] = {
    val start = new Cancelable0Start[A, Cancelable](fn) {
      def setConnection(ref: TaskConnectionRef, token: Cancelable)(implicit s: Scheduler): Unit =
        ref := token
    }
    Async(start, trampolineBefore = false, trampolineAfter = false)
  }
  /**
   * Implementation for `Task.create`, used via `TaskBuilder`.
   */
  def cancelableCoeval[A](start: (Scheduler, Callback[Throwable, A]) => Coeval[Unit]): Task[A] =
    cancelable0((sc, cb) => Task.from(start(sc, cb)))
  /**
   * Implementation for `Task.async0`
   */
  def async0[A](fn: (Scheduler, Callback[Throwable, A]) => Any): Task[A] = {
    val start = (ctx: Context, cb: Callback[Throwable, A]) => {
      implicit val s = ctx.scheduler
      // Guards the user callback against multiple invocations and handles
      // the async boundary when signaling downstream; `shouldPop = false`
      // because nothing was pushed on the connection stack here
      val cbProtected = new CallbackForCreate(ctx, shouldPop = false, cb)
      try {
        fn(s, cbProtected)
        ()
      } catch {
        case e if NonFatal(e) =>
          // If the callback already fired, the only remaining channel for
          // this error is the scheduler's reporter
          if (!cbProtected.tryOnError(e)) {
            s.reportFailure(e)
          }
      }
    }
    Async(start, trampolineBefore = false, trampolineAfter = false)
  }
  /**
   * Implementation for `cats.effect.Async#async`.
   *
   * It duplicates the implementation of `Task.async0` with the purpose
   * of avoiding extraneous callback allocations.
   */
  def async[A](k: Callback[Throwable, A] => Unit): Task[A] = {
    val start = (ctx: Context, cb: Callback[Throwable, A]) => {
      implicit val s = ctx.scheduler
      val cbProtected = new CallbackForCreate(ctx, shouldPop = false, cb)
      try {
        k(cbProtected)
      } catch {
        case e if NonFatal(e) =>
          if (!cbProtected.tryOnError(e)) {
            s.reportFailure(e)
          }
      }
    }
    Async(start, trampolineBefore = false, trampolineAfter = false)
  }
  /**
   * Implementation for `Task.asyncF`.
   */
  def asyncF[A](k: Callback[Throwable, A] => Task[Unit]): Task[A] = {
    val start = (ctx: Context, cb: Callback[Throwable, A]) => {
      implicit val s = ctx.scheduler
      // Creating new connection, because we can have a race condition
      // between the bind continuation and executing the generated task
      val ctx2 = Context(ctx.scheduler, ctx.options)
      val conn = ctx.connection
      conn.push(ctx2.connection.cancel)
      val cbProtected = new CallbackForCreate(ctx, shouldPop = true, cb)
      try {
        // Provided callback takes care of `conn.pop()`
        val task = k(cbProtected)
        // The registration task's own failure must also reach the caller,
        // hence the error-forwarding wrapper around the protected callback
        Task.unsafeStartNow(task, ctx2, new ForwardErrorCallback(cbProtected))
      } catch {
        case e if NonFatal(e) =>
          if (!cbProtected.tryOnError(e)) {
            s.reportFailure(e)
          }
      }
    }
    Async(start, trampolineBefore = false, trampolineAfter = false)
  }
  // Template for the `cancelable*` builders above: wraps the registration
  // function `fn` with callback protection and stores the returned token
  // (of builder-specific type `Token`) on the task's connection stack, so
  // cancellation of the task triggers the user's cancellation logic
  private abstract class Cancelable0Start[A, Token](fn: (Scheduler, Callback[Throwable, A]) => Token)
    extends ((Context, Callback[Throwable, A]) => Unit) {
    // How to assign a `Token` to the connection reference — abstract because
    // `TaskConnectionRef := _` is overloaded per token type
    def setConnection(ref: TaskConnectionRef, token: Token)(implicit s: Scheduler): Unit
    final def apply(ctx: Context, cb: Callback[Throwable, A]): Unit = {
      implicit val s = ctx.scheduler
      val conn = ctx.connection
      val cancelable = TaskConnectionRef()
      conn push cancelable.cancel
      val cbProtected = new CallbackForCreate(ctx, shouldPop = true, cb)
      try {
        val ref = fn(s, cbProtected)
        // Optimization to skip the assignment, as it's expensive
        if (!ref.isInstanceOf[Cancelable.IsDummy])
          setConnection(cancelable, ref)
      } catch {
        case e if NonFatal(e) =>
          if (!cbProtected.tryOnError(e)) {
            s.reportFailure(e)
          }
      }
    }
  }
  // Used by `asyncF`: the generated registration task yields no meaningful
  // value (successes are ignored), but its failure must be forwarded to the
  // main callback — or reported, if that callback has already fired
  private final class ForwardErrorCallback(cb: Callback[Throwable, _])(implicit r: UncaughtExceptionReporter)
    extends Callback[Throwable, Unit] {
    override def onSuccess(value: Unit): Unit = ()
    override def onError(e: Throwable): Unit =
      if (!cb.tryOnError(e)) {
        r.reportFailure(e)
      }
  }
  // Protects the user-facing callback `cb`:
  //  - enforces the at-most-once contract via the `state` atomic
  //    (0 = waiting, 1 = completed with success, 2 = completed with error);
  //  - defers the actual signaling to the scheduler (as `run()`), optionally
  //    batching via `StartAsyncBatchRunnable` when completion happens on a
  //    different thread than the one that created this callback
  private final class CallbackForCreate[A](ctx: Context, threadId: Long, shouldPop: Boolean, cb: Callback[Throwable, A])
    extends Callback[Throwable, A] with TrampolinedRunnable {
    private[this] val state = AtomicInt(0)
    // Holds the outcome between the state transition and `run()`; cleared
    // after signaling to avoid retaining references
    private[this] var value: A = _
    private[this] var error: Throwable = _
    private[this] var isSameThread = false
    def this(ctx: Context, shouldPop: Boolean, cb: Callback[Throwable, A]) =
      this(ctx, Platform.currentThreadId(), shouldPop, cb)
    override def onSuccess(value: A): Unit =
      if (!tryOnSuccess(value)) {
        throw new CallbackCalledMultipleTimesException("onSuccess")
      }
    override def tryOnSuccess(value: A): Boolean = {
      // CAS guarantees only the first completion wins
      if (state.compareAndSet(0, 1)) {
        this.value = value
        startExecution()
        true
      } else {
        false
      }
    }
    override def onError(e: Throwable): Unit =
      if (!tryOnError(e)) {
        throw new CallbackCalledMultipleTimesException("onError", e)
      }
    override def tryOnError(e: Throwable): Boolean = {
      if (state.compareAndSet(0, 2)) {
        this.error = e
        startExecution()
        true
      } else {
        false
      }
    }
    // Called exactly once, right after the winning state transition;
    // schedules `run()` on the task's scheduler
    private def startExecution(): Unit = {
      // Cleanup of the current finalizer
      if (shouldPop) ctx.connection.pop()
      // Optimization — if the callback was called on the same thread
      // where it was created, then we are not going to fork
      // This is not safe to do when localContextPropagation enabled
      isSameThread = Platform.currentThreadId() == threadId
      try {
        ctx.scheduler.execute(
          if (isSameThread && !ctx.options.localContextPropagation)
            this
          else
            StartAsyncBatchRunnable(this, ctx.scheduler)
        )
      } catch {
        case e: RejectedExecutionException =>
          forceErrorReport(e)
      }
    }
    override def run(): Unit = {
      if (!isSameThread) {
        ctx.frameRef.reset()
      }
      // `run()` is only scheduled after a successful transition, so `state`
      // is guaranteed to be 1 or 2 at this point
      state.get() match {
        case 1 =>
          val v = value
          value = null.asInstanceOf[A]
          cb.onSuccess(v)
        case 2 =>
          val e = error
          error = null
          cb.onError(e)
      }
    }
    // Fallback when the scheduler rejects the signaling runnable: report any
    // stored error, then signal the rejection itself on the trampoline
    private def forceErrorReport(e: RejectedExecutionException): Unit = {
      value = null.asInstanceOf[A]
      if (error != null) {
        val e = error
        error = null
        ctx.scheduler.reportFailure(e)
      }
      Callback.signalErrorTrampolined(cb, e)
    }
  }
}
| alexandru/monifu | monix-eval/shared/src/main/scala/monix/eval/internal/TaskCreate.scala | Scala | apache-2.0 | 8,776 |
package org.sameersingh.mf
import math._
import scala.collection.mutable
/**
* @author sameer
*/
/** Base contract for metrics computed over (prediction, truth) pairs. */
trait Evaluator {

  /** Prediction function used to score matrix cells. */
  def predictValue: PredictValue

  /** Evaluates already-paired (prediction, truth) values. */
  def evaluate(predTruths: Seq[(Double, Double)], prefix: String = ""): Seq[(String, Double)] = {
    val (predictions, truths) = predTruths.unzip
    eval(predictions, truths, prefix)
  }

  /** Metric implementation supplied by concrete evaluators. */
  def eval(pred: Seq[Double], truth: Seq[Double], prefix: String): Seq[(String, Double)]

  /** Scores `cells` with [[predictValue]] and evaluates against their observed values. */
  def eval(cells: Seq[Cell], prefix: String): Seq[(String, Double)] = {
    val predictions = cells.map(c => predictValue.pred(c))
    val truths = cells.map(_.value.double)
    eval(predictions, truths, prefix)
  }

  /** Evaluates on the matrix's training cells. */
  def evalTrain(m: ObservedMatrix) = eval(m.trainCells, "Train ")

  /** Evaluates on the matrix's held-out test cells. */
  def evalTest(m: ObservedMatrix) = eval(m.testCells, "Test ")
}
/** An [[Evaluator]] expressed as a per-example error that is averaged. */
trait Error extends Evaluator {

  /** Error contribution of a single cell, for stochastic estimation;
    * zero for cells outside the target matrix of [[predictValue]]. */
  def value(c: Cell): Double =
    if (c.inMatrix == predictValue.target) value(predictValue.pred(c), c.value.double)
    else 0.0

  /** Per-example error between a prediction and its ground truth. */
  def value(pred: Double, truth: Double): Double

  /** Mean per-cell error over `cells`. */
  def avgValue(cells: Seq[Cell]): Double =
    cells.map(c => value(c)).sum / cells.size.toDouble

  /** Mean error over aligned prediction/truth sequences. */
  def avgValue(pred: Seq[Double], truth: Seq[Double]): Double = {
    val total = pred.zip(truth).map { case (p, t) => value(p, t) }.sum
    total / pred.size.toDouble
  }

  /** Short metric name used as the report label. */
  def name: String

  /** Reports a single (name, average error) entry. */
  def eval(pred: Seq[Double], truth: Seq[Double], prefix: String) = Seq(prefix + name -> avgValue(pred, truth))
}
/** Squared-error (L2) metric. */
trait L2 extends Error {
  val name = "L2"

  /** Squared difference between truth and prediction. */
  def value(pred: Double, truth: Double): Double = {
    val diff = truth - pred
    StrictMath.pow(diff, 2.0)
  }
}
/** 0/100 misclassification error under a probability threshold. */
trait Hamming extends Error {
  val name = "Hamming"

  def predictValue: PredictProb

  /** Probability above which a prediction counts as positive. */
  def threshold = 0.5

  /** 0.0 when the thresholded prediction agrees with the binarized truth,
    * 100.0 otherwise. */
  def value(pred: Double, truth: Double): Double = {
    val agree = (pred > threshold && truth > 0.5) || (pred <= threshold && truth <= 0.5)
    if (agree) 0.0 else 100.0
  }
}
/** Negative log-likelihood of a Bernoulli observation under the predicted
  * probability.
  */
trait NLL extends Error {
  val name = "NLL"

  def predictValue: PredictProb

  /** Negative log-likelihood of `truth` under probability `pred`.
    * `truth` must lie in [0, 1].
    */
  def value(pred: Double, truth: Double): Double = {
    val lprob = log(pred)
    val liprob = log(1.0 - pred)
    // Bug fix: the original check was `truth >= 0.0 || truth <= 1.0`, which
    // is vacuously true for every double; the intent is a range check.
    assert(truth >= 0.0 && truth <= 1.0)
    -(truth * lprob + (1.0 - truth) * liprob) // negative log likelihood
  }
}
/** Precision/recall/F1 computed over individual cells, thresholding the
  * predicted probability at [[threshold]]. Stateful: counts accumulate in
  * [[tp]]/[[fp]]/[[fn]] until [[reset]] is called.
  */
trait PerCellF1 extends Evaluator {

  def predictValue: PredictProb

  /** Probability above which a prediction counts as positive. */
  def threshold = 0.5

  /** Folds a single (prediction, truth) pair into the confusion counts. */
  def eval(pred: Double, truth: Double) = {
    val predictedPositive = pred > threshold
    if (predictedPositive && truth > 0.5) tp += 1.0
    else if (predictedPositive) fp += 1.0
    else if (truth > 0.5) fn += 1.0
  }

  // Confusion counts: true positives, false positives, false negatives.
  var tp: Double = 0.0
  var fp: Double = 0.0
  var fn: Double = 0.0

  def precNumerator: Double = tp

  def precDenominator: Double = tp + fp

  def recallNumerator: Double = tp

  def recallDenominator: Double = tp + fn

  /** Precision; defined as 1.0 when nothing was predicted positive. */
  def precision: Double =
    if (precDenominator == 0.0) 1.0
    else precNumerator / precDenominator

  /** Recall; defined as 1.0 when there are no actual positives. */
  def recall: Double =
    if (recallDenominator == 0.0) 1.0
    else recallNumerator / recallDenominator

  /** Harmonic mean of precision and recall (0.0 when both are zero). */
  def f1: Double = {
    val r: Double = recall
    val p: Double = precision
    if (p + r == 0.0) 0.0
    else (2 * p * r) / (p + r)
  }

  /** Clears the accumulated confusion counts. */
  def reset = {
    tp = 0.0
    fp = 0.0
    fn = 0.0
  }

  /** Resets the counts, folds in all pairs, and reports percentages. */
  def eval(pred: Seq[Double], truth: Seq[Double], prefix: String): Seq[(String, Double)] = {
    reset
    pred.zip(truth).foreach { case (p, t) => eval(p, t) }
    Seq("Prec" -> precision, "Recall" -> recall, "F1" -> f1).map { case (n, v) => (prefix + n, v * 100.0) }
  }
}
/** Runs a collection of [[Evaluator]]s against an observed matrix, grouping
  * them by the prediction function they share so each cell is scored only
  * once per prediction function.
  */
class Evaluators(val evals: Seq[Evaluator]) {

  // Evaluators keyed by their shared prediction function.
  val dotValues = evals.groupBy(_.predictValue)

  /** Evaluates all metrics on the train / test / additional / combined cell
    * sets of `m`, returning results keyed by data-set label.
    *
    * NOTE(review): when several prediction functions target the same matrix,
    * later iterations overwrite earlier results under the same label — looks
    * intentional for the single-prediction case; confirm if multiple targets
    * are ever used.
    */
  def eval(m: ObservedMatrix, additionalTestCells: Seq[Cell]): Map[String, Seq[(String, Double)]] = {
    val result = new mutable.HashMap[String, Seq[(String, Double)]]
    // go through the prediction functions that target this matrix
    // (the unused `combinedTest` local from the original was removed;
    // the combined set is built per prediction function below)
    for (dv <- dotValues.keys; if (dv.target == m)) {
      val additionalCells = additionalTestCells.filter(_.inMatrix == m)
      // predict values for each cell only once
      val trainPreds = "Train " -> m.trainCells.map(c => (dv.pred(c), c.value.double))
      val testPred = "Test " -> m.testCells.map(c => (dv.pred(c), c.value.double))
      val additionalPreds = "Additional " -> additionalCells.map(c => (dv.pred(c), c.value.double))
      val combinedPreds = "Combined " -> (testPred._2 ++ additionalPreds._2)
      val evalPreds = Seq(trainPreds, testPred, additionalPreds, combinedPreds)
      for (evalPred <- evalPreds) {
        result(evalPred._1) = evals.map(e => e.evaluate(evalPred._2)).flatten
      }
    }
    result.toMap
  }

  /** Renders the evaluation results as an aligned plain-text table. */
  def string(evalResults: Map[String, Seq[(String, Double)]]): String = {
    val sb = new StringBuffer
    sb append ("%14s %s\\n" format("Data type", evalResults.head._2.map(_._1).map(s => "%13s" format (s)).mkString(" ")))
    sb append ("%s %s\\n" format(("%14s" format("")).replace(' ', '-'), evalResults.head._2.map(_._1).map(s => "%13s" format ("")).map(_.replace(' ', '-')).mkString(" ")))
    for ((data, results) <- evalResults) {
      sb append ("%15s%s\\n" format(data, results.map(_._2).map(s => "%13.7f" format (s)).mkString(" ")))
    }
    sb append ("%s %s\\n" format(("%14s" format("")).replace(' ', '-'), evalResults.head._2.map(_._1).map(s => "%13s" format ("")).map(_.replace(' ', '-')).mkString(" ")))
    sb.toString
  }

  /** Evaluates `m` and renders the results in one step. */
  def string(m: ObservedMatrix, additionalTestCells: Seq[Cell]): String = string(eval(m, additionalTestCells))
}
/** Varargs convenience factory: `Evaluators(e1, e2, ...)`. */
object Evaluators {
  def apply(evals: Evaluator*): Evaluators = new Evaluators(evals)
}
| sameersingh/mf | src/main/scala/org/sameersingh/mf/Evaluators.scala | Scala | apache-2.0 | 5,395 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import java.sql.{Date, Timestamp}
import scala.collection.JavaConversions._
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.DefaultParserDialect
import org.apache.spark.sql.catalyst.analysis.{FunctionRegistry, EliminateSubQueries}
import org.apache.spark.sql.catalyst.errors.DialectException
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.hive.test.TestHive
import org.apache.spark.sql.hive.test.TestHive._
import org.apache.spark.sql.hive.test.TestHive.implicits._
import org.apache.spark.sql.hive.{HiveContext, HiveQLDialect, MetastoreRelation}
import org.apache.spark.sql.execution.datasources.parquet.ParquetRelation
import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.CalendarInterval
// Fixtures for the nested-struct query tests below ("double nested data"):
// Nested1.f1.f2.f3 is an Int buried three levels deep.
case class Nested1(f1: Nested2)
case class Nested2(f2: Nested3)
case class Nested3(f3: Int)
// Fixtures for the nested-array test ("explode nested Field"):
// NestedArray1.a.b is a Seq[Int] reached through one struct level.
case class NestedArray2(b: Seq[Int])
case class NestedArray1(a: NestedArray2)
// Row fixture for the partitioned-table CTAS/insert tests (SPARK-6851);
// `type` is back-quoted because it is a Scala keyword.
case class Order(
    id: Int,
    make: String,
    `type`: String,
    price: Int,
    pdate: String,
    customer: String,
    city: String,
    state: String,
    month: Int)
// Row fixture used by window-function tests (not exercised in this chunk).
case class WindowData(
    month: Int,
    area: String,
    product: Int)
/**
 * A SQL dialect defined for testing dialect switching (it is loaded by its
 * canonical class name in the tests below, so it cannot be a nested type).
 */
class MyDialect extends DefaultParserDialect
/**
* A collection of hive query tests where we generate the answers ourselves instead of depending on
* Hive to generate them (in contrast to HiveQuerySuite). Often this is because the query is
* valid, but Hive currently cannot execute it.
* Hive查询测试的集合,我们自己生成答案,而不是依赖Hive来生成答案(与HiveQuerySuite相比),
* 通常这是因为查询是有效的,但Hive目前不能执行它。
*/
class SQLQuerySuite extends QueryTest with SQLTestUtils {
override def _sqlContext: SQLContext = TestHive
private val sqlContext = _sqlContext
test("UDTF") {
val jarPath = TestHive.getHiveFile("TestUDTF.jar").getCanonicalPath
// SPARK-11595 Fixes ADD JAR when input path contains URL scheme
//当输入路径包含URL方案时修复ADD JAR
val jarURL = s"file://$jarPath"
sql(s"ADD JAR $jarURL")
// The function source code can be found at:
//函数源代码可以在以下位置找到:
// https://cwiki.apache.org/confluence/display/Hive/DeveloperGuide+UDTF
sql(
"""
|CREATE TEMPORARY FUNCTION udtf_count2
|AS 'org.apache.spark.sql.hive.execution.GenericUDTFCount2'
""".stripMargin)
checkAnswer(
sql("SELECT key, cc FROM src LATERAL VIEW udtf_count2(value) dd AS cc"),
Row(97, 500) :: Row(97, 500) :: Nil)
checkAnswer(
sql("SELECT udtf_count2(a) FROM (SELECT 1 AS a FROM src LIMIT 3) t"),
Row(3) :: Row(3) :: Nil)
}
//udtf在侧面看
test("SPARK-6835: udtf in lateral view") {
val df = Seq((1, 1)).toDF("c1", "c2")
df.registerTempTable("table1")
val query = sql("SELECT c1, v FROM table1 LATERAL VIEW stack(3, 1, c1 + 1, c1 + 2) d AS v")
checkAnswer(query, Row(1, 1) :: Row(1, 2) :: Row(1, 3) :: Nil)
}
//自动转换parquet tables
test("SPARK-6851: Self-joined converted parquet tables") {
val orders = Seq(
Order(1, "Atlas", "MTB", 234, "2015-01-07", "John D", "Pacifica", "CA", 20151),
Order(3, "Swift", "MTB", 285, "2015-01-17", "John S", "Redwood City", "CA", 20151),
Order(4, "Atlas", "Hybrid", 303, "2015-01-23", "Jones S", "San Mateo", "CA", 20151),
Order(7, "Next", "MTB", 356, "2015-01-04", "Jane D", "Daly City", "CA", 20151),
Order(10, "Next", "YFlikr", 187, "2015-01-09", "John D", "Fremont", "CA", 20151),
Order(11, "Swift", "YFlikr", 187, "2015-01-23", "John D", "Hayward", "CA", 20151),
Order(2, "Next", "Hybrid", 324, "2015-02-03", "Jane D", "Daly City", "CA", 20152),
Order(5, "Next", "Street", 187, "2015-02-08", "John D", "Fremont", "CA", 20152),
Order(6, "Atlas", "Street", 154, "2015-02-09", "John D", "Pacifica", "CA", 20152),
Order(8, "Swift", "Hybrid", 485, "2015-02-19", "John S", "Redwood City", "CA", 20152),
Order(9, "Atlas", "Split", 303, "2015-02-28", "Jones S", "San Mateo", "CA", 20152))
val orderUpdates = Seq(
Order(1, "Atlas", "MTB", 434, "2015-01-07", "John D", "Pacifica", "CA", 20151),
Order(11, "Swift", "YFlikr", 137, "2015-01-23", "John D", "Hayward", "CA", 20151))
orders.toDF.registerTempTable("orders1")
orderUpdates.toDF.registerTempTable("orderupdates1")
sql(
"""CREATE TABLE orders(
| id INT,
| make String,
| type String,
| price INT,
| pdate String,
| customer String,
| city String)
|PARTITIONED BY (state STRING, month INT)
|STORED AS PARQUET
""".stripMargin)
sql(
"""CREATE TABLE orderupdates(
| id INT,
| make String,
| type String,
| price INT,
| pdate String,
| customer String,
| city String)
|PARTITIONED BY (state STRING, month INT)
|STORED AS PARQUET
""".stripMargin)
sql("set hive.exec.dynamic.partition.mode=nonstrict")
sql("INSERT INTO TABLE orders PARTITION(state, month) SELECT * FROM orders1")
sql("INSERT INTO TABLE orderupdates PARTITION(state, month) SELECT * FROM orderupdates1")
checkAnswer(
sql(
"""
|select orders.state, orders.month
|from orders
|join (
| select distinct orders.state,orders.month
| from orders
| join orderupdates
| on orderupdates.id = orders.id) ao
| on ao.state = orders.state and ao.month = orders.month
""".stripMargin),
(1 to 6).map(_ => Row("CA", 20151)))
}
//显示函数
test("show functions") {
sql("SHOW functions").show(2000,false)
val allFunctions =
(FunctionRegistry.builtin.listFunction().toSet[String] ++
org.apache.hadoop.hive.ql.exec.FunctionRegistry.getFunctionNames).toList.sorted
checkAnswer(sql("SHOW functions"), allFunctions.map(Row(_)))
checkAnswer(sql("SHOW functions abs"), Row("abs"))
checkAnswer(sql("SHOW functions 'abs'"), Row("abs"))
checkAnswer(sql("SHOW functions abc.abs"), Row("abs"))
checkAnswer(sql("SHOW functions `abc`.`abs`"), Row("abs"))
checkAnswer(sql("SHOW functions `abc`.`abs`"), Row("abs"))
checkAnswer(sql("SHOW functions `~`"), Row("~"))
checkAnswer(sql("SHOW functions `a function doens't exist`"), Nil)
checkAnswer(sql("SHOW functions `weekofyea.*`"), Row("weekofyear"))
// this probably will failed if we add more function with `sha` prefixing.
checkAnswer(sql("SHOW functions `sha.*`"), Row("sha") :: Row("sha1") :: Row("sha2") :: Nil)
}
//描述功能
test("describe functions") {
// The Spark SQL built-in functions
//Spark SQL内置函数
checkExistence(sql("describe function extended upper"), true,
"Function: upper",
"Class: org.apache.spark.sql.catalyst.expressions.Upper",
"Usage: upper(str) - Returns str with all characters changed to uppercase",
"Extended Usage:",
"> SELECT upper('SparkSql')",
"'SPARKSQL'")
checkExistence(sql("describe functioN Upper"), true,
"Function: upper",
"Class: org.apache.spark.sql.catalyst.expressions.Upper",
"Usage: upper(str) - Returns str with all characters changed to uppercase")
checkExistence(sql("describe functioN Upper"), false,
"Extended Usage")
checkExistence(sql("describe functioN abcadf"), true,
"Function: abcadf is not found.")
checkExistence(sql("describe functioN `~`"), true,
"Function: ~",
"Class: org.apache.hadoop.hive.ql.udf.UDFOPBitNot",
"Usage: ~ n - Bitwise not")
}
//与null和sum结合
test("SPARK-5371: union with null and sum") {
val df = Seq((1, 1)).toDF("c1", "c2")
df.registerTempTable("table1")
val query = sql(
"""
|SELECT
| MIN(c1),
| MIN(c2)
|FROM (
| SELECT
| SUM(c1) c1,
| NULL c2
| FROM table1
| UNION ALL
| SELECT
| NULL c1,
| SUM(c2) c2
| FROM table1
|) a
""".stripMargin)
checkAnswer(query, Row(1, 1) :: Nil)
}
//CTAS WITH WITH子句
test("CTAS with WITH clause") {
val df = Seq((1, 1)).toDF("c1", "c2")
df.registerTempTable("table1")
sql(
"""
|CREATE TABLE with_table1 AS
|WITH T AS (
| SELECT *
| FROM table1
|)
|SELECT *
|FROM T
""".stripMargin)
val query = sql("SELECT * FROM with_table1")
checkAnswer(query, Row(1, 1) :: Nil)
}
//爆炸嵌套字段
test("explode nested Field") {
Seq(NestedArray1(NestedArray2(Seq(1, 2, 3)))).toDF.registerTempTable("nestedArray")
checkAnswer(
sql("SELECT ints FROM nestedArray LATERAL VIEW explode(a.b) a AS ints"),
Row(1) :: Row(2) :: Row(3) :: Nil)
}
//修复使用SORT BY时的属性参考解析错误
test("SPARK-4512 Fix attribute reference resolution error when using SORT BY") {
checkAnswer(
sql("SELECT * FROM (SELECT key + key AS a FROM src SORT BY value) t ORDER BY t.a"),
sql("SELECT key + key as a FROM src ORDER BY a").collect().toSeq
)
}
//没有serde的CTAS
test("CTAS without serde") {
def checkRelation(tableName: String, isDataSourceParquet: Boolean): Unit = {
val relation = EliminateSubQueries(catalog.lookupRelation(Seq(tableName)))
relation match {
case LogicalRelation(r: ParquetRelation, _) =>
if (!isDataSourceParquet) {
fail(
s"${classOf[MetastoreRelation].getCanonicalName} is expected, but found " +
s"${ParquetRelation.getClass.getCanonicalName}.")
}
case r: MetastoreRelation =>
if (isDataSourceParquet) {
fail(
s"${ParquetRelation.getClass.getCanonicalName} is expected, but found " +
s"${classOf[MetastoreRelation].getCanonicalName}.")
}
}
}
val originalConf = convertCTAS
setConf(HiveContext.CONVERT_CTAS, true)
try {
sql("CREATE TABLE ctas1 AS SELECT key k, value FROM src ORDER BY k, value")
sql("CREATE TABLE IF NOT EXISTS ctas1 AS SELECT key k, value FROM src ORDER BY k, value")
var message = intercept[AnalysisException] {
sql("CREATE TABLE ctas1 AS SELECT key k, value FROM src ORDER BY k, value")
}.getMessage
assert(message.contains("ctas1 already exists"))
checkRelation("ctas1", true)
sql("DROP TABLE ctas1")
// Specifying database name for query can be converted to data source write path
// is not allowed right now.
message = intercept[AnalysisException] {
sql("CREATE TABLE default.ctas1 AS SELECT key k, value FROM src ORDER BY k, value")
}.getMessage
assert(
message.contains("Cannot specify database name in a CTAS statement"),
"When spark.sql.hive.convertCTAS is true, we should not allow " +
"database name specified.")
sql("CREATE TABLE ctas1 stored as textfile" +
" AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation("ctas1", true)
sql("DROP TABLE ctas1")
sql("CREATE TABLE ctas1 stored as sequencefile" +
" AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation("ctas1", true)
sql("DROP TABLE ctas1")
sql("CREATE TABLE ctas1 stored as rcfile AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation("ctas1", false)
sql("DROP TABLE ctas1")
sql("CREATE TABLE ctas1 stored as orc AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation("ctas1", false)
sql("DROP TABLE ctas1")
sql("CREATE TABLE ctas1 stored as parquet AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation("ctas1", false)
sql("DROP TABLE ctas1")
} finally {
setConf(HiveContext.CONVERT_CTAS, originalConf)
sql("DROP TABLE IF EXISTS ctas1")
}
}
//SQL方言切换
test("SQL Dialect Switching") {
assert(getSQLDialect().getClass === classOf[HiveQLDialect])
setConf("spark.sql.dialect", classOf[MyDialect].getCanonicalName())
assert(getSQLDialect().getClass === classOf[MyDialect])
assert(sql("SELECT 1").collect() === Array(Row(1)))
// set the dialect back to the DefaultSQLDialect
//将方言设置回DefaultSQLDialect
sql("SET spark.sql.dialect=sql")
assert(getSQLDialect().getClass === classOf[DefaultParserDialect])
sql("SET spark.sql.dialect=hiveql")
assert(getSQLDialect().getClass === classOf[HiveQLDialect])
// set invalid dialect 设置无效方言
sql("SET spark.sql.dialect.abc=MyTestClass")
sql("SET spark.sql.dialect=abc")
intercept[Exception] {
sql("SELECT 1")
}
// test if the dialect set back to HiveQLDialect
//测试方言是否回到HiveQLDialect
getSQLDialect().getClass === classOf[HiveQLDialect]
sql("SET spark.sql.dialect=MyTestClass")
intercept[DialectException] {
sql("SELECT 1")
}
// test if the dialect set back to HiveQLDialect
//测试方言是否回到HiveQLDialect
assert(getSQLDialect().getClass === classOf[HiveQLDialect])
}
//CTAS与serde
test("CTAS with serde") {
sql("CREATE TABLE ctas1 AS SELECT key k, value FROM src ORDER BY k, value").collect()
sql(
"""CREATE TABLE ctas2
| ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"
| WITH SERDEPROPERTIES("serde_p1"="p1","serde_p2"="p2")
| STORED AS RCFile
| TBLPROPERTIES("tbl_p1"="p11", "tbl_p2"="p22")
| AS
| SELECT key, value
| FROM src
| ORDER BY key, value""".stripMargin).collect()
sql(
"""CREATE TABLE ctas3
| ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\012'
| STORED AS textfile AS
| SELECT key, value
| FROM src
| ORDER BY key, value""".stripMargin).collect()
// the table schema may like (key: integer, value: string)
//表格可能会喜欢(key:integer,value:string)
sql(
"""CREATE TABLE IF NOT EXISTS ctas4 AS
| SELECT 1 AS key, value FROM src LIMIT 1""".stripMargin).collect()
// do nothing cause the table ctas4 already existed.
sql(
"""CREATE TABLE IF NOT EXISTS ctas4 AS
| SELECT key, value FROM src ORDER BY key, value""".stripMargin).collect()
checkAnswer(
sql("SELECT k, value FROM ctas1 ORDER BY k, value"),
sql("SELECT key, value FROM src ORDER BY key, value").collect().toSeq)
checkAnswer(
sql("SELECT key, value FROM ctas2 ORDER BY key, value"),
sql(
"""
SELECT key, value
FROM src
ORDER BY key, value""").collect().toSeq)
checkAnswer(
sql("SELECT key, value FROM ctas3 ORDER BY key, value"),
sql(
"""
SELECT key, value
FROM src
ORDER BY key, value""").collect().toSeq)
intercept[AnalysisException] {
sql(
"""CREATE TABLE ctas4 AS
| SELECT key, value FROM src ORDER BY key, value""".stripMargin).collect()
}
checkAnswer(
sql("SELECT key, value FROM ctas4 ORDER BY key, value"),
sql("SELECT key, value FROM ctas4 LIMIT 1").collect().toSeq)
checkExistence(sql("DESC EXTENDED ctas2"), true,
"name:key", "type:string", "name:value", "ctas2",
"org.apache.hadoop.hive.ql.io.RCFileInputFormat",
"org.apache.hadoop.hive.ql.io.RCFileOutputFormat",
"org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe",
"serde_p1=p1", "serde_p2=p2", "tbl_p1=p11", "tbl_p2=p22", "MANAGED_TABLE"
)
sql(
"""CREATE TABLE ctas5
| STORED AS parquet AS
| SELECT key, value
| FROM src
| ORDER BY key, value""".stripMargin).collect()
withSQLConf(HiveContext.CONVERT_METASTORE_PARQUET.key -> "false") {
checkExistence(sql("DESC EXTENDED ctas5"), true,
"name:key", "type:string", "name:value", "ctas5",
"org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat",
"org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat",
"org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe",
"MANAGED_TABLE"
)
}
// use the Hive SerDe for parquet tables
//使用Hive SerDe进行parquet表
withSQLConf(HiveContext.CONVERT_METASTORE_PARQUET.key -> "false") {
checkAnswer(
sql("SELECT key, value FROM ctas5 ORDER BY key, value"),
sql("SELECT key, value FROM src ORDER BY key, value").collect().toSeq)
}
}
//指定CTAS的列列表
test("specifying the column list for CTAS") {
Seq((1, "111111"), (2, "222222")).toDF("key", "value").registerTempTable("mytable1")
sql("create table gen__tmp(a int, b string) as select key, value from mytable1")
checkAnswer(
sql("SELECT a, b from gen__tmp"),
sql("select key, value from mytable1").collect())
sql("DROP TABLE gen__tmp")
sql("create table gen__tmp(a double, b double) as select key, value from mytable1")
checkAnswer(
sql("SELECT a, b from gen__tmp"),
sql("select cast(key as double), cast(value as double) from mytable1").collect())
sql("DROP TABLE gen__tmp")
sql("drop table mytable1")
}
//命令替换
test("command substitution") {
sql("set tbl=src")
checkAnswer(
sql("SELECT key FROM ${hiveconf:tbl} ORDER BY key, value limit 1"),
sql("SELECT key FROM src ORDER BY key, value limit 1").collect().toSeq)
sql("set hive.variable.substitute=false") // disable the substitution
sql("set tbl2=src")
intercept[Exception] {
sql("SELECT key FROM ${hiveconf:tbl2} ORDER BY key, value limit 1").collect()
}
sql("set hive.variable.substitute=true") // enable the substitution
checkAnswer(
sql("SELECT key FROM ${hiveconf:tbl2} ORDER BY key, value limit 1"),
sql("SELECT key FROM src ORDER BY key, value limit 1").collect().toSeq)
}
//排序不在选择
test("ordering not in select") {
checkAnswer(
sql("SELECT key FROM src ORDER BY value"),
sql("SELECT key FROM (SELECT key, value FROM src ORDER BY value) a").collect().toSeq)
}
//排序不在agg
test("ordering not in agg") {
checkAnswer(
sql("SELECT key FROM src GROUP BY key, value ORDER BY value"),
sql("""
SELECT key
FROM (
SELECT key, value
FROM src
GROUP BY key, value
ORDER BY value) a""").collect().toSeq)
}
//double嵌套数据
test("double nested data") {
sparkContext.parallelize(Nested1(Nested2(Nested3(1))) :: Nil)
.toDF().registerTempTable("nested")
checkAnswer(
sql("SELECT f1.f2.f3 FROM nested"),
Row(1))
checkAnswer(sql("CREATE TABLE test_ctas_1234 AS SELECT * from nested"),
Seq.empty[Row])
checkAnswer(
sql("SELECT * FROM test_ctas_1234"),
sql("SELECT * FROM nested").collect().toSeq)
intercept[AnalysisException] {
sql("CREATE TABLE test_ctas_12345 AS SELECT * from notexists").collect()
}
}
//测试CTAS
test("test CTAS") {
checkAnswer(sql("CREATE TABLE test_ctas_123 AS SELECT key, value FROM src"), Seq.empty[Row])
checkAnswer(
sql("SELECT key, value FROM test_ctas_123 ORDER BY key"),
sql("SELECT key, value FROM src ORDER BY key").collect().toSeq)
}
//保存加入表
test("SPARK-4825 save join to table") {
val testData = sparkContext.parallelize(1 to 10).map(i => TestData(i, i.toString)).toDF()
sql("CREATE TABLE test1 (key INT, value STRING)")
testData.write.mode(SaveMode.Append).insertInto("test1")
sql("CREATE TABLE test2 (key INT, value STRING)")
testData.write.mode(SaveMode.Append).insertInto("test2")
testData.write.mode(SaveMode.Append).insertInto("test2")
sql("CREATE TABLE test AS SELECT COUNT(a.value) FROM test1 a JOIN test2 b ON a.key = b.key")
checkAnswer(
table("test"),
sql("SELECT COUNT(a.value) FROM test1 a JOIN test2 b ON a.key = b.key").collect().toSeq)
}
//反引号没有正确处理是别名
test("SPARK-3708 Backticks aren't handled correctly is aliases") {
checkAnswer(
sql("SELECT k FROM (SELECT `key` AS `k` FROM src) a"),
sql("SELECT `key` FROM src").collect().toSeq)
}
//在子查询别名中反引号不正确处理
test("SPARK-3834 Backticks not correctly handled in subquery aliases") {
checkAnswer(
sql("SELECT a.key FROM (SELECT key FROM src) `a`"),
sql("SELECT `key` FROM src").collect().toSeq)
}
//支持Bitwise和运算符
test("SPARK-3814 Support Bitwise & operator") {
checkAnswer(
sql("SELECT case when 1&1=1 then 1 else 0 end FROM src"),
sql("SELECT 1 FROM src").collect().toSeq)
}
//支持Bitwise |操作者
test("SPARK-3814 Support Bitwise | operator") {
checkAnswer(
sql("SELECT case when 1|0=1 then 1 else 0 end FROM src"),
sql("SELECT 1 FROM src").collect().toSeq)
}
//支持Bitwise ^运算符
test("SPARK-3814 Support Bitwise ^ operator") {
checkAnswer(
sql("SELECT case when 1^0=1 then 1 else 0 end FROM src"),
sql("SELECT 1 FROM src").collect().toSeq)
}
//支持Bitwise〜operator
test("SPARK-3814 Support Bitwise ~ operator") {
checkAnswer(
sql("SELECT case when ~1=-2 then 1 else 0 end FROM src"),
sql("SELECT 1 FROM src").collect().toSeq)
}
// SPARK-4154: a query using 'not between' must work in both Spark SQL and HQL.
test("SPARK-4154 Query does not work if it has 'not between' in Spark SQL and HQL") {
  checkAnswer(sql("SELECT key FROM src WHERE key not between 0 and 10 order by key"),
    sql("SELECT key FROM src WHERE key between 11 and 500 order by key").collect().toSeq)
}
// SPARK-2554: partial aggregation must work for SumDistinct.
test("SPARK-2554 SumDistinct partial aggregation") {
  checkAnswer(sql("SELECT sum( distinct key) FROM src group by key order by key"),
    sql("SELECT distinct key FROM src order by key").collect().toSeq)
}
// SPARK-4963: DataFrame.sample over mutable rows must not return wrong results.
test("SPARK-4963 DataFrame sample on mutable row return wrong result") {
  sql("SELECT * FROM src WHERE key % 2 = 0")
    .sample(withReplacement = false, fraction = 0.3)
    .registerTempTable("sampled")
  // The sample only contains even keys, so a query for odd keys must be empty
  // every time it is evaluated.
  (1 to 10).foreach { i =>
    checkAnswer(
      sql("SELECT * FROM sampled WHERE key % 2 = 1"),
      Seq.empty[Row])
  }
}
// SPARK-4699: HiveContext should resolve identifiers case-insensitively by default.
test("SPARK-4699 HiveContext should be case insensitive by default") {
  checkAnswer(
    sql("SELECT KEY FROM Src ORDER BY value"),
    sql("SELECT key FROM src ORDER BY value").collect().toSeq)
}
// SPARK-5284: inserting into Hive must not throw an NPE when an inner complex-type
// field carries a null value.
test("SPARK-5284 Insert into Hive throws NPE when a inner complex type field has a null value") {
  val schema = StructType(
    StructField("s",
      StructType(
        StructField("innerStruct", StructType(StructField("s1", StringType, true) :: Nil)) ::
          StructField("innerArray", ArrayType(IntegerType), true) ::
          StructField("innerMap", MapType(StringType, IntegerType)) :: Nil), true) :: Nil)
  // A single row whose nested struct, array and map fields are all null.
  val row = Row(Row(null, null, null))
  val rowRdd = sparkContext.parallelize(row :: Nil)
  TestHive.createDataFrame(rowRdd, schema).registerTempTable("testTable")
  sql(
    """CREATE TABLE nullValuesInInnerComplexTypes
      | (s struct<innerStruct: struct<s1:string>,
      | innerArray:array<int>,
      | innerMap: map<string, int>>)
    """.stripMargin).collect()
  sql(
    """
      |INSERT OVERWRITE TABLE nullValuesInInnerComplexTypes
      |SELECT * FROM testTable
    """.stripMargin)
  checkAnswer(
    sql("SELECT * FROM nullValuesInInnerComplexTypes"),
    Row(Row(null, null, null))
  )
  sql("DROP TABLE nullValuesInInnerComplexTypes")
  dropTempTable("testTable")
}
// SPARK-4296: grouping on a field wrapped in a Hive UDF used as a sub-expression.
test("SPARK-4296 Grouping field with Hive UDF as sub expression") {
  val rdd = sparkContext.makeRDD( """{"a": "str", "b":"1", "c":"1970-01-01 00:00:00"}""" :: Nil)
  read.json(rdd).registerTempTable("data")
  checkAnswer(
    sql("SELECT concat(a, '-', b), year(c) FROM data GROUP BY concat(a, '-', b), year(c)"),
    Row("str-1", 1970))
  dropTempTable("data")
  read.json(rdd).registerTempTable("data")
  checkAnswer(sql("SELECT year(c) + 1 FROM data GROUP BY year(c) + 1"), Row(1971))
  dropTempTable("data")
}
// Resolve a UDTF (explode) used directly in a projection; the generated column
// must be addressable on the resulting DataFrame.
test("resolve udtf in projection #1") {
  val rdd = sparkContext.makeRDD((1 to 5).map(i => s"""{"a":[$i, ${i + 1}]}"""))
  read.json(rdd).registerTempTable("data")
  val df = sql("SELECT explode(a) AS val FROM data")
  val col = df("val")
}
// Resolve a multi-output UDTF in a projection; a mismatched alias arity must
// fail analysis rather than produce a bad plan.
test("resolve udtf in projection #2") {
  val rdd = sparkContext.makeRDD((1 to 2).map(i => s"""{"a":[$i, ${i + 1}]}"""))
  read.json(rdd).registerTempTable("data")
  checkAnswer(sql("SELECT explode(map(1, 1)) FROM data LIMIT 1"), Row(1, 1) :: Nil)
  checkAnswer(sql("SELECT explode(map(1, 1)) as (k1, k2) FROM data LIMIT 1"), Row(1, 1) :: Nil)
  intercept[AnalysisException] {
    sql("SELECT explode(map(1, 1)) as k1 FROM data LIMIT 1")
  }
  intercept[AnalysisException] {
    sql("SELECT explode(map(1, 1)) as (k1, k2, k3) FROM data LIMIT 1")
  }
}
// TGF with non-TGF in project is allowed in Spark SQL, but not in Hive
test("TGF with non-TGF in projection") {
  val rdd = sparkContext.makeRDD( """{"a": "1", "b":"1"}""" :: Nil)
  read.json(rdd).registerTempTable("data")
  checkAnswer(
    sql("SELECT explode(map(a, b)) as (k1, k2), a, b FROM data"),
    Row("1", "1", "1", "1") :: Nil)
}
// SPARK-5875: a logical Project containing aggregates or generators must not be
// marked as resolved.
test("logical.Project should not be resolved if it contains aggregates or generators") {
  // This test is used to test the fix of SPARK-5875.
  // The original issue was that Project's resolved will be true when it contains
  // AggregateExpressions or Generators. However, in this case, the Project
  // is not in a valid state (cannot be executed). Because of this bug, the analysis rule of
  // PreInsertionCasts will actually start to work before ImplicitGenerate and then
  // generates an invalid query plan.
  val rdd = sparkContext.makeRDD((1 to 5).map(i => s"""{"a":[$i, ${i + 1}]}"""))
  read.json(rdd).registerTempTable("data")
  val originalConf = convertCTAS
  setConf(HiveContext.CONVERT_CTAS, false)

  try {
    sql("CREATE TABLE explodeTest (key bigInt)")
    table("explodeTest").queryExecution.analyzed match {
      case metastoreRelation: MetastoreRelation => // OK
      case _ =>
        fail("To correctly test the fix of SPARK-5875, explodeTest should be a MetastoreRelation")
    }

    sql(s"INSERT OVERWRITE TABLE explodeTest SELECT explode(a) AS val FROM data")
    checkAnswer(
      sql("SELECT key from explodeTest"),
      (1 to 5).flatMap(i => Row(i) :: Row(i + 1) :: Nil)
    )

    sql("DROP TABLE explodeTest")
    dropTempTable("data")
  } finally {
    // Always restore the original CTAS-conversion setting.
    setConf(HiveContext.CONVERT_CTAS, originalConf)
  }
}
// SPARK-6618: concurrent create / lookup / drop of tables must not corrupt the catalog.
test("sanity test for SPARK-6618") {
  (1 to 100).par.map { i =>
    val tableName = s"SPARK_6618_table_$i"
    sql(s"CREATE TABLE $tableName (col1 string)")
    catalog.lookupRelation(Seq(tableName))
    table(tableName)
    tables()
    sql(s"DROP TABLE $tableName")
  }
}
// SPARK-5203: a UNION over columns of different decimal precision must analyze.
test("SPARK-5203 union with different decimal precision") {
  Seq.empty[(Decimal, Decimal)]
    .toDF("d1", "d2")
    .select($"d1".cast(DecimalType(10, 5)).as("d"))
    .registerTempTable("dn")

  sql("select d from dn union all select d * 2 from dn")
    .queryExecution.analyzed
}
// Script transformation ('TRANSFORM ... USING') with output written to stdout.
test("test script transform for stdout") {
  val data = (1 to 100000).map { i => (i, i, i) }
  data.toDF("d1", "d2", "d3").registerTempTable("script_trans")
  assert(100000 ===
    sql("SELECT TRANSFORM (d1, d2, d3) USING 'cat' AS (a,b,c) FROM script_trans")
      .queryExecution.toRdd.count())
}

// Script transformation whose child process writes only to stderr must yield no rows.
test("test script transform for stderr") {
  val data = (1 to 100000).map { i => (i, i, i) }
  data.toDF("d1", "d2", "d3").registerTempTable("script_trans")
  assert(0 ===
    sql("SELECT TRANSFORM (d1, d2, d3) USING 'cat 1>&2' AS (a,b,c) FROM script_trans")
      .queryExecution.toRdd.count())
}

// Script transformation with explicitly declared output column data types.
test("test script transform data type") {
  val data = (1 to 5).map { i => (i, i) }
  data.toDF("key", "value").registerTempTable("test")
  checkAnswer(
    sql("""FROM
      |(FROM test SELECT TRANSFORM(key, value) USING 'cat' AS (thing1 int, thing2 string)) t
      |SELECT thing1 + 1
      """.stripMargin), (2 to 6).map(i => Row(i)))
}
// Window function: a windowed UDAF combined with regular aggregate expressions
// (e.g. sum(sum(product)) over a partition of the grouped result).
test("window function: udaf with aggregate expressin") {
  val data = Seq(
    WindowData(1, "a", 5),
    WindowData(2, "a", 6),
    WindowData(3, "b", 7),
    WindowData(4, "b", 8),
    WindowData(5, "c", 9),
    WindowData(6, "c", 10)
  )
  sparkContext.parallelize(data).toDF().registerTempTable("windowData")

  checkAnswer(
    sql(
      """
        |select area, sum(product), sum(sum(product)) over (partition by area)
        |from windowData group by month, area
      """.stripMargin),
    Seq(
      ("a", 5, 11),
      ("a", 6, 11),
      ("b", 7, 15),
      ("b", 8, 15),
      ("c", 9, 19),
      ("c", 10, 19)
    ).map(i => Row(i._1, i._2, i._3)))

  checkAnswer(
    sql(
      """
        |select area, sum(product) - 1, sum(sum(product)) over (partition by area)
        |from windowData group by month, area
      """.stripMargin),
    Seq(
      ("a", 4, 11),
      ("a", 5, 11),
      ("b", 6, 15),
      ("b", 7, 15),
      ("c", 8, 19),
      ("c", 9, 19)
    ).map(i => Row(i._1, i._2, i._3)))

  checkAnswer(
    sql(
      """
        |select area, sum(product), sum(product) / sum(sum(product)) over (partition by area)
        |from windowData group by month, area
      """.stripMargin),
    Seq(
      ("a", 5, 5d/11),
      ("a", 6, 6d/11),
      ("b", 7, 7d/15),
      ("b", 8, 8d/15),
      ("c", 10, 10d/19),
      ("c", 9, 9d/19)
    ).map(i => Row(i._1, i._2, i._3)))

  checkAnswer(
    sql(
      """
        |select area, sum(product), sum(product) / sum(sum(product) - 1) over (partition by area)
        |from windowData group by month, area
      """.stripMargin),
    Seq(
      ("a", 5, 5d/9),
      ("a", 6, 6d/9),
      ("b", 7, 7d/13),
      ("b", 8, 8d/13),
      ("c", 10, 10d/17),
      ("c", 9, 9d/17)
    ).map(i => Row(i._1, i._2, i._3)))
}
// Window function: reference a column that is defined in an inner select block.
test("window function: refer column in inner select block") {
  val data = Seq(
    WindowData(1, "a", 5),
    WindowData(2, "a", 6),
    WindowData(3, "b", 7),
    WindowData(4, "b", 8),
    WindowData(5, "c", 9),
    WindowData(6, "c", 10)
  )
  sparkContext.parallelize(data).toDF().registerTempTable("windowData")

  checkAnswer(
    // rank() assigns equal values the same rank, and the next rank is not skipped;
    // here every partition has distinct months, so ranks are simply 1 and 2.
    sql(
      """
        |select area, rank() over (partition by area order by tmp.month) + tmp.tmp1 as c1
        |from (select month, area, product, 1 as tmp1 from windowData) tmp
      """.stripMargin),
    Seq(
      ("a", 2),
      ("a", 3),
      ("b", 2),
      ("b", 3),
      ("c", 2),
      ("c", 3)
    ).map(i => Row(i._1, i._2)))
}

// Window function: arbitrary expressions in the partition and order clauses.
test("window function: partition and order expressions") {
  val data = Seq(
    WindowData(1, "a", 5),
    WindowData(2, "a", 6),
    WindowData(3, "b", 7),
    WindowData(4, "b", 8),
    WindowData(5, "c", 9),
    WindowData(6, "c", 10)
  )
  sparkContext.parallelize(data).toDF().registerTempTable("windowData")

  checkAnswer(
    sql(
      """
        |select month, area, product, sum(product + 1) over (partition by 1 order by 2)
        |from windowData
      """.stripMargin),
    Seq(
      (1, "a", 5, 51),
      (2, "a", 6, 51),
      (3, "b", 7, 51),
      (4, "b", 8, 51),
      (5, "c", 9, 51),
      (6, "c", 10, 51)
    ).map(i => Row(i._1, i._2, i._3, i._4)))

  checkAnswer(
    sql(
      """
        |select month, area, product, sum(product)
        |over (partition by month % 2 order by 10 - product)
        |from windowData
      """.stripMargin),
    Seq(
      (1, "a", 5, 21),
      (2, "a", 6, 24),
      (3, "b", 7, 16),
      (4, "b", 8, 18),
      (5, "c", 9, 9),
      (6, "c", 10, 10)
    ).map(i => Row(i._1, i._2, i._3, i._4)))
}
// Window function: expressions used as the arguments of a window function
// (lag with a computed offset and a default value).
test("window function: expressions in arguments of a window functions") {
  val data = Seq(
    WindowData(1, "a", 5),
    WindowData(2, "a", 6),
    WindowData(3, "b", 7),
    WindowData(4, "b", 8),
    WindowData(5, "c", 9),
    WindowData(6, "c", 10)
  )
  sparkContext.parallelize(data).toDF().registerTempTable("windowData")

  checkAnswer(
    sql(
      """
        |select month, area, month % 2,
        |lag(product, 1 + 1, product) over (partition by month % 2 order by area)
        |from windowData
      """.stripMargin),
    Seq(
      (1, "a", 1, 5),
      (2, "a", 0, 6),
      (3, "b", 1, 7),
      (4, "b", 0, 8),
      (5, "c", 1, 5),
      (6, "c", 0, 6)
    ).map(i => Row(i._1, i._2, i._3, i._4)))
}

// Window function: several named window expressions combined in one select expression
// (also exercises case-insensitive frame keywords).
test("window function: multiple window expressions in a single expression") {
  val nums = sparkContext.parallelize(1 to 10).map(x => (x, x % 2)).toDF("x", "y")
  nums.registerTempTable("nums")

  val expected =
    Row(1, 1, 1, 55, 1, 57) ::
      Row(0, 2, 3, 55, 2, 60) ::
      Row(1, 3, 6, 55, 4, 65) ::
      Row(0, 4, 10, 55, 6, 71) ::
      Row(1, 5, 15, 55, 9, 79) ::
      Row(0, 6, 21, 55, 12, 88) ::
      Row(1, 7, 28, 55, 16, 99) ::
      Row(0, 8, 36, 55, 20, 111) ::
      Row(1, 9, 45, 55, 25, 125) ::
      Row(0, 10, 55, 55, 30, 140) :: Nil

  val actual = sql(
    """
      |SELECT
      | y,
      | x,
      | sum(x) OVER w1 AS running_sum,
      | sum(x) OVER w2 AS total_sum,
      | sum(x) OVER w3 AS running_sum_per_y,
      | ((sum(x) OVER w1) + (sum(x) OVER w2) + (sum(x) OVER w3)) as combined2
      |FROM nums
      |WINDOW w1 AS (ORDER BY x ROWS BETWEEN UnBOUNDED PRECEDiNG AND CuRRENT RoW),
      | w2 AS (ORDER BY x ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOuNDED FoLLOWING),
      | w3 AS (PARTITION BY y ORDER BY x ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
    """.stripMargin)

  checkAnswer(actual, expected)

  dropTempTable("nums")
}
// CASE <key> WHEN ... THEN ... ELSE ... END expression evaluation.
test("test case key when") {
  (1 to 5).map(i => (i, i.toString)).toDF("k", "v").registerTempTable("t")
  checkAnswer(
    sql("SELECT CASE k WHEN 2 THEN 22 WHEN 4 THEN 44 ELSE 0 END, v FROM t"),
    Row(0, "1") :: Row(22, "2") :: Row(0, "3") :: Row(44, "4") :: Row(0, "5") :: Nil)
}

// SPARK-7595: a window function must not cause resolution failure in a self join.
test("SPARK-7595: Window will cause resolve failed with self join") {
  sql("SELECT * FROM src") // Force loading of src table.

  checkAnswer(sql(
    """
      |with
      | v1 as (select key, count(value) over (partition by key) cnt_val from src),
      | v2 as (select v1.key, v1_lag.cnt_val from v1, v1 v1_lag where v1.key = v1_lag.key)
      | select * from v2 order by key limit 1
    """.stripMargin), Row(0, 3))
}

// SPARK-7269: group-by analysis must match select-list expressions case-insensitively,
// and still reject expressions that genuinely differ.
test("SPARK-7269 Check analysis failed in case in-sensitive") {
  Seq(1, 2, 3).map { i =>
    (i.toString, i.toString)
  }.toDF("key", "value").registerTempTable("df_analysis")
  sql("SELECT kEy from df_analysis group by key").collect()
  sql("SELECT kEy+3 from df_analysis group by key+3").collect()
  sql("SELECT kEy+3, a.kEy, A.kEy from df_analysis A group by key").collect()
  sql("SELECT cast(kEy+1 as Int) from df_analysis A group by cast(key+1 as int)").collect()
  sql("SELECT cast(kEy+1 as Int) from df_analysis A group by key+1").collect()
  sql("SELECT 2 from df_analysis A group by key+1").collect()
  intercept[AnalysisException] {
    sql("SELECT kEy+1 from df_analysis group by key+3")
  }
  intercept[AnalysisException] {
    sql("SELECT cast(key+2 as Int) from df_analysis A group by cast(key+1 as int)")
  }
}

// Casting a STRING literal to BIGINT must preserve the full 64-bit value.
test("Cast STRING to BIGINT") {
  checkAnswer(sql("SELECT CAST('775983671874188101' as BIGINT)"), Row(775983671874188101L))
}
// `Math.exp(1.0)` has different result for different jdk version, so not use createQueryTest
test("udf_java_method") {
  checkAnswer(sql(
    """
      |SELECT java_method("java.lang.String", "valueOf", 1),
      | java_method("java.lang.String", "isEmpty"),
      | java_method("java.lang.Math", "max", 2, 3),
      | java_method("java.lang.Math", "min", 2, 3),
      | java_method("java.lang.Math", "round", 2.5),
      | java_method("java.lang.Math", "exp", 1.0),
      | java_method("java.lang.Math", "floor", 1.9)
      |FROM src tablesample (1 rows)
    """.stripMargin),
    Row(
      "1",
      "true",
      java.lang.Math.max(2, 3).toString,
      java.lang.Math.min(2, 3).toString,
      java.lang.Math.round(2.5).toString,
      java.lang.Math.exp(1.0).toString,
      java.lang.Math.floor(1.9).toString))
}

// Dynamic partition values of date and decimal types must round-trip correctly.
test("dynamic partition value test") {
  try {
    sql("set hive.exec.dynamic.partition.mode=nonstrict")
    // date
    sql("drop table if exists dynparttest1")
    sql("create table dynparttest1 (value int) partitioned by (pdate date)")
    sql(
      """
        |insert into table dynparttest1 partition(pdate)
        | select count(*), cast('2015-05-21' as date) as pdate from src
      """.stripMargin)
    checkAnswer(
      sql("select * from dynparttest1"),
      Seq(Row(500, java.sql.Date.valueOf("2015-05-21"))))

    // decimal
    sql("drop table if exists dynparttest2")
    sql("create table dynparttest2 (value int) partitioned by (pdec decimal(5, 1))")
    sql(
      """
        |insert into table dynparttest2 partition(pdec)
        | select count(*), cast('100.12' as decimal(5, 1)) as pdec from src
      """.stripMargin)
    checkAnswer(
      sql("select * from dynparttest2"),
      Seq(Row(500, new java.math.BigDecimal("100.1"))))
  } finally {
    // Always drop the test tables and restore strict partition mode.
    sql("drop table if exists dynparttest1")
    sql("drop table if exists dynparttest2")
    sql("set hive.exec.dynamic.partition.mode=strict")
  }
}
// SPARK-8306: an ADD JAR issued from a different thread must still make the jar's
// classes available for CREATE TEMPORARY FUNCTION.
test("Call add jar in a different thread (SPARK-8306)") {
  @volatile var error: Option[Throwable] = None
  val thread = new Thread {
    override def run() {
      // To make sure this test works, this jar should not be loaded in another place.
      TestHive.sql(
        s"ADD JAR ${TestHive.getHiveFile("hive-contrib-0.13.1.jar").getCanonicalPath()}")
      try {
        TestHive.sql(
          """
            |CREATE TEMPORARY FUNCTION example_max
            |AS 'org.apache.hadoop.hive.contrib.udaf.example.UDAFExampleMax'
          """.stripMargin)
      } catch {
        case throwable: Throwable =>
          error = Some(throwable)
      }
    }
  }
  thread.start()
  thread.join()
  error match {
    case Some(throwable) =>
      fail("CREATE TEMPORARY FUNCTION should not fail.", throwable)
    case None => // OK
  }
}

// SPARK-6785: HiveQuerySuite - comparing a date (cast from timestamp) with a timestamp.
test("SPARK-6785: HiveQuerySuite - Date comparison test 2") {
  checkAnswer(
    sql("SELECT CAST(CAST(0 AS timestamp) AS date) > CAST(0 AS timestamp) FROM src LIMIT 1"),
    Row(false))
}

test("SPARK-6785: HiveQuerySuite - Date cast") {
  // new Date(0) == 1970-01-01 00:00:00.0 GMT == 1969-12-31 16:00:00.0 PST
  checkAnswer(
    sql(
      """
        | SELECT
        | CAST(CAST(0 AS timestamp) AS date),
        | CAST(CAST(CAST(0 AS timestamp) AS date) AS string),
        | CAST(0 AS timestamp),
        | CAST(CAST(0 AS timestamp) AS string),
        | CAST(CAST(CAST('1970-01-01 23:00:00' AS timestamp) AS date) AS timestamp)
        | FROM src LIMIT 1
      """.stripMargin),
    Row(
      Date.valueOf("1969-12-31"),
      String.valueOf("1969-12-31"),
      Timestamp.valueOf("1969-12-31 16:00:00"),
      String.valueOf("1969-12-31 16:00:00"),
      Timestamp.valueOf("1970-01-01 00:00:00")))
}
// SPARK-8588: HiveTypeCoercion.inConversion must not fire before its children resolve.
test("SPARK-8588 HiveTypeCoercion.inConversion fires too early") {
  val df =
    // Create a DataFrame from a local Seq.
    TestHive.createDataFrame(Seq((1, "2014-01-01"), (2, "2015-01-01"), (3, "2016-01-01")))
  df.toDF("id", "datef").registerTempTable("test_SPARK8588")
  checkAnswer(
    // concat joins its string arguments together.
    TestHive.sql(
      """
        |select id, concat(year(datef))
        |from test_SPARK8588 where concat(year(datef), ' year') in ('2015 year', '2014 year')
      """.stripMargin),
    Row(1, "2014") :: Row(2, "2015") :: Nil
  )
  TestHive.dropTempTable("test_SPARK8588")
}

// SPARK-9371: support special characters (dots, symbols) in column names.
test("SPARK-9371: fix the support for special chars in column names for hive context") {
  TestHive.read.json(TestHive.sparkContext.makeRDD(
    """{"a": {"c.b": 1}, "b.$q": [{"a@!.q": 1}], "q.w": {"w.i&": [1]}}""" :: Nil))
    .registerTempTable("t")
  checkAnswer(sql("SELECT a.`c.b`, `b.$q`[0].`a@!.q`, `q.w`.`w.i&`[0] FROM t"), Row(1, 1, 1))
}

// Convert Hive interval terms into Literals of CalendarIntervalType.
test("Convert hive interval term into Literal of CalendarIntervalType") {
  checkAnswer(sql("select interval '10-9' year to month"),
    // An interval of 10 years and 9 months.
    Row(CalendarInterval.fromString("interval 10 years 9 months")))
  checkAnswer(sql("select interval '20 15:40:32.99899999' day to second"),
    Row(CalendarInterval.fromString("interval 2 weeks 6 days 15 hours 40 minutes " +
      "32 seconds 99 milliseconds 899 microseconds")))
  checkAnswer(sql("select interval '30' year"),
    Row(CalendarInterval.fromString("interval 30 years")))
  checkAnswer(sql("select interval '25' month"),
    Row(CalendarInterval.fromString("interval 25 months")))
  checkAnswer(sql("select interval '-100' day"),
    Row(CalendarInterval.fromString("interval -14 weeks -2 days")))
  checkAnswer(sql("select interval '40' hour"),
    Row(CalendarInterval.fromString("interval 1 days 16 hours")))
  checkAnswer(sql("select interval '80' minute"),
    Row(CalendarInterval.fromString("interval 1 hour 20 minutes")))
  checkAnswer(sql("select interval '299.889987299' second"),
    Row(CalendarInterval.fromString(
      "interval 4 minutes 59 seconds 889 milliseconds 987 microseconds")))
}
// Specifying a database name for a temporary table must be rejected; a backtick-quoted
// name containing a dot is treated as a plain (single-part) name.
test("specifying database name for a temporary table is not allowed") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    val df =
      sqlContext.sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str")
    df
      .write
      .format("parquet")
      .save(path)

    val message = intercept[AnalysisException] {
      sqlContext.sql(
        s"""
          |CREATE TEMPORARY TABLE db.t
          |USING parquet
          |OPTIONS (
          | path '$path'
          |)
        """.stripMargin)
    }.getMessage
    // Specifying a database name or other qualifiers is rejected for temp tables.
    assert(message.contains("Specifying database name or other qualifiers are not allowed"))

    // If you use backticks to quote the name of a temporary table having dot in it.
    sqlContext.sql(
      s"""
        |CREATE TEMPORARY TABLE `db.t`
        |USING parquet
        |OPTIONS (
        | path '$path'
        |)
      """.stripMargin)
    checkAnswer(sqlContext.table("`db.t`"), df)
  }
}

// SPARK-10593: identical generated column names across nested lateral views must resolve.
test("SPARK-10593 same column names in lateral view") {
  val df = sqlContext.sql(
    """
      |select
      |insideLayer2.json as a2
      |from (select '{"layer1": {"layer2": "text inside layer 2"}}' json) test
      |lateral view json_tuple(json, 'layer1') insideLayer1 as json
      |lateral view json_tuple(insideLayer1.json, 'layer2') insideLayer2 as json
    """.stripMargin
  )

  checkAnswer(df, Row("text inside layer 2") :: Nil)
}
//脚本转换使用默认输入/输出SerDe和记录读写器
test("SPARK-10310: " +
"script transformation using default input/output SerDe and record reader/writer") {
sqlContext
.range(5)
.selectExpr("id AS a", "id AS b")
.registerTempTable("test")
checkAnswer(
sql(
"""FROM(
| FROM test SELECT TRANSFORM(a, b)
| USING 'python src/test/resources/data/scripts/test_transform.py "\\t"'
| AS (c STRING, d STRING)
|) t
|SELECT c
""".stripMargin),
(0 until 5).map(i => Row(i + "#")))
}
//脚本转换使用LazySimpleSerDe
test("SPARK-10310: script transformation using LazySimpleSerDe") {
sqlContext
.range(5)
.selectExpr("id AS a", "id AS b")
.registerTempTable("test")
val df = sql(
"""FROM test
|SELECT TRANSFORM(a, b)
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|WITH SERDEPROPERTIES('field.delim' = '|')
|USING 'python src/test/resources/data/scripts/test_transform.py "|"'
|AS (c STRING, d STRING)
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|WITH SERDEPROPERTIES('field.delim' = '|')
""".stripMargin)
checkAnswer(df, (0 until 5).map(i => Row(i + "#", i + "#")))
}
//使用parquet进行排序
test("SPARK-10741: Sort on Aggregate using parquet") {
withTable("test10741") {
withTempTable("src") {
Seq("a" -> 5, "a" -> 9, "b" -> 6).toDF().registerTempTable("src")
sql("CREATE TABLE test10741(c1 STRING, c2 INT) STORED AS PARQUET AS SELECT * FROM src")
}
checkAnswer(sql(
"""
|SELECT c1, AVG(c2) AS c_avg
|FROM test10741
|GROUP BY c1
|HAVING (AVG(c2) > 5) ORDER BY c1
""".stripMargin), Row("a", 7.0) :: Row("b", 6.0) :: Nil)
checkAnswer(sql(
"""
|SELECT c1, AVG(c2) AS c_avg
|FROM test10741
|GROUP BY c1
|ORDER BY AVG(c2)
""".stripMargin), Row("b", 6.0) :: Row("a", 7.0) :: Nil)
}
}
}
| tophua/spark1.52 | sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala | Scala | apache-2.0 | 48,929 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.physical.batch
import org.apache.flink.table.connector.source.ScanTableSource
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
import org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalTableSourceScan
import org.apache.flink.table.planner.plan.nodes.physical.batch.BatchExecTableSourceScan
import org.apache.flink.table.planner.plan.schema.TableSourceTable
import org.apache.flink.table.runtime.connector.source.ScanRuntimeProviderContext
import org.apache.calcite.plan.RelOptRuleCall
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.calcite.rel.core.TableScan
/**
 * Rule that converts [[FlinkLogicalTableSourceScan]] to [[BatchExecTableSourceScan]].
 */
class BatchExecTableSourceScanRule
  extends ConverterRule(
    classOf[FlinkLogicalTableSourceScan],
    FlinkConventions.LOGICAL,
    FlinkConventions.BATCH_PHYSICAL,
    "BatchExecTableSourceScanRule") {

  /** Rule must only match if TableScan targets a bounded [[ScanTableSource]] */
  override def matches(call: RelOptRuleCall): Boolean = {
    val tableScan: TableScan = call.rel(0).asInstanceOf[TableScan]
    // unwrap yields null when the scanned table is not backed by a TableSourceTable;
    // Option(null) collapses that case into None.
    Option(tableScan.getTable.unwrap(classOf[TableSourceTable])) match {
      case Some(sourceTable) =>
        sourceTable.tableSource match {
          case scanSource: ScanTableSource =>
            // Batch execution requires a bounded source.
            scanSource.getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE).isBounded
          case _ => false
        }
      case None => false
    }
  }

  /** Rewrites the logical scan into its batch-physical counterpart. */
  def convert(rel: RelNode): RelNode = {
    val logicalScan = rel.asInstanceOf[FlinkLogicalTableSourceScan]
    new BatchExecTableSourceScan(
      rel.getCluster,
      rel.getTraitSet.replace(FlinkConventions.BATCH_PHYSICAL),
      logicalScan.getTable.asInstanceOf[TableSourceTable]
    )
  }
}
/** Companion holding the shared singleton instance used when registering the rule. */
object BatchExecTableSourceScanRule {
  val INSTANCE = new BatchExecTableSourceScanRule
}
| hequn8128/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/batch/BatchExecTableSourceScanRule.scala | Scala | apache-2.0 | 2,784 |
/** When this files is opened within the IDE, a typing error is reported. */
class A[B] extends TestIterable[B] {
  import collection.convert.ImplicitConversionsToScala._
  def iterator: other.TestIterator[Nothing] = ???
  // NOTE(review): the `/*!*/` token is a presentation-compiler test marker; the
  // incomplete member selection is intentional — do not "fix" or move it.
  iterator./*!*/
}
object other {
  // Minimal iterator-like interface referenced by `A.iterator` above; presumably
  // part of the crash-reproduction fixture for ticket 1000531 — verify before reuse.
  trait TestIterator[T] {
    def hasNext: Boolean
    def next: T
  }
}
| felixmulder/scala | test/files/presentation/ide-bug-1000531/src/CrashOnLoad.scala | Scala | bsd-3-clause | 330 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.utils.geotools
import java.util.Date
import com.vividsolutions.jts.geom._
import org.geotools.data.FeatureReader
import org.geotools.data.simple.SimpleFeatureIterator
import org.geotools.factory.Hints
import org.geotools.feature.AttributeTypeBuilder
import org.geotools.geometry.DirectPosition2D
import org.geotools.temporal.`object`.{DefaultInstant, DefaultPeriod, DefaultPosition}
import org.geotools.util.{Converter, ConverterFactory}
import org.joda.time.DateTime
import org.joda.time.format.ISODateTimeFormat
import org.locationtech.geomesa.CURRENT_SCHEMA_VERSION
import org.locationtech.geomesa.utils.stats.Cardinality._
import org.locationtech.geomesa.utils.stats.IndexCoverage._
import org.locationtech.geomesa.utils.stats.{Cardinality, IndexCoverage}
import org.opengis.feature.`type`.AttributeDescriptor
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.temporal.Instant
import scala.reflect.ClassTag
import scala.util.Try
object Conversions {

  /**
   * Adapts a GeoTools [[SimpleFeatureIterator]] to a scala [[Iterator]].
   *
   * The underlying iterator is closed automatically the first time it is exhausted;
   * afterwards `hasNext` keeps returning false without touching the closed delegate.
   */
  class RichSimpleFeatureIterator(iter: SimpleFeatureIterator) extends SimpleFeatureIterator
      with Iterator[SimpleFeature] {
    private[this] var open = true

    def isClosed = !open

    def hasNext = {
      // Bug fix: the original wrote `if (isClosed) false` as a discarded statement,
      // so `iter.hasNext` was still invoked on an already-closed iterator. The
      // checks must be chained so a closed iterator short-circuits to false.
      if (isClosed) {
        false
      } else if (iter.hasNext) {
        true
      } else {
        close()
        false
      }
    }
    def next() = iter.next
    def close() { if(!isClosed) {iter.close(); open = false} }
  }

  implicit class RichSimpleFeatureReader(val r: FeatureReader[SimpleFeatureType, SimpleFeature]) extends AnyVal {
    /** Exposes the reader as a plain iterator of features (does not close the reader). */
    def getIterator: Iterator[SimpleFeature] = new Iterator[SimpleFeature] {
      override def hasNext: Boolean = r.hasNext
      override def next(): SimpleFeature = r.next()
    }
  }

  implicit def toRichSimpleFeatureIterator(iter: SimpleFeatureIterator): RichSimpleFeatureIterator = new RichSimpleFeatureIterator(iter)
  implicit def opengisInstantToJodaInstant(instant: Instant): org.joda.time.Instant = new DateTime(instant.getPosition.getDate).toInstant
  implicit def jodaInstantToOpengisInstant(instant: org.joda.time.Instant): org.opengis.temporal.Instant = new DefaultInstant(new DefaultPosition(instant.toDate))
  implicit def jodaIntervalToOpengisPeriod(interval: org.joda.time.Interval): org.opengis.temporal.Period =
    new DefaultPeriod(interval.getStart.toInstant, interval.getEnd.toInstant)

  implicit class RichCoord(val c: Coordinate) extends AnyVal {
    /** Converts the JTS coordinate to a GeoTools 2D direct position. */
    def toPoint2D = new DirectPosition2D(c.x, c.y)
  }

  implicit class RichGeometry(val geom: Geometry) extends AnyVal {
    /** Buffers the geometry by a distance given in meters (converted to degrees). */
    def bufferMeters(meters: Double): Geometry = geom.buffer(distanceDegrees(meters))
    /** Converts a distance in meters into decimal degrees relative to this geometry. */
    def distanceDegrees(meters: Double) = GeometryUtils.distanceDegrees(geom, meters)
  }

  implicit class RichSimpleFeature(val sf: SimpleFeature) extends AnyVal {
    // Typed views of the default geometry; each cast fails if the geometry is of
    // a different concrete type — callers are expected to know their schema.
    def geometry = sf.getDefaultGeometry.asInstanceOf[Geometry]
    def polygon = sf.getDefaultGeometry.asInstanceOf[Polygon]
    def point = sf.getDefaultGeometry.asInstanceOf[Point]
    def lineString = sf.getDefaultGeometry.asInstanceOf[LineString]
    def multiPolygon = sf.getDefaultGeometry.asInstanceOf[MultiPolygon]
    def multiPoint = sf.getDefaultGeometry.asInstanceOf[MultiPoint]
    def multiLineString = sf.getDefaultGeometry.asInstanceOf[MultiLineString]

    def get[T](i: Int) = sf.getAttribute(i).asInstanceOf[T]
    def get[T](name: String) = sf.getAttribute(name).asInstanceOf[T]

    /**
     * Reads the named attribute as a Double, widening Float and Integer values.
     * Throws if the attribute is not one of the supported numeric types.
     */
    def getDouble(str: String): Double = {
      val ret = sf.getAttribute(str)
      ret match {
        case d: java.lang.Double  => d
        case f: java.lang.Float   => f.toDouble
        case i: java.lang.Integer => i.toDouble
        case _                    => throw new Exception(s"Input $ret is not a numeric type.")
      }
    }

    /** Typed lookup in the feature's user data; None when absent or of the wrong type. */
    def userData[T](key: AnyRef)(implicit ct: ClassTag[T]): Option[T] = {
      Option(sf.getUserData.get(key)).flatMap {
        case ct(x) => Some(x)
        case _     => None
      }
    }
  }
}
object RichIterator {

  /** Adds `head` / `headOption` convenience accessors to any [[Iterator]]. */
  implicit class RichIterator[T](val iter: Iterator[T]) extends AnyVal {
    /** Returns the next element; fails on an exhausted iterator (mirrors `Iterator.next`). */
    def head = iter.next()
    /** Returns the next element if one exists, otherwise None. Advances the iterator. */
    def headOption = if (!iter.hasNext) None else Some(iter.next())
  }
}
/**
 * Contains GeoMesa specific attribute descriptor information
 */
object RichAttributeDescriptors {

  import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes._

  // All accessors below read/write string-encoded options in the descriptor's
  // mutable user-data map; getters are total and fall back to safe defaults.
  //noinspection AccessorLikeMethodIsEmptyParen
  implicit class RichAttributeDescriptor(val ad: AttributeDescriptor) extends AnyVal {

    def setIndexCoverage(coverage: IndexCoverage): Unit = ad.getUserData.put(OPT_INDEX, coverage.toString)

    // Unparseable or missing values fall back to IndexCoverage.NONE.
    def getIndexCoverage(): IndexCoverage =
      Option(ad.getUserData.get(OPT_INDEX).asInstanceOf[String])
        .flatMap(c => Try(IndexCoverage.withName(c)).toOption).getOrElse(IndexCoverage.NONE)

    def setIndexValue(indexValue: Boolean): Unit = ad.getUserData.put(OPT_INDEX_VALUE, indexValue.toString)

    def isIndexValue(): Boolean = Option(ad.getUserData.get(OPT_INDEX_VALUE)).exists(_ == "true")

    def setCardinality(cardinality: Cardinality): Unit =
      ad.getUserData.put(OPT_CARDINALITY, cardinality.toString)

    // Unparseable or missing values fall back to Cardinality.UNKNOWN.
    def getCardinality(): Cardinality =
      Option(ad.getUserData.get(OPT_CARDINALITY).asInstanceOf[String])
        .flatMap(c => Try(Cardinality.withName(c)).toOption).getOrElse(Cardinality.UNKNOWN)

    def setBinTrackId(opt: Boolean): Unit = ad.getUserData.put(OPT_BIN_TRACK_ID, opt.toString)

    def isBinTrackId: Boolean = Option(ad.getUserData.get(OPT_BIN_TRACK_ID)).exists(_ == "true")

    // Collection/map element types are stored as raw Class objects (not strings).
    def setCollectionType(typ: Class[_]): Unit = ad.getUserData.put(USER_DATA_LIST_TYPE, typ)

    def getCollectionType(): Option[Class[_]] =
      Option(ad.getUserData.get(USER_DATA_LIST_TYPE)).map(_.asInstanceOf[Class[_]])

    def setMapTypes(keyType: Class[_], valueType: Class[_]): Unit = {
      ad.getUserData.put(USER_DATA_MAP_KEY_TYPE, keyType)
      ad.getUserData.put(USER_DATA_MAP_VALUE_TYPE, valueType)
    }

    // Defined only when both the key and the value type have been recorded.
    def getMapTypes(): Option[(Class[_], Class[_])] = for {
      keyClass   <- Option(ad.getUserData.get(USER_DATA_MAP_KEY_TYPE))
      valueClass <- Option(ad.getUserData.get(USER_DATA_MAP_VALUE_TYPE))
    } yield {
      (keyClass.asInstanceOf[Class[_]], valueClass.asInstanceOf[Class[_]])
    }

    def isIndexed = getIndexCoverage() match {
      case IndexCoverage.FULL | IndexCoverage.JOIN => true
      case IndexCoverage.NONE => false
    }

    def isCollection = getCollectionType().isDefined

    def isMap = getMapTypes().isDefined

    def isMultiValued = isCollection || isMap
  }

  implicit class RichAttributeTypeBuilder(val builder: AttributeTypeBuilder) extends AnyVal {

    // Builder-style counterparts of the setters above; each returns the builder.
    def indexCoverage(coverage: IndexCoverage) = builder.userData(OPT_INDEX, coverage.toString)

    def indexValue(indexValue: Boolean) = builder.userData(OPT_INDEX_VALUE, indexValue)

    def cardinality(cardinality: Cardinality) = builder.userData(OPT_CARDINALITY, cardinality.toString)

    def collectionType(typ: Class[_]) = builder.userData(USER_DATA_LIST_TYPE, typ)

    def mapTypes(keyType: Class[_], valueType: Class[_]) =
      builder.userData(USER_DATA_MAP_KEY_TYPE, keyType).userData(USER_DATA_MAP_VALUE_TYPE, valueType)
  }
}
object RichSimpleFeatureType {

  import RichAttributeDescriptors.RichAttributeDescriptor

  import scala.collection.JavaConversions._

  // Keys under which GeoMesa stores schema-level metadata in the type's user data.
  val SCHEMA_VERSION_KEY = "geomesa.version"
  val TABLE_SHARING_KEY  = "geomesa.table.sharing"
  val SHARING_PREFIX_KEY = "geomesa.table.sharing.prefix"
  val DEFAULT_DATE_KEY   = "geomesa.index.dtg"
  val ST_INDEX_SCHEMA_KEY = "geomesa.index.st.schema"

  // in general we store everything as strings so that it's easy to pass to accumulo iterators
  implicit class RichSimpleFeatureType(val sft: SimpleFeatureType) extends AnyVal {

    def getGeomField: String = sft.getGeometryDescriptor.getLocalName
    def getGeomIndex: Int = sft.indexOf(sft.getGeometryDescriptor.getLocalName)

    // Default date-time-group (dtg) attribute, if one has been configured.
    def getDtgField: Option[String] = userData[String](DEFAULT_DATE_KEY)
    def getDtgIndex: Option[Int] = getDtgField.map(sft.indexOf).filter(_ != -1)
    def getDtgDescriptor = getDtgIndex.map(sft.getDescriptor)
    def clearDtgField(): Unit = sft.getUserData.remove(DEFAULT_DATE_KEY)
    def setDtgField(dtg: String): Unit = {
      // The dtg attribute must exist and be date-typed.
      val descriptor = sft.getDescriptor(dtg)
      require(descriptor != null && classOf[Date].isAssignableFrom(descriptor.getType.getBinding),
        s"Invalid date field '$dtg' for schema $sft")
      sft.getUserData.put(DEFAULT_DATE_KEY, dtg)
    }

    def getStIndexSchema: String = userData[String](ST_INDEX_SCHEMA_KEY).orNull
    def setStIndexSchema(schema: String): Unit = sft.getUserData.put(ST_INDEX_SCHEMA_KEY, schema)

    def getBinTrackId: Option[String] = sft.getAttributeDescriptors.find(_.isBinTrackId).map(_.getLocalName)

    // Schemas written before versioning default to the current version.
    def getSchemaVersion: Int =
      userData[String](SCHEMA_VERSION_KEY).map(_.toInt).getOrElse(CURRENT_SCHEMA_VERSION)
    def setSchemaVersion(version: Int): Unit = sft.getUserData.put(SCHEMA_VERSION_KEY, version.toString)

    def isPoints = sft.getGeometryDescriptor.getType.getBinding == classOf[Point]
    def isLines = sft.getGeometryDescriptor.getType.getBinding == classOf[LineString]

    // If no user data is specified when creating a new SFT, we should default to 'true'.
    def isTableSharing: Boolean = userData[String](TABLE_SHARING_KEY).map(_.toBoolean).getOrElse(true)
    def setTableSharing(sharing: Boolean): Unit = sft.getUserData.put(TABLE_SHARING_KEY, sharing.toString)

    def getTableSharingPrefix: String = userData[String](SHARING_PREFIX_KEY).getOrElse("")
    def setTableSharingPrefix(prefix: String): Unit = sft.getUserData.put(SHARING_PREFIX_KEY, prefix)

    def getEnabledTables: String = userData[String](SimpleFeatureTypes.ENABLED_INDEXES).getOrElse("")
    def setEnabledTables(tables: String): Unit = sft.getUserData.put(SimpleFeatureTypes.ENABLED_INDEXES, tables)

    // Generic typed lookup; the cast is unchecked due to erasure — callers supply T.
    def userData[T](key: AnyRef): Option[T] = Option(sft.getUserData.get(key).asInstanceOf[T])
  }
}
/**
 * GeoTools [[ConverterFactory]] that converts between `java.util.Date` and ISO 8601
 * strings via Joda-Time. Dates are rendered in UTC. Returns null when the requested
 * source/target pair is not supported, per the factory contract.
 */
class JodaConverterFactory extends ConverterFactory {

  private val df = ISODateTimeFormat.dateTime().withZoneUTC()

  def createConverter(source: Class[_], target: Class[_], hints: Hints): Converter = {
    val dateToString =
      classOf[java.util.Date].isAssignableFrom(source) && classOf[String].isAssignableFrom(target)
    val stringToDate =
      classOf[java.util.Date].isAssignableFrom(target) && classOf[String].isAssignableFrom(source)
    if (dateToString) {
      new Converter {
        def convert[T](source: scala.Any, target: Class[T]): T =
          df.print(new DateTime(source.asInstanceOf[java.util.Date])).asInstanceOf[T]
      }
    } else if (stringToDate) {
      new Converter {
        def convert[T](source: scala.Any, target: Class[T]): T =
          df.parseDateTime(source.asInstanceOf[String]).toDate.asInstanceOf[T]
      }
    } else {
      null // unsupported pair - no converter available
    }
  }
}
/**
 * GeoTools [[ConverterFactory]] bridging scala and java collections: supports
 * Seq <-> java.util.List and (immutable) Map <-> java.util.Map. Returns null when
 * the requested source/target pair is not one of those, per the factory contract.
 */
class ScalaCollectionsConverterFactory extends ConverterFactory {

  def createConverter(source: Class[_], target: Class[_], hints: Hints): Converter = {
    // true when `source` is assignable from `from` and `target` from `to`
    def matches(from: Class[_], to: Class[_]): Boolean =
      from.isAssignableFrom(source) && to.isAssignableFrom(target)

    if (matches(classOf[Seq[_]], classOf[java.util.List[_]])) {
      new ListToListConverter(true)
    } else if (matches(classOf[java.util.List[_]], classOf[Seq[_]])) {
      new ListToListConverter(false)
    } else if (matches(classOf[Map[_, _]], classOf[java.util.Map[_, _]])) {
      new MapToMapConverter(true)
    } else if (matches(classOf[java.util.Map[_, _]], classOf[Map[_, _]])) {
      new MapToMapConverter(false)
    } else {
      null
    }
  }
}
/**
 * Converts between scala and java lists.
 *
 * @param scalaToJava true to wrap a scala Seq as a java List, false for the reverse
 */
class ListToListConverter(scalaToJava: Boolean) extends Converter {

  import scala.collection.JavaConverters._

  override def convert[T](source: scala.Any, target: Class[T]): T = {
    val result: Any =
      if (scalaToJava) {
        source.asInstanceOf[Seq[_]].asJava
      } else {
        source.asInstanceOf[java.util.List[_]].asScala
      }
    result.asInstanceOf[T]
  }
}
/**
 * Converts between scala and java maps.
 *
 * @param scalaToJava true to wrap a scala Map as a java Map, false for the reverse
 */
class MapToMapConverter(scalaToJava: Boolean) extends Converter {

  import scala.collection.JavaConverters._

  override def convert[T](source: scala.Any, target: Class[T]): T = {
    val result: Any =
      if (scalaToJava) {
        source.asInstanceOf[Map[_, _]].asJava
      } else {
        source.asInstanceOf[java.util.Map[_, _]].asScala
      }
    result.asInstanceOf[T]
  }
}
/*
* Copyright (C) 2014 - 2016 Softwaremill <http://softwaremill.com>
* Copyright (C) 2016 - 2019 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.kafka.javadsl
import java.util.concurrent.CompletionStage
import akka.annotation.ApiMayChange
import akka.kafka.ConsumerMessage.Committable
import akka.kafka.ProducerMessage._
import akka.kafka.{scaladsl, CommitterSettings, ConsumerMessage, ProducerSettings}
import akka.stream.javadsl.{Flow, FlowWithContext, Sink}
import akka.{japi, Done, NotUsed}
import org.apache.kafka.clients.producer.ProducerRecord
import scala.compat.java8.FutureConverters._
/**
* Akka Stream connector for publishing messages to Kafka topics.
*/
/**
 * Java API factories for producer sinks and flows. Each method delegates to the
 * corresponding `scaladsl.Producer` factory and converts the stream and materialized
 * value to their javadsl/java.util.concurrent counterparts.
 */
object Producer {

  /**
   * Create a sink for publishing records to Kafka topics.
   *
   * The [[org.apache.kafka.clients.producer.ProducerRecord Kafka ProducerRecord]] contains the topic name to which the record is being sent, an optional
   * partition number, and an optional key and value.
   */
  def plainSink[K, V](settings: ProducerSettings[K, V]): Sink[ProducerRecord[K, V], CompletionStage[Done]] =
    scaladsl.Producer
      .plainSink(settings)
      // expose the materialized Scala Future[Done] as a Java CompletionStage[Done]
      .mapMaterializedValue(_.toJava)
      .asJava

  /**
   * Create a sink for publishing records to Kafka topics.
   *
   * The [[org.apache.kafka.clients.producer.ProducerRecord Kafka ProducerRecord]] contains the topic name to which the record is being sent, an optional
   * partition number, and an optional key and value.
   *
   * Supports sharing a Kafka Producer instance.
   *
   * @deprecated Pass in external or shared producer using `ProducerSettings.withProducerFactory` or `ProducerSettings.withProducer`, since 2.0.0
   */
  @Deprecated
  def plainSink[K, V](
      settings: ProducerSettings[K, V],
      producer: org.apache.kafka.clients.producer.Producer[K, V]
  ): Sink[ProducerRecord[K, V], CompletionStage[Done]] =
    plainSink(settings.withProducer(producer))

  /**
   * Create a sink that is aware of the [[ConsumerMessage.Committable committable offset]]
   * from a [[Consumer.committableSource]]. It will commit the consumer offset when the message has
   * been published successfully to the topic.
   *
   * It publishes records to Kafka topics conditionally:
   *
   * - [[akka.kafka.ProducerMessage.Message Message]] publishes a single message to its topic, and commits the offset
   *
   * - [[akka.kafka.ProducerMessage.MultiMessage MultiMessage]] publishes all messages in its `records` field, and commits the offset
   *
   * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, but commits the offset
   *
   * Note that there is a risk that something fails after publishing but before
   * committing, so it is "at-least once delivery" semantics.
   *
   * @deprecated use `committableSink(ProducerSettings, CommitterSettings)` instead, since 2.0.0
   */
  @Deprecated
  def committableSink[K, V, IN <: Envelope[K, V, ConsumerMessage.Committable]](
      settings: ProducerSettings[K, V]
  ): Sink[IN, CompletionStage[Done]] =
    scaladsl.Producer
      .committableSink(settings)
      .mapMaterializedValue(_.toJava)
      .asJava

  /**
   * Create a sink that is aware of the [[ConsumerMessage.Committable committable offset]]
   * from a [[Consumer.committableSource]]. It will commit the consumer offset when the message has
   * been published successfully to the topic.
   *
   * It publishes records to Kafka topics conditionally:
   *
   * - [[akka.kafka.ProducerMessage.Message Message]] publishes a single message to its topic, and commits the offset
   *
   * - [[akka.kafka.ProducerMessage.MultiMessage MultiMessage]] publishes all messages in its `records` field, and commits the offset
   *
   * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, but commits the offset
   *
   *
   * Note that there is always a risk that something fails after publishing but before
   * committing, so it is "at-least once delivery" semantics.
   *
   * Supports sharing a Kafka Producer instance.
   *
   * @deprecated use `committableSink(ProducerSettings, CommitterSettings)` instead, since 2.0.0
   */
  @Deprecated
  def committableSink[K, V](
      settings: ProducerSettings[K, V],
      producer: org.apache.kafka.clients.producer.Producer[K, V]
  ): Sink[Envelope[K, V, ConsumerMessage.Committable], CompletionStage[Done]] =
    committableSink(settings.withProducer(producer))

  /**
   * Create a sink that is aware of the [[ConsumerMessage.Committable committable offset]]
   * from a [[Consumer.committableSource]]. The offsets are batched and committed regularly.
   *
   * It publishes records to Kafka topics conditionally:
   *
   * - [[akka.kafka.ProducerMessage.Message Message]] publishes a single message to its topic, and commits the offset
   *
   * - [[akka.kafka.ProducerMessage.MultiMessage MultiMessage]] publishes all messages in its `records` field, and commits the offset
   *
   * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, but commits the offset
   *
   * Note that there is a risk that something fails after publishing but before
   * committing, so it is "at-least once delivery" semantics.
   */
  def committableSink[K, V, IN <: Envelope[K, V, ConsumerMessage.Committable]](
      producerSettings: ProducerSettings[K, V],
      committerSettings: CommitterSettings
  ): Sink[IN, CompletionStage[Done]] =
    scaladsl.Producer
      .committableSink(producerSettings, committerSettings)
      .mapMaterializedValue(_.toJava)
      .asJava

  /**
   * Create a sink that is aware of the [[ConsumerMessage.Committable committable offset]] passed as
   * context from a [[Consumer.sourceWithOffsetContext]]. The offsets are batched and committed regularly.
   *
   * It publishes records to Kafka topics conditionally:
   *
   * - [[akka.kafka.ProducerMessage.Message Message]] publishes a single message to its topic, and commits the offset
   *
   * - [[akka.kafka.ProducerMessage.MultiMessage MultiMessage]] publishes all messages in its `records` field, and commits the offset
   *
   * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, but commits the offset
   *
   * Note that there is a risk that something fails after publishing but before
   * committing, so it is "at-least once delivery" semantics.
   */
  @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/880")
  def committableSinkWithOffsetContext[K, V, IN <: Envelope[K, V, _], C <: Committable](
      producerSettings: ProducerSettings[K, V],
      committerSettings: CommitterSettings
  ): Sink[akka.japi.Pair[IN, C], CompletionStage[Done]] =
    committableSink(producerSettings, committerSettings)
      // re-attach the committable context as the envelope's pass-through before producing
      .contramap(new akka.japi.function.Function[japi.Pair[IN, C], Envelope[K, V, C]] {
        override def apply(p: japi.Pair[IN, C]) = p.first.withPassThrough(p.second)
      })

  /**
   * Create a flow to publish records to Kafka topics and then pass it on.
   *
   * The records must be wrapped in a [[akka.kafka.ProducerMessage.Message Message]] and continue in the stream as [[akka.kafka.ProducerMessage.Result Result]].
   *
   * The messages support the possibility to pass through arbitrary data, which can for example be a [[ConsumerMessage.CommittableOffset CommittableOffset]]
   * or [[ConsumerMessage.CommittableOffsetBatch CommittableOffsetBatch]] that can
   * be committed later in the flow.
   *
   * @deprecated use `flexiFlow` instead, since 0.21
   */
  @Deprecated
  def flow[K, V, PassThrough](
      settings: ProducerSettings[K, V]
  ): Flow[Message[K, V, PassThrough], Result[K, V, PassThrough], NotUsed] =
    scaladsl.Producer
      .flow(settings)
      .asJava
      // the DSL conversion does not change element types; the cast only restores the
      // specific Message/Result generics
      .asInstanceOf[Flow[Message[K, V, PassThrough], Result[K, V, PassThrough], NotUsed]]

  /**
   * Create a flow to conditionally publish records to Kafka topics and then pass it on.
   *
   * It publishes records to Kafka topics conditionally:
   *
   * - [[akka.kafka.ProducerMessage.Message Message]] publishes a single message to its topic, and continues in the stream as [[akka.kafka.ProducerMessage.Result Result]]
   *
   * - [[akka.kafka.ProducerMessage.MultiMessage MultiMessage]] publishes all messages in its `records` field, and continues in the stream as [[akka.kafka.ProducerMessage.MultiResult MultiResult]]
   *
   * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, and continues in the stream as [[akka.kafka.ProducerMessage.PassThroughResult PassThroughResult]]
   *
   * The messages support the possibility to pass through arbitrary data, which can for example be a [[ConsumerMessage.CommittableOffset CommittableOffset]]
   * or [[ConsumerMessage.CommittableOffsetBatch CommittableOffsetBatch]] that can
   * be committed later in the flow.
   */
  def flexiFlow[K, V, PassThrough](
      settings: ProducerSettings[K, V]
  ): Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed] =
    scaladsl.Producer
      .flexiFlow(settings)
      .asJava
      // the DSL conversion does not change element types; the cast only restores the
      // specific Envelope/Results generics
      .asInstanceOf[Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed]]

  /**
   * API MAY CHANGE
   *
   * Create a flow to conditionally publish records to Kafka topics and then pass it on.
   *
   * It publishes records to Kafka topics conditionally:
   *
   * - [[akka.kafka.ProducerMessage.Message Message]] publishes a single message to its topic, and continues in the stream as [[akka.kafka.ProducerMessage.Result Result]]
   *
   * - [[akka.kafka.ProducerMessage.MultiMessage MultiMessage]] publishes all messages in its `records` field, and continues in the stream as [[akka.kafka.ProducerMessage.MultiResult MultiResult]]
   *
   * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, and continues in the stream as [[akka.kafka.ProducerMessage.PassThroughResult PassThroughResult]]
   *
   * This flow is intended to be used with Akka's [flow with context](https://doc.akka.io/docs/akka/current/stream/operators/Flow/asFlowWithContext.html).
   *
   * @tparam C the flow context type
   */
  @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/880")
  def flowWithContext[K, V, C](
      settings: ProducerSettings[K, V]
  ): FlowWithContext[Envelope[K, V, NotUsed], C, Results[K, V, C], C, NotUsed] =
    scaladsl.Producer.flowWithContext(settings).asJava

  /**
   * Create a flow to publish records to Kafka topics and then pass it on.
   *
   * The records must be wrapped in a [[akka.kafka.ProducerMessage.Message Message]] and continue in the stream as [[akka.kafka.ProducerMessage.Result Result]].
   *
   * The messages support the possibility to pass through arbitrary data, which can for example be a [[ConsumerMessage.CommittableOffset CommittableOffset]]
   * or [[ConsumerMessage.CommittableOffsetBatch CommittableOffsetBatch]] that can
   * be committed later in the flow.
   *
   * Supports sharing a Kafka Producer instance.
   *
   * @deprecated use `flexiFlow` instead, since 0.21
   */
  @Deprecated
  def flow[K, V, PassThrough](
      settings: ProducerSettings[K, V],
      producer: org.apache.kafka.clients.producer.Producer[K, V]
  ): Flow[Message[K, V, PassThrough], Result[K, V, PassThrough], NotUsed] =
    flow(settings.withProducer(producer))

  /**
   * Create a flow to conditionally publish records to Kafka topics and then pass it on.
   *
   * It publishes records to Kafka topics conditionally:
   *
   * - [[akka.kafka.ProducerMessage.Message Message]] publishes a single message to its topic, and continues in the stream as [[akka.kafka.ProducerMessage.Result Result]]
   *
   * - [[akka.kafka.ProducerMessage.MultiMessage MultiMessage]] publishes all messages in its `records` field, and continues in the stream as [[akka.kafka.ProducerMessage.MultiResult MultiResult]]
   *
   * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, and continues in the stream as [[akka.kafka.ProducerMessage.PassThroughResult PassThroughResult]]
   *
   * The messages support the possibility to pass through arbitrary data, which can for example be a [[ConsumerMessage.CommittableOffset CommittableOffset]]
   * or [[ConsumerMessage.CommittableOffsetBatch CommittableOffsetBatch]] that can
   * be committed later in the flow.
   *
   * Supports sharing a Kafka Producer instance.
   *
   * @deprecated Pass in external or shared producer using `ProducerSettings.withProducerFactory` or `ProducerSettings.withProducer`, since 2.0.0
   */
  @Deprecated
  def flexiFlow[K, V, PassThrough](
      settings: ProducerSettings[K, V],
      producer: org.apache.kafka.clients.producer.Producer[K, V]
  ): Flow[Envelope[K, V, PassThrough], Results[K, V, PassThrough], NotUsed] =
    flexiFlow(settings.withProducer(producer))

  /**
   * API MAY CHANGE
   *
   * Create a flow to conditionally publish records to Kafka topics and then pass it on.
   *
   * It publishes records to Kafka topics conditionally:
   *
   * - [[akka.kafka.ProducerMessage.Message Message]] publishes a single message to its topic, and continues in the stream as [[akka.kafka.ProducerMessage.Result Result]]
   *
   * - [[akka.kafka.ProducerMessage.MultiMessage MultiMessage]] publishes all messages in its `records` field, and continues in the stream as [[akka.kafka.ProducerMessage.MultiResult MultiResult]]
   *
   * - [[akka.kafka.ProducerMessage.PassThroughMessage PassThroughMessage]] does not publish anything, and continues in the stream as [[akka.kafka.ProducerMessage.PassThroughResult PassThroughResult]]
   *
   * This flow is intended to be used with Akka's [flow with context](https://doc.akka.io/docs/akka/current/stream/operators/Flow/asFlowWithContext.html).
   *
   * Supports sharing a Kafka Producer instance.
   *
   * @tparam C the flow context type
   *
   * @deprecated Pass in external or shared producer using `ProducerSettings.withProducerFactory` or `ProducerSettings.withProducer`, since 2.0.0
   */
  @Deprecated
  @ApiMayChange(issue = "https://github.com/akka/alpakka-kafka/issues/880")
  def flowWithContext[K, V, C](
      settings: ProducerSettings[K, V],
      producer: org.apache.kafka.clients.producer.Producer[K, V]
  ): FlowWithContext[Envelope[K, V, NotUsed], C, Results[K, V, C], C, NotUsed] =
    flowWithContext(settings.withProducer(producer))
}
| softwaremill/reactive-kafka | core/src/main/scala/akka/kafka/javadsl/Producer.scala | Scala | apache-2.0 | 14,465 |
/*
* Copyright 2014 Databricks
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark
import org.apache.spark.sql.{DataFrame, DataFrameReader, DataFrameWriter}
package object avro {

  /**
   * Adds an `avro` method to [[DataFrameWriter]] so data frames can be written out as
   * avro files: `df.write.avro(path)`.
   */
  implicit class AvroDataFrameWriter[T](writer: DataFrameWriter[T]) {
    def avro: String => Unit = path => writer.format("com.databricks.spark.avro").save(path)
  }

  /**
   * Adds an `avro` method to [[DataFrameReader]] so avro files can be loaded into
   * data frames: `spark.read.avro(path)`.
   */
  implicit class AvroDataFrameReader(reader: DataFrameReader) {
    def avro: String => DataFrame = path => reader.format("com.databricks.spark.avro").load(path)
  }
}
| CrazyJacky/spark-avro | src/main/scala/com/databricks/spark/avro/package.scala | Scala | apache-2.0 | 1,277 |
/*
* Copyright 2015 Heiko Seeberger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.heikoseeberger.akkasse
package scaladsl
package model
import akka.util.ByteString
import java.nio.charset.StandardCharsets.UTF_8
import scala.annotation.tailrec
import scala.compat.java8.OptionConverters.RichOptionForJava8
object ServerSentEvent {

  /**
   * A [[ServerSentEvent]] with empty data which can be used as a heartbeat.
   */
  val heartbeat: ServerSentEvent = ServerSentEvent("")

  /**
   * Creates a [[ServerSentEvent]].
   *
   * @param data data, may span multiple lines
   * @param type type, must not contain \n or \r
   */
  def apply(data: String, `type`: String): ServerSentEvent =
    new ServerSentEvent(data = data, `type` = Some(`type`))

  /**
   * Creates a [[ServerSentEvent]].
   *
   * @param data data, may span multiple lines
   * @param type type, must not contain \n or \r
   * @param id id, must not contain \n or \r
   */
  def apply(data: String, `type`: String, id: String): ServerSentEvent =
    new ServerSentEvent(data = data, `type` = Some(`type`), id = Some(id))

  /**
   * Creates a [[ServerSentEvent]].
   *
   * @param data data, may span multiple lines
   * @param retry reconnection delay in milliseconds
   */
  def apply(data: String, retry: Int): ServerSentEvent =
    new ServerSentEvent(data = data, retry = Some(retry))

  // True when the string contains neither LF nor CR.
  private def noNewLine(s: String) = !s.exists(c => c == '\n' || c == '\r')

  // Public domain algorithm: http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2.
  // We want powers of two both because they typically work better with the allocator, and because
  // we want to minimize reallocations/buffer growth.
  private def nextPowerOfTwoBiggerThan(n: Int) = {
    // Smear the highest set bit of n - 1 into all lower positions, then add one.
    List(1, 2, 4, 8, 16).foldLeft(n - 1)((m, shift) => m | (m >> shift)) + 1
  }
}
/**
 * Representation of a server-sent event. According to the specification, an empty data field designates an event
 * which is to be ignored which is useful for heartbeats.
 *
 * @param data data, may span multiple lines
 * @param type optional type, must not contain \n or \r
 * @param id optional id, must not contain \n or \r
 * @param retry optional reconnection delay in milliseconds
 */
final case class ServerSentEvent(data: String,
                                 `type`: Option[String] = None,
                                 id: Option[String] = None,
                                 retry: Option[Int] = None)
    extends javadsl.model.ServerSentEvent {
  import ServerSentEvent._

  require(`type`.forall(noNewLine), "type must not contain \\n or \\r!")
  require(id.forall(noNewLine), "id must not contain \\n or \\r!")
  require(retry.forall(_ > 0), "retry must be a positive number!")

  // Renders the event in SSE wire format ("data:"/"event:"/"id:"/"retry:" lines,
  // terminated by a blank line) as a UTF-8 ByteString.
  private[scaladsl] def encode = {
    def s = { // Performance fun fact: change this to val and get an ~30% performance penalty!!!
      // Why 8? "data:" == 5 + \n\n (1 data (at least) and 1 ending) == 2 and then we add 1 extra to allocate
      // a bigger memory slab than data.length since we're going to add data ("data:" + "\n") per line
      // Why 7? "event:" + \n == 7 chars
      // Why 4? "id:" + \n == 4 chars
      // Why 17? "retry:" + \n + Integer.Max decimal places
      val builder =
        new StringBuilder(
          nextPowerOfTwoBiggerThan(
            8 + data.length + `type`.fold(0)(_.length + 7) + id.fold(0)(_.length + 4) + retry.fold(0)(_ => 17)
          )
        )
      // Emits one "data:" prefixed line per newline-delimited segment of `s`,
      // starting at `index`.
      @tailrec def appendData(s: String, index: Int = 0): Unit = {
        // Copies characters up to and including the next '\n'; returns the index
        // after it, or -1 when the end of the string was reached without one.
        @tailrec def addLine(index: Int): Int =
          if (index >= s.length)
            -1
          else {
            val c = s.charAt(index)
            builder.append(c)
            if (c == '\n') index + 1 else addLine(index + 1)
          }
        builder.append("data:")
        addLine(index) match {
          case -1 => builder.append('\n') // last line had no trailing newline - terminate it
          case i  => appendData(s, i)
        }
      }
      appendData(data)
      if (`type`.isDefined && `type`.get.nonEmpty) builder.append("event:").append(`type`.get).append('\n')
      if (id.isDefined) builder.append("id:").append(id.get).append('\n')
      if (retry.isDefined) builder.append("retry:").append(retry.get).append('\n')
      // the extra blank line marks the end of the event
      builder.append('\n').toString
    }
    ByteString(s, UTF_8.name)
  }

  // Java API accessors (Option converted to Optional/OptionalInt).
  override def getData = data

  override def getType = `type`.asJava

  override def getId = id.asJava

  override def getRetry = retry.asPrimitive
}
| hseeberger/akka-sse | core/src/main/scala/de/heikoseeberger/akkasse/scaladsl/model/ServerSentEvent.scala | Scala | apache-2.0 | 4,981 |
package dispatch.oauth
import dispatch._
import org.asynchttpclient.oauth._
/**
 * Adds OAuth signing verbs to a request: [[sign]] (and its symbolic alias `<@`)
 * calculates an OAuth signature from the consumer key and request token and adds it
 * to the underlying request builder.
 */
class SigningVerbs(val subject: Req) extends RequestVerbs {

  // Placeholder used when no request token is supplied.
  val emptyToken = new RequestToken(null, "")

  def sign(consumer: ConsumerKey, token: RequestToken = emptyToken) = {
    val calculator = new OAuthSignatureCalculator(consumer, token)
    subject.underlying { builder =>
      calculator.calculateAndAddSignature(builder.build, builder)
      builder
    }
  }

  /** Symbolic alias for [[sign]]. */
  def <@(consumer: ConsumerKey, token: RequestToken = emptyToken) =
    sign(consumer, token)
}
| dispatch/reboot | core/src/main/scala/oauth/requests.scala | Scala | lgpl-3.0 | 512 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.mvc
import java.net.URLEncoder
import org.specs2.specification.core.Fragments
import play.api.http.FlashConfiguration
import play.api.http.SecretConfiguration
import play.api.libs.crypto.CookieSignerProvider
/**
 * Tests encoding/decoding of the Play flash cookie, including special characters,
 * rejection of the legacy cookie format, and cookie attributes.
 */
class FlashCookieSpec extends org.specs2.mutable.Specification {

  "Flash cookies" should {
    "bake in a header and value" in {
      val es = flash.encode(Map("a" -> "b"))
      val m = flash.decode(es)

      (m must haveSize(1)).and {
        m.get("a") must beSome("b")
      }
    }

    "bake in multiple headers and values" in {
      val es = flash.encode(Map("a" -> "b", "c" -> "d"))
      val m = flash.decode(es)

      (m must haveSize(2)).and {
        m.get("a") must beSome("b")
        m.get("c") must beSome("d")
      }
    }

    "bake in a header an empty value" in {
      val es = flash.encode(Map("a" -> ""))
      val m = flash.decode(es)

      m must haveSize(1)
      m.get("a") must beSome("")
    }

    "bake in a header a Unicode value" in {
      val es = flash.encode(Map("a" -> "\u0000"))
      val m = flash.decode(es)

      m must haveSize(1)
      m.get("a") must beSome("\u0000")
    }

    "bake in an empty map" in {
      val es = flash.encode(Map.empty)
      val m = flash.decode(es)

      m must beEmpty
    }

    "encode values such that no extra keys can be created" in {
      // '&' and '=' in the value must not be interpreted as pair/key-value separators
      val es = flash.encode(Map("a" -> "b&c=d"))
      val m = flash.decode(es)

      m must haveSize(1)
      m.get("a") must beSome("b&c=d")
    }

    "specifically exclude control chars" in {
      // control characters must be escaped in the encoded form but round-trip intact
      for (i <- 0 until 32) {
        val s = Character.toChars(i).toString
        val es = flash.encode(Map("a" -> s))
        es must not contain s

        val m = flash.decode(es)
        m must haveSize(1)
        m.get("a") must beSome(s)
      }
      // the for loop yields Unit, so provide an explicit result for the example
      success
    }

    "specifically exclude special cookie chars" in {
      val es = flash.encode(Map("a" -> " \",;\\"))
      es must not contain " "
      es must not contain "\""
      es must not contain ","
      es must not contain ";"
      es must not contain "\\"

      val m = flash.decode(es)
      m must haveSize(1)
      m.get("a") must beSome(" \",;\\")
    }

    // the old format is unsigned, so decoding it must yield nothing
    "decode values of the previously supported format" in {
      val es = oldEncoder(Map("a" -> "b", "c" -> "d"))
      flash.decode(es) must beEmpty
    }

    "decode values of the previously supported format with the new delimiters in them" in {
      val es = oldEncoder(Map("a" -> "b&="))
      flash.decode(es) must beEmpty
    }

    "decode values with gibberish in them" in {
      flash.decode("asfjdlkasjdflk") must beEmpty
    }

    "put disallows null values" in {
      val c = Flash(Map("foo" -> "bar"))
      c + (("x", null)) must throwA(
        new IllegalArgumentException("requirement failed: Flash value for x cannot be null")
      )
    }

    "be insecure by default" in {
      flash.encodeAsCookie(Flash()).secure must beFalse
    }

    "decode pair with value including '='" in {
      // only the first '=' separates key from value
      flash.decode("a=foo=bar&b=lorem") must_== Map(
        "a" -> "foo=bar",
        "b" -> "lorem"
      )
    }
  }

  // ---

  // Encodes data the way Play historically did: "key:value" pairs joined with NUL,
  // then URL-encoded. Used to verify the legacy format is rejected.
  def oldEncoder(data: Map[String, String]): String = {
    URLEncoder.encode(
      data.map(d => d._1 + ":" + d._2).mkString("\u0000"),
      "UTF-8"
    )
  }

  // A flash cookie baker built with a fixed application secret, so encoding is
  // deterministic across examples.
  def flash: FlashCookieBaker = {
    val secretConfiguration =
      SecretConfiguration(secret = "vQU@MgnjTohP?w>jpu?X0oqvmz21o[AHP;/rPj?CB><YMFcl?xXfq]6o>1QuNcXU")
    new DefaultFlashCookieBaker(
      FlashConfiguration(),
      secretConfiguration,
      new CookieSignerProvider(secretConfiguration).get
    )
  }
}
| mkurz/playframework | core/play/src/test/scala/play/api/mvc/FlashCookieSpec.scala | Scala | apache-2.0 | 3,690 |
package models
import org.specs2.mutable._
import play.api.test._
import play.api.test.Helpers._
/**
 * Tests for [[models.RegionHash]]: lookup by name must produce the expected hash,
 * and lookup by hash must resolve cached names (or "-unknown-" otherwise).
 */
class RegionHashSpec extends Specification {

  "RegionHashSpec" should {
    "provide a method: #byName" >> {
      "should return correct Regionhash" >> {
        running(FakeApplication(additionalConfiguration = inMemoryDatabase())) {
          val regionHash = RegionHash.byName("a")
          regionHash.name must equalTo("a")
          regionHash.hash must equalTo(RegionHash.hash("a"))
        }
      }
      "should return not be equal to another hash" >> {
        running(FakeApplication(additionalConfiguration = inMemoryDatabase())) {
          val regionHashA = RegionHash.byName("a")
          val regionHashB = RegionHash.byName("b")
          // Bug fix: the original assertion compared a String (`regionHashA.name`) to a
          // RegionHash instance, which can never be equal, so the test always passed
          // vacuously. Compare the actual fields of the two region hashes instead.
          regionHashA.name mustNotEqual regionHashB.name
          regionHashA.hash mustNotEqual regionHashB.hash
        }
      }
    }
    "provide a method: #byHash" >> {
      "should return -unkown-" >> {
        running(FakeApplication(additionalConfiguration = inMemoryDatabase())) {
          // "c" was never registered via byName, so its hash cannot be resolved
          val regionHash = RegionHash.byHash(RegionHash.hash("c"))
          regionHash.name must equalTo("-unknown-")
        }
      }
      "should return c" >> {
        running(FakeApplication(additionalConfiguration = inMemoryDatabase())) {
          val md5Sum = RegionHash.hash("c")
          RegionHash.byHash(md5Sum) // this should not be cached
          RegionHash.byName("c") // this should be cached
          val regionHash = RegionHash.byHash(md5Sum)
          regionHash.name must equalTo("c")
        }
      }
    }
  }
}
} | Connexity/hannibal | test/models/RegionHashSpec.scala | Scala | apache-2.0 | 1,524 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.orc
import java.io.File
import java.nio.charset.StandardCharsets
import java.sql.Timestamp
import java.time.{LocalDateTime, ZoneOffset}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.{JobID, TaskAttemptID, TaskID, TaskType}
import org.apache.hadoop.mapreduce.lib.input.FileSplit
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.orc.{OrcConf, OrcFile}
import org.apache.orc.OrcConf.COMPRESS
import org.apache.orc.mapred.OrcStruct
import org.apache.orc.mapreduce.OrcInputFormat
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.execution.FileSourceScanExec
import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation, RecordReaderIterator}
import org.apache.spark.sql.execution.datasources.v2.BatchScanExec
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
// Row type exercising every primitive field plus arrays (with and without nulls),
// maps (with and without null values) and a nested tuple, used to round-trip the
// full range of ORC-supported data types.
case class AllDataTypesWithNonPrimitiveType(
    stringField: String,
    intField: Int,
    longField: Long,
    floatField: Float,
    doubleField: Double,
    shortField: Short,
    byteField: Byte,
    booleanField: Boolean,
    array: Seq[Int],
    arrayContainsNull: Seq[Option[Int]],
    map: Map[Int, Long],
    mapValueContainsNull: Map[Int, Option[Long]],
    data: (Seq[Int], (Int, String)))

// Row type for binary (byte array) round-trip tests.
case class BinaryData(binaryData: Array[Byte])

// Nested struct types for testing complex schemas and predicate pushdown.
case class Contact(name: String, phone: String)

case class Person(name: String, age: Int, contacts: Seq[Contact])
/**
 * Test cases for Spark's native ORC data source that are shared between its
 * implementations; concrete subclasses choose the actual code path (e.g. the
 * v1 FileFormat vs the DataSourceV2 reader) through their SparkConf.
 */
abstract class OrcQueryTest extends OrcTest {
  import testImplicits._

  // Round-trips every primitive type through an ORC file.
  test("Read/write All Types") {
    val data = (0 to 255).map { i =>
      (s"$i", i, i.toLong, i.toFloat, i.toDouble, i.toShort, i.toByte, i % 2 == 0)
    }
    withOrcFile(data) { file =>
      checkAnswer(
        spark.read.orc(file),
        data.toDF().collect())
    }
  }

  // BINARY columns must survive a round-trip byte-for-byte.
  test("Read/write binary data") {
    withOrcFile(BinaryData("test".getBytes(StandardCharsets.UTF_8)) :: Nil) { file =>
      val bytes = spark.read.orc(file).head().getAs[Array[Byte]](0)
      assert(new String(bytes, StandardCharsets.UTF_8) === "test")
    }
  }

  // Round-trips arrays, maps (with and without nulls) and nested tuples.
  test("Read/write all types with non-primitive type") {
    val data: Seq[AllDataTypesWithNonPrimitiveType] = (0 to 255).map { i =>
      AllDataTypesWithNonPrimitiveType(
        s"$i", i, i.toLong, i.toFloat, i.toDouble, i.toShort, i.toByte, i % 2 == 0,
        0 until i,
        (0 until i).map(Option(_).filter(_ % 3 == 0)),
        (0 until i).map(i => i -> i.toLong).toMap,
        (0 until i).map(i => i -> Option(i.toLong)).toMap + (i -> None),
        (0 until i, (i, s"$i")))
    }
    withOrcFile(data) { file =>
      checkAnswer(
        spark.read.orc(file),
        data.toDF().collect())
    }
  }

  // A UDT column is written with its underlying SQL type and must read back
  // when the original schema (carrying the UDT) is supplied explicitly.
  test("Read/write UserDefinedType") {
    withTempPath { path =>
      val data = Seq((1, new TestUDT.MyDenseVector(Array(0.25, 2.25, 4.25))))
      val udtDF = data.toDF("id", "vectors")
      udtDF.write.orc(path.getAbsolutePath)
      val readBack = spark.read.schema(udtDF.schema).orc(path.getAbsolutePath)
      checkAnswer(udtDF, readBack)
    }
  }

  test("Creating case class RDD table") {
    val data = (1 to 100).map(i => (i, s"val_$i"))
    sparkContext.parallelize(data).toDF().createOrReplaceTempView("t")
    withTempView("t") {
      checkAnswer(sql("SELECT * FROM t"), data.toDF().collect())
    }
  }

  // Exercises predicate pushdown (PPD); the comments below show the ORC
  // SearchArgument each query is expected to produce.
  test("Simple selection form ORC table") {
    val data = (1 to 10).map { i =>
      Person(s"name_$i", i, (0 to 1).map { m => Contact(s"contact_$m", s"phone_$m") })
    }
    withOrcTable(data, "t") {
      // ppd:
      // leaf-0 = (LESS_THAN_EQUALS age 5)
      // expr = leaf-0
      assert(sql("SELECT name FROM t WHERE age <= 5").count() === 5)
      // ppd:
      // leaf-0 = (LESS_THAN_EQUALS age 5)
      // expr = (not leaf-0)
      assertResult(10) {
        sql("SELECT name, contacts FROM t where age > 5")
          .rdd
          .flatMap(_.getAs[scala.collection.Seq[_]]("contacts"))
          .count()
      }
      // ppd:
      // leaf-0 = (LESS_THAN_EQUALS age 5)
      // leaf-1 = (LESS_THAN age 8)
      // expr = (and (not leaf-0) leaf-1)
      {
        val df = sql("SELECT name, contacts FROM t WHERE age > 5 AND age < 8")
        assert(df.count() === 2)
        assertResult(4) {
          df.rdd.flatMap(_.getAs[scala.collection.Seq[_]]("contacts")).count()
        }
      }
      // ppd:
      // leaf-0 = (LESS_THAN age 2)
      // leaf-1 = (LESS_THAN_EQUALS age 8)
      // expr = (or leaf-0 (not leaf-1))
      {
        val df = sql("SELECT name, contacts FROM t WHERE age < 2 OR age > 8")
        assert(df.count() === 3)
        assertResult(6) {
          df.rdd.flatMap(_.getAs[scala.collection.Seq[_]]("contacts")).count()
        }
      }
    }
  }

  // All-None options must be written and read back as SQL NULLs.
  test("save and load case class RDD with `None`s as orc") {
    val data = (
      Option.empty[Int],
      Option.empty[Long],
      Option.empty[Float],
      Option.empty[Double],
      Option.empty[Boolean]
    ) :: Nil
    withOrcFile(data) { file =>
      checkAnswer(
        spark.read.orc(file),
        Row(Seq.fill(5)(null): _*))
    }
  }

  test("SPARK-16610: Respect orc.compress (i.e., OrcConf.COMPRESS) when compression is unset") {
    // Respect `orc.compress` (i.e., OrcConf.COMPRESS).
    withTempPath { file =>
      spark.range(0, 10).write
        .option(COMPRESS.getAttribute, "ZLIB")
        .orc(file.getCanonicalPath)
      val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".zlib.orc"))
      assert(maybeOrcFile.isDefined)
      val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
      val conf = OrcFile.readerOptions(new Configuration())
      // Verify via the ORC reader metadata, not just the file name.
      Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
        assert("ZLIB" === reader.getCompressionKind.name)
      }
    }
    // `compression` overrides `orc.compress`.
    withTempPath { file =>
      spark.range(0, 10).write
        .option("compression", "ZLIB")
        .option(COMPRESS.getAttribute, "SNAPPY")
        .orc(file.getCanonicalPath)
      val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".zlib.orc"))
      assert(maybeOrcFile.isDefined)
      val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
      val conf = OrcFile.readerOptions(new Configuration())
      Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
        assert("ZLIB" === reader.getCompressionKind.name)
      }
    }
  }

  // Each codec is checked twice: through the file-name suffix and through the
  // compression kind recorded in the ORC footer.
  test("Compression options for writing to an ORC file (SNAPPY, ZLIB and NONE)") {
    withTempPath { file =>
      spark.range(0, 10).write
        .option("compression", "ZLIB")
        .orc(file.getCanonicalPath)
      val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".zlib.orc"))
      assert(maybeOrcFile.isDefined)
      val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
      val conf = OrcFile.readerOptions(new Configuration())
      Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
        assert("ZLIB" === reader.getCompressionKind.name)
      }
    }
    withTempPath { file =>
      spark.range(0, 10).write
        .option("compression", "SNAPPY")
        .orc(file.getCanonicalPath)
      val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".snappy.orc"))
      assert(maybeOrcFile.isDefined)
      val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
      val conf = OrcFile.readerOptions(new Configuration())
      Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
        assert("SNAPPY" === reader.getCompressionKind.name)
      }
    }
    withTempPath { file =>
      spark.range(0, 10).write
        .option("compression", "NONE")
        .orc(file.getCanonicalPath)
      val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".orc"))
      assert(maybeOrcFile.isDefined)
      val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
      val conf = OrcFile.readerOptions(new Configuration())
      Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
        assert("NONE" === reader.getCompressionKind.name)
      }
    }
  }

  test("simple select queries") {
    withOrcTable((0 until 10).map(i => (i, i.toString)), "t") {
      checkAnswer(
        sql("SELECT `_1` FROM t where t.`_1` > 5"),
        (6 until 10).map(Row.apply(_)))
      checkAnswer(
        sql("SELECT `_1` FROM t as tmp where tmp.`_1` < 5"),
        (0 until 5).map(Row.apply(_)))
    }
  }

  // INSERT INTO an ORC-backed view must append, not overwrite.
  test("appending") {
    val data = (0 until 10).map(i => (i, i.toString))
    spark.createDataFrame(data).toDF("c1", "c2").createOrReplaceTempView("tmp")
    withOrcFile(data) { file =>
      withTempView("t") {
        spark.read.orc(file).createOrReplaceTempView("t")
        checkAnswer(spark.table("t"), data.map(Row.fromTuple))
        sql("INSERT INTO TABLE t SELECT * FROM tmp")
        checkAnswer(spark.table("t"), (data ++ data).map(Row.fromTuple))
      }
    }
    spark.sessionState.catalog.dropTable(
      TableIdentifier("tmp"),
      ignoreIfNotExists = true,
      purge = false)
  }

  // INSERT OVERWRITE must replace the previous contents entirely.
  test("overwriting") {
    val data = (0 until 10).map(i => (i, i.toString))
    spark.createDataFrame(data).toDF("c1", "c2").createOrReplaceTempView("tmp")
    withOrcTable(data, "t") {
      sql("INSERT OVERWRITE TABLE t SELECT * FROM tmp")
      checkAnswer(spark.table("t"), data.map(Row.fromTuple))
    }
    spark.sessionState.catalog.dropTable(
      TableIdentifier("tmp"),
      ignoreIfNotExists = true,
      purge = false)
  }

  test("self-join") {
    // 4 rows, cells of column 1 of row 2 and row 4 are null
    val data = (1 to 4).map { i =>
      val maybeInt = if (i % 2 == 0) None else Some(i)
      (maybeInt, i.toString)
    }
    withOrcTable(data, "t") {
      val selfJoin = sql("SELECT * FROM t x JOIN t y WHERE x.`_1` = y.`_1`")
      val queryOutput = selfJoin.queryExecution.analyzed.output
      assertResult(4, "Field count mismatches")(queryOutput.size)
      // Both sides of the join must keep distinct expression IDs for `_1`.
      assertResult(2, s"Duplicated expression ID in query plan:\n $selfJoin") {
        queryOutput.filter(_.name == "_1").map(_.exprId).size
      }
      checkAnswer(selfJoin, List(Row(1, "1", 1, "1"), Row(3, "3", 3, "3")))
    }
  }

  test("nested data - struct with array field") {
    val data = (1 to 10).map(i => Tuple1((i, Seq(s"val_$i"))))
    withOrcTable(data, "t") {
      checkAnswer(sql("SELECT `_1`.`_2`[0] FROM t"), data.map {
        case Tuple1((_, Seq(string))) => Row(string)
      })
    }
  }

  test("nested data - array of struct") {
    val data = (1 to 10).map(i => Tuple1(Seq(i -> s"val_$i")))
    withOrcTable(data, "t") {
      checkAnswer(sql("SELECT `_1`[0].`_2` FROM t"), data.map {
        case Tuple1(Seq((_, string))) => Row(string)
      })
    }
  }

  // A column used only by a pushed-down filter must still be readable.
  test("columns only referenced by pushed down filters should remain") {
    withOrcTable((1 to 10).map(Tuple1.apply), "t") {
      checkAnswer(sql("SELECT `_1` FROM t WHERE `_1` < 10"), (1 to 9).map(Row.apply(_)))
    }
  }

  // Low-cardinality strings trigger ORC dictionary encoding; aggregation
  // results must not be affected by it.
  test("SPARK-5309 strings stored using dictionary compression in orc") {
    withOrcTable((0 until 1000).map(i => ("same", "run_" + i / 100, 1)), "t") {
      checkAnswer(
        sql("SELECT `_1`, `_2`, SUM(`_3`) FROM t GROUP BY `_1`, `_2`"),
        (0 until 10).map(i => Row("same", "run_" + i, 100)))
      checkAnswer(
        sql("SELECT `_1`, `_2`, SUM(`_3`) FROM t WHERE `_2` = 'run_5' GROUP BY `_1`, `_2`"),
        List(Row("same", "run_5", 100)))
    }
  }

  // The physical schema must keep the user's original casing, while column
  // resolution in queries stays case-insensitive.
  test("SPARK-9170: Don't implicitly lowercase of user-provided columns") {
    withTempPath { dir =>
      val path = dir.getCanonicalPath
      spark.range(0, 10).select('id as "Acol").write.orc(path)
      spark.read.orc(path).schema("Acol")
      intercept[IllegalArgumentException] {
        spark.read.orc(path).schema("acol")
      }
      checkAnswer(spark.read.orc(path).select("acol").sort("acol"),
        (0 until 10).map(Row(_)))
    }
  }

  test("SPARK-10623 Enable ORC PPD") {
    withTempPath { dir =>
      withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
        import testImplicits._
        val path = dir.getCanonicalPath
        // For field "a", the first column has odds integers. This is to check the filtered count
        // when `isNull` is performed. For Field "b", `isNotNull` of ORC file filters rows
        // only when all the values are null (maybe this works differently when the data
        // or query is complicated). So, simply here a column only having `null` is added.
        val data = (0 until 10).map { i =>
          val maybeInt = if (i % 2 == 0) None else Some(i)
          val nullValue: Option[String] = None
          (maybeInt, nullValue)
        }
        // It needs to repartition data so that we can have several ORC files
        // in order to skip stripes in ORC.
        spark.createDataFrame(data).toDF("a", "b").repartition(10).write.orc(path)
        val df = spark.read.orc(path)
        def checkPredicate(pred: Column, answer: Seq[Row]): Unit = {
          val sourceDf = stripSparkFilter(df.where(pred))
          val data = sourceDf.collect().toSet
          val expectedData = answer.toSet
          // When a filter is pushed to ORC, ORC can apply it to rows. So, we can check
          // the number of rows returned from the ORC to make sure our filter pushdown work.
          // A tricky part is, ORC does not process filter rows fully but return some possible
          // results. So, this checks if the number of result is less than the original count
          // of data, and then checks if it contains the expected data.
          assert(
            sourceDf.count < 10 && expectedData.subsetOf(data),
            s"No data was filtered for predicate: $pred")
        }
        checkPredicate('a === 5, List(5).map(Row(_, null)))
        checkPredicate('a <=> 5, List(5).map(Row(_, null)))
        checkPredicate('a < 5, List(1, 3).map(Row(_, null)))
        checkPredicate('a <= 5, List(1, 3, 5).map(Row(_, null)))
        checkPredicate('a > 5, List(7, 9).map(Row(_, null)))
        checkPredicate('a >= 5, List(5, 7, 9).map(Row(_, null)))
        checkPredicate('a.isNull, List(null).map(Row(_, null)))
        checkPredicate('b.isNotNull, List())
        checkPredicate('a.isin(3, 5, 7), List(3, 5, 7).map(Row(_, null)))
        checkPredicate('a > 0 && 'a < 3, List(1).map(Row(_, null)))
        checkPredicate('a < 1 || 'a > 8, List(9).map(Row(_, null)))
        checkPredicate(!('a > 3), List(1, 3).map(Row(_, null)))
        checkPredicate(!('a > 0 && 'a < 3), List(3, 5, 7, 9).map(Row(_, null)))
      }
    }
  }

  test("SPARK-14962 Produce correct results on array type with isnotnull") {
    withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
      val data = (0 until 10).map(i => Tuple1(Array(i)))
      withOrcFile(data) { file =>
        val actual = spark
          .read
          .orc(file)
          .where("_1 is not null")
        val expected = data.toDF()
        checkAnswer(actual, expected)
      }
    }
  }

  test("SPARK-15198 Support for pushing down filters for boolean types") {
    withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
      val data = (0 until 10).map(_ => (true, false))
      withOrcFile(data) { file =>
        val df = spark.read.orc(file).where("_2 == true")
        val actual = stripSparkFilter(df).count()
        // ORC filter should be applied and the total count should be 0.
        assert(actual === 0)
      }
    }
  }

  test("Support for pushing down filters for decimal types") {
    withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
      val data = (0 until 10).map(i => Tuple1(BigDecimal.valueOf(i)))
      checkPredicatePushDown(spark.createDataFrame(data).toDF("a"), 10, "a == 2")
    }
  }

  test("Support for pushing down filters for timestamp types") {
    withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
      val timeString = "2015-08-20 14:57:00"
      val data = (0 until 10).map { i =>
        val milliseconds = Timestamp.valueOf(timeString).getTime + i * 3600
        Tuple1(new Timestamp(milliseconds))
      }
      checkPredicatePushDown(spark.createDataFrame(data).toDF("a"), 10, s"a == '$timeString'")
    }
  }

  test("column nullability and comment - write and then read") {
    val schema = (new StructType)
      .add("cl1", IntegerType, nullable = false, comment = "test")
      .add("cl2", IntegerType, nullable = true)
      .add("cl3", IntegerType, nullable = true)
    val row = Row(3, null, 4)
    val df = spark.createDataFrame(sparkContext.parallelize(row :: Nil), schema)
    val tableName = "tab"
    withTable(tableName) {
      df.write.format("orc").mode("overwrite").saveAsTable(tableName)
      // Verify the DDL command result: DESCRIBE TABLE
      checkAnswer(
        sql(s"desc $tableName").select("col_name", "comment").where($"comment" === "test"),
        Row("cl1", "test") :: Nil)
      // Verify the schema
      // Note: all fields are read back as nullable, regardless of how written.
      val expectedFields = schema.fields.map(f => f.copy(nullable = true))
      assert(spark.table(tableName).schema == schema.copy(fields = expectedFields))
    }
  }

  // Reads an ORC file directly with a raw Hadoop record reader, selecting no
  // columns at all; each record must come back with only nulls.
  test("Empty schema does not read data from ORC file") {
    val data = Seq((1, 1), (2, 2))
    withOrcFile(data) { path =>
      val conf = new Configuration()
      conf.set(OrcConf.INCLUDE_COLUMNS.getAttribute, "")
      conf.setBoolean("hive.io.file.read.all.columns", false)
      val orcRecordReader = {
        val file = new File(path).listFiles().find(_.getName.endsWith(".snappy.orc")).head
        val split = new FileSplit(new Path(file.toURI), 0, file.length, Array.empty[String])
        val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
        val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
        val oif = new OrcInputFormat[OrcStruct]
        oif.createRecordReader(split, hadoopAttemptContext)
      }
      val recordsIterator = new RecordReaderIterator[OrcStruct](orcRecordReader)
      try {
        assert(recordsIterator.next().toString == "{null, null}")
      } finally {
        recordsIterator.close()
      }
    }
  }

  test("read from multiple orc input paths") {
    val path1 = Utils.createTempDir()
    val path2 = Utils.createTempDir()
    makeOrcFile((1 to 10).map(Tuple1.apply), path1)
    makeOrcFile((1 to 10).map(Tuple1.apply), path2)
    val df = spark.read.orc(path1.getCanonicalPath, path2.getCanonicalPath)
    assert(df.count() == 20)
  }

  // The "third" directory always contains a JSON file, i.e. a corrupt input
  // from ORC's point of view; behavior depends on IGNORE_CORRUPT_FILES.
  test("Enabling/disabling ignoreCorruptFiles") {
    def testIgnoreCorruptFiles(): Unit = {
      withTempDir { dir =>
        val basePath = dir.getCanonicalPath
        spark.range(1).toDF("a").write.orc(new Path(basePath, "first").toString)
        spark.range(1, 2).toDF("a").write.orc(new Path(basePath, "second").toString)
        spark.range(2, 3).toDF("a").write.json(new Path(basePath, "third").toString)
        val df = spark.read.orc(
          new Path(basePath, "first").toString,
          new Path(basePath, "second").toString,
          new Path(basePath, "third").toString)
        checkAnswer(df, Seq(Row(0), Row(1)))
      }
    }
    def testIgnoreCorruptFilesWithoutSchemaInfer(): Unit = {
      withTempDir { dir =>
        val basePath = dir.getCanonicalPath
        spark.range(1).toDF("a").write.orc(new Path(basePath, "first").toString)
        spark.range(1, 2).toDF("a").write.orc(new Path(basePath, "second").toString)
        spark.range(2, 3).toDF("a").write.json(new Path(basePath, "third").toString)
        val df = spark.read.schema("a long").orc(
          new Path(basePath, "first").toString,
          new Path(basePath, "second").toString,
          new Path(basePath, "third").toString)
        checkAnswer(df, Seq(Row(0), Row(1)))
      }
    }
    def testAllCorruptFiles(): Unit = {
      withTempDir { dir =>
        val basePath = dir.getCanonicalPath
        spark.range(1).toDF("a").write.json(new Path(basePath, "first").toString)
        spark.range(1, 2).toDF("a").write.json(new Path(basePath, "second").toString)
        val df = spark.read.orc(
          new Path(basePath, "first").toString,
          new Path(basePath, "second").toString)
        assert(df.count() == 0)
      }
    }
    def testAllCorruptFilesWithoutSchemaInfer(): Unit = {
      withTempDir { dir =>
        val basePath = dir.getCanonicalPath
        spark.range(1).toDF("a").write.json(new Path(basePath, "first").toString)
        spark.range(1, 2).toDF("a").write.json(new Path(basePath, "second").toString)
        val df = spark.read.schema("a long").orc(
          new Path(basePath, "first").toString,
          new Path(basePath, "second").toString)
        assert(df.count() == 0)
      }
    }
    withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "true") {
      testIgnoreCorruptFiles()
      testIgnoreCorruptFilesWithoutSchemaInfer()
      // With every file corrupt, schema inference has nothing to work with.
      val m1 = intercept[AnalysisException] {
        testAllCorruptFiles()
      }.getMessage
      assert(m1.contains("Unable to infer schema for ORC"))
      testAllCorruptFilesWithoutSchemaInfer()
    }
    withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "false") {
      val e1 = intercept[SparkException] {
        testIgnoreCorruptFiles()
      }
      assert(e1.getMessage.contains("Malformed ORC file"))
      val e2 = intercept[SparkException] {
        testIgnoreCorruptFilesWithoutSchemaInfer()
      }
      assert(e2.getMessage.contains("Malformed ORC file"))
      val e3 = intercept[SparkException] {
        testAllCorruptFiles()
      }
      assert(e3.getMessage.contains("Could not read footer for file"))
      val e4 = intercept[SparkException] {
        testAllCorruptFilesWithoutSchemaInfer()
      }
      assert(e4.getMessage.contains("Malformed ORC file"))
    }
  }

  // Decimal comparisons in pushed-down predicates must match Spark's own
  // evaluation (scale handling is the tricky part).
  test("SPARK-27160 Predicate pushdown correctness on DecimalType for ORC") {
    withTempPath { dir =>
      withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
        val path = dir.getCanonicalPath
        Seq(BigDecimal(0.1), BigDecimal(0.2), BigDecimal(-0.3))
          .toDF("x").write.orc(path)
        val df = spark.read.orc(path)
        checkAnswer(df.filter("x >= 0.1"), Seq(Row(0.1), Row(0.2)))
        checkAnswer(df.filter("x > 0.1"), Seq(Row(0.2)))
        checkAnswer(df.filter("x <= 0.15"), Seq(Row(0.1), Row(-0.3)))
        checkAnswer(df.filter("x < 0.1"), Seq(Row(-0.3)))
        checkAnswer(df.filter("x == 0.2"), Seq(Row(0.2)))
      }
    }
  }
}
/**
 * ORC query tests that only apply to the native (sql/core) implementation, on
 * top of the shared [[OrcQueryTest]] cases. Concrete subclasses select the v1
 * or v2 read path via `spark.sql.sources.useV1SourceList`.
 */
abstract class OrcQuerySuite extends OrcQueryTest with SharedSparkSession {
  import testImplicits._

  // LZO is only supported by the native implementation, hence not in OrcQueryTest.
  test("LZO compression options for writing to an ORC file") {
    withTempPath { file =>
      spark.range(0, 10).write
        .option("compression", "LZO")
        .orc(file.getCanonicalPath)
      val maybeOrcFile = file.listFiles().find(_.getName.endsWith(".lzo.orc"))
      assert(maybeOrcFile.isDefined)
      val orcFilePath = new Path(maybeOrcFile.get.getAbsolutePath)
      val conf = OrcFile.readerOptions(new Configuration())
      // Confirm the codec from the file footer, not only the file name.
      Utils.tryWithResource(OrcFile.createReader(orcFilePath, conf)) { reader =>
        assert("LZO" === reader.getCompressionKind.name)
      }
    }
  }

  test("Schema discovery on empty ORC files") {
    // SPARK-8501 is fixed.
    withTempPath { dir =>
      val path = dir.getCanonicalPath
      withTable("empty_orc") {
        withTempView("empty", "single") {
          spark.sql(
            s"""CREATE TABLE empty_orc(key INT, value STRING)
               |USING ORC
               |LOCATION '${dir.toURI}'
             """.stripMargin)
          val emptyDF = Seq.empty[(Int, String)].toDF("key", "value").coalesce(1)
          emptyDF.createOrReplaceTempView("empty")
          // This creates 1 empty ORC file with ORC SerDe. We are using this trick because
          // Spark SQL ORC data source always avoids write empty ORC files.
          spark.sql(
            s"""INSERT INTO TABLE empty_orc
               |SELECT key, value FROM empty
             """.stripMargin)
          val df = spark.read.orc(path)
          assert(df.schema === emptyDF.schema.asNullable)
          checkAnswer(df, emptyDF)
        }
      }
    }
  }

  test("SPARK-21791 ORC should support column names with dot") {
    withTempDir { dir =>
      val path = new File(dir, "orc").getCanonicalPath
      Seq(Some(1), None).toDF("col.dots").write.orc(path)
      assert(spark.read.orc(path).collect().length == 2)
    }
  }

  test("SPARK-25579 ORC PPD should support column names with dot") {
    withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> "true") {
      checkPredicatePushDown(spark.range(10).toDF("col.dot"), 10, "`col.dot` == 2")
    }
  }

  // Switching spark.sql.orc.impl must select the corresponding FileFormat,
  // and the "hive" implementation must be rejected without Hive support.
  test("SPARK-20728 Make ORCFileFormat configurable between sql/hive and sql/core") {
    withSQLConf(SQLConf.ORC_IMPLEMENTATION.key -> "hive") {
      val e = intercept[AnalysisException] {
        sql("CREATE TABLE spark_20728(a INT) USING ORC")
      }
      assert(e.message.contains("Hive built-in ORC data source must be used with Hive support"))
    }
    withSQLConf(SQLConf.ORC_IMPLEMENTATION.key -> "native") {
      withTable("spark_20728") {
        sql("CREATE TABLE spark_20728(a INT) USING ORC")
        val fileFormat = sql("SELECT * FROM spark_20728").queryExecution.analyzed.collectFirst {
          case l: LogicalRelation => l.relation.asInstanceOf[HadoopFsRelation].fileFormat.getClass
        }
        assert(fileFormat == Some(classOf[OrcFileFormat]))
      }
    }
  }

  test("SPARK-34862: Support ORC vectorized reader for nested column") {
    withTempPath { dir =>
      val path = dir.getCanonicalPath
      val df = spark.range(10).map { x =>
        val stringColumn = s"$x" * 10
        val structColumn = (x, s"$x" * 100)
        val arrayColumn = (0 until 5).map(i => (x + i, s"$x" * 5))
        val mapColumn = Map(
          s"$x" -> (x * 0.1, (x, s"$x" * 100)),
          (s"$x" * 2) -> (x * 0.2, (x, s"$x" * 200)),
          (s"$x" * 3) -> (x * 0.3, (x, s"$x" * 300)))
        (x, stringColumn, structColumn, arrayColumn, mapColumn)
      }.toDF("int_col", "string_col", "struct_col", "array_col", "map_col")
      df.write.format("orc").save(path)
      withSQLConf(SQLConf.ORC_VECTORIZED_READER_NESTED_COLUMN_ENABLED.key -> "true") {
        val readDf = spark.read.orc(path)
        // The scan node must report columnar support, i.e. vectorization kicked in.
        val vectorizationEnabled = readDf.queryExecution.executedPlan.find {
          case scan @ (_: FileSourceScanExec | _: BatchScanExec) => scan.supportsColumnar
          case _ => false
        }.isDefined
        assert(vectorizationEnabled)
        checkAnswer(readDf, df)
      }
    }
  }

  test("SPARK-37728: Reading nested columns with ORC vectorized reader should not " +
    "cause ArrayIndexOutOfBoundsException") {
    withTempPath { dir =>
      val path = dir.getCanonicalPath
      val df = spark.range(100).map { _ =>
        val arrayColumn = (0 until 50).map(_ => (0 until 1000).map(k => k.toString))
        arrayColumn
      }.toDF("record").repartition(1)
      df.write.format("orc").save(path)
      withSQLConf(SQLConf.ORC_VECTORIZED_READER_NESTED_COLUMN_ENABLED.key -> "true") {
        val readDf = spark.read.orc(path)
        val vectorizationEnabled = readDf.queryExecution.executedPlan.find {
          case scan @ (_: FileSourceScanExec | _: BatchScanExec) => scan.supportsColumnar
          case _ => false
        }.isDefined
        assert(vectorizationEnabled)
        checkAnswer(readDf, df)
      }
    }
  }

  // Flattening the nested schema yields more than 5 fields, so vectorization
  // must be disabled at maxNumFields=5 and enabled at 10.
  test("SPARK-36594: ORC vectorized reader should properly check maximal number of fields") {
    withTempPath { dir =>
      val path = dir.getCanonicalPath
      val df = spark.range(10).map { x =>
        val stringColumn = s"$x" * 10
        val structColumn = (x, s"$x" * 100)
        val arrayColumn = (0 until 5).map(i => (x + i, s"$x" * 5))
        val mapColumn = Map(s"$x" -> (x * 0.1, (x, s"$x" * 100)))
        (x, stringColumn, structColumn, arrayColumn, mapColumn)
      }.toDF("int_col", "string_col", "struct_col", "array_col", "map_col")
      df.write.format("orc").save(path)
      Seq(("5", false), ("10", true)).foreach {
        case (maxNumFields, vectorizedEnabled) =>
          withSQLConf(SQLConf.ORC_VECTORIZED_READER_NESTED_COLUMN_ENABLED.key -> "true",
            SQLConf.WHOLESTAGE_MAX_NUM_FIELDS.key -> maxNumFields) {
            val scanPlan = spark.read.orc(path).queryExecution.executedPlan
            assert(scanPlan.find {
              case scan @ (_: FileSourceScanExec | _: BatchScanExec) => scan.supportsColumnar
              case _ => false
            }.isDefined == vectorizedEnabled)
          }
      }
    }
  }

  // Round-trips both TimestampType and TimestampNTZType (plus nulls) under
  // every native reader configuration.
  test("Read/write all timestamp types") {
    val data = (0 to 255).map { i =>
      (new Timestamp(i), LocalDateTime.of(2019, 3, 21, 0, 2, 3, 456000000 + i))
    } :+ (null, null)
    withOrcFile(data) { file =>
      withAllNativeOrcReaders {
        checkAnswer(spark.read.orc(file), data.toDF().collect())
      }
    }
  }

  test("SPARK-36346: can't read TimestampLTZ as TimestampNTZ") {
    val data = (1 to 10).map { i =>
      val ts = new Timestamp(i)
      Row(ts)
    }
    // Data is written as TimestampType (LTZ) but read back with an NTZ schema;
    // the read is expected to fail, so no expected answer is constructed.
    val actualSchema = StructType(Seq(StructField("time", TimestampType, false)))
    val providedSchema = StructType(Seq(StructField("time", TimestampNTZType, false)))
    withTempPath { file =>
      val df = spark.createDataFrame(sparkContext.parallelize(data), actualSchema)
      df.write.orc(file.getCanonicalPath)
      withAllNativeOrcReaders {
        val msg = intercept[SparkException] {
          spark.read.schema(providedSchema).orc(file.getCanonicalPath).collect()
        }.getMessage
        assert(msg.contains("Unable to convert timestamp of Orc to data type 'timestamp_ntz'"))
      }
    }
  }

  // The reverse direction is allowed: NTZ data read back as LTZ.
  test("SPARK-36346: read TimestampNTZ as TimestampLTZ") {
    val data = (1 to 10).map { i =>
      // The second parameter is `nanoOfSecond`, while java.sql.Timestamp accepts milliseconds
      // as input. So here we multiple the `nanoOfSecond` by NANOS_PER_MILLIS
      val ts = LocalDateTime.ofEpochSecond(0, i * 1000000, ZoneOffset.UTC)
      Row(ts)
    }
    val answer = (1 to 10).map { i =>
      val ts = new java.sql.Timestamp(i)
      Row(ts)
    }
    val actualSchema = StructType(Seq(StructField("time", TimestampNTZType, false)))
    val providedSchema = StructType(Seq(StructField("time", TimestampType, false)))
    withTempPath { file =>
      val df = spark.createDataFrame(sparkContext.parallelize(data), actualSchema)
      df.write.orc(file.getCanonicalPath)
      withAllNativeOrcReaders {
        checkAnswer(spark.read.schema(providedSchema).orc(file.getCanonicalPath), answer)
      }
    }
  }
}
/** Runs the ORC query tests through the v1 (FileFormat-based) code path. */
class OrcV1QuerySuite extends OrcQuerySuite {
  // Listing "orc" in the v1 source list forces the legacy read/write path.
  override protected def sparkConf: SparkConf = {
    val conf = super.sparkConf
    conf.set(SQLConf.USE_V1_SOURCE_LIST, "orc")
  }
}
/** Runs the ORC query tests through the DataSourceV2 code path. */
class OrcV2QuerySuite extends OrcQuerySuite {
  // An empty v1 source list routes every source, including ORC, through v2.
  override protected def sparkConf: SparkConf = {
    val conf = super.sparkConf
    conf.set(SQLConf.USE_V1_SOURCE_LIST, "")
  }
}
| shaneknapp/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala | Scala | apache-2.0 | 32,261 |
package org.apache.spark.ml.dsl
import org.apache.spark.ml.feature._
/**
 * Tests for the DFD "append" operators (`:->`, `<-:`, `:>>`, `:=>>`): output
 * naming, associativity, and how appending a Stage vs. a Source resolves to a
 * rebase or a union of flows.
 *
 * Created by peng on 27/04/16.
 */
class AppendSuite extends AbstractDFDSuite {
  import DFDComponent._

  // Each appended stage derives its output name by suffixing its input name.
  it("can automatically generate names") {
    val flow = (
      'input
        :>> new Tokenizer()
        :=>> new Tokenizer()
        :-> new Tokenizer()
        :>> new Tokenizer()
    )
    flow
      .show(showID = false, compactionOpt = compactionOpt)
      .treeNodeShouldBe(
        """
          |\ left >
          |> ForwardNode (TAIL>) [input]
          |+- > ForwardNode [input] > Tokenizer > [input$Tokenizer]
          |   +- > ForwardNode [input$Tokenizer] > Tokenizer > [input$Tokenizer$Tokenizer]
          |      +- > ForwardNode [input$Tokenizer$Tokenizer] > Tokenizer > [input$Tokenizer$Tokenizer$Tokenizer]
          |         +- > ForwardNode (HEAD)(<TAIL) [input$Tokenizer$Tokenizer$Tokenizer] > Tokenizer > [input$Tokenizer$Tokenizer$Tokenizer$Tokenizer]
          |/ right <
          |> ForwardNode (HEAD)(<TAIL) [input$Tokenizer$Tokenizer$Tokenizer] > Tokenizer > [input$Tokenizer$Tokenizer$Tokenizer$Tokenizer]
        """.stripMargin
      )
  }

  // Both appends feed the same source into the assembler's two input slots.
  it("pincer topology can be defined by A :-> B <-: A") {
    val input: DFDComponent = 'input
    val flow = input :-> new VectorAssembler() <-: input
    flow
      .show(showID = false, forward = false, compactionOpt = compactionOpt)
      .treeNodeShouldBe(
        """
          |< BackwardNode (HEAD) [input,input] > VectorAssembler > [input$VectorAssembler]
          |:- < BackwardNode (TAIL) [input]
          |+- < BackwardNode (TAIL) [input]
        """.stripMargin
      )
  }

  it("A :-> B :-> Source is associative") {
    val flow1 = 'input :-> new Tokenizer() :-> 'dummy // resolve to rebase then union
    val flow2 = 'input :-> (new Tokenizer() :-> 'dummy) // resolve to union then rebase
    flow1
      .show(showID = false, compactionOpt = compactionOpt)
      .treeNodeShouldBe(flow2.show(showID = false, compactionOpt = compactionOpt))
  }

  it("A <-: B <-: Source is associative") {
    val flow1 = 'dummy <-: new Tokenizer() <-: 'input
    val flow2 = 'dummy <-: (new Tokenizer() <-: 'input)
    flow1
      .show(showID = false, compactionOpt = compactionOpt)
      .treeNodeShouldBe(flow2.show(showID = false, compactionOpt = compactionOpt))
  }

  it("A :-> B :-> detached Stage is associative") {
    val flow1 = 'input :-> new Tokenizer() :-> new NGram() // resolve to rebase then union
    val flow2 = 'input :-> (new Tokenizer() :-> new NGram()) // resolve to union then rebase
    flow1
      .show(showID = false, compactionOpt = compactionOpt)
      .treeNodeShouldBe(flow2.show(showID = false, compactionOpt = compactionOpt))
  }

  it("A <-: B <-: detached Stage is associative") {
    val flow1 = new NGram() <-: new Tokenizer() <-: 'input
    val flow2 = new NGram() <-: (new Tokenizer() <-: 'input)
    flow1
      .show(showID = false, compactionOpt = compactionOpt)
      .treeNodeShouldBe(flow2.show(showID = false, compactionOpt = compactionOpt))
  }

  // Appending a Stage grafts it onto each selected head ("rebase").
  it(":-> Stage is cast to rebase") {
    val flow = (
      (
        'input
          :-> new Tokenizer()
          :-> new StopWordsRemover()
      ).from("Tokenizer")
        .and("StopWordsRemover")
        :-> new NGram()
    )
    flow
      .show(showID = false, compactionOpt = compactionOpt)
      .treeNodeShouldBe(
        """
          |\ left >
          |> ForwardNode (TAIL>) [input]
          |+- > ForwardNode [input] > Tokenizer > [input$Tokenizer]
          |   :- > ForwardNode (HEAD) [input$Tokenizer] > NGram > [input$Tokenizer$NGram]
          |   +- > ForwardNode [input$Tokenizer] > StopWordsRemover > [input$Tokenizer$StopWordsRemover]
          |      +- > ForwardNode (HEAD)(<TAIL) [input$Tokenizer$StopWordsRemover] > NGram > [input$Tokenizer$StopWordsRemover$NGram]
          |/ right <
          |> ForwardNode (HEAD)(<TAIL) [input$Tokenizer$StopWordsRemover] > NGram > [input$Tokenizer$StopWordsRemover$NGram]
        """.stripMargin
      )
  }

  it("<-: Stage is cast to rebase") {
    val flow = (
      new SQLTransformer() <-:
        new NGram() <-: (
          new StopWordsRemover() <-: new Tokenizer() <-: 'input
        ).from("Tokenizer")
          .and("StopWordsRemover")
    )
    flow
      .show(showID = false, compactionOpt = compactionOpt)
      .treeNodeShouldBe(
        """
          |\ left >
          |> ForwardNode (HEAD)(TAIL>) [input$Tokenizer$StopWordsRemover] > NGram > [input$Tokenizer$StopWordsRemover$NGram]
          |+- > ForwardNode [] > SQLTransformer > []
          |/ right <
          |> ForwardNode (<TAIL) [input]
          |+- > ForwardNode [input] > Tokenizer > [input$Tokenizer]
          |   :- > ForwardNode (HEAD) [input$Tokenizer] > NGram > [input$Tokenizer$NGram]
          |   :  +- > ForwardNode [] > SQLTransformer > []
          |   +- > ForwardNode [input$Tokenizer] > StopWordsRemover > [input$Tokenizer$StopWordsRemover]
          |      +- > ForwardNode (HEAD)(TAIL>) [input$Tokenizer$StopWordsRemover] > NGram > [input$Tokenizer$StopWordsRemover$NGram]
          |         +- > ForwardNode [] > SQLTransformer > []
        """.stripMargin
      )
  }

  // Appending a Source cannot be grafted, so it is kept as a parallel flow ("union").
  it(":-> Source is cast to union") {
    val flow = 'input :-> new Tokenizer() :-> 'dummy
    flow
      .show(showID = false, compactionOpt = compactionOpt)
      .treeNodeShouldBe(
        """
          |\ left >
          |> ForwardNode (TAIL>) [input]
          |+- > ForwardNode (HEAD)(<TAIL) [input] > Tokenizer > [input$Tokenizer]
          |> ForwardNode (HEAD)(TAIL) [dummy]
          |/ right <
          |> ForwardNode (HEAD)(<TAIL) [input] > Tokenizer > [input$Tokenizer]
          |> ForwardNode (HEAD)(TAIL) [dummy]
        """.stripMargin
      )
  }

  it("<-: Source is cast to union") {
    val flow = 'dummy <-: new Tokenizer() <-: 'input
    flow
      .show(showID = false, compactionOpt = compactionOpt)
      .treeNodeShouldBe(
        """
          |\ left >
          |> ForwardNode (HEAD)(TAIL>) [input] > Tokenizer > [input$Tokenizer]
          |> ForwardNode (HEAD)(TAIL) [dummy]
          |/ right <
          |> ForwardNode (<TAIL) [input]
          |+- > ForwardNode (HEAD)(TAIL>) [input] > Tokenizer > [input$Tokenizer]
          |> ForwardNode (HEAD)(TAIL) [dummy]
        """.stripMargin
      )
  }
}
/** Re-runs [[AppendSuite]] with the prune-down-path compaction strategy. */
class AppendSuite_PruneDownPath extends AppendSuite with UsePruneDownPath
/** Re-runs [[AppendSuite]] with the prune-down-path (keep root) compaction strategy. */
class AppendSuite_PruneDownPathKeepRoot extends AppendSuite with UsePruneDownPathKeepRoot
| tribbloid/spookystuff | mldsl/src/test/scala/org/apache/spark/ml/dsl/AppendSuite.scala | Scala | apache-2.0 | 6,420 |
/**
* Copyright (c) 2017-2018 BusyMachines
*
* See company homepage at: https://www.busymachines.com/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package busymachines.rest_json_test.routes_to_test
/**
 *
 * Defining JSON encoders like this greatly increases compilation speed, and you only
 * have to derive the top-most types anyway. Nested types of [[SomeTestDTOPost]], etc.
 * are still derived automatically.
 *
 * @author Lorand Szakacs, lsz@lorandszakacs.com, lorand.szakacs@busymachines.com
 * @since 19 Oct 2017
 *
 */
private[rest_json_test] object SomeTestDTOJsonCodec extends SomeTestDTOJsonCodec

private[rest_json_test] trait SomeTestDTOJsonCodec {

  import busymachines.json._

  // One cached codec per top-level DTO. Caching the derived codecs in vals here
  // means they are derived once, instead of at every use site.
  implicit val someTestDTOGetCodec: Codec[SomeTestDTOGet] = derive.codec[SomeTestDTOGet]
  implicit val someTestDTOPostCodec: Codec[SomeTestDTOPost] = derive.codec[SomeTestDTOPost]
  implicit val someTestDTOPutCodec: Codec[SomeTestDTOPut] = derive.codec[SomeTestDTOPut]
  implicit val someTestDTOPatchCodec: Codec[SomeTestDTOPatch] = derive.codec[SomeTestDTOPatch]
}
| busymachines/busymachines-commons | rest-json-testkit/src/test/scala/busymachines/rest_json_test/routes_to_test/SomeTestDTOJsonCodec.scala | Scala | apache-2.0 | 1,618 |
package org.jetbrains.plugins.hocon.highlight
import com.intellij.lang.annotation.{AnnotationHolder, Annotator}
import com.intellij.psi.PsiElement
/**
 * Annotator that assigns HOCON-specific text attributes to PSI elements whose
 * coloring depends on context (parent/grandparent node type) and therefore
 * cannot be decided by the lexer-based highlighter alone.
 */
class HoconSyntaxHighlightingAnnotator extends Annotator {

  import org.jetbrains.plugins.hocon.CommonUtil._
  import org.jetbrains.plugins.hocon.lexer.HoconTokenType._
  import org.jetbrains.plugins.hocon.parser.HoconElementSets._
  import org.jetbrains.plugins.hocon.parser.HoconElementType._

  def annotate(element: PsiElement, holder: AnnotationHolder): Unit = {
    // Lazy: only some branches need the parent / first child node type.
    lazy val parentType = element.getParent.getNode.getElementType
    lazy val firstChildType = element.getFirstChild.getNode.getElementType

    element.getNode.getElementType match {
      case Null =>
        holder.createInfoAnnotation(element, null).setTextAttributes(HoconHighlighterColors.Null)
      case Boolean =>
        holder.createInfoAnnotation(element, null).setTextAttributes(HoconHighlighterColors.Boolean)
      case Number =>
        holder.createInfoAnnotation(element, null).setTextAttributes(HoconHighlighterColors.Number)
      case UnquotedChars if parentType == Include =>
        holder.createInfoAnnotation(element, null).setTextAttributes(HoconHighlighterColors.Include)
      case UnquotedChars if parentType == Included || parentType == QualifiedIncluded =>
        holder.createInfoAnnotation(element, null).setTextAttributes(HoconHighlighterColors.IncludeModifier)
      case LParen | RParen if parentType == Included || parentType == QualifiedIncluded =>
        holder.createInfoAnnotation(element, null).setTextAttributes(HoconHighlighterColors.IncludeModifierParens)
      case KeyPart if firstChildType == UnquotedString =>
        // Key parts are colored differently in substitution paths vs. keyed fields.
        // NOTE(review): this inner match has no default case — a grandparent other
        // than Path/KeyedField would raise a MatchError; confirm the parser
        // guarantees one of the two here.
        val textAttributesKey = element.getParent.getParent.getNode.getElementType match {
          case Path => HoconHighlighterColors.SubstitutionKey
          case KeyedField.extractor() => HoconHighlighterColors.EntryKey
        }
        holder.createInfoAnnotation(element, null).setTextAttributes(textAttributesKey)
      case Period if parentType == Path || parentType == PrefixedField =>
        holder.createInfoAnnotation(element, null).setTextAttributes(HoconHighlighterColors.PathSeparator)
      case _ =>
    }
  }
}
| ghik/intellij-hocon | src/org/jetbrains/plugins/hocon/highlight/HoconSyntaxHighlightingAnnotator.scala | Scala | apache-2.0 | 2,222 |
package im.actor
import com.typesafe.sbt.SbtNativePackager._
import com.typesafe.sbt.packager.Keys._
import com.typesafe.sbt.packager.linux.LinuxPlugin.autoImport.packageMapping
import sbt.Keys._
import sbt._
private[actor] trait Packaging {

  /**
   * sbt-native-packager settings shared by the server build: Debian package
   * metadata and dependencies, the daemon user, and extra JVM flags injected
   * into the generated bash start script.
   */
  lazy val packagingSettings = Seq(
    scriptClasspath := Seq("*"),
    maintainer := "Actor LLC <oss@actor.im>",
    packageSummary := "Messaging platform server",
    packageDescription := "Open source messaging platform for team communications",
    version in Debian := version.value,
    debianPackageDependencies in Debian ++= Seq(
      "java8-runtime-headless",
      "libapr1",
      "openssl (>= 1.0.2)"
    ),
    daemonUser in Linux := "actor",
    daemonGroup in Linux := (daemonUser in Linux).value,
    bashScriptExtraDefines += """addJava "-Dlogback.configurationFile=${app_home}/../conf/logback.xml"""",
    // The generated script's `addJava` consumes only its first argument ($1), so
    // each JVM option must be passed in its own quoted call. The previous single
    // unquoted line silently dropped -XX:HeapDumpPath and was unsafe against the
    // spaces produced by `date`.
    bashScriptExtraDefines += """addJava "-XX:+HeapDumpOnOutOfMemoryError"""",
    bashScriptExtraDefines += """addJava "-XX:HeapDumpPath=${app_home}/../logs/dump-`date`.hprof"""",
    linuxPackageMappings += {
      val initFiles = sourceDirectory.value / "linux" / "var" / "lib" / "actor"
      packageMapping(initFiles -> "/var/lib/actor") withPerms "0644" withUser "actor" withGroup "actor" withContents()
    },
    linuxPackageMappings += {
      packageMapping(baseDirectory.value / "templates" -> "/usr/share/actor/templates") withContents()
    }
  )
}
| ufosky-server/actor-platform | actor-server/project/Packaging.scala | Scala | agpl-3.0 | 1,383 |
/*
* Copyright 2017 FOLIO Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.folio_sec.reladomo.scala_api
/**
 * Represents a Scala facade of Reladomo's MithraTransactionalObject.
 * The type must provide a MithraTransactionalObject which is consistent with the Scala object's state.
 *
 * All operations delegate to `underlying` and require an active [[Transaction]]
 * in implicit scope.
 */
trait NewTemporalTransactionalObject extends TemporalTransactionalObjectBase {

  /** Delegates to the underlying object's `insertUntil` with the given exclusive end timestamp. */
  def insertUntil(exclusiveUntil: Timestamp)(implicit tx: Transaction): Unit = {
    underlying.insertUntil(exclusiveUntil)
  }

  /** Delegates to the underlying object's `cascadeInsertUntil`. */
  def cascadeInsertUntil(exclusiveUntil: Timestamp)(implicit tx: Transaction): Unit = {
    underlying.cascadeInsertUntil(exclusiveUntil)
  }

  /** Delegates to the underlying object's `insertWithIncrement`. */
  def insertWithIncrement()(implicit tx: Transaction): Unit = {
    underlying.insertWithIncrement()
  }

  /** Delegates to the underlying object's `insertWithIncrementUntil`. */
  def insertWithIncrementUntil(exclusiveUntil: Timestamp)(implicit tx: Transaction): Unit = {
    underlying.insertWithIncrementUntil(exclusiveUntil)
  }
}
| folio-sec/reladomo-scala | reladomo-scala-common/src/main/scala/com/folio_sec/reladomo/scala_api/NewTemporalTransactionalObject.scala | Scala | apache-2.0 | 1,442 |
/*
* Copyright 2010 Michael Fortin <mike@brzy.org>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*/
package org.brzy.webapp.action.response
import javax.servlet.{ServletResponse, ServletRequest, RequestDispatcher}
import org.springframework.mock.web.{MockHttpServletResponse, MockRequestDispatcher, MockHttpServletRequest, MockServletContext}
import javax.servlet.http.HttpServletResponse
import org.scalatest.WordSpec
import org.scalatest.matchers.ShouldMatchers
import org.brzy.mock.{MockUserStore, UserController}
import org.brzy.webapp.action.args.Arg
class DefaultReturnSpec extends WordSpec with ShouldMatchers with Fixtures {

  /**
   * Builds a mock request whose dispatcher asserts the path it is asked to
   * forward to and runs `onDispatch` every time a dispatcher is created.
   * The extra no-op method stubs satisfy the abstract Servlet 3.0 methods
   * required by the mock's base class.
   */
  private def requestExpecting(expectedPath: String)(onDispatch: => Unit) =
    new MockHttpServletRequest(new MockServletContext()) {
      override def getRequestDispatcher(path: String): RequestDispatcher = {
        new MockRequestDispatcher(path) {
          assert(expectedPath.equals(path), s"expected $expectedPath, but was $path")
          onDispatch
          override def forward(fwdReq: ServletRequest, fwdRes: ServletResponse) {}
        }
      }
      def startAsync() = null
      def startAsync(p1: ServletRequest, p2: ServletResponse) = null
      def isAsyncStarted = false
      def isAsyncSupported = false
      def getAsyncContext = null
      def getDispatcherType = null
      def authenticate(p1: HttpServletResponse) = false
      def login(p1: String, p2: String) {}
      def logout() {}
      def getParts = null
      def getPart(p1: String) = null
    }

  /**
   * Runs the controller action registered under `actionPath`, asserts that its
   * declared view path equals `expectedView`, and verifies the response is
   * forwarded exactly once to `expectedDispatchPath`.
   */
  private def verifyDefaultReturn(actionPath: String, expectedView: String, expectedDispatchPath: String) {
    val ctlr = new UserController with MockUserStore
    val action = ctlr.actions.find(_.path == actionPath).get
    assert(action.view != null)
    assert(expectedView.equals(action.view.asInstanceOf[View].path))

    val result = action.execute(Array.empty[Arg], new PrincipalMock)
    assert(result != null)

    var callCount = 0
    val request = requestExpecting(expectedDispatchPath) { callCount = callCount + 1 }
    val response = new MockHttpServletResponse()
    ResponseHandler(action, result, request, response)
    assert(callCount == 1)
  }

  "Response Return" should {
    "default with no return" in {
      // TODO may be expecting the wrong thing, /user/list.ssp
      verifyDefaultReturn("", "list", "list.ssp")
    }
    "return default view" in {
      verifyDefaultReturn("get", "get", "/index.ssp")
    }
    "return default view again" in {
      verifyDefaultReturn("post", "post", "/users/page.ssp")
    }
  }
}
val fun2: (Int*) ⇒ Int = args ⇒ { /*start*/args/*end*/.reduce((a, b) ⇒ a+b)}
val res = fun2(1,1,1,1,1)
println(s"Res= $res")
//Seq[Int] | whorbowicz/intellij-scala | testdata/typeInference/bugs5/SCL9857.scala | Scala | apache-2.0 | 142 |
package actors
import java.io.{FileWriter, BufferedWriter, Writer, File}
import actors.SampleFiles.Format
import akka.actor.{Actor, Status}
import ch.weisenburger.nlp.stanford.util.MaxEntClassifierFeatureFactory
import ch.weisenburger.uima.types.distantsupervision.skala._
import org.slf4j.LoggerFactory
/**
 * Actor that persists distant-supervision samples for one extraction run.
 * `OpenExtractionRun` creates the on-disk [[SampleFiles]] layout, the Save*
 * messages append each sample in three formats (human readable, Stanford CRF
 * rows, Stanford MaxEnt rows), and `CloseExtractionRun` closes all writers.
 */
class SampleSaverActor extends Actor {

  private var log = LoggerFactory.getLogger(getClass)

  // State of the currently opened run; only one run may be open at a time.
  private var extractionRunId: String = _
  private var sampleFiles: SampleFiles = _

  def receive = {
    case OpenExtractionRun(extractionRunId) =>
      openExtractionRunId(extractionRunId)
      sender ! Status.Success("opened")
    case CloseExtractionRun(extractionRunId) =>
      closeExtractionRunId(extractionRunId)
      sender ! Status.Success("closed")
    case SavePositiveSamplesOfArticle(samples) =>
      saveSamples(samples)
    case SaveNegativeSamplesOfArticle(negativeSamples) =>
      saveNegativeSamples(negativeSamples)
  }

  // Creates (and resets, see SampleFiles) the file layout for the given run.
  private def openExtractionRunId(extractionRunId: String) = {
    this.extractionRunId = extractionRunId
    this.sampleFiles = new SampleFiles(s"data/samples/$extractionRunId/")
    log.info(s"Opened files for runId $extractionRunId")
  }

  private def closeExtractionRunId(extractionRunId: String) = {
    // we don't support multiple extraction runs at the same time
    assert(extractionRunId == this.extractionRunId)
    sampleFiles.close
    log.info(s"Closed files for runId $extractionRunId")
  }

  // Appends every positive sample in all three output formats.
  private def saveSamples(samples: Seq[Sample]) = for {
    sample <- samples
  } {
    saveHumanReadableRepresentation(sample)
    saveStanfordCRFTrainRepresentation(sample)
    saveStanfordMaxEntTrainRepresentation(sample)
  }

  // Writes a debugging-friendly dump: the sentence, the quad, and the character
  // offsets of the matched annotations aligned under the quad components.
  private def saveHumanReadableRepresentation(sample: Sample) = {
    val sentenceText = sample.sentenceText
    val sEntity = sample.sEntity
    val sRelation = sample.sRelation
    val sValue = sample.sValue
    val sTimex = sample.sTimex

    val quad = sample.quad
    val qEntity = quad.entity
    val qRelation = quad.relation
    val qValue = quad.value
    val qTimex = quad.timex

    val revs = sample.revisionNumber.mkString(", ")

    // Pad each "(begin, end)" column to the width of the quad component above it.
    val es = s"(${sEntity.begin}, ${sEntity.end})".padTo(qEntity.length, " ").mkString
    val rs = (sRelation match {
      case None => "?"
      case Some(r) => s"(${r.begin}, ${r.end})"
    }).padTo(qRelation.length, " ").mkString
    val vs = s"(${sValue.begin}, ${sValue.end})".padTo(qValue.length, " ").mkString
    val ts = (sTimex match {
      case None => "?"
      case Some(t) => s"(${t.begin}, ${t.end})"
    }).padTo(qTimex.getOrElse("?").length, " ").mkString

    // NOTE(review): `$qTimex` interpolates the Option itself (prints `Some(..)`/`None`),
    // while the padding above unwraps it via getOrElse — confirm this is intended.
    val textRepresentation =
      s"""
        |${sample.articleName}: $revs
        |Sentence:
        |${sentenceText}
        |  Quad: <$qEntity, $qRelation, $qValue, $qTimex>
        |         $es, $rs, $vs, $ts
      """.stripMargin

    sampleFiles.positive(qRelation, Format.Human, sTimex.isDefined)
      .append(textRepresentation)
  }

  private def saveStanfordCRFTrainRepresentation(sample: Sample) = {
    val hasTimex = sample.sTimex.isDefined
    val relationURI = sample.quad.relation
    val textRepresentation = toStanfordCRFTRepresentation(sample.tokens)
    sampleFiles.positive(relationURI, Format.CRF, hasTimex)
      .append(textRepresentation)
  }

  private def saveStanfordMaxEntTrainRepresentation(sample: Sample) = {
    val relationURI = sample.quad.relation
    val hasTimex = sample.sTimex.isDefined
    // Positive samples are labeled with the last component of the relation URI.
    val goldAnswer = models.Util.getLastUriComponent(relationURI)
    val textRepresentation = toStanfordMaxEntRepresentation(goldAnswer, sample.tokens, sample.sValue)
    sampleFiles.positive(relationURI, Format.MaxEnt, hasTimex)
      .append(textRepresentation)
  }

  // Appends every negative sample in all three output formats.
  private def saveNegativeSamples(negativeSamples: Seq[NegativeSample]) = for {
    negativeSample <- negativeSamples
  } {
    saveHumanReadableRepresentation(negativeSample)
    saveStanfordCRFTrainRepresentation(negativeSample)
    saveStanfordMaxEntTrainRepresentation(negativeSample)
  }

  private def saveHumanReadableRepresentation(negativeSample: NegativeSample) = {
    val revs = negativeSample.revisionNumber.mkString(",")
    val sentenceText = negativeSample.sentenceText
    // One line per formatted number: its surface text, parsed value and offsets.
    val formattedNumbers = negativeSample.formattedNumbers.map { n =>
      s"${sentenceText.substring(n.begin, n.end)}; ${n.parsedNumericValue} (${n.begin}/${n.end})"
    }.mkString("\\n  ")

    val textRepresentation =
      s"""
        |${negativeSample.articleName}: $revs
        |Sentence:
        |${sentenceText}
        |Numbers:
        |  $formattedNumbers
      """.stripMargin

    sampleFiles.negative.human.append(textRepresentation)
  }

  private def saveStanfordCRFTrainRepresentation(negativeSample: NegativeSample) = {
    val textRepresentation = toStanfordCRFTRepresentation(negativeSample.tokens)
    sampleFiles.negative.crf.append(textRepresentation)
  }

  private def saveStanfordMaxEntTrainRepresentation(negativeSample: NegativeSample) = {
    // Negative samples carry the "O" (no relation) label, one row per candidate number.
    for (value <- negativeSample.formattedNumbers) {
      val textRepresentation = toStanfordMaxEntRepresentation("O", negativeSample.tokens, value)
      sampleFiles.negative.maxEnt.append(textRepresentation)
    }
  }

  // One token per line ("label text lemma pos ner", tab separated); a blank line
  // terminates the sample, as expected by the Stanford CRF training format.
  private def toStanfordCRFTRepresentation(tokens: Seq[Token]) = {
    tokens
      .map(t => Seq(t.relationValueType.getOrElse("O"), t.text, t.lemma, t.posTag, t.namedEntityType.getOrElse("O")))
      .map(seq => seq.mkString("\\t")).mkString("\\n") + "\\n\\n" // add newline after end of sample
  }

  // One training row: gold answer followed by the extracted features, tab separated.
  private def toStanfordMaxEntRepresentation (goldAnswer: String, tokens: Seq[Token], value: Value) = {
    val features = MaxEntClassifierFeatureFactory.createFeatures(tokens, value)
    (Seq (goldAnswer) ++ features).mkString ("\\t") + "\\n"
  }
}
/**
 * Actor that appends positive sample *candidates* of processed articles to a
 * single per-run text file. `OpenExtractionRun` / `CloseExtractionRun` bracket
 * the lifetime of the file writer.
 */
class SampleCandidateSaverActor extends Actor {
  import ch.weisenburger.deprecated_ner.FileUtil

  // Never reassigned, so a val (was a var).
  val log = LoggerFactory.getLogger(getClass)

  var sampleCandidatesFileWriter: Writer = _

  def receive = {
    case OpenExtractionRun(extractionRunId) =>
      openExtractionRunId(extractionRunId)
      // Reply with a Status.Success *instance*, not the companion object, so
      // ask-based callers receive a proper message — consistent with SampleSaverActor.
      sender ! Status.Success("opened")
    case CloseExtractionRun(extractionRunId) =>
      closeExtractionRunId(extractionRunId)
      sender ! Status.Success("closed")
    case SavePositiveSampleCandidatesOfArticle(sampleCandidates) =>
      log.info(s"received ${sampleCandidates.size}")
      saveSampleCandidates(sampleCandidates)
  }

  /** Opens the candidates file for the given run, creating it if necessary. */
  private def openExtractionRunId(extractionRunId: String) = {
    val sampleCandidatesFile = FileUtil.ensureExists(s"data/samples/$extractionRunId/sample_candidates.txt")
    sampleCandidatesFileWriter = new BufferedWriter(new FileWriter(sampleCandidatesFile))
    log.info(s"opened files for runId $extractionRunId")
  }

  /** Closes and releases the candidates file writer. */
  private def closeExtractionRunId(extractionRunId: String) = {
    sampleCandidatesFileWriter.close
    sampleCandidatesFileWriter = null
    log.info(s"closed files for runId $extractionRunId")
  }

  // Appends one human-readable record per candidate: the sentence plus all
  // annotated entities (S), relations (P), values (O) and timexes (T).
  private def saveSampleCandidates(sampleCandidates: Seq[SampleCandidate]) = for {
    sc <- sampleCandidates
    sentenceText = sc.sentenceText
    relations = sc.relations
    values = sc.values
    timexes = sc.timexes
    entities = sc.entities
  } {
    val strRelations = relations.map(r =>
      s"${r.dbpediaOntologyUri} (${r.begin}, ${r.end})").mkString("; ")
    val strValues = values.map(v =>
      s"${v.parsedNumericValue} (${v.begin}, ${v.end})").mkString("; ")
    val strTimexes = timexes.map(t =>
      s"${t.value} (${t.begin}, ${t.end})").mkString("; ")
    val strEntities = entities.map(e =>
      s"${e.dbpediaResourceUri} (${e.begin}, ${e.end})").mkString("; ")

    val revs = sc.revisionNumber.mkString(", ")

    sampleCandidatesFileWriter.append(
      s"""
        |${sc.articleName}: $revs
        |Sentence:
        |  ${sentenceText}
        |  S:$strEntities
        |  P:$strRelations
        |  O:$strValues
        |  T:$strTimexes
      """.stripMargin)
  }
}
object SampleFiles {
  /** The output formats a sample can be serialized to. */
  object Format extends Enumeration {
    type Format = Value
    val Human, CRF, MaxEnt = Value
  }
}
/**
 * On-disk file layout for one extraction run. Deletes any leftover layout under
 * `runDirPath`, then provides one writer per negative-sample format and — lazily,
 * keyed by relation URI — one writer bundle per relation for positive samples.
 */
class SampleFiles(runDirPath: String) {
  import SampleFiles.Format._
  import ch.weisenburger.deprecated_ner.FileUtil

  val posDirPath = runDirPath + "positive_samples/"
  val negDirPath = runDirPath + "negative_samples/"

  // ensure we don't have an old file structure present
  FileUtil.deleteFolder(new File(posDirPath))
  FileUtil.deleteFolder(new File(negDirPath))

  // Negative-sample writers are shared across all relations, hence one lazy bundle.
  private lazy val negSamplesWriters = NegativeSampleFileWriters(
    newWriter(negDirPath + "human.txt"),
    newWriter(negDirPath + "crf.tsv"),
    newWriter(negDirPath + "maxent.tsv")
  )

  // Positive-sample writer bundles, opened on first use and cached per relation URI.
  private val posSamplesWriters: collection.mutable.Map[String, PositiveSampleFileWritersOfRelation] = collection.mutable.HashMap.empty

  def negative = negSamplesWriters

  /** Selects the positive-sample writer for the given relation, format and timex presence. */
  def positive(relationUri: String, format: Format, hasTimex: Boolean): Writer = {
    val writers = positive(relationUri)
    (hasTimex, format) match {
      case (true, Human) => writers.withTimexHuman
      case (false, Human) => writers.withoutTimexHuman
      case (true, CRF) => writers.withTimexCRF
      case (false, CRF) => writers.withoutTimexCRF
      case (true, MaxEnt) => writers.withTimexMaxEnt
      case (false, MaxEnt) => writers.withoutTimexMaxEnt
      case _ => throw new IllegalArgumentException(
        s"Invalid hasTimex, file format combination: $hasTimex; $format ")
    }
  }

  /** Returns the cached writer bundle for the relation URI, opening it on first use. */
  def positive(relationURI: String) =
    posSamplesWriters.getOrElse(relationURI, {
      val relationFolderName = models.Util.getLastUriComponent(relationURI)
      val writers = openForRelation(relationFolderName)
      posSamplesWriters(relationURI) = writers
      writers
    })

  // Opens the six per-relation files (human/CRF/MaxEnt x with/without timex).
  private def openForRelation(relationFolderName: String) = {
    PositiveSampleFileWritersOfRelation(
      newWriter(posDirPath + s"$relationFolderName/withTimex.txt"),
      newWriter(posDirPath + s"$relationFolderName/withoutTimex.txt"),
      newWriter(posDirPath + s"$relationFolderName/withTimexCRF.tsv"),
      newWriter(posDirPath + s"$relationFolderName/withoutTimexCRF.tsv"),
      newWriter(posDirPath + s"$relationFolderName/withTimexMaxEnt.tsv"),
      newWriter(posDirPath + s"$relationFolderName/withoutTimexMaxEnt.tsv")
    )
  }

  // Writers flush after every write so files stay readable during a long run.
  private def newWriter(filePath: String) = {
    val file = FileUtil.ensureExists(filePath)
    new AlwaysFlushWriter(new BufferedWriter(new FileWriter(file)))
  }

  /** Closes every opened writer and clears the per-relation cache. */
  def close = {
    posSamplesWriters.foreach { case (_, w) => w.close}
    posSamplesWriters.clear
    negSamplesWriters.close
  }
}
/**
 * Decorates a [[java.io.Writer]] so that every chunk written through it is
 * flushed to the underlying writer immediately, keeping the files on disk
 * up to date while a long-running process is still appending to them.
 */
class AlwaysFlushWriter(writer: Writer) extends Writer {

  /** Writes the chunk and immediately flushes it through to the target writer. */
  override def write(cbuf: Array[Char], off: Int, len: Int): Unit = {
    writer.write(cbuf, off, len)
    writer.flush()
  }

  /** Forwards the flush to the target writer. */
  override def flush(): Unit = writer.flush()

  /** Forwards the close to the target writer. */
  override def close(): Unit = writer.close()
}
/** Bundles the six per-relation output writers (human/CRF/MaxEnt, each with and without timex). */
case class PositiveSampleFileWritersOfRelation(withTimexHuman: Writer, withoutTimexHuman: Writer, withTimexCRF: Writer, withoutTimexCRF: Writer, withTimexMaxEnt: Writer, withoutTimexMaxEnt: Writer) {
  /** Closes every writer of the bundle, in declaration order. */
  def close = Seq(withTimexHuman, withoutTimexHuman, withTimexCRF, withoutTimexCRF, withTimexMaxEnt, withoutTimexMaxEnt).foreach(_.close())
}
/** Bundles the three negative-sample output writers (human, CRF and MaxEnt formats). */
case class NegativeSampleFileWriters(human: Writer, crf: Writer, maxEnt: Writer) {
  /** Closes all three writers, in declaration order. */
  def close = Seq(human, crf, maxEnt).foreach(_.close())
}
// Actor protocol: Open/Close bracket one extraction run; the Save* messages carry
// one article's extraction results to be appended to that run's sample files.
case class OpenExtractionRun(extractionRunId: String)
case class CloseExtractionRun(extractionRunId: String)
case class SavePositiveSamplesOfArticle(samples: Seq[Sample])
case class SaveNegativeSamplesOfArticle(negativeSamples: Seq[NegativeSample])
case class SavePositiveSampleCandidatesOfArticle(sampleCandidates: Seq[SampleCandidate])
| normalerweise/mte | app/actors/sampleSaverActors.scala | Scala | gpl-2.0 | 11,675 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.codegen
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.AggregateCall
import org.apache.calcite.rex._
import org.apache.calcite.sql.SqlAggFunction
import org.apache.calcite.tools.RelBuilder
import org.apache.flink.api.common.functions.Function
import org.apache.flink.cep.functions.PatternProcessFunction
import org.apache.flink.cep.pattern.conditions.{IterativeCondition, RichIterativeCondition}
import org.apache.flink.configuration.Configuration
import org.apache.flink.table.api.{TableConfig, TableException}
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.codegen.CodeGenUtils._
import org.apache.flink.table.codegen.GenerateUtils.{generateNullLiteral, generateRowtimeAccess}
import org.apache.flink.table.codegen.GeneratedExpression.{NEVER_NULL, NO_CODE}
import org.apache.flink.table.codegen.Indenter.toISC
import org.apache.flink.table.codegen.MatchCodeGenerator._
import org.apache.flink.table.codegen.agg.AggsHandlerCodeGenerator
import org.apache.flink.table.dataformat.{BaseRow, GenericRow}
import org.apache.flink.table.dataview.PerKeyStateDataViewStore
import org.apache.flink.table.functions.sql.FlinkSqlOperatorTable._
import org.apache.flink.table.generated.GeneratedFunction
import org.apache.flink.table.plan.util.AggregateUtil
import org.apache.flink.table.plan.util.MatchUtil.AggregationPatternVariableFinder
import org.apache.flink.table.runtime.`match`.{IterativeConditionRunner, PatternProcessFunctionRunner}
import org.apache.flink.table.types.logical.{RowType, TimestampKind, TimestampType}
import org.apache.flink.table.utils.EncodingUtils
import org.apache.flink.util.Collector
import org.apache.flink.util.MathUtils.checkedDownCast
import java.lang.{Long => JLong}
import java.util
import _root_.scala.collection.JavaConversions._
import _root_.scala.collection.JavaConverters._
import _root_.scala.collection.mutable
/**
* A code generator for generating CEP related functions.
*
* Aggregates are generated as follows:
* 1. all aggregate [[RexCall]]s are grouped by corresponding pattern variable
* 2. even if the same aggregation is used multiple times in an expression
* (e.g. SUM(A.price) > SUM(A.price) + 1) it will be calculated once. To do so [[AggBuilder]]
* keeps set of already seen different aggregation calls, and reuses the code to access
* appropriate field of aggregation result
* 3. after translating every expression (either in [[generateCondition]] or in
* [[generateOneRowPerMatchExpression]]) there will be generated code for
* - [[GeneratedFunction]], which will be an inner class
* - said [[GeneratedFunction]] will be instantiated in the ctor and opened/closed
* in corresponding methods of top level generated classes
* - function that transforms input rows (row by row) into aggregate input rows
* - function that calculates aggregates for variable, that uses the previous method
* The generated code will look similar to this:
*
* @param ctx the cotext of the code generator
* @param nullableInput input(s) can be null.
* @param patternNames sorted sequence of pattern variables
* @param currentPattern if generating condition the name of pattern, which the condition will
* be applied to
*/
class MatchCodeGenerator(
ctx: CodeGeneratorContext,
relBuilder: RelBuilder,
nullableInput: Boolean,
patternNames: Seq[String],
currentPattern: Option[String] = None,
collectorTerm: String = CodeGenUtils.DEFAULT_COLLECTOR_TERM)
extends ExprCodeGenerator(ctx, nullableInput) {
  /** Result term plus the statements that materialize one pattern variable's event list. */
  private case class GeneratedPatternList(resultTerm: String, code: String)

  /**
    * Used to assign unique names for the list of events per pattern variable name. Those lists
    * are treated as inputs and are needed by input access code.
    */
  private val reusablePatternLists: mutable.HashMap[String, GeneratedPatternList] =
    mutable.HashMap[String, GeneratedPatternList]()

  /**
    * Used to deduplicate aggregation calculations. The deduplication is performed by
    * [[RexNode#toString]]. Those expressions need to be accessible from splits, if such exist.
    */
  private val reusableAggregationExpr = new mutable.HashMap[String, GeneratedExpression]()

  /**
    * Context information used by a pattern reference variable to index rows mapped to it.
    * Indexes the element at `offset`, either from the beginning or the end depending on `first`.
    */
  private var offset: Int = 0
  private var first : Boolean = false

  /**
    * Flag that tells if we generate expressions inside an aggregate. It tells how to access the
    * input row.
    */
  private var isWithinAggExprState: Boolean = false

  /**
    * Used to collect all aggregates per pattern variable.
    */
  private val aggregatesPerVariable = new mutable.HashMap[String, AggBuilder]

  /**
    * Name of the term in the generated function used to transform an input row into an
    * aggregate input row.
    */
  private val inputAggRowTerm = "inAgg"

  /** Term for the row used for key extraction. */
  private val keyRowTerm = "keyRow"

  /** Term of the generated member holding the sorted pattern variable names. */
  private val patternNamesTerm = newName("patternNames")

  private lazy val eventTypeTerm = boxedTypeTermForType(input1Type)
  /**
    * Sets the new reference variable indexing context. This should be used when resolving logical
    * offsets = LAST/FIRST
    *
    * @param first true if indexing from the beginning, false otherwise
    * @param offset offset from either beginning or the end
    */
  private def updateOffsets(first: Boolean, offset: Int): Unit = {
    this.first = first
    this.offset = offset
  }

  /** Resets the indexing context of the pattern variable to its default values. */
  private def resetOffsets(): Unit = {
    first = false
    offset = 0
  }
private def reusePatternLists(): String = {
reusablePatternLists.values.map(_.code).mkString("\\n")
}
private def addReusablePatternNames(): Unit = {
ctx.addReusableMember(s"private String[] $patternNamesTerm = new String[] { ${
patternNames.map(p => s""""${EncodingUtils.escapeJava(p)}"""").mkString(", ")
} };")
}
/**
* Generates a wrapper [[IterativeConditionRunner]] around code generated [[IterativeCondition]]
* for a single pattern definition defined in DEFINE clause.
*
* @param patternDefinition pattern definition as defined in DEFINE clause
* @return a code generated condition that can be used in constructing a
* [[org.apache.flink.cep.pattern.Pattern]]
*/
def generateIterativeCondition(patternDefinition: RexNode): IterativeCondition[BaseRow] = {
val condition = generateCondition(patternDefinition)
val body =
s"""
|${condition.code}
|return ${condition.resultTerm};
|""".stripMargin
val genCondition = generateMatchFunction(
"MatchRecognizeCondition",
classOf[RichIterativeCondition[BaseRow]],
body)
new IterativeConditionRunner(genCondition)
}
  /**
    * Generates a wrapper [[PatternProcessFunctionRunner]] around code generated
    * [[PatternProcessFunction]] that transforms found matches into the expected output as defined
    * in the MEASURES clause. It also accounts for fields used in PARTITION BY.
    *
    * @param returnType the row type of output row
    * @param partitionKeys keys used for partitioning incoming data, they will be included in the
    *                      output
    * @param measures definitions from MEASURE clause
    * @return a process function that can be applied to [[org.apache.flink.cep.PatternStream]]
    */
  def generateOneRowPerMatchExpression(
      returnType: RowType,
      partitionKeys: util.List[RexNode],
      measures: util.Map[String, RexNode])
    : PatternProcessFunctionRunner = {
    val resultExpression = generateOneRowPerMatchExpression(
      partitionKeys,
      measures,
      returnType)
    // The generated processMatch() evaluates the measures and emits one output row.
    val body =
      s"""
        |${resultExpression.code}
        |$collectorTerm.collect(${resultExpression.resultTerm});
        |""".stripMargin

    val genFunction = generateMatchFunction(
      "MatchRecognizePatternProcessFunction",
      classOf[PatternProcessFunction[BaseRow, BaseRow]],
      body)
    new PatternProcessFunctionRunner(genFunction)
  }
  /**
    * Generates a [[org.apache.flink.api.common.functions.Function]] that can be passed to Java
    * compiler.
    *
    * @param name Class name of the Function. Does not need to be unique but has to be a valid
    *             Java class identifier.
    * @param clazz Flink Function to be generated.
    * @param bodyCode code contents of the SAM (Single Abstract Method). Inputs, collector, or
    *                 output record can be accessed via the given term methods.
    * @tparam F Flink Function to be generated.
    * @tparam T Return type of the Flink Function.
    * @return instance of GeneratedFunction
    */
  private def generateMatchFunction[F <: Function, T <: Any](
      name: String,
      clazz: Class[F],
      bodyCode: String)
    : GeneratedFunction[F] = {
    val funcName = newName(name)
    val collectorTypeTerm = classOf[Collector[Any]].getCanonicalName

    // Two supported SAM shapes: filter(...) for DEFINE conditions,
    // processMatch(...) for the MEASURES projection.
    val (functionClass, signature, inputStatements) =
      if (clazz == classOf[RichIterativeCondition[_]]) {
        val inputTypeTerm = boxedTypeTermForType(input1Type)
        val baseClass = classOf[RichIterativeCondition[_]]
        val contextType = classOf[IterativeCondition.Context[_]].getCanonicalName

        (baseClass,
          s"boolean filter(Object _in1, $contextType $contextTerm)",
          List(s"$inputTypeTerm $input1Term = ($inputTypeTerm) _in1;"))
      } else if (clazz == classOf[PatternProcessFunction[_, _]]) {
        val baseClass = classOf[PatternProcessFunction[_, _]]
        val inputTypeTerm =
          s"java.util.Map<String, java.util.List<${boxedTypeTermForType(input1Type)}>>"
        val contextTypeTerm = classOf[PatternProcessFunction.Context].getCanonicalName

        (baseClass,
          s"void processMatch($inputTypeTerm $input1Term, $contextTypeTerm $contextTerm, " +
            s"$collectorTypeTerm $collectorTerm)",
          List())
      } else throw new CodeGenException("Unsupported Function.")

    // Java source template; reusable sections come from the shared codegen context.
    val funcCode =
      j"""
      public class $funcName extends ${functionClass.getCanonicalName} {

        ${ctx.reuseMemberCode()}

        ${ctx.reuseLocalVariableCode()}

        public $funcName(Object[] references) throws Exception {
          ${ctx.reuseInitCode()}
        }

        @Override
        public void open(${classOf[Configuration].getCanonicalName} parameters) throws Exception {
          ${ctx.reuseOpenCode()}
        }

        @Override
        public $signature throws Exception {
          ${inputStatements.mkString("\\n")}
          ${reusePatternLists()}
          ${ctx.reusePerRecordCode()}
          ${ctx.reuseInputUnboxingCode()}
          $bodyCode
        }

        @Override
        public void close() throws Exception {
          ${ctx.reuseCloseCode()}
        }
      }
    """.stripMargin

    new GeneratedFunction[F](funcName, funcCode, ctx.references.toArray)
  }
/**
  * Builds the result expression for the "ONE ROW PER MATCH" output mode.
  *
  * @param partitionKeys partition columns (expected to be [[RexInputRef]]s)
  * @param measures      measure name -> defining expression
  * @param returnType    output row type; its field order drives the measure order
  */
private def generateOneRowPerMatchExpression(
    partitionKeys: java.util.List[RexNode],
    measures: java.util.Map[String, RexNode],
    returnType: RowType): GeneratedExpression = {
  // For "ONE ROW PER MATCH", the output columns include:
  // 1) the partition columns;
  // 2) the columns defined in the measures clause.
  val resultExprs =
    partitionKeys.asScala.map { case inputRef: RexInputRef =>
      generatePartitionKeyAccess(inputRef)
    } ++ returnType.getFieldNames.filter(measures.containsKey(_)).map { fieldName =>
      generateExpression(measures.get(fieldName))
    }
  val resultCodeGenerator = new ExprCodeGenerator(ctx, nullableInput)
    .bindInput(input1Type, inputTerm = input1Term)
  val resultExpression = resultCodeGenerator.generateResultExpression(
    resultExprs,
    returnType,
    classOf[GenericRow])
  // translating the measures may have registered per-variable aggregates;
  // emit their generated aggregation helper functions now
  aggregatesPerVariable.values.foreach(_.generateAggFunction())
  resultExpression
}
/**
  * Generates the code for a single DEFINE condition and then flushes the
  * aggregation helper functions collected while translating it.
  */
private def generateCondition(call: RexNode): GeneratedExpression = {
  val conditionExpr = call.accept(this)
  // translating the condition may have registered per-variable aggregates
  aggregatesPerVariable.values.foreach(_.generateAggFunction())
  conditionExpr
}
/**
  * Translates MATCH_RECOGNIZE-specific calls (PREV/NEXT, FIRST/LAST, FINAL,
  * aggregates and time indicators); everything else is delegated to the
  * parent expression code generator.
  */
override def visitCall(call: RexCall): GeneratedExpression = {
  call.getOperator match {
    case PREV | NEXT =>
      val countLiteral = call.getOperands.get(1).asInstanceOf[RexLiteral]
      val count = checkedDownCast(countLiteral.getValueAs(classOf[JLong]))
      if (count != 0) {
        throw new TableException("Flink does not support physical offsets within partition.")
      } else {
        // PREV/NEXT with offset 0 simply references the current row
        updateOffsets(first = false, 0)
        val exp = call.getOperands.get(0).accept(this)
        resetOffsets()
        exp
      }
    case FIRST | LAST =>
      // logical offset into the rows matched so far by a pattern variable;
      // the offset state is active only while translating the operand
      val countLiteral = call.getOperands.get(1).asInstanceOf[RexLiteral]
      val offset = checkedDownCast(countLiteral.getValueAs(classOf[JLong]))
      updateOffsets(call.getOperator == FIRST, offset)
      val patternExp = call.operands.get(0).accept(this)
      resetOffsets()
      patternExp
    case FINAL => call.getOperands.get(0).accept(this)
    case _: SqlAggFunction =>
      // route the aggregate to the AggBuilder of its (single) pattern variable
      val variable = call.accept(new AggregationPatternVariableFinder)
        .getOrElse(throw new TableException("No pattern variable specified in aggregate"))
      val matchAgg = aggregatesPerVariable.get(variable) match {
        case Some(agg) => agg
        case None =>
          val agg = new AggBuilder(variable)
          aggregatesPerVariable(variable) = agg
          agg
      }
      matchAgg.generateDeduplicatedAggAccess(call)
    case MATCH_PROCTIME =>
      // attribute is proctime indicator.
      // We use a null literal and generate a timestamp when we need it.
      generateNullLiteral(
        new TimestampType(true, TimestampKind.PROCTIME, 3),
        ctx.nullCheck)
    case MATCH_ROWTIME =>
      generateRowtimeAccess(ctx, contextTerm)
    case PROCTIME_MATERIALIZE =>
      // override proctime materialize code generation
      // because there is no timerService in PatternProcessFunction#Context
      generateProctimeTimestamp()
    case _ => super.visitCall(call)
  }
}
/**
  * Materializes processing time by reading it from the pattern-processing
  * context (there is no timerService available in this function kind).
  */
private def generateProctimeTimestamp(): GeneratedExpression = {
  val resultTerm = ctx.addReusableLocalVariable("long", "result")
  val resultCode = s"$resultTerm = $contextTerm.currentProcessingTime();"
  // the proctime has been materialized, so it's TIMESTAMP now, not PROCTIME_INDICATOR
  GeneratedExpression(resultTerm, NEVER_NULL, resultCode, new TimestampType(3))
}
/**
  * Generates access to a partition key column.
  *
  * Partition keys are constant across all rows of a match, so the value can be
  * read from an arbitrary event of the match; [[generateKeyRow]] provides one.
  *
  * @param partitionKey partition key to be extracted
  * @return generated code for the given key
  */
private def generatePartitionKeyAccess(partitionKey: RexInputRef): GeneratedExpression = {
  val anyMatchedRow = generateKeyRow()
  GenerateUtils.generateFieldAccess(
    ctx,
    anyMatchedRow.resultType,
    anyMatchedRow.resultTerm,
    partitionKey.getIndex)
}
/**
  * Generates (and caches as a reusable input-unboxing expression) a reference
  * to the first event of the match, used for reading partition key values.
  */
private def generateKeyRow(): GeneratedExpression = {
  val exp = ctx.getReusableInputUnboxingExprs(keyRowTerm, 0) match {
    case Some(expr) =>
      expr
    case None =>
      val nullTerm = newName("isNull")
      ctx.addReusableMember(s"$eventTypeTerm $keyRowTerm;")
      // pick the first row of the first non-empty pattern variable in the match
      val keyCode =
        j"""
           |boolean $nullTerm = true;
           |for (java.util.Map.Entry entry : $input1Term.entrySet()) {
           | java.util.List value = (java.util.List) entry.getValue();
           | if (value != null && value.size() > 0) {
           | $keyRowTerm = ($eventTypeTerm) value.get(0);
           | $nullTerm = false;
           | break;
           | }
           |}
           |""".stripMargin
      val exp = GeneratedExpression(keyRowTerm, nullTerm, keyCode, input1Type)
      ctx.addReusableInputUnboxingExprs(keyRowTerm, 0, exp)
      exp
  }
  // the lookup code is emitted once through the reusable unboxing section,
  // so strip it from the returned expression
  exp.copy(code = NO_CODE)
}
/**
  * Generates access to a field addressed via a pattern variable,
  * e.g. `A.price` or `*.price`.
  */
override def visitPatternFieldRef(fieldRef: RexPatternFieldRef): GeneratedExpression = {
  if (isWithinAggExprState) {
    // inside a generated aggregate-input transformation: read from the single
    // row currently being folded into the accumulator
    GenerateUtils.generateFieldAccess(ctx, input1Type, inputAggRowTerm, fieldRef.getIndex)
  } else if (fieldRef.getAlpha.equals(ALL_PATTERN_VARIABLE) &&
      currentPattern.isDefined && offset == 0 && !first) {
    // "*" with no logical offset inside a DEFINE clause refers to the current input row
    GenerateUtils.generateInputAccess(
      ctx, input1Type, input1Term, fieldRef.getIndex, nullableInput)
  } else {
    generatePatternFieldRef(fieldRef)
  }
}
/**
  * Generates code that materializes the list of events bound to `patternName`
  * inside a DEFINE clause. Events are read from the matching context; if the
  * referenced variable is the one currently being defined (or "*"), the row
  * under evaluation is appended as well.
  *
  * @param patternName    referenced pattern variable (may be "*")
  * @param currentPattern pattern variable currently being defined
  */
private def generateDefinePatternVariableExp(
    patternName: String,
    currentPattern: String)
  : GeneratedPatternList = {
  val Seq(listName, eventNameTerm) = newNames("patternEvents", "event")
  ctx.addReusableMember(s"java.util.List $listName;")
  // the current input row belongs to the variable under definition
  val addCurrent = if (currentPattern == patternName || patternName == ALL_PATTERN_VARIABLE) {
    j"""
       |$listName.add($input1Term);
       |""".stripMargin
  } else {
    ""
  }
  val listCode = if (patternName == ALL_PATTERN_VARIABLE) {
    // "*": concatenate the events of every pattern variable, in pattern order
    addReusablePatternNames()
    val patternTerm = newName("pattern")
    j"""
       |$listName = new java.util.ArrayList();
       |for (String $patternTerm : $patternNamesTerm) {
       | for ($eventTypeTerm $eventNameTerm :
       | $contextTerm.getEventsForPattern($patternTerm)) {
       | $listName.add($eventNameTerm);
       | }
       |}
       |""".stripMargin
  } else {
    // the name is embedded in a Java string literal, so it must be escaped
    val escapedPatternName = EncodingUtils.escapeJava(patternName)
    j"""
       |$listName = new java.util.ArrayList();
       |for ($eventTypeTerm $eventNameTerm :
       | $contextTerm.getEventsForPattern("$escapedPatternName")) {
       | $listName.add($eventNameTerm);
       |}
       |""".stripMargin
  }
  val code =
    j"""
       |$listCode
       |$addCurrent
       |""".stripMargin
  GeneratedPatternList(listName, code)
}
/**
  * Generates code that materializes the list of rows bound to `patternName`
  * inside a MEASURES clause. Rows are looked up in the
  * pattern-name -> rows map handed to the generated PatternProcessFunction.
  */
private def generateMeasurePatternVariableExp(patternName: String): GeneratedPatternList = {
  val Seq(listName, patternTerm) = newNames("patternEvents", "pattern")
  ctx.addReusableMember(s"java.util.List $listName;")
  val code = if (patternName == ALL_PATTERN_VARIABLE) {
    // "*": concatenate the rows of every pattern variable, in pattern order
    addReusablePatternNames()
    j"""
       |$listName = new java.util.ArrayList();
       |for (String $patternTerm : $patternNamesTerm) {
       | java.util.List rows = (java.util.List) $input1Term.get($patternTerm);
       | if (rows != null) {
       | $listName.addAll(rows);
       | }
       |}
       |""".stripMargin
  } else {
    // the name is embedded in a Java string literal, so it must be escaped;
    // an unmatched variable yields an empty list rather than null
    val escapedPatternName = EncodingUtils.escapeJava(patternName)
    j"""
       |$listName = (java.util.List) $input1Term.get("$escapedPatternName");
       |if ($listName == null) {
       | $listName = java.util.Collections.emptyList();
       |}
       |""".stripMargin
  }
  GeneratedPatternList(listName, code)
}
/**
  * Generates code that resolves the event of a pattern variable at the current
  * logical offset (FIRST counts from the start of the matched rows, LAST from
  * the end) and returns it together with a null flag that signals that no
  * event exists at that position.
  */
private def findEventByLogicalPosition(patternFieldAlpha: String): GeneratedExpression = {
  val Seq(rowNameTerm, isRowNull) = newNames("row", "isRowNull")
  val listName = findEventsByPatternName(patternFieldAlpha).resultTerm
  val resultIndex = if (first) {
    j"""$offset"""
  } else {
    j"""$listName.size() - $offset - 1"""
  }
  val funcCode =
    j"""
       |$eventTypeTerm $rowNameTerm = null;
       |boolean $isRowNull = true;
       |if ($listName.size() > $offset) {
       | $rowNameTerm = (($eventTypeTerm) $listName.get($resultIndex));
       | $isRowNull = false;
       |}
       |""".stripMargin
  // Fix: return the generated null flag as the expression's null term. It was
  // previously the empty string, which discarded the null information that the
  // generated code above tracks in $isRowNull and left the flag dead.
  GeneratedExpression(rowNameTerm, isRowNull, funcCode, input1Type)
}
/**
  * Returns (and caches per pattern variable) the generated list of events
  * bound to the given variable. Inside a DEFINE clause (currentPattern set)
  * the events come from the matching context; inside MEASURES they come from
  * the input map of the generated process function.
  */
private def findEventsByPatternName(patternFieldAlpha: String): GeneratedPatternList = {
  reusablePatternLists.getOrElseUpdate(patternFieldAlpha, {
    currentPattern match {
      case Some(p) => generateDefinePatternVariableExp(patternFieldAlpha, p)
      case None => generateMeasurePatternVariableExp(patternFieldAlpha)
    }
  })
}
/**
  * Generates (nullable) access to a field of a pattern variable reference,
  * caching the row lookup per (variable, FIRST/LAST flag, logical offset).
  */
private def generatePatternFieldRef(fieldRef: RexPatternFieldRef): GeneratedExpression = {
  val escapedAlpha = EncodingUtils.escapeJava(fieldRef.getAlpha)
  // cache key encodes variable name and FIRST/LAST flag; the logical offset is
  // the second component of the reusable-unboxing-expression key
  val patternVariableRef = ctx.getReusableInputUnboxingExprs(
    s"$escapedAlpha#$first", offset) match {
    case Some(expr) =>
      expr
    case None =>
      val exp = findEventByLogicalPosition(fieldRef.getAlpha)
      ctx.addReusableInputUnboxingExprs(s"$escapedAlpha#$first", offset, exp)
      exp
  }
  // the resolved row may be null (no event at the logical position), so the
  // field access itself must be null-safe
  GenerateUtils.generateNullableInputFieldAccess(
    ctx,
    patternVariableRef.resultType,
    patternVariableRef.resultTerm,
    fieldRef.getIndex)
}
/**
  * Collects all aggregate calls that refer to the same pattern variable and
  * generates one shared aggregation helper function for them. Identical
  * aggregate calls (by textual form) are de-duplicated and share a single
  * generated access expression.
  */
class AggBuilder(variable: String) {

  // aggregate calls registered for this variable, in insertion order; the
  // position in this buffer is the aggregate's field index in the result row
  private val aggregates = new mutable.ListBuffer[RexCall]()

  // unique suffix shared by all members generated for this variable
  private val variableUID = newName("variable")

  private val calculateAggFuncName = s"calculateAgg_$variableUID"

  /**
    * Returns the access expression for `aggCall`, registering it on first use.
    * Repeated identical calls reuse the cached expression with its code
    * stripped (the code is emitted once as a per-record statement).
    */
  def generateDeduplicatedAggAccess(aggCall: RexCall): GeneratedExpression = {
    reusableAggregationExpr.get(aggCall.toString) match {
      case Some(expr) =>
        expr
      case None =>
        val exp: GeneratedExpression = generateAggAccess(aggCall)
        aggregates += aggCall
        reusableAggregationExpr(aggCall.toString) = exp
        ctx.addReusablePerRecordStatement(exp.code)
        exp.copy(code = NO_CODE)
    }
  }

  /**
    * Generates code that invokes the per-variable aggregation function and
    * extracts this aggregate's value (plus null flag) from the resulting row.
    *
    * Note: `aggregates.size` is read BEFORE the call is appended by
    * [[generateDeduplicatedAggAccess]], so it equals the index this aggregate
    * will occupy in the result row.
    */
  private def generateAggAccess(aggCall: RexCall): GeneratedExpression = {
    val singleAggResultTerm = newName("result")
    val singleAggNullTerm = newName("nullTerm")
    val singleAggResultType = FlinkTypeFactory.toLogicalType(aggCall.`type`)
    val primitiveSingleAggResultTypeTerm = primitiveTypeTermForType(singleAggResultType)
    val boxedSingleAggResultTypeTerm = boxedTypeTermForType(singleAggResultType)
    val allAggRowTerm = s"aggRow_$variableUID"
    val rowsForVariableCode = findEventsByPatternName(variable)
    // run all aggregates of this variable once per record over its event list
    val codeForAgg =
      j"""
         |$GENERIC_ROW $allAggRowTerm = $calculateAggFuncName(${rowsForVariableCode.resultTerm});
         |""".stripMargin
    ctx.addReusablePerRecordStatement(codeForAgg)
    val defaultValue = primitiveDefaultValue(singleAggResultType)
    val codeForSingleAgg = if (ctx.nullCheck) {
      j"""
         |boolean $singleAggNullTerm;
         |$primitiveSingleAggResultTypeTerm $singleAggResultTerm;
         |if ($allAggRowTerm.getField(${aggregates.size}) != null) {
         | $singleAggResultTerm = ($boxedSingleAggResultTypeTerm) $allAggRowTerm
         | .getField(${aggregates.size});
         | $singleAggNullTerm = false;
         |} else {
         | $singleAggNullTerm = true;
         | $singleAggResultTerm = $defaultValue;
         |}
         |""".stripMargin
    } else {
      j"""
         |$primitiveSingleAggResultTypeTerm $singleAggResultTerm =
         | ($boxedSingleAggResultTypeTerm) $allAggRowTerm.getField(${aggregates.size});
         |""".stripMargin
    }
    ctx.addReusablePerRecordStatement(codeForSingleAgg)
    GeneratedExpression(singleAggResultTerm, singleAggNullTerm, NO_CODE, singleAggResultType)
  }

  /**
    * Generates the aggregation handler plus the input-row transformation for
    * all aggregates collected so far, and wires instantiation/open/close of
    * the handler into the enclosing generated function.
    */
  def generateAggFunction(): Unit = {
    val matchAgg = extractAggregatesAndExpressions
    // Calcite AggregateCall instances over the de-duplicated input expressions
    val aggCalls = matchAgg.aggregations.map(a => AggregateCall.create(
      a.sqlAggFunction,
      false,
      false,
      a.exprIndices,
      -1,
      a.resultType,
      a.sqlAggFunction.getName))
    val needRetraction = matchAgg.aggregations.map(_ => false).toArray
    val typeFactory = relBuilder.getTypeFactory.asInstanceOf[FlinkTypeFactory]
    val inputRelType = typeFactory.createStructType(
      matchAgg.inputExprs.map(_.getType),
      matchAgg.inputExprs.indices.map(i => s"TMP$i"))
    val aggInfoList = AggregateUtil.transformToStreamAggregateInfoList(
      aggCalls,
      inputRelType,
      needRetraction,
      needInputCount = false,
      isStateBackendDataViews = false,
      needDistinctInfo = false)
    val inputFieldTypes = matchAgg.inputExprs
      .map(expr => FlinkTypeFactory.toLogicalType(expr.getType))
    val aggsHandlerCodeGenerator = new AggsHandlerCodeGenerator(
      CodeGeneratorContext(new TableConfig),
      relBuilder,
      inputFieldTypes,
      copyInputField = false).needAccumulate()
    val generatedAggsHandler = aggsHandlerCodeGenerator.generateAggsHandler(
      s"AggFunction_$variableUID",
      aggInfoList)
    // the generated handler class is shipped as a reusable object reference
    val generatedTerm = ctx.addReusableObject(generatedAggsHandler, "generatedAggHandler")
    val aggsHandlerTerm = s"aggregator_$variableUID"
    val declareCode = s"private $AGGS_HANDLER_FUNCTION $aggsHandlerTerm;"
    val initCode = s"$aggsHandlerTerm = ($AGGS_HANDLER_FUNCTION) " +
      s"$generatedTerm.newInstance($CURRENT_CLASS_LOADER);"
    ctx.addReusableMember(declareCode)
    ctx.addReusableInitStatement(initCode)
    val transformFuncName = s"transformRowForAgg_$variableUID"
    val inputTransform: String = generateAggInputExprEvaluation(
      matchAgg.inputExprs,
      transformFuncName)
    generateAggCalculation(aggsHandlerTerm, transformFuncName, inputTransform)
  }

  /**
    * De-duplicates the operand expressions of all aggregates (by textual form)
    * and rewrites each aggregate to reference its inputs by index into that
    * shared expression list.
    */
  private def extractAggregatesAndExpressions: MatchAgg = {
    // operand text -> (operand expression, index in the shared input list)
    val inputRows = new mutable.LinkedHashMap[String, (RexNode, Int)]
    val singleAggregates = aggregates.map { aggCall =>
      val callsWithIndices = aggCall.operands.asScala.map(innerCall => {
        inputRows.get(innerCall.toString) match {
          case Some(x) =>
            x
          case None =>
            val callWithIndex = (innerCall, inputRows.size)
            inputRows(innerCall.toString) = callWithIndex
            callWithIndex
        }
      })
      SingleAggCall(
        aggCall.getOperator.asInstanceOf[SqlAggFunction],
        aggCall.`type`,
        callsWithIndices.map(callsWithIndice => Integer.valueOf(callsWithIndice._2)))
    }
    MatchAgg(singleAggregates, inputRows.values.map(_._1).toSeq)
  }

  /**
    * Emits the Java method that folds a list of events into one aggregate
    * result row, and registers handler open/close calls on the context.
    */
  private def generateAggCalculation(
      aggsHandlerTerm: String,
      transformFuncName: String,
      inputTransformFunc: String): Unit = {
    val code =
      j"""
         |$inputTransformFunc
         |
         |private $GENERIC_ROW $calculateAggFuncName(java.util.List input)
         | throws Exception {
         | $aggsHandlerTerm.setAccumulators($aggsHandlerTerm.createAccumulators());
         | for ($BASE_ROW row : input) {
         | $aggsHandlerTerm.accumulate($transformFuncName(row));
         | }
         | $GENERIC_ROW result = ($GENERIC_ROW) $aggsHandlerTerm.getValue();
         | return result;
         |}
         |""".stripMargin
    ctx.addReusableMember(code)
    ctx.addReusableOpenStatement(
      s"$aggsHandlerTerm.open(new $AGGS_HANDLER_CONTEXT(getRuntimeContext()));")
    ctx.addReusableCloseStatement(s"$aggsHandlerTerm.close();")
  }

  /**
    * Emits the Java method that evaluates the de-duplicated aggregate input
    * expressions for one event row and packs them into a GenericRow. While
    * translating these expressions, pattern field references resolve against
    * the single input row (isWithinAggExprState flag).
    */
  private def generateAggInputExprEvaluation(
      inputExprs: Seq[RexNode],
      funcName: String): String = {
    isWithinAggExprState = true
    val resultTerm = newName("result")
    val exprs = inputExprs.zipWithIndex.map {
      case (inputExpr, outputIndex) =>
        val expr = generateExpression(inputExpr)
        s"""
           | ${expr.code}
           | if (${expr.nullTerm}) {
           | $resultTerm.setField($outputIndex, null);
           | } else {
           | $resultTerm.setField($outputIndex, ${expr.resultTerm});
           | }
        """.stripMargin
    }.mkString("\\n")
    isWithinAggExprState = false
    j"""
       |private $GENERIC_ROW $funcName($BASE_ROW $inputAggRowTerm) {
       | $GENERIC_ROW $resultTerm = new $GENERIC_ROW(${inputExprs.size});
       | $exprs
       | return $resultTerm;
       |}
       |""".stripMargin
  }

  // one aggregate call with operands rewritten to indices into the shared
  // de-duplicated input expression list
  private case class SingleAggCall(
    sqlAggFunction: SqlAggFunction,
    resultType: RelDataType,
    exprIndices: Seq[Integer]
  )

  // all aggregates of one pattern variable plus their shared input expressions
  private case class MatchAgg(
    aggregations: Seq[SingleAggCall],
    inputExprs: Seq[RexNode]
  )
}
}
object MatchCodeGenerator {
  /** Pattern variable name that refers to the rows of every variable ("*"). */
  val ALL_PATTERN_VARIABLE = "*"

  /** Runtime context class used when opening generated aggregate handlers. */
  val AGGS_HANDLER_CONTEXT: String = className[PerKeyStateDataViewStore]

  /** Java snippet yielding the classloader used to instantiate generated code. */
  val CURRENT_CLASS_LOADER = "Thread.currentThread().getContextClassLoader()"
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/codegen/MatchCodeGenerator.scala | Scala | apache-2.0 | 29,665 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.resource
import org.apache.spark.internal.config.{SPARK_DRIVER_PREFIX, SPARK_EXECUTOR_PREFIX, SPARK_TASK_PREFIX}
import org.apache.spark.internal.config.Worker.SPARK_WORKER_PREFIX
import org.apache.spark.resource.ResourceUtils.{FPGA, GPU}
/**
 * Pre-built [[ResourceID]] constants covering every combination of Spark
 * component (driver/executor/task/worker) and resource type (GPU/FPGA),
 * for use in resource-handling tests.
 */
object TestResourceIDs {
  /** Builds a [[ResourceID]] for the given component config prefix and resource name. */
  private def resourceId(componentPrefix: String, resourceName: String): ResourceID =
    new ResourceID(componentPrefix, resourceName)

  // GPU resource ids, one per Spark component.
  val DRIVER_GPU_ID = resourceId(SPARK_DRIVER_PREFIX, GPU)
  val EXECUTOR_GPU_ID = resourceId(SPARK_EXECUTOR_PREFIX, GPU)
  val TASK_GPU_ID = resourceId(SPARK_TASK_PREFIX, GPU)
  val WORKER_GPU_ID = resourceId(SPARK_WORKER_PREFIX, GPU)

  // FPGA resource ids, one per Spark component.
  val DRIVER_FPGA_ID = resourceId(SPARK_DRIVER_PREFIX, FPGA)
  val EXECUTOR_FPGA_ID = resourceId(SPARK_EXECUTOR_PREFIX, FPGA)
  val TASK_FPGA_ID = resourceId(SPARK_TASK_PREFIX, FPGA)
  val WORKER_FPGA_ID = resourceId(SPARK_WORKER_PREFIX, FPGA)
}
| hvanhovell/spark | core/src/test/scala/org/apache/spark/resource/TestResourceIDs.scala | Scala | apache-2.0 | 1,609 |
package org.jetbrains.plugins.scala.lang.typeInference
import com.intellij.openapi.extensions.Extensions
import org.jetbrains.plugins.scala.lang.psi.impl.base.InterpolatedStringMacroTypeProvider
import org.jetbrains.plugins.scala.lang.typeInference.testInjectors.SCL12987Injector
/**
  * Tests that [[InterpolatedStringMacroTypeProvider]] extensions are consulted
  * exactly for the interpolator methods they declare.
  *
  * @author Jan Lindemann
  * @since 23.11.17.
  */
class InterpolatedStringMacroTypeProviderTest extends TypeInferenceTestBase {

  /** Registers `extension`, runs the type-inference test, and always unregisters it again. */
  protected def doTypeProviderTest(text: String, extension: InterpolatedStringMacroTypeProvider): Unit = {
    val extensionPoint = Extensions.getRootArea.getExtensionPoint(InterpolatedStringMacroTypeProvider.EP_NAME)
    extensionPoint.registerExtension(extension)
    try doTest(text)
    finally extensionPoint.unregisterExtension(extension)
  }

  // A StringContext method with the name the extension handles must trigger it.
  def testPluginApplication(): Unit = {
    val code =
      s"""
         |implicit class StringToType(val sc: StringContext) extends AnyVal {
         | def toType(args: Any*): Any = sys.error("Here would be macro code")
         |}
         |val r = toType"2:Long"
         |println(${START}r${END})
         |//Long
      """.stripMargin
    doTypeProviderTest(code, new SCL12987Injector)
  }

  // Other StringContext methods must NOT trigger the extension.
  def testPluginIgnoring(): Unit = {
    val code =
      s"""
         |implicit class AnotherMacro(val sc: StringContext) extends AnyVal {
         | def foo(args: Any*): Any = sys.error("Here would be macro code")
         |}
         |val r = foo"2:Long"
         |println(${START}r${END})
         |//Any
      """.stripMargin
    doTypeProviderTest(code, new SCL12987Injector)
  }
}
| jastice/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/typeInference/InterpolatedStringMacroTypeProviderTest.scala | Scala | apache-2.0 | 1,726 |
// Demonstrates Scala's for-comprehension used as a simple foreach loop
// over an immutable List, printing each element.
val list = List("USA", "Russia", "Germany")
for(country <- list)
  println(country) | MartinThoma/LaTeX-examples | documents/Programmierparadigmen/scripts/scala/extended-for.scala | Scala | mit | 85 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.physical.batch
import org.apache.flink.runtime.operators.DamBehavior
import org.apache.flink.streaming.api.transformations.TwoInputTransformation
import org.apache.flink.table.api.{BatchTableEnvironment, TableConfigOptions}
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.codegen.CodeGeneratorContext
import org.apache.flink.table.codegen.ProjectionCodeGenerator.generateProjection
import org.apache.flink.table.codegen.sort.SortCodeGenerator
import org.apache.flink.table.dataformat.BaseRow
import org.apache.flink.table.plan.`trait`.FlinkRelDistributionTraitDef
import org.apache.flink.table.plan.cost.{FlinkCost, FlinkCostFactory}
import org.apache.flink.table.plan.nodes.ExpressionFormat
import org.apache.flink.table.plan.nodes.exec.ExecNode
import org.apache.flink.table.plan.nodes.resource.NodeResourceConfig
import org.apache.flink.table.plan.util.{FlinkRelMdUtil, FlinkRelOptUtil, JoinUtil, SortUtil}
import org.apache.flink.table.runtime.join.{FlinkJoinType, SortMergeJoinOperator}
import org.apache.flink.table.types.logical.RowType
import org.apache.flink.table.typeutils.BaseRowTypeInfo
import org.apache.calcite.plan._
import org.apache.calcite.rel.core._
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.rel.{RelCollationTraitDef, RelNode, RelWriter}
import org.apache.calcite.rex.RexNode
import java.util
import org.apache.flink.api.dag.Transformation
import scala.collection.JavaConversions._
/**
* Batch physical RelNode for sort-merge [[Join]].
*/
class BatchExecSortMergeJoin(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    leftRel: RelNode,
    rightRel: RelNode,
    condition: RexNode,
    joinType: JoinRelType,
    // true if LHS is sorted by left join keys, else false
    val leftSorted: Boolean,
    // true if RHS is sorted by right join key, else false
    val rightSorted: Boolean)
  extends BatchExecJoinBase(cluster, traitSet, leftRel, rightRel, condition, joinType) {

  // equi-join key indices of the left and right input, validated once on first use
  protected lazy val (leftAllKey, rightAllKey) =
    JoinUtil.checkAndGetJoinKeys(keyPairs, getLeft, getRight)

  // sort-merge join supports inner and all outer join types
  protected def isMergeJoinSupportedType(joinRelType: FlinkJoinType): Boolean = {
    joinRelType == FlinkJoinType.INNER ||
      joinRelType == FlinkJoinType.LEFT ||
      joinRelType == FlinkJoinType.RIGHT ||
      joinRelType == FlinkJoinType.FULL
  }

  override def copy(
      traitSet: RelTraitSet,
      conditionExpr: RexNode,
      left: RelNode,
      right: RelNode,
      joinType: JoinRelType,
      semiJoinDone: Boolean): Join = {
    new BatchExecSortMergeJoin(
      cluster,
      traitSet,
      left,
      right,
      conditionExpr,
      joinType,
      leftSorted,
      rightSorted)
  }

  override def explainTerms(pw: RelWriter): RelWriter =
    super.explainTerms(pw)
      .itemIf("leftSorted", leftSorted, leftSorted)
      .itemIf("rightSorted", rightSorted, rightSorted)

  /**
    * Cost model: an unsorted input pays O(n log n) comparison cost for sorting,
    * an already-sorted input only pays for buffering its rows; the merge phase
    * adds one comparison per row of either side. Memory cost is the estimated
    * sort buffer size (no spilling assumed).
    */
  override def computeSelfCost(planner: RelOptPlanner, mq: RelMetadataQuery): RelOptCost = {
    val leftRowCnt = mq.getRowCount(getLeft)
    val rightRowCnt = mq.getRowCount(getRight)
    if (leftRowCnt == null || rightRowCnt == null) {
      return null
    }
    val numOfSort = joinInfo.leftKeys.size()
    val leftSortCpuCost: Double = if (leftSorted) {
      // cost of writing lhs data to buffer
      leftRowCnt
    } else {
      // sort cost
      FlinkCost.COMPARE_CPU_COST * numOfSort * leftRowCnt * Math.max(Math.log(leftRowCnt), 1.0)
    }
    val rightSortCpuCost: Double = if (rightSorted) {
      // cost of writing rhs data to buffer
      rightRowCnt
    } else {
      // sort cost
      FlinkCost.COMPARE_CPU_COST * numOfSort * rightRowCnt * Math.max(Math.log(rightRowCnt), 1.0)
    }
    // cost of evaluating each join condition
    val joinConditionCpuCost = FlinkCost.COMPARE_CPU_COST * (leftRowCnt + rightRowCnt)
    val cpuCost = leftSortCpuCost + rightSortCpuCost + joinConditionCpuCost
    val costFactory = planner.getCostFactory.asInstanceOf[FlinkCostFactory]
    // assume memory is big enough, so sort process and mergeJoin process will not spill to disk.
    var sortMemCost = 0D
    if (!leftSorted) {
      sortMemCost += FlinkRelMdUtil.computeSortMemory(mq, getLeft)
    }
    if (!rightSorted) {
      sortMemCost += FlinkRelMdUtil.computeSortMemory(mq, getRight)
    }
    val rowCount = mq.getRowCount(this)
    costFactory.makeCost(rowCount, cpuCost, 0, 0, sortMemCost)
  }

  /**
    * Tries to satisfy the required distribution (and, when possible, the
    * required collation) by pushing hash distributions into both inputs,
    * avoiding an extra exchange/sort above this join.
    */
  override def satisfyTraits(requiredTraitSet: RelTraitSet): Option[RelNode] = {
    val requiredDistribution = requiredTraitSet.getTrait(FlinkRelDistributionTraitDef.INSTANCE)
    val (canSatisfyDistribution, leftRequiredDistribution, rightRequiredDistribution) =
      satisfyHashDistributionOnNonBroadcastJoin(requiredDistribution)
    if (!canSatisfyDistribution) {
      return None
    }
    val requiredCollation = requiredTraitSet.getTrait(RelCollationTraitDef.INSTANCE)
    val requiredFieldCollations = requiredCollation.getFieldCollations
    val shuffleKeysSize = leftRequiredDistribution.getKeys.size
    val newLeft = RelOptRule.convert(getLeft, leftRequiredDistribution)
    val newRight = RelOptRule.convert(getRight, rightRequiredDistribution)
    // SortMergeJoin can provide collation trait, check whether provided collation can satisfy
    // required collations
    val canProvideCollation = if (requiredCollation.getFieldCollations.isEmpty) {
      false
    } else if (requiredFieldCollations.size > shuffleKeysSize) {
      // Sort by [a, b] can satisfy [a], but cannot satisfy [a, b, c]
      false
    } else {
      val leftKeys = leftRequiredDistribution.getKeys
      val leftFieldCnt = getLeft.getRowType.getFieldCount
      // right-side key indices shifted into the joined row's field space
      val rightKeys = rightRequiredDistribution.getKeys.map(_ + leftFieldCnt)
      requiredFieldCollations.zipWithIndex.forall { case (collation, index) =>
        val idxOfCollation = collation.getFieldIndex
        // Full outer join is handled before, so does not need care about it
        if (idxOfCollation < leftFieldCnt && joinType != JoinRelType.RIGHT) {
          val fieldCollationOnLeftSortKey = FlinkRelOptUtil.ofRelFieldCollation(leftKeys.get(index))
          collation == fieldCollationOnLeftSortKey
        } else if (idxOfCollation >= leftFieldCnt &&
            (joinType == JoinRelType.RIGHT || joinType == JoinRelType.INNER)) {
          val fieldCollationOnRightSortKey =
            FlinkRelOptUtil.ofRelFieldCollation(rightKeys.get(index))
          collation == fieldCollationOnRightSortKey
        } else {
          false
        }
      }
    }
    var newProvidedTraitSet = getTraitSet.replace(requiredDistribution)
    if (canProvideCollation) {
      newProvidedTraitSet = newProvidedTraitSet.replace(requiredCollation)
    }
    Some(copy(newProvidedTraitSet, Seq(newLeft, newRight)))
  }

  //~ ExecNode methods -----------------------------------------------------------

  /**
    * Now must be full dam without two input operator chain.
    * TODO two input operator chain will return different value.
    */
  override def getDamBehavior: DamBehavior = DamBehavior.FULL_DAM

  override def getInputNodes: util.List[ExecNode[BatchTableEnvironment, _]] =
    getInputs.map(_.asInstanceOf[ExecNode[BatchTableEnvironment, _]])

  override def replaceInputNode(
      ordinalInParent: Int,
      newInputNode: ExecNode[BatchTableEnvironment, _]): Unit = {
    replaceInput(ordinalInParent, newInputNode.asInstanceOf[RelNode])
  }

  /**
    * Translates this node into a two-input transformation running a
    * [[SortMergeJoinOperator]]: generates the join-condition function, key
    * projections, sorters and comparators for both sides plus the key
    * comparator used by the merge phase.
    */
  override def translateToPlanInternal(
      tableEnv: BatchTableEnvironment): Transformation[BaseRow] = {
    val config = tableEnv.getConfig
    val leftInput = getInputNodes.get(0).translateToPlan(tableEnv)
      .asInstanceOf[Transformation[BaseRow]]
    val rightInput = getInputNodes.get(1).translateToPlan(tableEnv)
      .asInstanceOf[Transformation[BaseRow]]
    val leftType = leftInput.getOutputType.asInstanceOf[BaseRowTypeInfo].toRowType
    val rightType = rightInput.getOutputType.asInstanceOf[BaseRowTypeInfo].toRowType
    // join key row type, taken from the left side's key field types
    val keyType = RowType.of(leftAllKey.map(leftType.getChildren.get(_)): _*)
    val condFunc = JoinUtil.generateConditionFunction(
      config,
      cluster.getRexBuilder,
      getJoinInfo,
      leftType,
      rightType)
    // memory budgets configured in MB
    val externalBufferMemory = config.getConf.getInteger(
      TableConfigOptions.SQL_RESOURCE_EXTERNAL_BUFFER_MEM) * NodeResourceConfig.SIZE_IN_MB
    val sortMemory = config.getConf.getInteger(
      TableConfigOptions.SQL_RESOURCE_SORT_BUFFER_MEM) * NodeResourceConfig.SIZE_IN_MB
    // builds an ascending sort code generator over the given key columns
    def newSortGen(originalKeys: Array[Int], t: RowType): SortCodeGenerator = {
      val originalOrders = originalKeys.map(_ => true)
      val (keys, orders, nullsIsLast) = SortUtil.deduplicateSortKeys(
        originalKeys,
        originalOrders,
        SortUtil.getNullDefaultOrders(originalOrders))
      val types = keys.map(t.getTypeAt)
      new SortCodeGenerator(config, keys, types, orders, nullsIsLast)
    }
    val leftSortGen = newSortGen(leftAllKey, leftType)
    val rightSortGen = newSortGen(rightAllKey, rightType)
    val operator = new SortMergeJoinOperator(
      sortMemory,
      sortMemory,
      externalBufferMemory,
      flinkJoinType,
      // smaller side first: estimated output size decides the buffered side
      estimateOutputSize(getLeft) < estimateOutputSize(getRight),
      condFunc,
      generateProjection(
        CodeGeneratorContext(config), "SMJProjection", leftType, keyType, leftAllKey),
      generateProjection(
        CodeGeneratorContext(config), "SMJProjection", rightType, keyType, rightAllKey),
      leftSortGen.generateNormalizedKeyComputer("LeftComputer"),
      leftSortGen.generateRecordComparator("LeftComparator"),
      rightSortGen.generateNormalizedKeyComputer("RightComputer"),
      rightSortGen.generateRecordComparator("RightComparator"),
      newSortGen(leftAllKey.indices.toArray, keyType).generateRecordComparator("KeyComparator"),
      filterNulls)
    new TwoInputTransformation[BaseRow, BaseRow, BaseRow](
      leftInput,
      rightInput,
      getOperatorName,
      operator,
      BaseRowTypeInfo.of(FlinkTypeFactory.toLogicalRowType(getRowType)),
      getResource.getParallelism)
  }

  // estimated total bytes produced by `relNode` (avg row size * row count)
  private def estimateOutputSize(relNode: RelNode): Double = {
    val mq = relNode.getCluster.getMetadataQuery
    mq.getAverageRowSize(relNode) * mq.getRowCount(relNode)
  }

  // operator display name, including the join predicate when present
  private def getOperatorName: String = if (getCondition != null) {
    val inFields = inputRowType.getFieldNames.toList
    s"SortMergeJoin(where: ${
      getExpressionString(getCondition, inFields, None, ExpressionFormat.Infix)})"
  } else {
    "SortMergeJoin"
  }
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/nodes/physical/batch/BatchExecSortMergeJoin.scala | Scala | apache-2.0 | 11,395 |
import sbt._
import Keys._
import org.scalatra.sbt._
import org.scalatra.sbt.PluginKeys._
import com.mojolly.scalate.ScalatePlugin._
import ScalateKeys._
object MessagesBuild extends Build {
  // Project coordinates and pinned framework/tool versions.
  val Organization = "com.dietcoke"
  val Name = "Messages"
  val Version = "0.1.0-SNAPSHOT"
  val ScalaVersion = "2.10.2"
  val ScalatraVersion = "2.2.1"

  // Single-module Scalatra web project: default sbt settings, JRebel-enabled
  // container support and Scalate template precompilation.
  lazy val project = Project (
    "messages",
    file("."),
    settings = Defaults.defaultSettings ++ ScalatraPlugin.scalatraWithJRebel ++ scalateSettings ++ Seq(
      organization := Organization,
      name := Name,
      version := Version,
      scalaVersion := ScalaVersion,
      resolvers += Classpaths.typesafeReleases,
      // Scalatra + Jetty container stack; specs2 only on the test classpath,
      // logback only at runtime.
      libraryDependencies ++= Seq(
        "org.scalatra" %% "scalatra" % ScalatraVersion,
        "org.scalatra" %% "scalatra-scalate" % ScalatraVersion,
        "org.scalatra" %% "scalatra-specs2" % ScalatraVersion % "test",
        "ch.qos.logback" % "logback-classic" % "1.0.6" % "runtime",
        "org.eclipse.jetty" % "jetty-webapp" % "8.1.8.v20121106" % "container",
        "org.eclipse.jetty.orbit" % "javax.servlet" % "3.0.0.v201112011016" % "container;provided;test" artifacts (Artifact("javax.servlet", "jar", "jar"))
      ),
      // Compile Scalate templates found under WEB-INF/templates, binding the
      // Scalatra render context implicitly into every template.
      scalateTemplateConfig in Compile <<= (sourceDirectory in Compile){ base =>
        Seq(
          TemplateConfig(
            base / "webapp" / "WEB-INF" / "templates",
            Seq.empty, /* default imports should be added here */
            Seq(
              Binding("context", "_root_.org.scalatra.scalate.ScalatraRenderContext", importMembers = true, isImplicit = true)
            ), /* add extra bindings here */
            Some("templates")
          )
        )
      }
    )
  )
}
| aaronlifton/ScalatraMessages | project/build.scala | Scala | mit | 1,724 |
package com.googlecode.warikan.presentation.pages.items
/**
* Party List Item.
*
* @author yukei
*/
/**
 * Party List Item.
 *
 * Mutable bean holding the display values for one party row:
 * id, name, schedule, location and the sum to split.
 *
 * @author yukei
 */
class PartyListItem extends java.io.Serializable {
  // Replaced the @serializable annotation (deprecated since Scala 2.9 and
  // removed in 2.11) with the equivalent `extends java.io.Serializable`;
  // serialized form and Java interop are unchanged.
  var partyId: String = _
  var partyName: String = _
  var schedule: String = _
  var location: String = _
  var sum: String = _
} | digitalsoul0124/warikan | src/main/scala/com/googlecode/warikan/presentation/pages/items/PartyListItem.scala | Scala | mit | 284 |
package me.sgrouples.rogue
import java.util.Locale
private[rogue] object SupportedLocales {
  /**
    * All JVM-known locales keyed by their `toString` id (empty ids skipped),
    * with Norwegian Bokmål ("nb") guaranteed to be present.
    */
  lazy val map: Map[String, Locale] = {
    val jvmLocales = Locale.getAvailableLocales.iterator
      .filter(_.toString.nonEmpty)
      .map(locale => locale.toString -> locale)
      .toMap
    // norwegian bokmål, missing from getAvailableLocales
    jvmLocales + ("nb" -> new Locale("nb"))
  }
}
| sgrouples/rogue-fsqio | bsonformats/src/main/scala/me/sgrouples/rogue/SupportedLocales.scala | Scala | apache-2.0 | 424 |
package de.tu_berlin.formic.gatling.action.json
import de.tu_berlin.formic.datastructure.json.JsonPath
import de.tu_berlin.formic.datastructure.json.client.FormicJsonObject
import de.tu_berlin.formic.gatling.action.{SessionVariables, TimeMeasureCallback}
import io.gatling.commons.util.TimeHelper
import io.gatling.core.action.{Action, ChainableAction}
import io.gatling.core.session.{Expression, Session}
import io.gatling.core.stats.StatsEngine
/**
* @author Ronny Bräunlich
*/
/**
 * Gatling action that removes the value at the given JSON path from a
 * Formic JSON data-type instance stored in the session, recording the
 * round-trip time of the remote operation.
 */
case class JsonDeletion(dataTypeInstanceId: Expression[String], statsEngine: StatsEngine, next: Action, pathElements: Seq[Expression[String]]) extends ChainableAction {

  override def name: String = "JsonDelete action"

  override def execute(session: Session): Unit = {
    // Capture the start time before resolving expressions so the measurement
    // covers the whole action, not just the remove call.
    val start = TimeHelper.nowMillis
    dataTypeInstanceId.apply(session).foreach { id =>
      val dataTypeAttribute = session(id)
      // .get assumes every path expression resolves successfully against the
      // session; a failed expression would throw here.
      val validatedPath = pathElements.map(elem => elem.apply(session).get)
      val path = JsonPath(validatedPath: _*)
      dataTypeAttribute.asOption[FormicJsonObject] match {
        case None => throw new IllegalArgumentException("Data type not found. Create it first!")
        case Some(dataType) =>
          // remove() returns an operation id; a listener keyed on that id
          // reports the elapsed time once the remote operation completes.
          val opId = dataType.remove(path)
          session(SessionVariables.TIMEMEASURE_CALLBACK).as[TimeMeasureCallback]
            .addListener(TimeMeasureCallback.RemoteOperationTimeMeasureListener(opId, start, session, statsEngine, name))
      }
      // Hand the (unmodified) session to the next action in the chain.
      next ! session
    }
  }
}
| rbraeunlich/formic | formic-gatling/src/main/scala/de/tu_berlin/formic/gatling/action/json/JsonDeletion.scala | Scala | apache-2.0 | 1,487 |
package com.robot.parser
import com.robot.command._
import com.robot.core.{Coordinates, Direction, Point, Robot}
/** Parses a newline-separated script of robot commands. */
case class StringCommandParser(commandLines: String) {

  /** One parsed command per input line, in order. */
  def commands: Seq[Command[Robot]] = commandLines.split("\\n").toSeq.map(toCommand)

  // Matches e.g. "PLACE 1,2,NORTH": keyword, x, y and direction name.
  private val placePattern = "(PLACE){1} ([0-9]+),([0-9]+),([A-Za-z]+)".r

  /** Translates one line (case-insensitive) into a command; anything unrecognised maps to NullCommand. */
  def toCommand(cmd: String): Command[Robot] = cmd.trim.toUpperCase match {
    case "MOVE"   => MoveCommand
    case "LEFT"   => TurnLeftCommand
    case "RIGHT"  => TurnRightCommand
    case "REPORT" => new ReportCommand((x, y, z) => println(s"$x,$y,$z"))
    case placePattern(_, x, y, d) =>
      // The String-to-Option[Direction.Value] assignment relies on an implicit
      // conversion from com.robot.core; presumably an unknown direction name
      // yields None — confirm in Direction's companion.
      val direction: Option[Direction.Value] = d
      direction
        .map(dir => new PlaceCommand(Coordinates(Point(x.toInt, y.toInt), dir)))
        .getOrElse(NullCommand)
    case _ => NullCommand
  }
}
| ratheeshmohan/robotappscala | src/main/scala/com/robot/parser/stringcommandparser.scala | Scala | apache-2.0 | 815 |
package org.project.thunder_streaming.util
import org.apache.spark.SparkContext
import org.jboss.netty.logging.InternalLoggerFactory
import org.jboss.netty.logging.Slf4JLoggerFactory
import org.scalatest.BeforeAndAfterAll
import org.scalatest.BeforeAndAfterEach
import org.scalatest.Suite
/** Manages a local sc variable, correctly stopping it after each test.
 * (adapted from Spark testing suite) */
trait LocalSparkContext extends BeforeAndAfterEach with BeforeAndAfterAll { self: Suite =>

  // Shared context for the suite; @transient keeps the suite serializable.
  @transient var sc: SparkContext = _

  override def beforeAll() {
    // NOTE(review): the result of this getenv call is discarded and the call
    // has no side effect — looks like leftover code; confirm and remove.
    System.getenv("CHECKPOINT_INTERVAL")
    // Route Netty's internal logging through SLF4J.
    InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory())
    super.beforeAll()
  }

  // Stop the context after every test so each test starts from a clean slate.
  override def afterEach() {
    resetSparkContext()
    super.afterEach()
  }

  def resetSparkContext() = {
    LocalSparkContext.stop(sc)
    sc = null
  }

}
object LocalSparkContext {

  /** Stops `sc` when non-null and clears the driver-port property. */
  def stop(sc: SparkContext) {
    Option(sc).foreach(_.stop())
    // To avoid Akka rebinding to the same port, since it doesn't unbind immediately on shutdown
    System.clearProperty("spark.driver.port")
  }

  /** Runs `f` by passing in `sc` and ensures that `sc` is stopped. */
  def withSpark[T](sc: SparkContext)(f: SparkContext => T) = {
    try f(sc)
    finally stop(sc)
  }

}
package scalax.collection
import org.scalatest.{Spec, Matchers}
import GraphPredef._, GraphEdge._, edge._, edge.LBase._, edge.Implicits._
import org.scalatest.junit.JUnitRunner
import org.junit.runner.RunWith
import custom.flight._,
custom.flight.Helper._,
custom.flight.FlightImplicits._
/** Tests for typed/labeled edge behavior in scala-graph (hyper-edges,
 *  key-labeled edges, weighted edges and their pattern extractors). */
@RunWith(classOf[JUnitRunner])
class TEdgeTest extends Spec with Matchers
{
  def `DiHyperEdge targets are sequencies` { // fixes #47
    val e = DiHyperEdge(1,2,2)
    val g = Graph(e)
    g.contains(e) should be (true)
  }

  // Brings Flight label accessors (e.g. `e.flightNo`) into scope implicitly.
  object FlightLabel extends LEdgeImplicits[Flight]
  import FlightLabel._

  val (ham, gig) = (Airport("HAM"), Airport("GIG"))
  val flightNo = "LH007"

  object `Custom edge tests` {
    def `LkDiEdge ` {
      val outer = LkDiEdge(ham, gig)(Flight(flightNo))
      val g = Graph(outer)
      val e = g.edges.head
      e.edge.nodes.productElement(0).asInstanceOf[AnyRef].getClass should be (
          g.nodes.head.getClass)
      e.from should be (ham)
      e.to should be (gig)
      e.flightNo should be (flightNo)
      e should be (outer)
      e.## should be (outer.##)
      // Equality of key-labeled edges is driven by Flight.equals, which only
      // compares flightNo — departure time differences must not matter.
      val eqFlight = LkDiEdge(ham, gig)(Flight(flightNo, 11 o 2))
      e should be (eqFlight)
      e.## should be (eqFlight.##)
      val neFlight = LkDiEdge(ham, gig)(Flight(flightNo + "x", 11 o 2))
      e should not be (neFlight)
      e.## should not be (neFlight.##)
    }
    def `LkDiEdgeShortcut ` {
      val outer = LkDiEdge(ham, gig)(Flight(flightNo))
      (ham ~+#> gig)(Flight(flightNo)) should be (outer)
      (ham ~+#> gig)(Flight(flightNo, 11 o 20)) should be (outer)
    }
    def `matching weighted edges` {
      val (n1, n2, w) = (1, 2, 5)
      def check(_n1: Int, _n2: Int, _w: Long) {
        _n1 should be (n1)
        _n2 should be (n2)
        _w should be (w)
      }
      val wDi = (n1 ~%> n2)(w)
      wDi match { case WDiEdge(s, t, w) => check(s, t, w) }
      wDi match { case s :~> %(t, w) => check(s, t, w) }
      wDi match { case s :~> t % w => check(s, t, w) }
      Graph(wDi).get(wDi).edge match {
        case s :~> t % w => check(s.value, t.value, w) }
      val wkDi = (n1 ~%#> n2)(w)
      wkDi match { case s :~> t % w => check(s, t, w) }
    }
    def `matching labeled edges` {
      object StringLabel extends LEdgeImplicits[String]
      import StringLabel._
      val (n1, n2, label) = (1, 2, "A")
      def check(_n1: Int, _n2: Int, _label: String) {
        _n1 should be (n1)
        _n2 should be (n2)
        _label should be (label)
      }
      val lDi = (n1 ~+> n2)(label)
      lDi match { case LDiEdge(s, t, l) => check(s, t, l) }
      lDi match { case s :~> +(t, l) => check(s, t, l) }
      lDi match { case s :~> t + l => check(s, t, l) }
      Graph(lDi).get(lDi).edge match {
        case s :~> t + l => check(s.value, t.value, l) }
      val lkDi = (n1 ~+#> n2)(label)
      lkDi match { case s :~> t + l => check(s, t, l) }
    }
    def `matching weighted labeled edges` {
      object StringLabel extends LEdgeImplicits[String]
      import StringLabel._
      val (n1, n2, label, weight) = (1, 2, "A", 4L)
      def check(_n1: Int, _n2: Int, _weight: Long, _label: String) {
        _n1 should be (n1)
        _n2 should be (n2)
        _weight should be (weight)
        _label should be (label)
      }
      val wlDi = (n1 ~%+> n2)(weight, label)
      wlDi match { case WLDiEdge(s, t, w, l) => check(s, t, w, l) }
      wlDi match { case s :~> %+(t, w, l) => check(s, t, w, l) }
      wlDi match { case s :~> t %+ (w, l) => check(s, t, w, l) }
      Graph(wlDi).get(wlDi).edge match {
        case s :~> t %+ (w, l) => check(s.value, t.value, w, l) }
      val wlkDi = (n1 ~%+#> n2)(weight, label)
      wlkDi match { case s :~> t %+ (w, l) => check(s, t, w, l) }
    }
    def `findOutgoingTo LkDiEdge` {
      import edge.LkDiEdge
      // Self-loop: findOutgoingTo on the same node must return the loop edge.
      val le = LkDiEdge(1,1)(1)
      val lg = Graph(le)
      val ln1 = lg get 1
      (ln1 findOutgoingTo ln1) should be (Some(le))
    }
    def `LkHyperEdge equality` {
      val e1 = LkDiHyperEdge(1,1)("a")
      val e2 = LkHyperEdge(1,1)("b")
      val g = Graph[Int, LHyperEdge](e1, e2)
      g find e1 should be ('defined)
      g find e2 should be ('defined)
    }
    def `LkDiHyperEdge equality` {
      val e = LkDiHyperEdge(1,2,3)("a")
      val g = Graph[Int, LHyperEdge](e)
      val eo = g.edges.head.toOuter
      g find eo should be ('defined)
    }
  }
}
/* Label type for use in key-labeled edges.
 * Note that using path-dependent label types with Scala 2.9.1-final I had a runtime issue
 * which could be resolved by moving the label class to the top-level.
 *
 * @param flightNo  flight number; the sole equality key of this label.
 * @param departure departure time of day (defaults to 00:00).
 * @param duration  flight duration (defaults to zero).
 */
case class Flight(val flightNo: String,
                  val departure: DayTime = DayTime(0,0),
                  val duration: Duration = Duration(0,0))
{
  /* flightNo should be treated as the label key meaning that the set of edges
   * incident to two given nodes may contain at most one edge with a given flightNo.
   *
   * To achieve the above requirement we must override `equals` and `hashCode`
   * narrowing equality to the flightNo attribute because the hash-code of key-labeled edges
   * is composed by the hash-code of the incident nodes and the label hash-code.
   */
  override def equals(other: Any) = other match {
    case that: Flight => that.flightNo == this.flightNo
    case _            => false
  }
  // Must stay consistent with equals: hash only the flightNo key.
  override def hashCode = flightNo.##
}
// Compiler tests for predefined edges.
// These vals exist only to assert that the expressions type-check (or, where
// marked "not inferred", that the element types must be given explicitly);
// they are never executed by a test runner.
object Test {
  import scalax.collection.GraphPredef._
  val h = 2~4~6
  val d = 1~>2
  val u = 1~(-1)

  val (lh1, lh2) = (LHyperEdge(1,3,5)(6), LHyperEdge(1,3,5)(7))
  val g_lh_h = Graph(lh1,h)
  val g_lh_d = Graph[Int,HyperEdge](lh1,d) // not inferred
  val g_lh_lh = Graph(lh1,lh2)

  val (lkh1, lkh2) = (LkHyperEdge(1,3,5)(8), LkHyperEdge(1,3,5)(9))
  val g_lkh_h = Graph(lkh1,h)
  val g_lkh_lkh = Graph(lkh1,lkh2)
  val g_lkh_lh = Graph(lkh1,lh1)

  val (ldh1, ldh2) = (LDiHyperEdge(1,3,5)(10), LDiHyperEdge(1,3,5)(11))
  val g_ldh_h = Graph(ldh1,h)
  val g_ldh_ldh = Graph(ldh1,ldh2)
  val g_ldh_lh = Graph(ldh1,lh2)
  val g_ldh_lkh = Graph[Int,LHyperEdge](ldh1,lkh2) // not inferred

  val (lkdh1, lkdh2) = (LkDiHyperEdge(1,3,5)(12), LkDiHyperEdge(1,3,5)(13))
  val g_lkdh_h = Graph(lkdh1,h)
  val g_lkdh_lkdh = Graph(lkdh1,lkdh2)
  val g_lkdh_ldh = Graph(lkdh1,ldh2)
  val g_lkdh_lh = Graph(lkdh1,lh2)
  val g_lkdh_lkh = Graph[Int,LHyperEdge](lkdh1,lkh2) // not inferred

  val (lu1, lu2) = (LUnDiEdge(1,3)(4), LUnDiEdge(1,3)(5))
  val g_lu_u = Graph(lu1,u)
  val g_lu_h = Graph(lu1,h)
  val g_lu_d = Graph[Int,UnDiEdge](lu1,d) // not inferred
  val g_lu_lu = Graph(lu1,lu2)
  val g_lu_lh = Graph[Int,HyperEdge](lu1,lh2) // not inferred
}
// Compiler tests for calling label methods by means of implicits.
// Verifies that LEdgeImplicits exposes label members (here `i`) directly on
// both outer and inner edges; never executed by a test runner.
object TestImplicits {
  import scalax.collection.Graph
  case class MyLabel(val i: Int)
  val eOuter = LUnDiEdge(1,3)(MyLabel(4))

  // Label access on an outer (standalone) edge.
  object OuterEdge {
    object UserL extends LEdgeImplicits[MyLabel]
    import UserL._
    val four = eOuter.i
  }
  // Label access on an inner (graph-contained) edge.
  object InnerEdge {
    object UserL extends LEdgeImplicits[MyLabel]
    import UserL._
    val g = Graph(eOuter)
    val eInner = g.edges.head

    // val four_0 = e.label match {case m: MyLabel => m.i}
    val four = eInner.i
  }
}
// Compiler tests for predefined edge shortcuts.
// `~+>` builds a labeled directed edge, `~+#>` its key-labeled variant;
// these vals only assert the shortcut syntax type-checks.
object TestOperators {
  val ld = (1 ~+> 2)(3)
  val lkd = (3 ~+#> 4)(7)
}
package com.github.mauricio.async.db.mysql.codec
import com.github.mauricio.async.db.mysql.message.client.{ClientMessage, SendLongDataMessage}
import com.github.mauricio.async.db.util.{ByteBufferUtils, Log}
import io.netty.buffer.Unpooled
import io.netty.channel.ChannelHandlerContext
import io.netty.handler.codec.MessageToMessageEncoder
object SendLongDataEncoder {
  val log = Log.get[SendLongDataEncoder]

  // Size threshold above which a parameter value is streamed with
  // COM_STMT_SEND_LONG_DATA instead of being sent inline — presumably
  // consulted by the statement encoder; confirm against callers.
  val LONG_THRESHOLD = 1023
}
/**
 * Netty encoder that turns a [[SendLongDataMessage]] into a MySQL
 * COM_STMT_SEND_LONG_DATA packet (command byte + statement id + param id,
 * followed by the raw value bytes).
 */
class SendLongDataEncoder
    extends MessageToMessageEncoder[SendLongDataMessage](
      classOf[SendLongDataMessage]
    ) {

  import com.github.mauricio.async.db.mysql.codec.SendLongDataEncoder.log

  def encode(
    ctx: ChannelHandlerContext,
    message: SendLongDataMessage,
    out: java.util.List[Object]
  ): Unit = {
    if (log.isTraceEnabled) {
      log.trace(s"Writing message ${message.toString}")
    }

    // This command always starts a fresh packet sequence.
    val sequence = 0

    // Header buffer: 3-byte payload length + 1-byte sequence number, then the
    // payload prefix of 1 (command byte) + 4 (statement id) + 2 (param id).
    val headerBuffer = ByteBufferUtils.mysqlBuffer(3 + 1 + 1 + 4 + 2)
    // Payload length = command byte + statement id + param id + value bytes.
    ByteBufferUtils.write3BytesInt(
      headerBuffer,
      1 + 4 + 2 + message.value.readableBytes()
    )
    headerBuffer.writeByte(sequence)

    headerBuffer.writeByte(ClientMessage.PreparedStatementSendLongData)
    headerBuffer.writeBytes(message.statementId)
    headerBuffer.writeShort(message.paramId)

    // wrappedBuffer composes header + value without copying the value bytes.
    val result = Unpooled.wrappedBuffer(headerBuffer, message.value)

    out.add(result)
  }

}
| dripower/postgresql-async | mysql-async/src/main/scala/com/github/mauricio/async/db/mysql/codec/SendLongDataEncoder.scala | Scala | apache-2.0 | 1,369 |
/** Demonstrates `val`/`var` constructor parameters: `hello` is read-only,
 *  `name` is mutable. */
class Parameters2(val hello: String, var name: String) {
  /** Prints the greeting followed by the current name. */
  def greet = println(s"$hello $name")
}
| grzegorzbalcerek/scala-book-examples | examples/Parameters2.scala | Scala | mit | 101 |
package views.html.study
import play.api.i18n.Lang
import lila.app.templating.Environment._
import lila.i18n.{ I18nKeys => trans }
object jsI18n {

  /** JSON bundle of translated strings for the study UI: the shared
    * analysis-board strings merged with the study-specific keys below. */
  def apply()(implicit lang: Lang) =
    views.html.board.userAnalysisI18n(withAdvantageChart = true) ++
      i18nJsObject(i18nKeys)

  // Every translation key the study client-side code may request.
  // Keys without a `trans.` prefix come from `trans.study` (imported below).
  val i18nKeys: List[lila.i18n.MessageKey] = {
    import trans.study._
    List(
      trans.name,
      trans.white,
      trans.black,
      trans.variant,
      trans.clearBoard,
      trans.startPosition,
      trans.cancel,
      trans.chat,
      addNewChapter,
      addMembers,
      inviteToTheStudy,
      pleaseOnlyInvitePeopleYouKnow,
      searchByUsername,
      spectator,
      contributor,
      kick,
      leaveTheStudy,
      youAreNowAContributor,
      youAreNowASpectator,
      pgnTags,
      like,
      newTag,
      commentThisPosition,
      commentThisMove,
      annotateWithGlyphs,
      theChapterIsTooShortToBeAnalysed,
      onlyContributorsCanRequestAnalysis,
      getAFullComputerAnalysis,
      makeSureTheChapterIsComplete,
      allSyncMembersRemainOnTheSamePosition,
      shareChanges,
      playing,
      first,
      previous,
      next,
      last,
      shareAndExport,
      cloneStudy,
      studyPgn,
      downloadAllGames,
      chapterPgn,
      downloadGame,
      studyUrl,
      currentChapterUrl,
      youCanPasteThisInTheForumToEmbed,
      startAtInitialPosition,
      startAtX,
      embedInYourWebsite,
      readMoreAboutEmbedding,
      onlyPublicStudiesCanBeEmbedded,
      open,
      xBroughtToYouByY,
      studyNotFound,
      editChapter,
      newChapter,
      orientation,
      analysisMode,
      pinnedChapterComment,
      saveChapter,
      clearAnnotations,
      deleteChapter,
      deleteThisChapter,
      clearAllCommentsInThisChapter,
      rightUnderTheBoard,
      noPinnedComment,
      normalAnalysis,
      hideNextMoves,
      interactiveLesson,
      chapterX,
      empty,
      startFromInitialPosition,
      editor,
      startFromCustomPosition,
      loadAGameByUrl,
      loadAPositionFromFen,
      loadAGameFromPgn,
      automatic,
      urlOfTheGame,
      loadAGameFromXOrY,
      createChapter,
      configureLiveBroadcast,
      createStudy,
      editStudy,
      visibility,
      public,
      `private`,
      unlisted,
      inviteOnly,
      allowCloning,
      nobody,
      onlyMe,
      contributors,
      members,
      everyone,
      enableSync,
      yesKeepEveryoneOnTheSamePosition,
      noLetPeopleBrowseFreely,
      pinnedStudyComment,
      start,
      save,
      clearChat,
      deleteTheStudyChatHistory,
      deleteStudy,
      confirmDeleteStudy,
      whereDoYouWantToStudyThat,
      nbChapters,
      nbGames,
      nbMembers,
      pasteYourPgnTextHereUpToNbGames
    ).map(_.key)
  }
}
| luanlv/lila | app/views/study/jsI18n.scala | Scala | mit | 2,816 |
package com.goticks
import akka.actor._
import akka.util.Timeout
import scala.concurrent.Future
import scala.concurrent.duration._
// ********** ********** ********** ********** ********** ********** ********** \\\\
/**
 * Parent actor managing one TicketSeller child per event (child name ==
 * event name). Handles event creation, ticket purchase forwarding and
 * aggregation of per-event ticket counts.
 */
class BoxOffice extends Actor with CreateTicketSellers with ActorLogging {
  import com.goticks.TicketProtocol._
  import context._
  implicit val timeout = Timeout(5 seconds)
// +--------------------+  +--------------------+  +--------------------+ \\\\
  def receive = {
    case Event(name, nrOfTickets) =>
      log.info(s"Creating new event ${name} with ${nrOfTickets} tickets.")
      // Only create a seller (and its tickets) if none exists for this name;
      // EventCreated is acknowledged either way.
      if(context.child(name).isEmpty) {
        val ticketSeller = createTicketSeller(name)
        val tickets = Tickets((1 to nrOfTickets).map(nr=> Ticket(name, nr)).toList)
        ticketSeller ! tickets
      }
      sender ! EventCreated
    case TicketRequest(name) =>
      log.info(s"Getting a ticket for the ${name} event.")
      context.child(name) match {
        // forward keeps the original requester as the sender for the reply.
        case Some(ticketSeller) => ticketSeller.forward(BuyTicket)
        case None => sender ! SoldOut
      }
    case GetEvents =>
      import akka.pattern.ask
      // Capture sender before the future callbacks run on another thread —
      // calling sender() inside map would read the wrong actor.
      val capturedSender = sender
      def askAndMapToEvent(ticketSeller:ActorRef) = {
        val futureInt = ticketSeller.ask(GetEvents).mapTo[Int]
        // NOTE(review): `ticketSeller.actorRef` — plain ActorRef has no
        // `actorRef` member; presumably an implicit/legacy API. Confirm.
        futureInt.map(nrOfTickets => Event(ticketSeller.actorRef.path.name, nrOfTickets))
      }
      val futures = context.children.map(ticketSeller => askAndMapToEvent(ticketSeller))
      // Collapse all per-child futures into one Events reply.
      Future.sequence(futures).map { events => capturedSender ! Events(events.toList)
      }
  }
// +--------------------+  +--------------------+  +--------------------+ \\\\
}
// ********** ********** ********** ********** ********** ********** ********** \\\\
/** Child-actor factory mix-in; presumably separated out so tests can
  * override creation with a probe — confirm against the test suite. */
trait CreateTicketSellers { self:Actor =>
  // Child is named after the event, enabling lookup via context.child(name).
  def createTicketSeller(name:String) = context.actorOf(Props[TicketSeller], name)
}
// ********** ********** ********** ********** ********** ********** ********** \\\\
| mduttaroy-dev/akka-trials-01.idea | src/main/scala/com/goticks/BoxOffice.scala | Scala | gpl-2.0 | 1,958 |
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.zoo.pipeline.api.keras.layers
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.Shape
import com.intel.analytics.zoo.pipeline.api.keras.layers.utils.KerasUtils
import scala.reflect.ClassTag
/**
 * Do not create a new instance of it or use it in a model.
 * Please use its child classes, 'AveragePooling2D' and 'MaxPooling2D' instead.
 *
 * @param poolSize   pooling window as Array(dim1, dim2); must have length 2.
 * @param strides    window step as Array(stride1, stride2); when null it
 *                   defaults to poolSize (non-overlapping windows).
 * @param borderMode "valid" or "same"; a null value skips the check entirely.
 * @param inputShape input shape excluding the batch dimension (prepended via
 *                   KerasUtils.addBatch below).
 * @tparam T numeric element type of the layer's parameters.
 */
abstract class Pooling2D[T: ClassTag](
   val poolSize: Array[Int] = Array(2, 2),
   val strides: Array[Int] = null,
   val borderMode: String = "valid",
   val inputShape: Shape = null)(implicit ev: TensorNumeric[T])
  extends LayerWrapperByForward[T](KerasUtils.addBatch(inputShape)) {

  require(poolSize.length == 2,
    s"For Pooling2D, poolSize should be of length 2 but got length ${poolSize.length}")

  // borderMode may legitimately be null (subclasses can opt out of the check).
  if (borderMode!=null) {
    require(borderMode == "valid" || borderMode == "same", s"Invalid border mode for " +
      s"Pooling2D: $borderMode")
  }

  // Effective strides: fall back to the pool size when none were given.
  val strideValues: Array[Int] = if (strides == null) poolSize else strides
  require(strideValues.length == 2,
    s"For Pooling2D, strides should be of length 2 but got length ${strideValues.length}")

}
| intel-analytics/analytics-zoo | zoo/src/main/scala/com/intel/analytics/zoo/pipeline/api/keras/layers/Pooling2D.scala | Scala | apache-2.0 | 1,805 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.csv
import java.io.File
import java.nio.charset.{Charset, UnsupportedCharsetException}
import java.nio.file.Files
import java.sql.{Date, Timestamp}
import java.text.SimpleDateFormat
import java.util.Locale
import scala.collection.JavaConverters._
import scala.util.Properties
import org.apache.commons.lang3.time.FastDateFormat
import org.apache.hadoop.io.SequenceFile.CompressionType
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.log4j.{AppenderSkeleton, LogManager}
import org.apache.log4j.spi.LoggingEvent
import org.apache.spark.SparkException
import org.apache.spark.sql.{AnalysisException, DataFrame, QueryTest, Row}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.{SharedSQLContext, SQLTestUtils}
import org.apache.spark.sql.types._
class CSVSuite extends QueryTest with SharedSQLContext with SQLTestUtils with TestCsvData {
import testImplicits._
private val carsFile = "test-data/cars.csv"
private val carsMalformedFile = "test-data/cars-malformed.csv"
private val carsFile8859 = "test-data/cars_iso-8859-1.csv"
private val carsTsvFile = "test-data/cars.tsv"
private val carsAltFile = "test-data/cars-alternative.csv"
private val carsUnbalancedQuotesFile = "test-data/cars-unbalanced-quotes.csv"
private val carsNullFile = "test-data/cars-null.csv"
private val carsEmptyValueFile = "test-data/cars-empty-value.csv"
private val carsBlankColName = "test-data/cars-blank-column-name.csv"
private val emptyFile = "test-data/empty.csv"
private val commentsFile = "test-data/comments.csv"
private val disableCommentsFile = "test-data/disable_comments.csv"
private val boolFile = "test-data/bool.csv"
private val decimalFile = "test-data/decimal.csv"
private val simpleSparseFile = "test-data/simple_sparse.csv"
private val numbersFile = "test-data/numbers.csv"
private val datesFile = "test-data/dates.csv"
private val unescapedQuotesFile = "test-data/unescaped-quotes.csv"
private val valueMalformedFile = "test-data/value-malformed.csv"
  /** Verifies data and schema of a DataFrame read from one of the cars CSV
   *  fixtures.
   *
   *  @param df          the loaded DataFrame under test.
   *  @param withHeader  whether the file was read with header=true.
   *  @param numCars     expected number of data rows (excluding the header).
   *  @param numFields   expected number of columns.
   *  @param checkHeader verify column names (real names vs. generated _cN).
   *  @param checkValues verify the values in the year column.
   *  @param checkTypes  when true, year values are expected as Int
   *                     (inferSchema) rather than String.
   */
  private def verifyCars(
      df: DataFrame,
      withHeader: Boolean,
      numCars: Int = 3,
      numFields: Int = 5,
      checkHeader: Boolean = true,
      checkValues: Boolean = true,
      checkTypes: Boolean = false): Unit = {

    val numColumns = numFields
    // Without a header the header line is parsed as an extra data row.
    val numRows = if (withHeader) numCars else numCars + 1
    // schema
    assert(df.schema.fieldNames.length === numColumns)
    assert(df.count === numRows)

    if (checkHeader) {
      if (withHeader) {
        assert(df.schema.fieldNames === Array("year", "make", "model", "comment", "blank"))
      } else {
        // header=false generates positional column names.
        assert(df.schema.fieldNames === Array("_c0", "_c1", "_c2", "_c3", "_c4"))
      }
    }

    if (checkValues) {
      val yearValues = List("2012", "1997", "2015")
      // Without a header the literal string "year" appears as the first row.
      val actualYears = if (!withHeader) "year" :: yearValues else yearValues
      val years = if (withHeader) df.select("year").collect() else df.select("_c0").collect()

      years.zipWithIndex.foreach { case (year, index) =>
        if (checkTypes) {
          assert(year === Row(actualYears(index).toInt))
        } else {
          assert(year === Row(actualYears(index)))
        }
      }
    }
  }
test("simple csv test") {
val cars = spark
.read
.format("csv")
.option("header", "false")
.load(testFile(carsFile))
verifyCars(cars, withHeader = false, checkTypes = false)
}
test("simple csv test with calling another function to load") {
val cars = spark
.read
.option("header", "false")
.csv(testFile(carsFile))
verifyCars(cars, withHeader = false, checkTypes = false)
}
test("simple csv test with type inference") {
val cars = spark
.read
.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.load(testFile(carsFile))
verifyCars(cars, withHeader = true, checkTypes = true)
}
test("simple csv test with string dataset") {
val csvDataset = spark.read.text(testFile(carsFile)).as[String]
val cars = spark.read
.option("header", "true")
.option("inferSchema", "true")
.csv(csvDataset)
verifyCars(cars, withHeader = true, checkTypes = true)
val carsWithoutHeader = spark.read
.option("header", "false")
.csv(csvDataset)
verifyCars(carsWithoutHeader, withHeader = false, checkTypes = false)
}
test("test inferring booleans") {
val result = spark.read
.format("csv")
.option("header", "true")
.option("inferSchema", "true")
.load(testFile(boolFile))
val expectedSchema = StructType(List(
StructField("bool", BooleanType, nullable = true)))
assert(result.schema === expectedSchema)
}
test("test inferring decimals") {
val result = spark.read
.format("csv")
.option("comment", "~")
.option("header", "true")
.option("inferSchema", "true")
.load(testFile(decimalFile))
val expectedSchema = StructType(List(
StructField("decimal", DecimalType(20, 0), nullable = true),
StructField("long", LongType, nullable = true),
StructField("double", DoubleType, nullable = true)))
assert(result.schema === expectedSchema)
}
test("test with alternative delimiter and quote") {
val cars = spark.read
.format("csv")
.options(Map("quote" -> "\'", "delimiter" -> "|", "header" -> "true"))
.load(testFile(carsAltFile))
verifyCars(cars, withHeader = true)
}
test("parse unescaped quotes with maxCharsPerColumn") {
val rows = spark.read
.format("csv")
.option("maxCharsPerColumn", "4")
.load(testFile(unescapedQuotesFile))
val expectedRows = Seq(Row("\"a\"b", "ccc", "ddd"), Row("ab", "cc\"c", "ddd\""))
checkAnswer(rows, expectedRows)
}
test("bad encoding name") {
val exception = intercept[UnsupportedCharsetException] {
spark
.read
.format("csv")
.option("charset", "1-9588-osi")
.load(testFile(carsFile8859))
}
assert(exception.getMessage.contains("1-9588-osi"))
}
test("test different encoding") {
withView("carsTable") {
// scalastyle:off
spark.sql(
s"""
|CREATE TEMPORARY VIEW carsTable USING csv
|OPTIONS (path "${testFile(carsFile8859)}", header "true",
|charset "iso-8859-1", delimiter "þ")
""".stripMargin.replaceAll("\n", " "))
// scalastyle:on
verifyCars(spark.table("carsTable"), withHeader = true)
}
}
test("test aliases sep and encoding for delimiter and charset") {
// scalastyle:off
val cars = spark
.read
.format("csv")
.option("header", "true")
.option("encoding", "iso-8859-1")
.option("sep", "þ")
.load(testFile(carsFile8859))
// scalastyle:on
verifyCars(cars, withHeader = true)
}
test("DDL test with tab separated file") {
withView("carsTable") {
spark.sql(
s"""
|CREATE TEMPORARY VIEW carsTable USING csv
|OPTIONS (path "${testFile(carsTsvFile)}", header "true", delimiter "\t")
""".stripMargin.replaceAll("\n", " "))
verifyCars(spark.table("carsTable"), numFields = 6, withHeader = true, checkHeader = false)
}
}
test("DDL test parsing decimal type") {
withView("carsTable") {
spark.sql(
s"""
|CREATE TEMPORARY VIEW carsTable
|(yearMade double, makeName string, modelName string, priceTag decimal,
| comments string, grp string)
|USING csv
|OPTIONS (path "${testFile(carsTsvFile)}", header "true", delimiter "\t")
""".stripMargin.replaceAll("\n", " "))
assert(
spark.sql("SELECT makeName FROM carsTable where priceTag > 60000").collect().size === 1)
}
}
test("test for DROPMALFORMED parsing mode") {
withSQLConf(SQLConf.CSV_PARSER_COLUMN_PRUNING.key -> "false") {
Seq(false, true).foreach { multiLine =>
val cars = spark.read
.format("csv")
.option("multiLine", multiLine)
.options(Map("header" -> "true", "mode" -> "dropmalformed"))
.load(testFile(carsFile))
assert(cars.select("year").collect().size === 2)
}
}
}
test("test for blank column names on read and select columns") {
val cars = spark.read
.format("csv")
.options(Map("header" -> "true", "inferSchema" -> "true"))
.load(testFile(carsBlankColName))
assert(cars.select("customer").collect().size == 2)
assert(cars.select("_c0").collect().size == 2)
assert(cars.select("_c1").collect().size == 2)
}
test("test for FAILFAST parsing mode") {
Seq(false, true).foreach { multiLine =>
val exception = intercept[SparkException] {
spark.read
.format("csv")
.option("multiLine", multiLine)
.options(Map("header" -> "true", "mode" -> "failfast"))
.load(testFile(carsFile)).collect()
}
assert(exception.getMessage.contains("Malformed CSV record"))
}
}
test("test for tokens more than the fields in the schema") {
val cars = spark
.read
.format("csv")
.option("header", "false")
.option("comment", "~")
.load(testFile(carsMalformedFile))
verifyCars(cars, withHeader = false, checkTypes = false)
}
test("test with null quote character") {
val cars = spark.read
.format("csv")
.option("header", "true")
.option("quote", "")
.load(testFile(carsUnbalancedQuotesFile))
verifyCars(cars, withHeader = true, checkValues = false)
}
test("test with empty file and known schema") {
val result = spark.read
.format("csv")
.schema(StructType(List(StructField("column", StringType, false))))
.load(testFile(emptyFile))
assert(result.collect.size === 0)
assert(result.schema.fieldNames.size === 1)
}
test("DDL test with empty file") {
withView("carsTable") {
spark.sql(
s"""
|CREATE TEMPORARY VIEW carsTable
|(yearMade double, makeName string, modelName string, comments string, grp string)
|USING csv
|OPTIONS (path "${testFile(emptyFile)}", header "false")
""".stripMargin.replaceAll("\n", " "))
assert(spark.sql("SELECT count(*) FROM carsTable").collect().head(0) === 0)
}
}
test("DDL test with schema") {
withView("carsTable") {
spark.sql(
s"""
|CREATE TEMPORARY VIEW carsTable
|(yearMade double, makeName string, modelName string, comments string, blank string)
|USING csv
|OPTIONS (path "${testFile(carsFile)}", header "true")
""".stripMargin.replaceAll("\n", " "))
val cars = spark.table("carsTable")
verifyCars(cars, withHeader = true, checkHeader = false, checkValues = false)
assert(
cars.schema.fieldNames === Array("yearMade", "makeName", "modelName", "comments", "blank"))
}
}
test("save csv") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val cars = spark.read
.format("csv")
.option("header", "true")
.load(testFile(carsFile))
cars.coalesce(1).write
.option("header", "true")
.csv(csvDir)
val carsCopy = spark.read
.format("csv")
.option("header", "true")
.load(csvDir)
verifyCars(carsCopy, withHeader = true)
}
}
test("save csv with quote") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val cars = spark.read
.format("csv")
.option("header", "true")
.load(testFile(carsFile))
cars.coalesce(1).write
.format("csv")
.option("header", "true")
.option("quote", "\"")
.save(csvDir)
val carsCopy = spark.read
.format("csv")
.option("header", "true")
.option("quote", "\"")
.load(csvDir)
verifyCars(carsCopy, withHeader = true)
}
}
test("save csv with quoteAll enabled") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val data = Seq(("test \"quote\"", 123, "it \"works\"!", "\"very\" well"))
val df = spark.createDataFrame(data)
// escapeQuotes should be true by default
df.coalesce(1).write
.format("csv")
.option("quote", "\"")
.option("escape", "\"")
.option("quoteAll", "true")
.save(csvDir)
val results = spark.read
.format("text")
.load(csvDir)
.collect()
val expected = "\"test \"\"quote\"\"\",\"123\",\"it \"\"works\"\"!\",\"\"\"very\"\" well\""
assert(results.toSeq.map(_.toSeq) === Seq(Seq(expected)))
}
}
test("save csv with quote escaping enabled") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val data = Seq(("test \"quote\"", 123, "it \"works\"!", "\"very\" well"))
val df = spark.createDataFrame(data)
// escapeQuotes should be true by default
df.coalesce(1).write
.format("csv")
.option("quote", "\"")
.option("escape", "\"")
.save(csvDir)
val results = spark.read
.format("text")
.load(csvDir)
.collect()
val expected = "\"test \"\"quote\"\"\",123,\"it \"\"works\"\"!\",\"\"\"very\"\" well\""
assert(results.toSeq.map(_.toSeq) === Seq(Seq(expected)))
}
}
test("save csv with quote escaping disabled") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath
val data = Seq(("test \"quote\"", 123, "it \"works\"!", "\"very\" well"))
val df = spark.createDataFrame(data)
// escapeQuotes should be true by default
df.coalesce(1).write
.format("csv")
.option("quote", "\"")
.option("escapeQuotes", "false")
.option("escape", "\"")
.save(csvDir)
val results = spark.read
.format("text")
.load(csvDir)
.collect()
val expected = "test \"quote\",123,it \"works\"!,\"\"\"very\"\" well\""
assert(results.toSeq.map(_.toSeq) === Seq(Seq(expected)))
}
}
test("save csv with quote escaping, using charToEscapeQuoteEscaping option") {
withTempPath { path =>
// original text
val df1 = Seq(
"""You are "beautiful"""",
"""Yes, \"in the inside"\"""
).toDF()
// text written in CSV with following options:
// quote character: "
// escape character: \
// character to escape quote escaping: #
val df2 = Seq(
""""You are \"beautiful\""""",
""""Yes, #\\"in the inside\"#\""""
).toDF()
df2.coalesce(1).write.text(path.getAbsolutePath)
val df3 = spark.read
.format("csv")
.option("quote", "\"")
.option("escape", "\\")
.option("charToEscapeQuoteEscaping", "#")
.load(path.getAbsolutePath)
checkAnswer(df1, df3)
}
}
test("SPARK-19018: Save csv with custom charset") {
  // scalastyle:off nonascii
  val content = "µß áâä ÁÂÄ"
  // scalastyle:on nonascii
  // Round-trip the same non-ASCII content through several output encodings and
  // compare the raw bytes on disk against the expected encoding of the content.
  Seq("iso-8859-1", "utf-8", "utf-16", "utf-32", "windows-1250").foreach { encoding =>
    withTempPath { path =>
      val csvDir = new File(path, "csv")
      Seq(content).toDF().write
        .option("encoding", encoding)
        .csv(csvDir.getCanonicalPath)
      csvDir.listFiles().filter(_.getName.endsWith("csv")).foreach({ csvFile =>
        val readback = Files.readAllBytes(csvFile.toPath)
        // The writer appends the platform line separator after each record.
        val expected = (content + Properties.lineSeparator).getBytes(Charset.forName(encoding))
        assert(readback === expected)
      })
    }
  }
}
test("SPARK-19018: error handling for unsupported charsets") {
  // An unknown charset name must surface as a SparkException whose cause names it.
  val exception = intercept[SparkException] {
    withTempPath { path =>
      val csvDir = new File(path, "csv").getCanonicalPath
      Seq("a,A,c,A,b,B").toDF().write
        .option("encoding", "1-9588-osi")
        .csv(csvDir)
    }
  }
  assert(exception.getCause.getMessage.contains("1-9588-osi"))
}
test("commented lines in CSV data") {
  // Lines starting with the configured comment char ('~') must be skipped in
  // both single-line and multiLine parsing modes.
  Seq("false", "true").foreach { multiLine =>
    val results = spark.read
      .format("csv")
      .options(Map("comment" -> "~", "header" -> "false", "multiLine" -> multiLine))
      .load(testFile(commentsFile))
      .collect()
    val expected =
      Seq(Seq("1", "2", "3", "4", "5.01", "2015-08-20 15:57:00"),
        Seq("6", "7", "8", "9", "0", "2015-08-21 16:58:01"),
        Seq("1", "2", "3", "4", "5", "2015-08-23 18:00:42"))
    assert(results.toSeq.map(_.toSeq) === expected)
  }
}
test("inferring schema with commented lines in CSV data") {
  // Commented lines must also be excluded from schema inference, so the
  // surviving rows infer to numeric and timestamp types.
  val results = spark.read
    .format("csv")
    .options(Map("comment" -> "~", "header" -> "false", "inferSchema" -> "true"))
    .load(testFile(commentsFile))
    .collect()
  val expected =
    Seq(Seq(1, 2, 3, 4, 5.01D, Timestamp.valueOf("2015-08-20 15:57:00")),
      Seq(6, 7, 8, 9, 0, Timestamp.valueOf("2015-08-21 16:58:01")),
      Seq(1, 2, 3, 4, 5, Timestamp.valueOf("2015-08-23 18:00:42")))
  assert(results.toSeq.map(_.toSeq) === expected)
}
test("inferring timestamp types via custom date format") {
  // With a custom timestampFormat, inference must recognize the column as
  // timestamps rather than strings.
  val options = Map(
    "header" -> "true",
    "inferSchema" -> "true",
    "timestampFormat" -> "dd/MM/yyyy HH:mm")
  val results = spark.read
    .format("csv")
    .options(options)
    .load(testFile(datesFile))
    .select("date")
    .collect()
  val dateFormat = new SimpleDateFormat("dd/MM/yyyy HH:mm", Locale.US)
  val expected =
    Seq(Seq(new Timestamp(dateFormat.parse("26/08/2015 18:00").getTime)),
      Seq(new Timestamp(dateFormat.parse("27/10/2014 18:30").getTime)),
      Seq(new Timestamp(dateFormat.parse("28/01/2016 20:00").getTime)))
  assert(results.toSeq.map(_.toSeq) === expected)
}
test("load date types via custom date format") {
  val customSchema = new StructType(Array(StructField("date", DateType, true)))
  // NOTE(review): the pattern uses 'hh' (12-hour clock) with values like
  // "18:00"; parsing relies on lenient SimpleDateFormat behavior. Only the
  // date part is compared below, so this does not affect the assertion.
  val options = Map(
    "header" -> "true",
    "inferSchema" -> "false",
    "dateFormat" -> "dd/MM/yyyy hh:mm")
  val results = spark.read
    .format("csv")
    .options(options)
    .schema(customSchema)
    .load(testFile(datesFile))
    .select("date")
    .collect()
  val dateFormat = new SimpleDateFormat("dd/MM/yyyy hh:mm", Locale.US)
  val expected = Seq(
    new Date(dateFormat.parse("26/08/2015 18:00").getTime),
    new Date(dateFormat.parse("27/10/2014 18:30").getTime),
    new Date(dateFormat.parse("28/01/2016 20:00").getTime))
  val dates = results.toSeq.map(_.toSeq.head)
  expected.zip(dates).foreach {
    case (expectedDate, date) =>
      // As it truncates the hours, minutes and etc., we only check
      // if the dates (days, months and years) are the same via `toString()`.
      assert(expectedDate.toString === date.toString)
  }
}
test("setting comment to null disables comment support") {
  // With an empty comment option, a leading '#' is ordinary data, not a comment.
  val results = spark.read
    .format("csv")
    .options(Map("comment" -> "", "header" -> "false"))
    .load(testFile(disableCommentsFile))
    .collect()
  val expected =
    Seq(
      Seq("#1", "2", "3"),
      Seq("4", "5", "6"))
  assert(results.toSeq.map(_.toSeq) === expected)
}
test("nullable fields with user defined null value of \"null\"") {
  // year,make,model,comment,blank
  val dataSchema = StructType(List(
    StructField("year", IntegerType, nullable = true),
    StructField("make", StringType, nullable = false),
    StructField("model", StringType, nullable = false),
    StructField("comment", StringType, nullable = true),
    StructField("blank", StringType, nullable = true)))
  // Fields whose text equals the configured nullValue ("null") must come back
  // as SQL NULL, even in non-string (Integer) columns.
  val cars = spark.read
    .format("csv")
    .schema(dataSchema)
    .options(Map("header" -> "true", "nullValue" -> "null"))
    .load(testFile(carsNullFile))
  verifyCars(cars, withHeader = true, checkValues = false)
  val results = cars.collect()
  assert(results(0).toSeq === Array(2012, "Tesla", "S", null, null))
  assert(results(2).toSeq === Array(null, "Chevy", "Volt", null, null))
}
test("empty fields with user defined empty values") {
  // year,make,model,comment,blank
  val dataSchema = StructType(List(
    StructField("year", IntegerType, nullable = true),
    StructField("make", StringType, nullable = false),
    StructField("model", StringType, nullable = false),
    StructField("comment", StringType, nullable = true),
    StructField("blank", StringType, nullable = true)))
  // On read, empty string fields are replaced by the configured emptyValue.
  val cars = spark.read
    .format("csv")
    .schema(dataSchema)
    .option("header", "true")
    .option("emptyValue", "empty")
    .load(testFile(carsEmptyValueFile))
  verifyCars(cars, withHeader = true, checkValues = false)
  val results = cars.collect()
  assert(results(0).toSeq === Array(2012, "Tesla", "S", "empty", "empty"))
  assert(results(1).toSeq ===
    Array(1997, "Ford", "E350", "Go get one now they are going fast", null))
  assert(results(2).toSeq === Array(2015, "Chevy", "Volt", null, "empty"))
}
test("save csv with empty fields with user defined empty values") {
  withTempDir { dir =>
    val csvDir = new File(dir, "csv").getCanonicalPath
    // year,make,model,comment,blank
    val dataSchema = StructType(List(
      StructField("year", IntegerType, nullable = true),
      StructField("make", StringType, nullable = false),
      StructField("model", StringType, nullable = false),
      StructField("comment", StringType, nullable = true),
      StructField("blank", StringType, nullable = true)))
    // Read with "NULL" mapped to SQL NULL, then write empty strings as "empty".
    val cars = spark.read
      .format("csv")
      .schema(dataSchema)
      .option("header", "true")
      .option("nullValue", "NULL")
      .load(testFile(carsEmptyValueFile))
    cars.coalesce(1).write
      .format("csv")
      .option("header", "true")
      .option("emptyValue", "empty")
      .option("nullValue", null)
      .save(csvDir)
    // Round-trip read with default options: "empty" survives as a literal string.
    val carsCopy = spark.read
      .format("csv")
      .schema(dataSchema)
      .option("header", "true")
      .load(csvDir)
    verifyCars(carsCopy, withHeader = true, checkValues = false)
    val results = carsCopy.collect()
    assert(results(0).toSeq === Array(2012, "Tesla", "S", "empty", "empty"))
    assert(results(1).toSeq ===
      Array(1997, "Ford", "E350", "Go get one now they are going fast", null))
    assert(results(2).toSeq === Array(2015, "Chevy", "Volt", null, "empty"))
  }
}
test("save csv with compression codec option") {
  withTempDir { dir =>
    val csvDir = new File(dir, "csv").getCanonicalPath
    val cars = spark.read
      .format("csv")
      .option("header", "true")
      .load(testFile(carsFile))
    // The codec name is case-insensitive ("gZiP").
    cars.coalesce(1).write
      .format("csv")
      .option("header", "true")
      .option("compression", "gZiP")
      .save(csvDir)
    val compressedFiles = new File(csvDir).listFiles()
    assert(compressedFiles.exists(_.getName.endsWith(".csv.gz")))
    // Compressed output must still be readable transparently.
    val carsCopy = spark.read
      .format("csv")
      .option("header", "true")
      .load(csvDir)
    verifyCars(carsCopy, withHeader = true)
  }
}
test("SPARK-13543 Write the output as uncompressed via option()") {
  // Hadoop-level settings request compression; the explicit
  // compression=none option must win over them.
  val extraOptions = Map(
    "mapreduce.output.fileoutputformat.compress" -> "true",
    "mapreduce.output.fileoutputformat.compress.type" -> CompressionType.BLOCK.toString,
    "mapreduce.map.output.compress" -> "true",
    "mapreduce.map.output.compress.codec" -> classOf[GzipCodec].getName
  )
  withTempDir { dir =>
    val csvDir = new File(dir, "csv").getCanonicalPath
    val cars = spark.read
      .format("csv")
      .option("header", "true")
      .options(extraOptions)
      .load(testFile(carsFile))
    cars.coalesce(1).write
      .format("csv")
      .option("header", "true")
      .option("compression", "none")
      .options(extraOptions)
      .save(csvDir)
    val compressedFiles = new File(csvDir).listFiles()
    assert(compressedFiles.exists(!_.getName.endsWith(".csv.gz")))
    val carsCopy = spark.read
      .format("csv")
      .option("header", "true")
      .options(extraOptions)
      .load(csvDir)
    verifyCars(carsCopy, withHeader = true)
  }
}
test("Schema inference correctly identifies the datatype when data is sparse.") {
  // Columns with many missing values must still infer to IntegerType, not String.
  val df = spark.read
    .format("csv")
    .option("header", "true")
    .option("inferSchema", "true")
    .load(testFile(simpleSparseFile))
  assert(
    df.schema.fields.map(field => field.dataType).deep ==
      Array(IntegerType, IntegerType, IntegerType, IntegerType).deep)
}
test("old csv data source name works") {
  // The legacy "com.databricks.spark.csv" alias must resolve to the built-in source.
  val cars = spark
    .read
    .format("com.databricks.spark.csv")
    .option("header", "false")
    .load(testFile(carsFile))
  verifyCars(cars, withHeader = false, checkTypes = false)
}
test("nulls, NaNs and Infinity values can be parsed") {
  // Custom tokens for null/NaN/±Inf must parse into numeric columns;
  // DROPMALFORMED removes rows that still fail to parse.
  val numbers = spark
    .read
    .format("csv")
    .schema(StructType(List(
      StructField("int", IntegerType, true),
      StructField("long", LongType, true),
      StructField("float", FloatType, true),
      StructField("double", DoubleType, true)
    )))
    .options(Map(
      "header" -> "true",
      "mode" -> "DROPMALFORMED",
      "nullValue" -> "--",
      "nanValue" -> "NAN",
      "negativeInf" -> "-INF",
      "positiveInf" -> "INF"))
    .load(testFile(numbersFile))
  assert(numbers.count() == 8)
}
test("SPARK-15585 turn off quotations") {
  // An empty quote option disables quote handling, so unbalanced quotes
  // in the data no longer break parsing.
  val cars = spark.read
    .format("csv")
    .option("header", "true")
    .option("quote", "")
    .load(testFile(carsUnbalancedQuotesFile))
  verifyCars(cars, withHeader = true, checkValues = false)
}
test("Write timestamps correctly in ISO8601 format by default") {
  // Writing timestamps without an explicit timestampFormat must render them
  // as ISO8601 strings (yyyy-MM-dd'T'HH:mm:ss.SSSXXX).
  withTempDir { dir =>
    val outputPath = s"${dir.getCanonicalPath}/iso8601timestamps.csv"
    // Parse the source file into real timestamp values via a custom input format.
    val timestamps = spark.read
      .format("csv")
      .option("inferSchema", "true")
      .option("header", "true")
      .option("timestampFormat", "dd/MM/yyyy HH:mm")
      .load(testFile(datesFile))
    timestamps.write
      .format("csv")
      .option("header", "true")
      .save(outputPath)
    // Read the written file back as plain strings so the textual form is visible.
    val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
    val writtenAsText = spark.read
      .format("csv")
      .schema(stringSchema)
      .option("header", "true")
      .load(outputPath)
    val iso8601Format = FastDateFormat.getInstance("yyyy-MM-dd'T'HH:mm:ss.SSSXXX", Locale.US)
    // Each original timestamp rendered through the ISO8601 formatter is what
    // we expect to find on disk.
    val expectedTimestamps = timestamps.collect().map { row =>
      Row(iso8601Format.format(row.toSeq.head))
    }
    checkAnswer(writtenAsText, expectedTimestamps)
  }
}
test("Write dates correctly in ISO8601 format by default") {
  // Writing DateType columns without an explicit dateFormat must render them
  // as ISO8601 date strings (yyyy-MM-dd).
  withTempDir { dir =>
    val outputPath = s"${dir.getCanonicalPath}/iso8601dates.csv"
    val dateSchema = new StructType(Array(StructField("date", DateType, true)))
    // Parse the source file into real date values via a custom input format.
    val dates = spark.read
      .format("csv")
      .schema(dateSchema)
      .option("header", "true")
      .option("inferSchema", "false")
      .option("dateFormat", "dd/MM/yyyy HH:mm")
      .load(testFile(datesFile))
    dates.write
      .format("csv")
      .option("header", "true")
      .save(outputPath)
    // Read the written file back as plain strings so the textual form is visible.
    val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
    val writtenAsText = spark.read
      .format("csv")
      .schema(stringSchema)
      .option("header", "true")
      .load(outputPath)
    val iso8601Format = FastDateFormat.getInstance("yyyy-MM-dd", Locale.US)
    // Each original date rendered through the ISO8601 formatter is what we
    // expect to find on disk.
    val expectedDates = dates.collect().map { row =>
      Row(iso8601Format.format(row.toSeq.head))
    }
    checkAnswer(writtenAsText, expectedDates)
  }
}
test("Roundtrip in reading and writing timestamps") {
  // Write inferred timestamps and read them back with inference: the values
  // must round-trip unchanged.
  withTempDir { dir =>
    val iso8601timestampsPath = s"${dir.getCanonicalPath}/iso8601timestamps.csv"
    val timestamps = spark.read
      .format("csv")
      .option("header", "true")
      .option("inferSchema", "true")
      .load(testFile(datesFile))
    timestamps.write
      .format("csv")
      .option("header", "true")
      .save(iso8601timestampsPath)
    val iso8601timestamps = spark.read
      .format("csv")
      .option("header", "true")
      .option("inferSchema", "true")
      .load(iso8601timestampsPath)
    checkAnswer(iso8601timestamps, timestamps)
  }
}
test("Write dates correctly with dateFormat option") {
  val customSchema = new StructType(Array(StructField("date", DateType, true)))
  withTempDir { dir =>
    // With dateFormat option.
    val datesWithFormatPath = s"${dir.getCanonicalPath}/datesWithFormat.csv"
    val datesWithFormat = spark.read
      .format("csv")
      .schema(customSchema)
      .option("header", "true")
      .option("dateFormat", "dd/MM/yyyy HH:mm")
      .load(testFile(datesFile))
    // Write using a different output dateFormat than the one used for reading.
    datesWithFormat.write
      .format("csv")
      .option("header", "true")
      .option("dateFormat", "yyyy/MM/dd")
      .save(datesWithFormatPath)
    // This will load back the dates as string.
    val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
    val stringDatesWithFormat = spark.read
      .format("csv")
      .schema(stringSchema)
      .option("header", "true")
      .load(datesWithFormatPath)
    val expectedStringDatesWithFormat = Seq(
      Row("2015/08/26"),
      Row("2014/10/27"),
      Row("2016/01/28"))
    checkAnswer(stringDatesWithFormat, expectedStringDatesWithFormat)
  }
}
test("Write timestamps correctly with timestampFormat option") {
  withTempDir { dir =>
    // With timestampFormat option.
    val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.csv"
    val timestampsWithFormat = spark.read
      .format("csv")
      .option("header", "true")
      .option("inferSchema", "true")
      .option("timestampFormat", "dd/MM/yyyy HH:mm")
      .load(testFile(datesFile))
    // Write using a different output timestampFormat than the input one.
    timestampsWithFormat.write
      .format("csv")
      .option("header", "true")
      .option("timestampFormat", "yyyy/MM/dd HH:mm")
      .save(timestampsWithFormatPath)
    // This will load back the timestamps as string.
    val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
    val stringTimestampsWithFormat = spark.read
      .format("csv")
      .schema(stringSchema)
      .option("header", "true")
      .load(timestampsWithFormatPath)
    val expectedStringTimestampsWithFormat = Seq(
      Row("2015/08/26 18:00"),
      Row("2014/10/27 18:30"),
      Row("2016/01/28 20:00"))
    checkAnswer(stringTimestampsWithFormat, expectedStringTimestampsWithFormat)
  }
}
test("Write timestamps correctly with timestampFormat option and timeZone option") {
  withTempDir { dir =>
    // With timestampFormat option and timeZone option.
    val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.csv"
    val timestampsWithFormat = spark.read
      .format("csv")
      .option("header", "true")
      .option("inferSchema", "true")
      .option("timestampFormat", "dd/MM/yyyy HH:mm")
      .load(testFile(datesFile))
    // Write in GMT: the rendered strings must be shifted relative to the
    // session time zone the values were parsed in.
    timestampsWithFormat.write
      .format("csv")
      .option("header", "true")
      .option("timestampFormat", "yyyy/MM/dd HH:mm")
      .option(DateTimeUtils.TIMEZONE_OPTION, "GMT")
      .save(timestampsWithFormatPath)
    // This will load back the timestamps as string.
    val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
    val stringTimestampsWithFormat = spark.read
      .format("csv")
      .schema(stringSchema)
      .option("header", "true")
      .load(timestampsWithFormatPath)
    val expectedStringTimestampsWithFormat = Seq(
      Row("2015/08/27 01:00"),
      Row("2014/10/28 01:30"),
      Row("2016/01/29 04:00"))
    checkAnswer(stringTimestampsWithFormat, expectedStringTimestampsWithFormat)
    // Reading back with the same format and time zone must restore the
    // original timestamp values exactly.
    val readBack = spark.read
      .format("csv")
      .option("header", "true")
      .option("inferSchema", "true")
      .option("timestampFormat", "yyyy/MM/dd HH:mm")
      .option(DateTimeUtils.TIMEZONE_OPTION, "GMT")
      .load(timestampsWithFormatPath)
    checkAnswer(readBack, timestampsWithFormat)
  }
}
test("load duplicated field names consistently with null or empty strings - case sensitive") {
  withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
    withTempPath { path =>
      Seq("a,a,c,A,b,B").toDF().write.text(path.getAbsolutePath)
      val actualSchema = spark.read
        .format("csv")
        .option("header", true)
        .load(path.getAbsolutePath)
        .schema
      // Only the exact duplicates ("a","a") are deduplicated with positional
      // suffixes; "A" is a distinct name under case sensitivity.
      val fields = Seq("a0", "a1", "c", "A", "b", "B").map(StructField(_, StringType, true))
      val expectedSchema = StructType(fields)
      assert(actualSchema == expectedSchema)
    }
  }
}
test("load duplicated field names consistently with null or empty strings - case insensitive") {
  withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
    withTempPath { path =>
      Seq("a,A,c,A,b,B").toDF().write.text(path.getAbsolutePath)
      val actualSchema = spark.read
        .format("csv")
        .option("header", true)
        .load(path.getAbsolutePath)
        .schema
      // Case-insensitively equal names ("a"/"A"/"A", "b"/"B") all receive
      // positional suffixes.
      val fields = Seq("a0", "A1", "c", "A3", "b4", "B5").map(StructField(_, StringType, true))
      val expectedSchema = StructType(fields)
      assert(actualSchema == expectedSchema)
    }
  }
}
test("load null when the schema is larger than parsed tokens ") {
  // A row with fewer tokens than schema fields is padded with nulls.
  withTempPath { path =>
    Seq("1").toDF().write.text(path.getAbsolutePath)
    val schema = StructType(
      StructField("a", IntegerType, true) ::
      StructField("b", IntegerType, true) :: Nil)
    val df = spark.read
      .schema(schema)
      .option("header", "false")
      .csv(path.getAbsolutePath)
    checkAnswer(df, Row(1, null))
  }
}
test("SPARK-18699 put malformed records in a `columnNameOfCorruptRecord` field") {
  Seq(false, true).foreach { multiLine =>
    val schema = new StructType().add("a", IntegerType).add("b", TimestampType)
    // We use `PERMISSIVE` mode by default if invalid string is given.
    val df1 = spark
      .read
      .option("mode", "abcd")
      .option("multiLine", multiLine)
      .schema(schema)
      .csv(testFile(valueMalformedFile))
    // The malformed row becomes an all-null row in PERMISSIVE mode.
    checkAnswer(df1,
      Row(null, null) ::
      Row(1, java.sql.Date.valueOf("1983-08-04")) ::
      Nil)
    // If `schema` has `columnNameOfCorruptRecord`, it should handle corrupt records
    val columnNameOfCorruptRecord = "_unparsed"
    val schemaWithCorrField1 = schema.add(columnNameOfCorruptRecord, StringType)
    val df2 = spark
      .read
      .option("mode", "Permissive")
      .option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
      .option("multiLine", multiLine)
      .schema(schemaWithCorrField1)
      .csv(testFile(valueMalformedFile))
    // The raw malformed line is captured in the corrupt-record column.
    checkAnswer(df2,
      Row(null, null, "0,2013-111-11 12:13:14") ::
      Row(1, java.sql.Date.valueOf("1983-08-04"), null) ::
      Nil)
    // We put a `columnNameOfCorruptRecord` field in the middle of a schema
    val schemaWithCorrField2 = new StructType()
      .add("a", IntegerType)
      .add(columnNameOfCorruptRecord, StringType)
      .add("b", TimestampType)
    val df3 = spark
      .read
      .option("mode", "permissive")
      .option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
      .option("multiLine", multiLine)
      .schema(schemaWithCorrField2)
      .csv(testFile(valueMalformedFile))
    checkAnswer(df3,
      Row(null, "0,2013-111-11 12:13:14", null) ::
      Row(1, null, java.sql.Date.valueOf("1983-08-04")) ::
      Nil)
    // The corrupt-record column must be a nullable string; any other type fails.
    val errMsg = intercept[AnalysisException] {
      spark
        .read
        .option("mode", "PERMISSIVE")
        .option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
        .option("multiLine", multiLine)
        .schema(schema.add(columnNameOfCorruptRecord, IntegerType))
        .csv(testFile(valueMalformedFile))
        .collect
    }.getMessage
    assert(errMsg.startsWith("The field for corrupt records must be string type and nullable"))
  }
}
test("SPARK-19610: Parse normal multi-line CSV files") {
  // Quoted fields (including headers) that contain embedded newlines must be
  // parsed intact when multiLine is enabled.
  val primitiveFieldAndType = Seq(
    """"
      |string","integer
      |
      |
      |","long
      |
      |","bigInteger",double,boolean,null""".stripMargin,
    """"this is a
      |simple
      |string.","
      |
      |10","
      |21474836470","92233720368547758070","
      |
      |1.7976931348623157E308",true,""".stripMargin)
  withTempPath { path =>
    primitiveFieldAndType.toDF("value").coalesce(1).write.text(path.getAbsolutePath)
    val df = spark.read
      .option("header", true)
      .option("multiLine", true)
      .csv(path.getAbsolutePath)
    // Check if headers have new lines in the names.
    val actualFields = df.schema.fieldNames.toSeq
    val expectedFields =
      Seq("\nstring", "integer\n\n\n", "long\n\n", "bigInteger", "double", "boolean", "null")
    assert(actualFields === expectedFields)
    // Check if the rows have new lines in the values.
    val expected = Row(
      "this is a\nsimple\nstring.",
      "\n\n10",
      "\n21474836470",
      "92233720368547758070",
      "\n\n1.7976931348623157E308",
      "true",
      null)
    checkAnswer(df, expected)
  }
}
test("Empty file produces empty dataframe with empty schema") {
  // An empty file yields no rows and no columns, in both parsing modes.
  Seq(false, true).foreach { multiLine =>
    val df = spark.read.format("csv")
      .option("header", true)
      .option("multiLine", multiLine)
      .load(testFile(emptyFile))
    assert(df.schema === spark.emptyDataFrame.schema)
    checkAnswer(df, spark.emptyDataFrame)
  }
}
test("Empty string dataset produces empty dataframe and keep user-defined schema") {
  // Without a schema, an empty dataset infers an empty schema; a user-supplied
  // schema is preserved as-is.
  val df1 = spark.read.csv(spark.emptyDataset[String])
  assert(df1.schema === spark.emptyDataFrame.schema)
  checkAnswer(df1, spark.emptyDataFrame)
  val schema = StructType(StructField("a", StringType) :: Nil)
  val df2 = spark.read.schema(schema).csv(spark.emptyDataset[String])
  assert(df2.schema === schema)
}
test("ignoreLeadingWhiteSpace and ignoreTrailingWhiteSpace options - read") {
  val input = " a,b , c "
  // For reading, default of both `ignoreLeadingWhiteSpace` and`ignoreTrailingWhiteSpace`
  // are `false`. So, these are excluded.
  val combinations = Seq(
    (true, true),
    (false, true),
    (true, false))
  // Check if read rows ignore whitespaces as configured.
  val expectedRows = Seq(
    Row("a", "b", "c"),
    Row(" a", "b", " c"),
    Row("a", "b ", "c "))
  combinations.zip(expectedRows)
    .foreach { case ((ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace), expected) =>
      val df = spark.read
        .option("ignoreLeadingWhiteSpace", ignoreLeadingWhiteSpace)
        .option("ignoreTrailingWhiteSpace", ignoreTrailingWhiteSpace)
        .csv(Seq(input).toDS())
      checkAnswer(df, expected)
    }
}
test("SPARK-18579: ignoreLeadingWhiteSpace and ignoreTrailingWhiteSpace options - write") {
  val df = Seq((" a", "b ", " c ")).toDF()
  // For writing, default of both `ignoreLeadingWhiteSpace` and `ignoreTrailingWhiteSpace`
  // are `true`. So, these are excluded.
  val combinations = Seq(
    (false, false),
    (false, true),
    (true, false))
  // Check if written lines ignore each whitespaces as configured.
  val expectedLines = Seq(
    " a,b , c ",
    " a,b, c",
    "a,b ,c ")
  combinations.zip(expectedLines)
    .foreach { case ((ignoreLeadingWhiteSpace, ignoreTrailingWhiteSpace), expected) =>
      withTempPath { path =>
        df.write
          .option("ignoreLeadingWhiteSpace", ignoreLeadingWhiteSpace)
          .option("ignoreTrailingWhiteSpace", ignoreTrailingWhiteSpace)
          .csv(path.getAbsolutePath)
        // Read back the written lines.
        val readBack = spark.read.text(path.getAbsolutePath)
        checkAnswer(readBack, Row(expected))
      }
    }
}
test("SPARK-21263: Invalid float and double are handled correctly in different modes") {
  // FAILFAST: a non-numeric token raises; DROPMALFORMED: the row is dropped;
  // PERMISSIVE: the field becomes null.
  val exception = intercept[SparkException] {
    spark.read.schema("a DOUBLE")
      .option("mode", "FAILFAST")
      .csv(Seq("10u12").toDS())
      .collect()
  }
  assert(exception.getMessage.contains("""input string: "10u12""""))
  val count = spark.read.schema("a FLOAT")
    .option("mode", "DROPMALFORMED")
    .csv(Seq("10u12").toDS())
    .count()
  assert(count == 0)
  val results = spark.read.schema("a FLOAT")
    .option("mode", "PERMISSIVE")
    .csv(Seq("10u12").toDS())
  checkAnswer(results, Row(null))
}
test("SPARK-20978: Fill the malformed column when the number of tokens is less than schema") {
  // A short row fills the corrupt-record column with the raw input line.
  val df = spark.read
    .schema("a string, b string, unparsed string")
    .option("columnNameOfCorruptRecord", "unparsed")
    .csv(Seq("a").toDS())
  checkAnswer(df, Row("a", null, "a"))
}
test("SPARK-21610: Corrupt records are not handled properly when creating a dataframe " +
  "from a file") {
  val columnNameOfCorruptRecord = "_corrupt_record"
  val schema = new StructType()
    .add("a", IntegerType)
    .add("b", TimestampType)
    .add(columnNameOfCorruptRecord, StringType)
  // negative cases: querying only (or filtering on) the internal corrupt-record
  // column without materializing other columns is disallowed.
  val msg = intercept[AnalysisException] {
    spark
      .read
      .option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
      .schema(schema)
      .csv(testFile(valueMalformedFile))
      .select(columnNameOfCorruptRecord)
      .collect()
  }.getMessage
  assert(msg.contains("only include the internal corrupt record column"))
  intercept[org.apache.spark.sql.catalyst.errors.TreeNodeException[_]] {
    spark
      .read
      .option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
      .schema(schema)
      .csv(testFile(valueMalformedFile))
      .filter($"_corrupt_record".isNotNull)
      .count()
  }
  // workaround: caching materializes all columns, after which the
  // corrupt-record column can be queried freely.
  val df = spark
    .read
    .option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
    .schema(schema)
    .csv(testFile(valueMalformedFile))
    .cache()
  assert(df.filter($"_corrupt_record".isNotNull).count() == 1)
  assert(df.filter($"_corrupt_record".isNull).count() == 1)
  checkAnswer(
    df.select(columnNameOfCorruptRecord),
    Row("0,2013-111-11 12:13:14") :: Row(null) :: Nil
  )
}
test("SPARK-23846: schema inferring touches less data if samplingRatio < 1.0") {
  // Set default values for the DataSource parameters to make sure
  // that whole test file is mapped to only one partition. This will guarantee
  // reliable sampling of the input file.
  withSQLConf(
    "spark.sql.files.maxPartitionBytes" -> (128 * 1024 * 1024).toString,
    "spark.sql.files.openCostInBytes" -> (4 * 1024 * 1024).toString
  )(withTempPath { path =>
    val ds = sampledTestData.coalesce(1)
    ds.write.text(path.getAbsolutePath)
    // With a 10% sample, inference must still land on IntegerType for this data.
    val readback = spark.read
      .option("inferSchema", true).option("samplingRatio", 0.1)
      .csv(path.getCanonicalPath)
    assert(readback.schema == new StructType().add("_c0", IntegerType))
  })
}
test("SPARK-23846: usage of samplingRatio while parsing a dataset of strings") {
  // Sampling must also apply when the input is an in-memory string dataset.
  val ds = sampledTestData.coalesce(1)
  val readback = spark.read
    .option("inferSchema", true).option("samplingRatio", 0.1)
    .csv(ds)
  assert(readback.schema == new StructType().add("_c0", IntegerType))
}
test("SPARK-23846: samplingRatio is out of the range (0, 1.0]") {
  // Non-positive ratios are rejected; a ratio of exactly 1.0 reads everything.
  val ds = spark.range(0, 100, 1, 1).map(_.toString)
  val errorMsg0 = intercept[IllegalArgumentException] {
    spark.read.option("inferSchema", true).option("samplingRatio", -1).csv(ds)
  }.getMessage
  assert(errorMsg0.contains("samplingRatio (-1.0) should be greater than 0"))
  val errorMsg1 = intercept[IllegalArgumentException] {
    spark.read.option("inferSchema", true).option("samplingRatio", 0).csv(ds)
  }.getMessage
  assert(errorMsg1.contains("samplingRatio (0.0) should be greater than 0"))
  val sampled = spark.read.option("inferSchema", true).option("samplingRatio", 1.0).csv(ds)
  assert(sampled.count() == ds.count())
}
test("SPARK-17916: An empty string should not be coerced to null when nullValue is passed.") {
  val litNull: String = null
  val df = Seq(
    (1, "John Doe"),
    (2, ""),
    (3, "-"),
    (4, litNull)
  ).toDF("id", "name")
  // Checks for new behavior where an empty string is not coerced to null when `nullValue` is
  // set to anything but an empty string literal.
  withTempPath { path =>
    df.write
      .option("nullValue", "-")
      .csv(path.getAbsolutePath)
    val computed = spark.read
      .option("nullValue", "-")
      .schema(df.schema)
      .csv(path.getAbsolutePath)
    // The empty string survives; only the literal "-" maps to null.
    val expected = Seq(
      (1, "John Doe"),
      (2, ""),
      (3, litNull),
      (4, litNull)
    ).toDF("id", "name")
    checkAnswer(computed, expected)
  }
  // Keeps the old behavior where an empty string is coerced to null when
  // `nullValue` is not passed.
  withTempPath { path =>
    df.write
      .csv(path.getAbsolutePath)
    val computed = spark.read
      .schema(df.schema)
      .csv(path.getAbsolutePath)
    val expected = Seq(
      (1, "John Doe"),
      (2, litNull),
      (3, "-"),
      (4, litNull)
    ).toDF("id", "name")
    checkAnswer(computed, expected)
  }
}
test("SPARK-25241: An empty string should not be coerced to null when emptyValue is passed.") {
  val litNull: String = null
  val df = Seq(
    (1, "John Doe"),
    (2, ""),
    (3, "-"),
    (4, litNull)
  ).toDF("id", "name")
  // Checks for new behavior where a null is not coerced to an empty string when `emptyValue` is
  // set to anything but an empty string literal.
  withTempPath { path =>
    df.write
      .option("emptyValue", "-")
      .csv(path.getAbsolutePath)
    val computed = spark.read
      .option("emptyValue", "-")
      .schema(df.schema)
      .csv(path.getAbsolutePath)
    // Empty strings and nulls were both written as "-", and read back as "-".
    val expected = Seq(
      (1, "John Doe"),
      (2, "-"),
      (3, "-"),
      (4, "-")
    ).toDF("id", "name")
    checkAnswer(computed, expected)
  }
  // Keeps the old behavior where an empty string is coerced to null when
  // `emptyValue` is not passed.
  withTempPath { path =>
    df.write
      .csv(path.getAbsolutePath)
    val computed = spark.read
      .schema(df.schema)
      .csv(path.getAbsolutePath)
    val expected = Seq(
      (1, "John Doe"),
      (2, litNull),
      (3, "-"),
      (4, litNull)
    ).toDF("id", "name")
    checkAnswer(computed, expected)
  }
}
test("SPARK-24329: skip lines with comments, and one or multiple whitespaces") {
  // Commented lines must be skipped even though whitespace trimming is off,
  // so the surviving value keeps its surrounding spaces.
  val schema = new StructType().add("colA", StringType)
  val ds = spark
    .read
    .schema(schema)
    .option("multiLine", false)
    .option("header", true)
    .option("comment", "#")
    .option("ignoreLeadingWhiteSpace", false)
    .option("ignoreTrailingWhiteSpace", false)
    .csv(testFile("test-data/comments-whitespaces.csv"))
  checkAnswer(ds, Seq(Row(""" "a" """)))
}
test("SPARK-24244: Select a subset of all columns") {
  // Projecting a subset of a wide CSV schema must return the right columns
  // (exercises column pruning in the parser).
  withTempPath { path =>
    import collection.JavaConverters._
    val schema = new StructType()
      .add("f1", IntegerType).add("f2", IntegerType).add("f3", IntegerType)
      .add("f4", IntegerType).add("f5", IntegerType).add("f6", IntegerType)
      .add("f7", IntegerType).add("f8", IntegerType).add("f9", IntegerType)
      .add("f10", IntegerType).add("f11", IntegerType).add("f12", IntegerType)
      .add("f13", IntegerType).add("f14", IntegerType).add("f15", IntegerType)
    val odf = spark.createDataFrame(List(
      Row(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15),
      Row(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15)
    ).asJava, schema)
    odf.write.csv(path.getCanonicalPath)
    val idf = spark.read
      .schema(schema)
      .csv(path.getCanonicalPath)
      .select('f15, 'f10, 'f5)
    assert(idf.count() == 2)
    checkAnswer(idf, List(Row(15, 10, 5), Row(-15, -10, -5)))
  }
}
// Shared body for the SPARK-23786 header-validation tests: with
// enforceSchema=false, a CSV header that disagrees with the user schema
// (wrong names, wrong length, or wrong case under case sensitivity) must
// fail the read with a descriptive error.
def checkHeader(multiLine: Boolean): Unit = {
  withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
    withTempPath { path =>
      val oschema = new StructType().add("f1", DoubleType).add("f2", DoubleType)
      val odf = spark.createDataFrame(List(Row(1.0, 1234.5)).asJava, oschema)
      odf.write.option("header", true).csv(path.getCanonicalPath)
      // Column names swapped relative to the written header.
      val ischema = new StructType().add("f2", DoubleType).add("f1", DoubleType)
      val exception = intercept[SparkException] {
        spark.read
          .schema(ischema)
          .option("multiLine", multiLine)
          .option("header", true)
          .option("enforceSchema", false)
          .csv(path.getCanonicalPath)
          .collect()
      }
      assert(exception.getMessage.contains("CSV header does not conform to the schema"))
      // Schema with fewer fields than the header.
      val shortSchema = new StructType().add("f1", DoubleType)
      val exceptionForShortSchema = intercept[SparkException] {
        spark.read
          .schema(shortSchema)
          .option("multiLine", multiLine)
          .option("header", true)
          .option("enforceSchema", false)
          .csv(path.getCanonicalPath)
          .collect()
      }
      assert(exceptionForShortSchema.getMessage.contains(
        "Number of column in CSV header is not equal to number of fields in the schema"))
      // Schema with more fields than the header.
      val longSchema = new StructType()
        .add("f1", DoubleType)
        .add("f2", DoubleType)
        .add("f3", DoubleType)
      val exceptionForLongSchema = intercept[SparkException] {
        spark.read
          .schema(longSchema)
          .option("multiLine", multiLine)
          .option("header", true)
          .option("enforceSchema", false)
          .csv(path.getCanonicalPath)
          .collect()
      }
      assert(exceptionForLongSchema.getMessage.contains("Header length: 2, schema size: 3"))
      // Name differing only in case must also fail while CASE_SENSITIVE is on.
      val caseSensitiveSchema = new StructType().add("F1", DoubleType).add("f2", DoubleType)
      val caseSensitiveException = intercept[SparkException] {
        spark.read
          .schema(caseSensitiveSchema)
          .option("multiLine", multiLine)
          .option("header", true)
          .option("enforceSchema", false)
          .csv(path.getCanonicalPath)
          .collect()
      }
      assert(caseSensitiveException.getMessage.contains(
        "CSV header does not conform to the schema"))
    }
  }
}
// Run the shared header-validation checks in both parsing modes.
test(s"SPARK-23786: Checking column names against schema in the multiline mode") {
  checkHeader(multiLine = true)
}
test(s"SPARK-23786: Checking column names against schema in the per-line mode") {
  checkHeader(multiLine = false)
}
test("SPARK-23786: CSV header must not be checked if it doesn't exist") {
  // With header=false there is no header line, so a schema whose field order
  // differs from the written order cannot (and must not) be validated.
  withTempPath { path =>
    val oschema = new StructType().add("f1", DoubleType).add("f2", DoubleType)
    val odf = spark.createDataFrame(List(Row(1.0, 1234.5)).asJava, oschema)
    odf.write.option("header", false).csv(path.getCanonicalPath)
    val ischema = new StructType().add("f2", DoubleType).add("f1", DoubleType)
    val idf = spark.read
      .schema(ischema)
      .option("header", false)
      .option("enforceSchema", false)
      .csv(path.getCanonicalPath)
    checkAnswer(idf, odf)
  }
}
test("SPARK-23786: Ignore column name case if spark.sql.caseSensitive is false") {
  // Header "A" vs schema field "a" must be accepted when case-insensitive.
  withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
    withTempPath { path =>
      val oschema = new StructType().add("A", StringType)
      val odf = spark.createDataFrame(List(Row("0")).asJava, oschema)
      odf.write.option("header", true).csv(path.getCanonicalPath)
      val ischema = new StructType().add("a", StringType)
      val idf = spark.read.schema(ischema)
        .option("header", true)
        .option("enforceSchema", false)
        .csv(path.getCanonicalPath)
      checkAnswer(idf, odf)
    }
  }
}
test("SPARK-23786: check header on parsing of dataset of strings") {
  // Header validation also applies when parsing an in-memory string dataset;
  // here it fails eagerly with IllegalArgumentException.
  val ds = Seq("columnA,columnB", "1.0,1000.0").toDS()
  val ischema = new StructType().add("columnB", DoubleType).add("columnA", DoubleType)
  val exception = intercept[IllegalArgumentException] {
    spark.read.schema(ischema).option("header", true).option("enforceSchema", false).csv(ds)
  }
  assert(exception.getMessage.contains("CSV header does not conform to the schema"))
}
test("SPARK-23786: enforce inferred schema") {
  // When the schema is inferred, the header must match the inferred names.
  val expectedSchema = new StructType().add("_c0", DoubleType).add("_c1", StringType)
  val withHeader = spark.read
    .option("inferSchema", true)
    .option("enforceSchema", false)
    .option("header", true)
    .csv(Seq("_c0,_c1", "1.0,a").toDS())
  assert(withHeader.schema == expectedSchema)
  checkAnswer(withHeader, Seq(Row(1.0, "a")))
  // Ignore the inferSchema flag if an user sets a schema
  val schema = new StructType().add("colA", DoubleType).add("colB", StringType)
  val ds = spark.read
    .option("inferSchema", true)
    .option("enforceSchema", false)
    .option("header", true)
    .schema(schema)
    .csv(Seq("colA,colB", "1.0,a").toDS())
  assert(ds.schema == schema)
  checkAnswer(ds, Seq(Row(1.0, "a")))
  // A header that disagrees with the user-supplied schema must be rejected.
  val exception = intercept[IllegalArgumentException] {
    spark.read
      .option("inferSchema", true)
      .option("enforceSchema", false)
      .option("header", true)
      .schema(schema)
      .csv(Seq("col1,col2", "1.0,a").toDS())
  }
  assert(exception.getMessage.contains("CSV header does not conform to the schema"))
}
test("SPARK-23786: warning should be printed if CSV header doesn't conform to schema") {
class TestAppender extends AppenderSkeleton {
var events = new java.util.ArrayList[LoggingEvent]
override def close(): Unit = {}
override def requiresLayout: Boolean = false
protected def append(event: LoggingEvent): Unit = events.add(event)
}
val testAppender1 = new TestAppender
LogManager.getRootLogger.addAppender(testAppender1)
try {
val ds = Seq("columnA,columnB", "1.0,1000.0").toDS()
val ischema = new StructType().add("columnB", DoubleType).add("columnA", DoubleType)
spark.read.schema(ischema).option("header", true).option("enforceSchema", true).csv(ds)
} finally {
LogManager.getRootLogger.removeAppender(testAppender1)
}
assert(testAppender1.events.asScala
.exists(msg => msg.getRenderedMessage.contains("CSV header does not conform to the schema")))
val testAppender2 = new TestAppender
LogManager.getRootLogger.addAppender(testAppender2)
try {
withTempPath { path =>
val oschema = new StructType().add("f1", DoubleType).add("f2", DoubleType)
val odf = spark.createDataFrame(List(Row(1.0, 1234.5)).asJava, oschema)
odf.write.option("header", true).csv(path.getCanonicalPath)
val ischema = new StructType().add("f2", DoubleType).add("f1", DoubleType)
spark.read
.schema(ischema)
.option("header", true)
.option("enforceSchema", true)
.csv(path.getCanonicalPath)
.collect()
}
} finally {
LogManager.getRootLogger.removeAppender(testAppender2)
}
assert(testAppender2.events.asScala
.exists(msg => msg.getRenderedMessage.contains("CSV header does not conform to the schema")))
}
test("SPARK-25134: check header on parsing of dataset with projection and column pruning") {
withSQLConf(SQLConf.CSV_PARSER_COLUMN_PRUNING.key -> "true") {
Seq(false, true).foreach { multiLine =>
withTempPath { path =>
val dir = path.getAbsolutePath
Seq(("a", "b")).toDF("columnA", "columnB").write
.format("csv")
.option("header", true)
.save(dir)
// schema with one column
checkAnswer(spark.read
.format("csv")
.option("header", true)
.option("enforceSchema", false)
.option("multiLine", multiLine)
.load(dir)
.select("columnA"),
Row("a"))
// empty schema
assert(spark.read
.format("csv")
.option("header", true)
.option("enforceSchema", false)
.option("multiLine", multiLine)
.load(dir)
.count() === 1L)
}
}
}
}
test("SPARK-24645 skip parsing when columnPruning enabled and partitions scanned only") {
withSQLConf(SQLConf.CSV_PARSER_COLUMN_PRUNING.key -> "true") {
withTempPath { path =>
val dir = path.getAbsolutePath
spark.range(10).selectExpr("id % 2 AS p", "id").write.partitionBy("p").csv(dir)
checkAnswer(spark.read.csv(dir).selectExpr("sum(p)"), Row(5))
}
}
}
test("SPARK-24676 project required data from parsed data when columnPruning disabled") {
withSQLConf(SQLConf.CSV_PARSER_COLUMN_PRUNING.key -> "false") {
withTempPath { path =>
val dir = path.getAbsolutePath
spark.range(10).selectExpr("id % 2 AS p", "id AS c0", "id AS c1").write.partitionBy("p")
.option("header", "true").csv(dir)
val df1 = spark.read.option("header", true).csv(dir).selectExpr("sum(p)", "count(c0)")
checkAnswer(df1, Row(5, 10))
// empty required column case
val df2 = spark.read.option("header", true).csv(dir).selectExpr("sum(p)")
checkAnswer(df2, Row(5))
}
// the case where tokens length != parsedSchema length
withTempPath { path =>
val dir = path.getAbsolutePath
Seq("1,2").toDF().write.text(dir)
// more tokens
val df1 = spark.read.schema("c0 int").format("csv").option("mode", "permissive").load(dir)
checkAnswer(df1, Row(1))
// less tokens
val df2 = spark.read.schema("c0 int, c1 int, c2 int").format("csv")
.option("mode", "permissive").load(dir)
checkAnswer(df2, Row(1, 2, null))
}
}
}
test("count() for malformed input") {
def countForMalformedCSV(expected: Long, input: Seq[String]): Unit = {
val schema = new StructType().add("a", IntegerType)
val strings = spark.createDataset(input)
val df = spark.read.schema(schema).option("header", false).csv(strings)
assert(df.count() == expected)
}
def checkCount(expected: Long): Unit = {
val validRec = "1"
val inputs = Seq(
Seq("{-}", validRec),
Seq(validRec, "?"),
Seq("0xAC", validRec),
Seq(validRec, "0.314"),
Seq("\\\\\\", validRec)
)
inputs.foreach { input =>
countForMalformedCSV(expected, input)
}
}
checkCount(2)
countForMalformedCSV(0, Seq(""))
}
test("SPARK-25387: bad input should not cause NPE") {
val schema = StructType(StructField("a", IntegerType) :: Nil)
val input = spark.createDataset(Seq("\u0000\u0000\u0001234"))
checkAnswer(spark.read.schema(schema).csv(input), Row(null))
checkAnswer(spark.read.option("multiLine", true).schema(schema).csv(input), Row(null))
assert(spark.read.csv(input).collect().toSet == Set(Row()))
}
}
| michalsenkyr/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala | Scala | apache-2.0 | 61,459 |
/**
* Copyright (C) 2010-2011 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.lshift.diffa.kernel.participants
import org.junit.Assert._
import org.junit.Assume._
import org.easymock.EasyMock._
import net.lshift.diffa.kernel.util.EasyMockScalaUtils._
import net.lshift.diffa.kernel.config.Endpoint
import org.junit.experimental.theories.{Theories, Theory, DataPoint}
import org.junit.runner.RunWith
import scala.collection.JavaConversions._
import net.lshift.diffa.participant.correlation.ProcessingResponse
import net.lshift.diffa.participant.scanning.{ScanConstraint, ScanResultEntry}
/**
 * Theory-based tests for the participant factory.
 *
 * Two mock factories are registered per capability (scanning, content
 * retrieval, version correlation). Each [[EndpointConfig]] data point states,
 * per capability, whether participant creation should fail or which of the
 * two factories should be selected for the endpoint's URL.
 */
@RunWith(classOf[Theories])
class ParticipantFactoryTest {
  // Two competing mock factories per capability, so URL-based selection can be verified.
  private val scanning1 = createStrictMock("scanning1", classOf[ScanningParticipantFactory])
  private val scanning2 = createStrictMock("scanning2", classOf[ScanningParticipantFactory])
  private val content1 = createStrictMock("content1", classOf[ContentParticipantFactory])
  private val content2 = createStrictMock("content2", classOf[ContentParticipantFactory])
  private val versioning1 = createStrictMock("versioning1", classOf[VersioningParticipantFactory])
  private val versioning2 = createStrictMock("versioning2", classOf[VersioningParticipantFactory])
  private val allFactories = Seq(scanning1, scanning2, content1, content2, versioning1, versioning2)

  // System under test, with all six factories registered.
  private val factory = new ParticipantFactory()
  factory.registerScanningFactory(scanning1)
  factory.registerScanningFactory(scanning2)
  factory.registerContentFactory(content1)
  factory.registerContentFactory(content2)
  factory.registerVersioningFactory(versioning1)
  factory.registerVersioningFactory(versioning2)

  // Mock participant references handed back by the factories.
  private val scanningRef = createStrictMock("scanningRef", classOf[ScanningParticipantRef])
  private val contentRef = createStrictMock("contentRef", classOf[ContentParticipantRef])
  private val versionRef = createStrictMock("versionRef", classOf[VersioningParticipantRef])
  private val allRefs = Seq(scanningRef, contentRef, versionRef)

  // Factories aren't order dependent
  allFactories.foreach(checkOrder(_, false))

  // Apply an accepted URL for each factory: factory 1 accepts http URLs,
  // factory 2 accepts amqp URLs.
  expect(scanning1.supportsAddress("http://localhost/scan")).andReturn(true).anyTimes
  expect(scanning2.supportsAddress("amqp://localhost/scan")).andReturn(true).anyTimes
  expect(content1.supportsAddress("http://localhost/content")).andReturn(true).anyTimes
  expect(content2.supportsAddress("amqp://localhost/content")).andReturn(true).anyTimes
  expect(versioning1.supportsAddress("http://localhost/corr-version")).andReturn(true).anyTimes
  expect(versioning2.supportsAddress("amqp://localhost/corr-version")).andReturn(true).anyTimes

  // Default to factories not supporting addresses
  allFactories.foreach(f => expect(f.supportsAddress(anyString)).andReturn(false).anyTimes)

  // An endpoint whose address no factory supports must be rejected.
  @Theory
  def shouldFailToCreateUpstreamWhenAddressIsInvalid(e:EndpointConfig) {
    assumeTrue(!e.validUpstream)
    replayAll()
    expectsInvalidParticipantException {
      factory.createUpstreamParticipant(e.endpoint)
    }
  }

  @Theory
  def shouldFailToCreateDownstreamWhenAddressIsInvalid(e:EndpointConfig) {
    assumeTrue(!e.validDownstream)
    replayAll()
    expectsInvalidParticipantException {
      factory.createDownstreamParticipant(e.endpoint)
    }
  }

  // Closing a participant must close every ref that was actually created
  // (refs for capabilities expected to fail are never created, hence not closed).
  @Theory
  def shouldCloseBothScanningAndContentRefsWhenUpstreamParticipantIsClosed(e:EndpointConfig) {
    assumeTrue(e.validUpstream)
    expectParticipantCreation(e)
    e.scan match {
      case Fails =>
      case _ => scanningRef.close(); expectLastCall()
    }
    e.retrieveContent match {
      case Fails =>
      case _ => contentRef.close(); expectLastCall()
    }
    replayAll()

    factory.createUpstreamParticipant(e.endpoint).close()
    verifyAll()
  }

  @Theory
  def shouldCloseScanningAndContentAndVersionRefsWhenDownstreamParticipantIsClosed(e:EndpointConfig) {
    assumeTrue(e.validDownstream)
    expectParticipantCreation(e)
    e.scan match {
      case Fails =>
      case _ => scanningRef.close(); expectLastCall()
    }
    e.retrieveContent match {
      case Fails =>
      case _ => contentRef.close(); expectLastCall()
    }
    e.correlateVersion match {
      case Fails =>
      case _ => versionRef.close(); expectLastCall()
    }
    replayAll()

    factory.createDownstreamParticipant(e.endpoint).close()
    verifyAll()
  }

  // Creation succeeds even with missing capability URLs; the unsupported
  // operations fail lazily when invoked.
  @Theory
  def shouldCreateUpstreamParticipantEvenWhenUrlsAreMissingButFailOperation(e:EndpointConfig) {
    assumeTrue(e.validUpstream)
    expectParticipantCreation(e)
    replayAll()

    val part = factory.createUpstreamParticipant(e.endpoint)
    if (e.scan == Fails) {
      expectsInvalidParticipantOperationException {
        part.scan(Seq(), Seq())
      }
    }
    if (e.retrieveContent == Fails) {
      expectsInvalidParticipantOperationException {
        part.retrieveContent("id1")
      }
    }
  }

  @Theory
  def shouldCreateDownstreamParticipantEvenWhenUrlsAreMissingButFailOperation(e:EndpointConfig) {
    assumeTrue(e.validDownstream)
    expectParticipantCreation(e)
    replayAll()

    val part = factory.createDownstreamParticipant(e.endpoint)
    if (e.scan == Fails) {
      expectsInvalidParticipantOperationException {
        part.scan(Seq(), Seq())
      }
    }
    if (e.retrieveContent == Fails) {
      expectsInvalidParticipantOperationException {
        part.retrieveContent("id1")
      }
    }
    if (e.correlateVersion == Fails) {
      expectsInvalidParticipantOperationException {
        part.generateVersion("asdasdasd")
      }
    }
  }

  // Operations on a created participant must delegate to the underlying refs.
  @Theory
  def shouldDelegateToValidRefsInUpstreamParticipant(e:EndpointConfig) {
    val constraints = Seq(createStrictMock(classOf[ScanConstraint]))
    val aggregations = Seq(createStrictMock(classOf[CategoryFunction]))
    val scanEntries = Seq(ScanResultEntry.forAggregate("v1", Map[String, String]()))

    assumeTrue(e.validUpstream)
    expectParticipantCreation(e)
    if (e.scan != Fails) {
      expect(scanningRef.scan(constraints, aggregations)).andReturn(scanEntries)
    }
    if (e.retrieveContent != Fails) {
      expect(contentRef.retrieveContent("id1")).andReturn("content1")
    }
    replayAll()

    val part = factory.createUpstreamParticipant(e.endpoint)
    if (e.scan != Fails) {
      assertEquals(scanEntries, part.scan(constraints, aggregations))
    }
    if (e.retrieveContent != Fails) {
      assertEquals("content1", part.retrieveContent("id1"))
    }
    verifyAll()
  }

  @Theory
  def shouldDelegateToValidRefsInDownstreamParticipant(e:EndpointConfig) {
    val constraints = Seq(createStrictMock(classOf[ScanConstraint]))
    val aggregations = Seq(createStrictMock(classOf[CategoryFunction]))
    val scanEntries = Seq(ScanResultEntry.forAggregate("v1", Map[String, String]()))
    val procResponse = new ProcessingResponse("id", "uvsn", "dvsn")

    assumeTrue(e.validDownstream)
    expectParticipantCreation(e)
    if (e.scan != Fails) {
      expect(scanningRef.scan(constraints, aggregations)).andReturn(scanEntries)
    }
    if (e.retrieveContent != Fails) {
      expect(contentRef.retrieveContent("id1")).andReturn("content1")
    }
    if (e.correlateVersion != Fails) {
      expect(versionRef.generateVersion("body")).andReturn(procResponse)
    }
    replayAll()

    val part = factory.createDownstreamParticipant(e.endpoint)
    if (e.scan != Fails) {
      assertEquals(scanEntries, part.scan(constraints, aggregations))
    }
    if (e.retrieveContent != Fails) {
      assertEquals("content1", part.retrieveContent("id1"))
    }
    if (e.correlateVersion != Fails) {
      assertEquals(procResponse, part.generateVersion("body"))
    }
    verifyAll()
  }

  // Switch every mock (factories and refs) into replay/verify mode in one call.
  def replayAll() { replay(allFactories: _*); replay(allRefs: _*) }
  def verifyAll() { verify(allFactories: _*); verify(allRefs: _*) }

  // Records the expected createParticipantRef calls for a data point: for each
  // capability, the configured factory (if any) should be asked to create a ref.
  def expectParticipantCreation(e:EndpointConfig) {
    e.scan match {
      case Fails =>
      case UseFirst => expect(scanning1.createParticipantRef(e.endpoint.scanUrl)).andReturn(scanningRef).anyTimes
      case UseSecond => expect(scanning2.createParticipantRef(e.endpoint.scanUrl)).andReturn(scanningRef).anyTimes
    }
    e.retrieveContent match {
      case Fails =>
      case UseFirst => expect(content1.createParticipantRef(e.endpoint.contentRetrievalUrl)).andReturn(contentRef).anyTimes
      case UseSecond => expect(content2.createParticipantRef(e.endpoint.contentRetrievalUrl)).andReturn(contentRef).anyTimes
    }
    e.correlateVersion match {
      case Fails =>
      case UseFirst => expect(versioning1.createParticipantRef(e.endpoint.versionGenerationUrl)).andReturn(versionRef).anyTimes
      case UseSecond => expect(versioning2.createParticipantRef(e.endpoint.versionGenerationUrl)).andReturn(versionRef).anyTimes
    }
  }

  // Asserts that the thunk throws InvalidParticipantAddressException.
  def expectsInvalidParticipantException(f: => Unit) {
    try {
      f
      fail("Should have thrown InvalidParticipantAddressException")
    } catch {
      case ipae:InvalidParticipantAddressException =>
    }
  }

  // Asserts that the thunk throws InvalidParticipantOperationException.
  def expectsInvalidParticipantOperationException(f: => Unit) {
    try {
      f
      fail("Should have thrown InvalidParticipantOperationException")
    } catch {
      case ipoe:InvalidParticipantOperationException =>
    }
  }
}
/**
 * Which factory (if any) is expected to serve a capability for a data point:
 * creation fails, the first registered factory is used, or the second is.
 *
 * Sealed so the `match` blocks over these values (e.g. in
 * `expectParticipantCreation`) get compiler-checked exhaustiveness; all
 * variants live in this file.
 */
sealed abstract class OperationTarget
case object Fails extends OperationTarget
case object UseFirst extends OperationTarget
case object UseSecond extends OperationTarget
/**
 * One theory data point: an endpoint plus the expected factory outcome.
 *
 * `validUpstream`/`validDownstream` state whether participant creation should
 * succeed at all; `scan`, `retrieveContent` and `correlateVersion` state which
 * mock factory (if any) should serve each capability.
 */
case class EndpointConfig(endpoint:Endpoint,
  validUpstream:Boolean = true, validDownstream:Boolean = true,
  scan:OperationTarget = Fails, retrieveContent:OperationTarget = Fails, correlateVersion:OperationTarget = Fails)
/**
 * Data points for the theories: endpoints with no URLs, all URLs, and — per
 * capability — an unsupported (ftp) URL, an http URL (first factory) and an
 * amqp URL (second factory).
 */
object ParticipantFactoryTest {
  // No capability URLs at all: creation succeeds but every operation fails.
  @DataPoint def noUrls = EndpointConfig(
    Endpoint(name = "invalid"))

  // All three capabilities configured with http URLs → first factories.
  @DataPoint def allUrls = EndpointConfig(
    Endpoint(name = "allUrls",
      scanUrl = "http://localhost/scan", contentRetrievalUrl = "http://localhost/content",
      versionGenerationUrl = "http://localhost/corr-version"),
    scan = UseFirst, retrieveContent = UseFirst, correlateVersion = UseFirst)

  // Scan URL variants.
  @DataPoint def invalidScanUrl = EndpointConfig(
    Endpoint(name = "invalidScanUrl", scanUrl = "ftp://blah"),
    validUpstream = false, validDownstream = false)
  @DataPoint def firstScanUrl = EndpointConfig(
    Endpoint(name = "firstScanUrl", scanUrl = "http://localhost/scan"),
    scan = UseFirst)
  @DataPoint def secondScanUrl = EndpointConfig(
    Endpoint(name = "secondScanUrl", scanUrl = "amqp://localhost/scan"),
    scan = UseSecond)

  // Content-retrieval URL variants.
  @DataPoint def invalidContentUrl = EndpointConfig(
    Endpoint(name = "invalidContentUrl", contentRetrievalUrl = "ftp://blah"),
    validUpstream = false, validDownstream = false)
  @DataPoint def firstContentUrl = EndpointConfig(
    Endpoint(name = "firstContentUrl", contentRetrievalUrl = "http://localhost/content"),
    retrieveContent = UseFirst)
  @DataPoint def secondContentUrl = EndpointConfig(
    Endpoint(name = "secondContentUrl", contentRetrievalUrl = "amqp://localhost/content"),
    retrieveContent = UseSecond)

  // Version-generation URL variants. Note: an invalid version URL only breaks
  // downstream creation — upstream participants never correlate versions.
  @DataPoint def invalidVersionUrl = EndpointConfig(
    Endpoint(name = "invalidVersionUrl", versionGenerationUrl = "ftp://blah"),
    validUpstream = true, validDownstream = false)
  @DataPoint def firstVersionUrl = EndpointConfig(
    Endpoint(name = "firstVersionUrl", versionGenerationUrl = "http://localhost/corr-version"),
    correlateVersion = UseFirst)
  @DataPoint def secondVersionUrl = EndpointConfig(
    Endpoint(name = "secondVersionUrl", versionGenerationUrl = "amqp://localhost/corr-version"),
    correlateVersion = UseSecond)
}
} | aprescott/diffa | kernel/src/test/scala/net/lshift/diffa/kernel/participants/ParticipantFactoryTest.scala | Scala | apache-2.0 | 12,586 |
package org.messages
/**
 * Clipboard protocol messages exchanged between peers; `rank` identifies the
 * peer a message concerns.
 *
 * `sealed` keeps the hierarchy closed to this file so pattern matches over
 * [[Message]] are checked for exhaustiveness. (The former `sealed abstract
 * trait` was redundant: traits are abstract by definition.)
 */
sealed trait Message

/** Request that peer `rank` performs a copy. */
case class CopyMessage(rank: Int) extends Message

/** Request that peer `rank` performs a paste. */
case class PasteMessage(rank: Int) extends Message

/** Replace the clipboard content for peer `rank` with `str`. */
case class UpdateClipboard(rank: Int, str: String) extends Message

/** Ask for the clipboard item held for peer `rank`. */
case class RequestClipboardItem(rank: Int) extends Message
package gines.akka
import gines.akka.GinesActors.system
import akka.actor._
import akka.zeromq._
import akka.util.{Timeout, ByteString}
import akka.zeromq.Listener
import scala.util.Success
import scala.util.Failure
import scala.Some
import akka.zeromq.Bind
import com.codahale.jerkson.Json
import com.fasterxml.jackson.annotation.{JsonInclude, JsonTypeInfo, JsonSubTypes}
import com.fasterxml.jackson.annotation.JsonInclude.Include
import gines.App
/**
 * Receives JSON admin commands (start/stop simulation) over a ZeroMQ REP
 * socket and drives the `/user/simulation` actor accordingly.
 *
 * @param confFile optional configuration file passed through to
 *                 `App.createSimulation` when a start command arrives
 */
class AdminActor(confFile: Option[String]) extends Actor with ActorLogging {
  // REP socket bound on the admin port; ZeroMQ events are delivered to this actor.
  // NOTE(review): `adminPort` is not defined anywhere in this file — confirm
  // where it is brought into scope.
  val adminSocket = ZeroMQExtension(system).newSocket(SocketType.Rep, Listener(self), Bind(s"tcp://*:$adminPort"))

  def receive: Actor.Receive = {
    case Connecting => log.debug("Connecting")
    case m: ZMQMessage => {
      // NOTE(review): the OK reply is sent before the payload is parsed, so a
      // malformed command is still acknowledged with status OK.
      sender ! ZMQMessage(ByteString("gines"), ByteString("{\\"status:\\": \\"OK\\"}"))
      log.debug(s"Received message: ${m.frame(1).utf8String}")
      // Frame 0 is the envelope; frame 1 carries the JSON command body.
      val obj = Json.parse[Command](m.frame(1).utf8String.toString)
      processMessage(obj)
    }
    case Closed => log.debug("Disconnected")
    case _ => log.warning("Other message")
  }

  // Start: create the simulation only if none is running.
  // Stop: poison the running simulation actor, if any.
  def processMessage(cmd: Command): Unit = cmd match {
    case c: StartCommand => simulationControl(
      onSuccess = { actor =>
        log.debug("Simulation is already running")
      },
      onFailure = {
        log.info("Creating simulation")
        App.createSimulation(confFile)
      }
    )
    case c: StopCommand => {
      simulationControl(onSuccess = { actor =>
        log.info("Stoping simulation")
        actor ! PoisonPill
      }, onFailure = {
        log.warning("Cannot stop not running simulation")
      })
    }
    case _ => log.warning("Mismatch message")
  }

  /**
   * Resolves the `/user/simulation` actor: `onSuccess` runs with the ref when
   * it exists, `onFailure` runs when resolution fails (no simulation running).
   * The callbacks execute asynchronously on the actor's dispatcher.
   */
  private def simulationControl(onSuccess: (ActorRef) => Unit = {actor => ()}, onFailure: => Unit = {}): Unit = {
    implicit val timeout: Timeout = 5000
    import context.dispatcher
    context.system.actorSelection("/user/simulation").resolveOne.onComplete {
      case Success(actor) => onSuccess(actor)
      case Failure(ex) => onFailure
    }
  }
}
/**
 * Admin commands deserialized by Jackson from the JSON `command` discriminator
 * field ("start" / "stop").
 *
 * Sealed so matches over [[Command]] are exhaustiveness-checked; both subtypes
 * are declared in this file, and sealing does not affect Jackson's
 * annotation-driven subtype resolution.
 */
@JsonTypeInfo(
  use = JsonTypeInfo.Id.NAME,
  include = JsonTypeInfo.As.PROPERTY,
  property = "command"
)
@JsonSubTypes(
  Array(
    new JsonSubTypes.Type(value=classOf[StartCommand], name="start"),
    new JsonSubTypes.Type(value=classOf[StopCommand], name="stop")
  )
)
sealed abstract class Command

/** Start the simulation; `params` is an optional argument payload. */
case class StartCommand(params: Option[String]) extends Command

/** Stop the simulation; `foo` is an unused placeholder payload. */
case class StopCommand(foo: Option[String]) extends Command
/** Props factory for [[AdminActor]]. */
object AdminActor {
  // FIXME(review): AdminActor's constructor takes `confFile: Option[String]`
  // (see the class above), but these Props are built with an Int `port`.
  // Creating an actor from these Props will fail at runtime with a
  // constructor-argument mismatch. The signature likely needs to become
  // `apply(confFile: Option[String])`.
  def apply(port: Int) =
    Props(classOf[AdminActor], port)
}
| mikusp/gines | src/main/scala/gines/akka/AdminActor.scala | Scala | gpl-3.0 | 2,551 |
package notebook
import rx.lang.scala.{Observable => RxObservable, Observer => RxObserver, _}
/**
 * Extends the Rx observer contract with a contravariant `map`.
 *
 * Author: Ken
 */
trait Observer[T] extends RxObserver[T] {
  /**
   * Contravariant map: builds an `Observer[A]` that converts each incoming
   * element with `fxn` and delivers the result to this observer. Completion
   * and errors pass through unchanged.
   */
  def map[A](fxn: A=>T): Observer[A] = new MappingObserver[T,A]{
    def innerObserver = Observer.this;
    def observerMapper = fxn
  }
}
/**
 * A no-op observer, useful for extending just the methods you want: all three
 * callbacks default to doing nothing, so subclasses override only the events
 * they care about.
 *
 * NOTE(review): `onError` takes `Exception` and is not marked `override`,
 * unlike `onCompleted`/`onNext` — confirm it actually satisfies the Rx
 * observer's error callback rather than adding an unrelated overload.
 *
 * @tparam T the observed element type
 */
trait ConcreteObserver[T] extends Observer[T] {
  override def onCompleted() {}
  def onError(e: Exception) {}
  override def onNext(args: T) {}
}

/** An observer that ignores every event. */
class NoopObserver[T] extends ConcreteObserver[T]
/**
 * Forwards all events to `innerObserver`, translating each element with
 * `observerMapper` before delivery; completion and errors pass straight
 * through. Backs [[Observer.map]].
 */
trait MappingObserver[A,B] extends Observer[B] {
  protected def innerObserver: Observer[A]
  protected def observerMapper: B=>A
  override def onCompleted() {innerObserver.onCompleted()}
  def onError(e: Exception) {innerObserver.onError(e)}
  override def onNext(args: B) {innerObserver.onNext(observerMapper(args))}
}
object Observer {
  /**
   * Builds an observer that runs `f` on every element and ignores
   * completion and error notifications.
   */
  def apply[A](f:A => Unit) = new ConcreteObserver[A] {
    override def onNext(args:A) = f(args)
  }
}
| vitan/spark-notebook | modules/observable/src/main/scala/notebook/Observer.scala | Scala | apache-2.0 | 1,042 |
package demo.components.elementalui
import chandu0101.macros.tojs.GhPagesMacros
import chandu0101.scalajs.react.components.elementalui._
import demo.components.CodeExample
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.html_<^._
import scala.scalajs.js.`|`
/**
 * Demo page for Elemental UI's Modal component (scalajs-react). Shows a
 * static markup example, a live open/close demo, and width variants.
 */
object EuiModalDemo {
  // Source text between the EXAMPLE markers, extracted at compile time.
  val code = GhPagesMacros.exampleSource

  // EXAMPLE:START

  // modalIsOpen drives the live-demo modal; sizeModalIsOpen/modalSize drive
  // the size demo (width is either a CSS size keyword or a pixel Int).
  case class State(modalIsOpen: Boolean = false,
    sizeModalIsOpen: Boolean = false,
    modalSize: String | Int = 0)

  case class Backend($ : BackendScope[Unit, State]) {
    // Flips the live-demo modal open/closed.
    def toggleModal(event: ReactEventFromHtml) =
      $.modState(state => state.copy(modalIsOpen = !state.modalIsOpen))

    // Flips the size-demo modal and records the requested width.
    def toggleSizeModal(size: String | Int)(event: ReactEventFromHtml) =
      $.modState(state => state.copy(sizeModalIsOpen = !state.sizeModalIsOpen, modalSize = size))

    // Interactive modal: header, a small form, long scrollable body text,
    // and a footer; clicking the backdrop also closes it.
    def renderLiveDemo(S: State) =
      <.div(
        Button(toggleModal _)("Show it"),
        Modal(
          isOpen = S.modalIsOpen,
          backdropClosesModal = true,
          onCancel = toggleModal _
        )(
          ModalHeader(text = "Modal Header")(),
          ModalBody()(
            FormField(label = "Email")(
              FormInput(label = "Email",
                `type` = "email",
                name = "email",
                placeholder = "name@example.com",
                required = true)()
            ),
            FormField(label = "Password")(
              FormInput(label = "Password",
                `type` = "password",
                name = "password",
                placeholder = "Min 8 chars",
                required = true)()
            ),
            <.p("From the Wikipedia article",
              <.a(^.href := "https://en.wikipedia.org/wiki/Elemental",
                ^.target := "_blank",
                "https://en.wikipedia.org/wiki/Elemental")),
            <.p(
              "An elemental is a mythic being described in occult and alchemical works from around the time of the European Renaissance and particularly elaborated in the 16th century works of Paracelsus."),
            <.p(
              "There are four elemental categories: gnomes, undines, sylphs, and salamanders. These correspond to the Classical elements of antiquity: earth, water, air and fire. Aether (quintessence) was not assigned an elemental."),
            <.p(
              "Terms employed for beings associated with alchemical elements vary by source and gloss."),
            <.h2("History"),
            <.p(
              "The Paracelsian concept of elementals draws from several much older traditions in mythology and religion. Common threads can be found in folklore, animism, and anthropomorphism. Examples of creatures such as the Pygmy were taken from Greek mythology."),
            <.p(
              "The elements of earth, water, air, and fire, were classed as the fundamental building blocks of nature. This system prevailed in the Classical world and was highly influential in medieval natural philosophy. Although Paracelsus uses these foundations and the popular preexisting names of elemental creatures, he is doing so to present new ideas which expand on his own philosophical system. The homunculus is another example of a Paracelsian idea with roots in earlier alchemical, scientific, and folklore traditions."),
            <.h3("Paracelsus"),
            <.p(
              "In his 16th-century alchemical work Liber de Nymphis, sylphis, pygmaeis et salamandris et de caeteris spiritibus, Paracelsus identified mythological beings as belonging to one of the four elements. Part of the Philosophia Magna, this book was first printed in 1566 after Paracelsus' death. He wrote the book to \"describe the creatures that are outside the cognizance of the light of nature, how they are to be understood, what marvellous works God has created\". He states that there is more bliss in describing these \"divine objects\" than in describing fencing, court etiquette, cavalry, and other worldly pursuits."),
            <.p("The concept of elementals seems to have been conceived by Paracelsus in the 16th century, though he did not in fact use the term \"elemental\" or a German equivalent.[5] He regarded them not so much as spirits but as beings between creatures and spirits, generally being invisible to mankind but having physical and commonly humanoid bodies, as well as eating, sleeping, and wearing clothes like humans. Paracelsus gave common names for the elemental types, as well as correct names, which he seems to have considered somewhat more proper, \"recht namen\". He also referred to them by purely German terms which are roughly equivalent to \"water people,\" \"mountain people,\" and so on, using all the different forms interchangeably.")
          ),
          ModalFooter()(
            Button(onClick = toggleModal _, `type` = ButtonType.PRIMARY)("Submit"),
            Button(onClick = toggleModal _, `type` = ButtonType.LINK_CANCEL)("Cancel")
          )
        )
      )

    // Non-interactive markup of the same modal content, framed as a code example.
    val renderStaticExample =
      <.div(
        ^.className := "code-example",
        <.div(
          ^.className := "code-example__example",
          <.div(
            ^.className := "Modal-content",
            ModalHeader(text = "Modal Header")(),
            ModalBody()(
              FormField(label = "Email")(
                FormInput(label = "Email",
                  `type` = "email",
                  name = "email",
                  placeholder = "name@example.com",
                  required = true)()
              ),
              FormField(label = "Password")(
                FormInput(
                  label = "Password",
                  `type` = "password",
                  name = "password",
                  placeholder = "Min 8 chars",
                  required = true
                )()
              )
            ),
            ModalFooter()(
              Button()("Submit"),
              Button()("Cancel")
            )
          )
        )
      )

    // One button per width variant; the single modal reuses the recorded size.
    def renderSizes(S: State) =
      <.div(
        Button(onClick = toggleSizeModal("small") _)("small"),
        Button(onClick = toggleSizeModal("large") _)("large"),
        Button(onClick = toggleSizeModal(768) _)("768"),
        Modal(isOpen = S.sizeModalIsOpen,
          onCancel = toggleSizeModal("small") _,
          backdropClosesModal = true,
          width = S.modalSize)(ModalHeader(
          text = s"${S.modalSize}",
          showCloseButton = true,
          onClose = toggleSizeModal("small") _
        )(),
          ModalBody()(<.p("…")))
      )

    // Assembles the full demo page with the extracted source alongside.
    def render(S: State) =
      CodeExample(code, "EuiModal")(
        Container()(<.h1("Modal"),
          <.h2("Static Example"),
          renderStaticExample,
          <.h2("Live Demo"),
          renderLiveDemo(S),
          <.h2("Sizes"),
          renderSizes(S))
      )
  }

  val component = ScalaComponent
    .builder[Unit]("EuiModalDemo")
    .initialState(State())
    .renderBackend[Backend]
    .build

  // EXAMPLE:END

  def apply() = component()
}
| rleibman/scalajs-react-components | demo/src/main/scala/demo/components/elementalui/EuiModalDemo.scala | Scala | apache-2.0 | 7,383 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.typed
import cascading.pipe.CoGroup
import cascading.pipe.joiner.{Joiner => CJoiner, JoinerClosure}
import cascading.tuple.{Tuple => CTuple, Fields, TupleEntry}
import com.twitter.scalding._
import scala.collection.JavaConverters._
/**
 * Cogroups two [[Grouped]] pipes on their shared key type and applies
 * `joiner` per key to the left values (a one-pass iterator) and the right
 * values (a re-iterable Iterable). Secondary (value) sorting must not be set
 * on either side — asserted below.
 */
class CoGrouped2[K,V,W,R](left: Grouped[K,V],
    right: Grouped[K,W],
    joiner: (K, Iterator[V], Iterable[W]) => Iterator[R])
  extends KeyedList[K,R] with java.io.Serializable {

  override lazy val toTypedPipe : TypedPipe[(K,R)] = {
    // Actually make a new coGrouping:
    assert(left.valueSort == None, "secondary sorting unsupported in CoGrouped2")
    assert(right.valueSort == None, "secondary sorting unsupported in CoGrouped2")
    import Dsl._
    import RichPipe.assignName
    // Right side is re-keyed to "key1" so its fields don't collide with the left's.
    val rightGroupKey = RichFields(StringField[K]("key1")(right.ordering, None))
    val cascadingJoiner = new Joiner2(left.streamMapping, right.streamMapping, joiner)
    /*
     * we have to have 4 fields, but we only want key and value.
     * Cascading requires you have the same number coming in as out.
     * in the first case, we introduce (null0, null1), in the second
     * we have (key1, value1), but they are then discarded:
     */
    val newPipe = if(left.pipe == right.pipe) {
      // This is a self-join
      val NUM_OF_SELF_JOINS = 1
      new CoGroup(assignName(left.pipe), left.groupKey,
        NUM_OF_SELF_JOINS,
        // A self join with two fields results in 4 declared:
        new Fields("key","value","null0", "null1"),
        cascadingJoiner)
    }
    else {
      new CoGroup(assignName(left.pipe), left.groupKey,
        assignName(right.pipe.rename(('key, 'value) -> ('key1, 'value1))),
        rightGroupKey,
        cascadingJoiner)
    }
    // Parallelism: take the larger reducer count of the two sides.
    val reducers = scala.math.max(left.reducers, right.reducers)
    /*
     * the Joiner only populates the first two fields, the second two
     * are null. We then project out at the end of the method.
     */
    val pipeWithRed = RichPipe.setReducers(newPipe, reducers).project('key, 'value)
    //Construct the new TypedPipe
    TypedPipe.from[(K,R)](pipeWithRed, ('key, 'value))
  }

  /** Post-processes each key's joined value stream with `fn`, composing it onto `joiner`. */
  override def mapValueStream[U](fn: Iterator[R] => Iterator[U]): KeyedList[K,U] = {
    new CoGrouped2[K,V,W,U](left, right, {(k,vit,wit) => fn(joiner(k,vit,wit))})
  }
}
/**
 * Cascading joiner backing [[CoGrouped2]]. The left stream (index 0) may only
 * be iterated once on Hadoop; the right stream (index 1) is wrapped in a
 * re-iterable Iterable that asks the closure for a fresh iterator each time.
 */
class Joiner2[K,V,W,R](leftGetter : Iterator[CTuple] => Iterator[V],
    rightGetter: Iterator[CTuple] => Iterator[W],
    joiner: (K, Iterator[V], Iterable[W]) => Iterator[R]) extends CJoiner {
  import Joiner._

  override def getIterator(jc: JoinerClosure) = {
    // The left one cannot be iterated multiple times on Hadoop:
    val (lkopt, left) = getKeyValue[K](jc.getIterator(0))
    // It's safe to call getIterator more than once on index > 0
    val (rkopt, _) = getKeyValue[K](jc.getIterator(1))
    // Try to get from the right-hand-side
    // NOTE(review): `.get` assumes at least one side produced the key for
    // this closure — presumably guaranteed by the cogroup; confirm.
    val goodKey = lkopt.orElse(rkopt).get
    val rightIterable = new Iterable[W] with java.io.Serializable {
      def iterator = rightGetter(jc.getIterator(1).asScala.map { Dsl.tupleAt(1) })
    }
    joiner(goodKey, leftGetter(left), rightIterable).map { rval =>
      // There always has to be four resulting fields
      // or otherwise the flow planner will throw
      val res = CTuple.size(4)
      res.set(0, goodKey)
      res.set(1, rval)
      res
    }.asJava
  }
  override val numJoins = 1
}
| stripe/scalding | scalding-core/src/main/scala/com/twitter/scalding/typed/CoGrouped2.scala | Scala | apache-2.0 | 3,909 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import org.scalatest.{ Matchers, WordSpec }
import com.twitter.scalding.source.TypedText
// Use the scalacheck generators
import org.scalacheck.Gen
import scala.collection.mutable.Buffer
import TDsl._
import typed.MultiJoin
/** Test helper utilities. */
object TUtil {
  /**
   * Runs `fn`, printing the stack trace of any failure to stderr before
   * rethrowing it — useful when a test harness would otherwise swallow the
   * trace. Catching Throwable is deliberate here: the error is always
   * rethrown, so nothing is suppressed.
   */
  def printStack(fn: => Unit): Unit = {
    try {
      fn
    } catch {
      case e: Throwable =>
        e.printStackTrace()
        throw e
    }
  }
}
/**
 * Exercises the generated TupleN adders: prepends 1 to each input pair and
 * appends (2, 3), turning each (String, String) into a 5-tuple.
 */
class TupleAdderJob(args: Args) extends Job(args) {
  TypedText.tsv[(String, String)]("input")
    .map{ f =>
      (1 +: f) ++ (2, 3)
    }
    .write(TypedText.tsv[(Int, String, String, Int, Int)]("output"))
}
/** Verifies [[TupleAdderJob]] with an in-memory JobTest source and sink. */
class TupleAdderTest extends WordSpec with Matchers {
  import Dsl._
  "A TupleAdderJob" should {
    JobTest(new TupleAdderJob(_))
      .source(TypedText.tsv[(String, String)]("input"), List(("a", "a"), ("b", "b")))
      .sink[(Int, String, String, Int, Int)](TypedText.tsv[(Int, String, String, Int, Int)]("output")) { outBuf =>
        "be able to use generated tuple adders" in {
          outBuf should have size 2
          outBuf.toSet shouldBe Set((1, "a", "a", 2, 3), (1, "b", "b", 2, 3))
        }
      }
      .run
      .finish
  }
}
/**
 * Word count over a TextLine input using the TypedPipe API: split on
 * whitespace, emit (word, 1), group and sum. `forceToDisk` checkpoints the
 * pipe before grouping; `debug` dumps tuples while running.
 */
class TypedPipeJob(args: Args) extends Job(args) {
  //Word count using TypedPipe
  TextLine("inputFile")
    .flatMap { _.split("\\\\s+") }
    .map { w => (w, 1L) }
    .forceToDisk
    .group
    //.forceToReducers
    .sum
    .debug
    .write(TypedText.tsv[(String, Long)]("outputFile"))
}
/** Verifies TypedPipeJob's word counts in both local and Hadoop test modes. */
class TypedPipeTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedPipe" should {
    // idx distinguishes the assertions generated by the .run and .runHadoop passes
    var idx = 0
    TUtil.printStack {
      JobTest(new TypedPipeJob(_))
        .source(TextLine("inputFile"), List("0" -> "hack hack hack and hack"))
        .sink[(String, Long)](TypedText.tsv[(String, Long)]("outputFile")){ outputBuffer =>
          val outMap = outputBuffer.toMap
          (idx + ": count words correctly") in {
            outMap("hack") shouldBe 4
            outMap("and") shouldBe 1
          }
          idx += 1
        }
        .run
        .runHadoop
        .finish
    }
  }
}
class TypedSumByKeyJob(args: Args) extends Job(args) {
  // Word count expressed with sumByKey: one (word, 1L) per token, summed per word.
  val wordCounts = TextLine("inputFile")
    .flatMap { line => line.split("\\\\s+").map { word => (word, 1L) } }
    .sumByKey
  wordCounts.write(TypedText.tsv[(String, Long)]("outputFile"))
}
/** Verifies TypedSumByKeyJob's per-word counts in both local and Hadoop test modes. */
class TypedSumByKeyTest extends WordSpec with Matchers {
  "A TypedSumByKeyPipe" should {
    // idx distinguishes the assertions generated by the .run and .runHadoop passes
    var idx = 0
    TUtil.printStack {
      JobTest(new TypedSumByKeyJob(_))
        .source(TextLine("inputFile"), List("0" -> "hack hack hack and hack"))
        .sink[(String, Long)](TypedText.tsv[(String, Long)]("outputFile")){ outputBuffer =>
          val outMap = outputBuffer.toMap
          (idx + ": count words correctly") in {
            outMap("hack") shouldBe 4
            outMap("and") shouldBe 1
          }
          idx += 1
        }
        .run
        .runHadoop
        .finish
    }
  }
}
/** Left-joins two (Int, Int) sources by key, writing (key, (left, Option[right])). */
class TypedPipeJoinJob(args: Args) extends Job(args) {
  (Tsv("inputFile0").read.toTypedPipe[(Int, Int)](0, 1).group
    leftJoin TypedPipe.from[(Int, Int)](Tsv("inputFile1").read, (0, 1)).group)
    .toTypedPipe
    .write(TypedText.tsv[(Int, (Int, Option[Int]))]("outputFile"))
}
/** Verifies TypedPipeJoinJob: matched keys carry Some(right), unmatched carry None. */
class TypedPipeJoinTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedPipeJoin" should {
    JobTest(new com.twitter.scalding.TypedPipeJoinJob(_))
      .source(Tsv("inputFile0"), List((0, 0), (1, 1), (2, 2), (3, 3), (4, 5)))
      .source(Tsv("inputFile1"), List((0, 1), (1, 2), (2, 3), (3, 4)))
      .typedSink[(Int, (Int, Option[Int]))](TypedText.tsv[(Int, (Int, Option[Int]))]("outputFile")){ outputBuffer =>
        val outMap = outputBuffer.toMap
        "correctly join" in {
          outMap should have size 5
          outMap(0) shouldBe (0, Some(1))
          outMap(1) shouldBe (1, Some(2))
          outMap(2) shouldBe (2, Some(3))
          outMap(3) shouldBe (3, Some(4))
          // key 4 has no match on the right side
          outMap(4) shouldBe (5, None)
        }
      }(implicitly[TypeDescriptor[(Int, (Int, Option[Int]))]].converter)
      .run
      .finish
  }
}
// This is a non-serializable class (deliberately does NOT extend Serializable),
// used to prove that closures capturing it must go through Kryo.
class OpaqueJoinBox(value: Int) { def get: Int = value }
/** Joins two sources and multiplies through a captured non-serializable box (forces Kryo). */
class TypedPipeJoinKryoJob(args: Args) extends Job(args) {
  // captured by the mapValues closure below; not java-serializable
  val box = new OpaqueJoinBox(2)
  TypedPipe.from(TypedText.tsv[(Int, Int)]("inputFile0"))
    .join(TypedPipe.from(TypedText.tsv[(Int, Int)]("inputFile1")))
    .mapValues { case (x, y) => x * y * box.get }
    .write(TypedText.tsv[(Int, Int)]("outputFile"))
}
/** Verifies Kryo handles closures over a non-java-serializable class during a join. */
class TypedPipeJoinKryoTest extends WordSpec with Matchers {
  "OpaqueJoinBox" should {
    "not be serializable" in {
      serialization.Externalizer(new OpaqueJoinBox(1)).javaWorks shouldBe false
    }
    "closure not be serializable" in {
      val box = new OpaqueJoinBox(2)
      val fn = { v: Int => v * box.get }
      serialization.Externalizer(fn).javaWorks shouldBe false
    }
  }
  "A TypedPipeJoinKryo" should {
    JobTest(new com.twitter.scalding.TypedPipeJoinKryoJob(_))
      .source(TypedText.tsv[(Int, Int)]("inputFile0"), List((0, 0), (1, 1), (2, 2), (3, 3), (4, 5)))
      .source(TypedText.tsv[(Int, Int)]("inputFile1"), List((0, 1), (1, 2), (2, 3), (3, 4)))
      .typedSink[(Int, Int)](TypedText.tsv[(Int, Int)]("outputFile")){ outputBuffer =>
        val outMap = outputBuffer.toMap
        "correctly join" in {
          // values are left * right * box.get (= 2)
          outMap should have size 4
          outMap(0) shouldBe 0
          outMap(1) shouldBe 4
          outMap(2) shouldBe 12
          outMap(3) shouldBe 24
        }
      }(implicitly[TypeDescriptor[(Int, Int)]].converter)
      .runHadoop // need hadoop to test serialization
      .finish
  }
}
class TypedPipeDistinctJob(args: Args) extends Job(args) {
  // Deduplicates the (Int, Int) rows of "inputFile".
  val pairs = Tsv("inputFile").read.toTypedPipe[(Int, Int)](0, 1)
  val unique = pairs.distinct
  unique.write(TypedText.tsv[(Int, Int)]("outputFile"))
}
/** Verifies TypedPipeDistinctJob removes the duplicate (2, 2) row. */
class TypedPipeDistinctTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedPipeDistinctJob" should {
    JobTest(new TypedPipeDistinctJob(_))
      .source(Tsv("inputFile"), List((0, 0), (1, 1), (2, 2), (2, 2), (2, 5)))
      .sink[(Int, Int)](TypedText.tsv[(Int, Int)]("outputFile")){ outputBuffer =>
        val outMap = outputBuffer.toMap
        "correctly count unique item sizes" in {
          // 5 inputs with one exact duplicate -> 4 distinct rows
          outputBuffer.toSet should have size 4
        }
      }
      .run
      .finish
  }
}
/** Keeps one (Int, Int) row per distinct second element, via distinctBy(_._2). */
class TypedPipeDistinctByJob(args: Args) extends Job(args) {
  Tsv("inputFile").read.toTypedPipe[(Int, Int)](0, 1)
    .distinctBy(_._2)
    .write(TypedText.tsv[(Int, Int)]("outputFile"))
}
/** Verifies distinctBy keeps exactly one row per distinct second element. */
class TypedPipeDistinctByTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedPipeDistinctByJob" should {
    JobTest(new TypedPipeDistinctByJob(_))
      .source(Tsv("inputFile"), List((0, 1), (1, 1), (2, 2), (2, 2), (2, 5)))
      .typedSink(TypedText.tsv[(Int, Int)]("outputFile")){ outputBuffer =>
        "correctly count unique item sizes" in {
          val outSet = outputBuffer.toSet
          outSet should have size 3
          // distinctBy may keep either representative for value 1: (0, 1) or (1, 1)
          List(outSet) should contain oneOf (Set((0, 1), (2, 2), (2, 5)), Set((1, 1), (2, 2), (2, 5)))
        }
      }
      .run
      .finish
  }
}
/** Writes per-key distinct values to one sink and per-key distinct-count to another. */
class TypedPipeGroupedDistinctJob(args: Args) extends Job(args) {
  val groupedTP = Tsv("inputFile").read.toTypedPipe[(Int, Int)](0, 1)
    .group
  groupedTP
    .distinctValues
    .write(TypedText.tsv[(Int, Int)]("outputFile1"))
  groupedTP
    .distinctSize
    .write(TypedText.tsv[(Int, Long)]("outputFile2"))
}
/** Verifies TypedPipeGroupedDistinctJob: per-key distinctValues and distinctSize. */
class TypedPipeGroupedDistinctJobTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedPipeGroupedDistinctJob" should {
    JobTest(new TypedPipeGroupedDistinctJob(_))
      .source(Tsv("inputFile"), List((0, 0), (0, 1), (0, 1), (1, 0), (1, 1)))
      .sink[(Int, Int)](TypedText.tsv[(Int, Int)]("outputFile1")){ outputBuffer =>
        val outSet = outputBuffer.toSet
        "correctly generate unique items" in {
          // the duplicate (0, 1) collapses, leaving 4 distinct (key, value) rows
          outSet should have size 4
        }
      }
      // Fixed: the job writes (Int, Long) here (distinctSize), so the sink's type
      // parameter must be (Int, Long) to match the TypedText.tsv[(Int, Long)] source.
      .sink[(Int, Long)](TypedText.tsv[(Int, Long)]("outputFile2")){ outputBuffer =>
        val outMap = outputBuffer.toMap
        "correctly count unique item sizes" in {
          outMap(0) shouldBe 2
          outMap(1) shouldBe 2
        }
      }
      .run
      .finish
  }
}
/** hashLeftJoin of two (Int, Int) sources; the right side is replicated to mappers. */
class TypedPipeHashJoinJob(args: Args) extends Job(args) {
  TypedText.tsv[(Int, Int)]("inputFile0")
    .group
    .hashLeftJoin(TypedText.tsv[(Int, Int)]("inputFile1").group)
    .write(TypedText.tsv[(Int, (Int, Option[Int]))]("outputFile"))
}
/** Verifies hashLeftJoin produces the same results as an ordinary left join. */
class TypedPipeHashJoinTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedPipeHashJoinJob" should {
    JobTest(new TypedPipeHashJoinJob(_))
      .source(TypedText.tsv[(Int, Int)]("inputFile0"), List((0, 0), (1, 1), (2, 2), (3, 3), (4, 5)))
      .source(TypedText.tsv[(Int, Int)]("inputFile1"), List((0, 1), (1, 2), (2, 3), (3, 4)))
      .typedSink(TypedText.tsv[(Int, (Int, Option[Int]))]("outputFile")){ outputBuffer =>
        val outMap = outputBuffer.toMap
        "correctly join" in {
          outMap should have size 5
          outMap(0) shouldBe (0, Some(1))
          outMap(1) shouldBe (1, Some(2))
          outMap(2) shouldBe (2, Some(3))
          outMap(3) shouldBe (3, Some(4))
          // key 4 has no match on the right side
          outMap(4) shouldBe (5, None)
        }
      }(implicitly[TypeDescriptor[(Int, (Int, Option[Int]))]].converter)
      .run
      .finish
  }
}
/** Finds the most frequent word via the `typed` field-to-TypedPipe implicit bridge. */
class TypedImplicitJob(args: Args) extends Job(args) {
  // swaps the elements of a pair
  def revTup[K, V](in: (K, V)): (V, K) = (in._2, in._1)
  TextLine("inputFile").read.typed(1 -> ('maxWord, 'maxCnt)) { tpipe: TypedPipe[String] =>
    tpipe.flatMap { _.split("\\\\s+") }
      .map { w => (w, 1L) }
      .group
      .sum
      .groupAll
      // Looks like swap, but on the values in the grouping:
      .mapValues { revTup _ }
      .forceToReducers
      .max
      // Throw out the Unit key and reverse the value tuple
      .values
      .swap
  }.write(TypedText.tsv[(String, Int)]("outputFile"))
}
/** Verifies TypedImplicitJob reports the single most frequent word and its count. */
class TypedPipeTypedTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedImplicitJob" should {
    JobTest(new TypedImplicitJob(_))
      .source(TextLine("inputFile"), List("0" -> "hack hack hack and hack"))
      .typedSink(TypedText.tsv[(String, Int)]("outputFile")){ outputBuffer =>
        val outMap = outputBuffer.toMap
        "find max word" in {
          outMap should have size 1
          outMap("hack") shouldBe 4
        }
      }
      .run
      .finish
  }
}
/** Finds case-insensitive repeated words; bumps counters via onComplete on map and reduce sides. */
class TypedWithOnCompleteJob(args: Args) extends Job(args) {
  val onCompleteMapperStat = Stat("onCompleteMapper")
  val onCompleteReducerStat = Stat("onCompleteReducer")
  def onCompleteMapper() = onCompleteMapperStat.inc
  def onCompleteReducer() = onCompleteReducerStat.inc
  // find repeated words ignoring case
  TypedText.tsv[String]("input")
    .map(_.toUpperCase)
    .onComplete(onCompleteMapper)
    .groupBy(identity)
    .mapValueStream(words => Iterator(words.size))
    .filter { case (word, occurrences) => occurrences > 1 }
    .keys
    .onComplete(onCompleteReducer)
    .write(TypedText.tsv[String]("output"))
}
/** Verifies onComplete fires exactly once per phase and output matches an in-memory model. */
class TypedPipeWithOnCompleteTest extends WordSpec with Matchers {
  import Dsl._
  val inputText = "the quick brown fox jumps over the lazy LAZY dog"
  "A TypedWithOnCompleteJob" should {
    JobTest(new TypedWithOnCompleteJob(_))
      .source(TypedText.tsv[String]("input"), inputText.split("\\\\s+").map(Tuple1(_)))
      .counter("onCompleteMapper") { cnt => "have onComplete called on mapper" in { assert(cnt == 1) } }
      .counter("onCompleteReducer") { cnt => "have onComplete called on reducer" in { assert(cnt == 1) } }
      .sink[String](TypedText.tsv[String]("output")) { outbuf =>
        "have the correct output" in {
          // same computation done in-memory: uppercase, group, keep repeated words
          val correct = inputText.split("\\\\s+").map(_.toUpperCase).groupBy(x => x).filter(_._2.size > 1).keys.toList.sorted
          val sortedL = outbuf.toList.sorted
          assert(sortedL == correct)
        }
      }
      .runHadoop
      .finish
  }
}
/** Chains an outerJoin with a leftJoin over three keyed sources; emits only the ids. */
class TypedPipeWithOuterAndLeftJoin(args: Args) extends Job(args) {
  val userNames = TypedText.tsv[(Int, String)]("inputNames").group
  val userData = TypedText.tsv[(Int, Double)]("inputData").group
  val optionalData = TypedText.tsv[(Int, Boolean)]("inputOptionalData").group
  userNames
    .outerJoin(userData)
    .leftJoin(optionalData)
    .map { case (id, ((nameOpt, userDataOption), optionalDataOpt)) => id }
    .write(TypedText.tsv[Int]("output"))
}
/** Verifies the outerJoin keeps keys from either side while the leftJoin adds none. */
class TypedPipeWithOuterAndLeftJoinTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedPipeWithOuterAndLeftJoin" should {
    JobTest(new TypedPipeWithOuterAndLeftJoin(_))
      .source(TypedText.tsv[(Int, String)]("inputNames"), List((1, "Jimmy Foursquare")))
      .source(TypedText.tsv[(Int, Double)]("inputData"), List((1, 0.1), (5, 0.5)))
      .source(TypedText.tsv[(Int, Boolean)]("inputOptionalData"), List((1, true), (99, false)))
      // Fixed: the job writes Int ids, so the sink's type parameter must be Int
      // to match the TypedText.tsv[Int] source (it previously said Long).
      .sink[Int](TypedText.tsv[Int]("output")) { outbuf =>
        "have output for user 1" in {
          // present in both names and data
          assert(outbuf.toList.contains(1) == true)
        }
        "have output for user 5" in {
          // only in data; kept by the outer join
          assert(outbuf.toList.contains(5) == true)
        }
        "not have output for user 99" in {
          // only in optionalData; a left join cannot introduce new keys
          assert(outbuf.toList.contains(99) == false)
        }
      }
      .run
      .finish
  }
}
/** Exercises grouped join, flattened join output, and leftJoin with a -1 default. */
class TJoinCountJob(args: Args) extends Job(args) {
  (TypedPipe.from[(Int, Int)](Tsv("in0", (0, 1)), (0, 1)).group
    join TypedPipe.from[(Int, Int)](Tsv("in1", (0, 1)), (0, 1)).group)
    .size
    .write(TypedText.tsv[(Int, Long)]("out"))
  //Also check simple joins:
  (TypedPipe.from[(Int, Int)](Tsv("in0", (0, 1)), (0, 1)).group
    join TypedPipe.from[(Int, Int)](Tsv("in1", (0, 1)), (0, 1)).group)
    //Flatten out to three values:
    .toTypedPipe
    .map { kvw => (kvw._1, kvw._2._1, kvw._2._2) }
    .write(TypedText.tsv[(Int, Int, Int)]("out2"))
  //Also check simple leftJoins:
  (TypedPipe.from[(Int, Int)](Tsv("in0", (0, 1)), (0, 1)).group
    leftJoin TypedPipe.from[(Int, Int)](Tsv("in1", (0, 1)), (0, 1)).group)
    //Flatten out to three values:
    .toTypedPipe
    .map { kvw: (Int, (Int, Option[Int])) =>
      // unmatched right side becomes -1
      (kvw._1, kvw._2._1, kvw._2._2.getOrElse(-1))
    }
    .write(TypedText.tsv[(Int, Int, Int)]("out3"))
}
/**
 * This test exercises the implicit from TypedPipe to HashJoinable:
 * same pipeline as TJoinCountJob but without explicit .group calls.
 */
class TNiceJoinCountJob(args: Args) extends Job(args) {
  (TypedPipe.from[(Int, Int)](Tsv("in0", (0, 1)), (0, 1))
    join TypedPipe.from[(Int, Int)](Tsv("in1", (0, 1)), (0, 1)))
    .size
    .write(TypedText.tsv[(Int, Long)]("out"))
  //Also check simple joins:
  (TypedPipe.from[(Int, Int)](Tsv("in0", (0, 1)), (0, 1))
    join TypedPipe.from[(Int, Int)](Tsv("in1", (0, 1)), (0, 1)))
    //Flatten out to three values:
    .toTypedPipe
    .map { kvw => (kvw._1, kvw._2._1, kvw._2._2) }
    .write(TypedText.tsv[(Int, Int, Int)]("out2"))
  //Also check simple leftJoins:
  (TypedPipe.from[(Int, Int)](Tsv("in0", (0, 1)), (0, 1))
    leftJoin TypedPipe.from[(Int, Int)](Tsv("in1", (0, 1)), (0, 1)))
    //Flatten out to three values:
    .toTypedPipe
    .map { kvw: (Int, (Int, Option[Int])) =>
      // unmatched right side becomes -1
      (kvw._1, kvw._2._1, kvw._2._2.getOrElse(-1))
    }
    .write(TypedText.tsv[(Int, Int, Int)]("out3"))
}
/** Same pipelines as TJoinCountJob but via joinBy/leftJoinBy with explicit key functions. */
class TNiceJoinByCountJob(args: Args) extends Job(args) {
  import com.twitter.scalding.typed.Syntax._
  (TypedPipe.from[(Int, Int)](Tsv("in0", (0, 1)), (0, 1))
    joinBy TypedPipe.from[(Int, Int)](Tsv("in1", (0, 1)), (0, 1)))(_._1, _._1)
    .size
    .write(TypedText.tsv[(Int, Long)]("out"))
  //Also check simple joins:
  (TypedPipe.from[(Int, Int)](Tsv("in0", (0, 1)), (0, 1))
    joinBy TypedPipe.from[(Int, Int)](Tsv("in1", (0, 1)), (0, 1)))(_._1, _._1)
    //Flatten out to three values:
    .toTypedPipe
    // joinBy keeps the full tuples as values, so project the second elements
    .map { kvw => (kvw._1, kvw._2._1._2, kvw._2._2._2) }
    .write(TypedText.tsv[(Int, Int, Int)]("out2"))
  //Also check simple leftJoins:
  (TypedPipe.from[(Int, Int)](Tsv("in0", (0, 1)), (0, 1))
    leftJoinBy TypedPipe.from[(Int, Int)](Tsv("in1", (0, 1)), (0, 1)))(_._1, _._1)
    //Flatten out to three values:
    .toTypedPipe
    .map { kvw: (Int, ((Int, Int), Option[(Int, Int)])) =>
      (kvw._1, kvw._2._1._2, kvw._2._2.getOrElse((-1, -1))._2)
    }
    .write(TypedText.tsv[(Int, Int, Int)]("out3"))
}
/** Runs the same join/leftJoin expectations against all three join-job variants. */
class TypedPipeJoinCountTest extends WordSpec with Matchers {
  import Dsl._
  val joinTests = List("com.twitter.scalding.TJoinCountJob", "com.twitter.scalding.TNiceJoinCountJob", "com.twitter.scalding.TNiceJoinByCountJob")
  joinTests.foreach{ jobName =>
    "A " + jobName should {
      // idx distinguishes the assertions generated by the .run and .runHadoop passes
      var idx = 0
      JobTest(jobName)
        .source(Tsv("in0", (0, 1)), List((0, 1), (0, 2), (1, 1), (1, 5), (2, 10)))
        .source(Tsv("in1", (0, 1)), List((0, 10), (1, 20), (1, 10), (1, 30)))
        .typedSink(TypedText.tsv[(Int, Long)]("out")) { outbuf =>
          val outMap = outbuf.toMap
          (idx + ": correctly reduce after cogroup") in {
            // sizes of the join groups: key 0 -> 2*1 pairs, key 1 -> 2*3 pairs
            outMap should have size 2
            outMap(0) shouldBe 2
            outMap(1) shouldBe 6
          }
          idx += 1
        }
        .typedSink(TypedText.tsv[(Int, Int, Int)]("out2")) { outbuf2 =>
          val outMap = outbuf2.groupBy { _._1 }
          (idx + ": correctly do a simple join") in {
            outMap should have size 2
            outMap(0).toList.sorted shouldBe List((0, 1, 10), (0, 2, 10))
            outMap(1).toList.sorted shouldBe List((1, 1, 10), (1, 1, 20), (1, 1, 30), (1, 5, 10), (1, 5, 20), (1, 5, 30))
          }
          idx += 1
        }
        .typedSink(TypedText.tsv[(Int, Int, Int)]("out3")) { outbuf =>
          val outMap = outbuf.groupBy { _._1 }
          (idx + ": correctly do a simple leftJoin") in {
            outMap should have size 3
            outMap(0).toList.sorted shouldBe List((0, 1, 10), (0, 2, 10))
            outMap(1).toList.sorted shouldBe List((1, 1, 10), (1, 1, 20), (1, 1, 30), (1, 5, 10), (1, 5, 20), (1, 5, 30))
            // key 2 is unmatched on the right: defaulted to -1
            outMap(2).toList.sorted shouldBe List((2, 10, -1))
          }
          idx += 1
        }
        .run
        .runHadoop
        .finish
    }
  }
}
/** Writes the full cross-product of two text sources. */
class TCrossJob(args: Args) extends Job(args) {
  (TextLine("in0") cross TextLine("in1"))
    .write(TypedText.tsv[(String, String)]("crossed"))
}
/** Verifies TCrossJob produces all 2x2 combinations of its inputs. */
class TypedPipeCrossTest extends WordSpec with Matchers {
  import Dsl._
  "A TCrossJob" should {
    // idx distinguishes the assertions generated by the .run and .runHadoop passes
    var idx = 0
    TUtil.printStack {
      JobTest(new TCrossJob(_))
        .source(TextLine("in0"), List((0, "you"), (1, "all")))
        .source(TextLine("in1"), List((0, "every"), (1, "body")))
        .typedSink(TypedText.tsv[(String, String)]("crossed")) { outbuf =>
          val sortedL = outbuf.toList.sorted
          (idx + ": create a cross-product") in {
            sortedL shouldBe List(("all", "body"),
              ("all", "every"),
              ("you", "body"),
              ("you", "every"))
          }
          idx += 1
        }
        .run
        .runHadoop
        .finish
    }
  }
}
/** Joins against a keyed pipe reduced to one record per key via take(1). */
class TJoinTakeJob(args: Args) extends Job(args) {
  val items0 = TextLine("in0").flatMap { s => (1 to 10).map((_, s)) }.group
  val items1 = TextLine("in1").map { s => (s.toInt, ()) }.group
  // take(1) dedupes the keys on the right side before joining
  items0.join(items1.take(1))
    .mapValues(_._1) // discard the ()
    .toTypedPipe
    .write(TypedText.tsv[(Int, String)]("joined"))
}
/** Verifies TJoinTakeJob: duplicated right-side keys join only once after take(1). */
class TypedJoinTakeTest extends WordSpec with Matchers {
  import Dsl._
  "A TJoinTakeJob" should {
    // idx distinguishes the assertions generated by the .run and .runHadoop passes
    var idx = 0
    TUtil.printStack {
      JobTest(new TJoinTakeJob(_))
        .source(TextLine("in0"), List((0, "you"), (1, "all")))
        .source(TextLine("in1"), List((0, "3"), (1, "2"), (0, "3")))
        .typedSink(TypedText.tsv[(Int, String)]("joined")) { outbuf =>
          val sortedL = outbuf.toList.sorted
          (idx + ": dedup keys by using take") in {
            // key 3 appears twice in "in1" but joins only once
            sortedL shouldBe (List((3, "you"), (3, "all"), (2, "you"), (2, "all")).sorted)
          }
          idx += 1
        }
        .run
        .runHadoop
        .finish
    }
  }
}
/** Globally sorts all input lines via groupAll and writes the sorted values. */
class TGroupAllJob(args: Args) extends Job(args) {
  TextLine("in")
    .groupAll
    .sorted
    .values
    .write(TypedText.tsv[String]("out"))
}
/** Verifies TGroupAllJob's output is the input lines in sorted order. */
class TypedGroupAllTest extends WordSpec with Matchers {
  import Dsl._
  "A TGroupAllJob" should {
    // idx distinguishes the assertions generated by the .run and .runHadoop passes
    var idx = 0
    TUtil.printStack {
      val input = List((0, "you"), (1, "all"), (2, "everybody"))
      JobTest(new TGroupAllJob(_))
        .source(TextLine("in"), input)
        .typedSink(TypedText.tsv[String]("out")) { outbuf =>
          val sortedL = outbuf.toList
          val correct = input.map { _._2 }.sorted
          (idx + ": create sorted output") in {
            sortedL shouldBe correct
          }
          idx += 1
        }
        .run
        .runHadoop
        .finish
    }
  }
}
/** Joins a grouped pipe with itself and writes the value pairs. */
class TSelfJoin(args: Args) extends Job(args) {
  val g = TypedText.tsv[(Int, Int)]("in").group
  g.join(g).values.write(TypedText.tsv[(Int, Int)]("out"))
}
/** Verifies the self-join: key 1 with values {2, 3} yields all four value pairs. */
class TSelfJoinTest extends WordSpec with Matchers {
  import Dsl._
  "A TSelfJoin" should {
    JobTest(new TSelfJoin(_))
      .source(TypedText.tsv[(Int, Int)]("in"), List((1, 2), (1, 3), (2, 1)))
      .typedSink(TypedText.tsv[(Int, Int)]("out")) { outbuf =>
        outbuf.toList.sorted shouldBe List((1, 1), (2, 2), (2, 3), (3, 2), (3, 3))
      }
      .run
      .runHadoop
      .finish
  }
}
/** Word-counts two text sources independently, then outer-joins the counts by word. */
class TJoinWordCount(args: Args) extends Job(args) {
  // lowercased word -> occurrence count for one pipe
  def countWordsIn(pipe: TypedPipe[(String)]) = {
    pipe.flatMap { _.split("\\\\s+").map(_.toLowerCase) }
      .groupBy(identity)
      .mapValueStream(input => Iterator(input.size))
      .forceToReducers
  }
  val first = countWordsIn(TypedPipe.from(TextLine("in0")))
  val second = countWordsIn(TypedPipe.from(TextLine("in1")))
  first.outerJoin(second)
    .toTypedPipe
    .map {
      case (word, (firstCount, secondCount)) =>
        // a word missing from one side counts as 0 there
        (word, firstCount.getOrElse(0), secondCount.getOrElse(0))
    }
    .write(TypedText.tsv[(String, Int, Int)]("out"))
}
/** Verifies TJoinWordCount against an in-memory count + outer-join model. */
class TypedJoinWCTest extends WordSpec with Matchers {
  import Dsl._
  "A TJoinWordCount" should {
    TUtil.printStack {
      val in0 = List((0, "you all everybody"), (1, "a b c d"), (2, "a b c"))
      val in1 = List((0, "you"), (1, "a b c d"), (2, "a a b b c c"))
      // in-memory word count, mirroring the job's countWordsIn
      def count(in: List[(Int, String)]): Map[String, Int] = {
        in.flatMap { _._2.split("\\\\s+").map { _.toLowerCase } }.groupBy { identity }.mapValues { _.size }
      }
      // in-memory outer join with per-side defaults for missing keys
      def outerjoin[K, U, V](m1: Map[K, U], z1: U, m2: Map[K, V], z2: V): Map[K, (U, V)] = {
        (m1.keys ++ m2.keys).map { k => (k, (m1.getOrElse(k, z1), m2.getOrElse(k, z2))) }.toMap
      }
      val correct = outerjoin(count(in0), 0, count(in1), 0)
        .toList
        .map { tup => (tup._1, tup._2._1, tup._2._2) }
        .sorted
      JobTest(new TJoinWordCount(_))
        .source(TextLine("in0"), in0)
        .source(TextLine("in1"), in1)
        .typedSink(TypedText.tsv[(String, Int, Int)]("out")) { outbuf =>
          val sortedL = outbuf.toList
          "create sorted output" in {
            sortedL shouldBe correct
          }
        }
        .run
        .finish
    }
  }
}
/** Limits the input to 10 records; the ascription exercises the implicit widening to TypedPipe. */
class TypedLimitJob(args: Args) extends Job(args) {
  val p = TypedText.tsv[String]("input").limit(10): TypedPipe[String]
  p.write(TypedText.tsv[String]("output"))
}
/** Verifies TypedLimitJob emits at most 10 of the 101 input records. */
class TypedLimitTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedLimitJob" should {
    JobTest(new TypedLimitJob(_))
      .source(TypedText.tsv[String]("input"), (0 to 100).map { i => Tuple1(i.toString) })
      .typedSink(TypedText.tsv[String]("output")) { outBuf =>
        "not have more than the limited outputs" in {
          outBuf.size should be <= 10
        }
      }
      .runHadoop
      .finish
  }
}
/** Splits each line into words and flattens the resulting lists into a pipe of words. */
class TypedFlattenJob(args: Args) extends Job(args) {
  TypedText.tsv[String]("input").map { _.split(" ").toList }
    .flatten
    .write(TypedText.tsv[String]("output"))
}
/** Verifies TypedFlattenJob flattens per-line word lists into individual words. */
class TypedFlattenTest extends WordSpec with Matchers {
  import Dsl._
  // Fixed: the description previously said "A TypedLimitJob" (copy/paste error);
  // this spec exercises TypedFlattenJob.
  "A TypedFlattenJob" should {
    JobTest(new TypedFlattenJob(_))
      .source(TypedText.tsv[String]("input"), List(Tuple1("you all"), Tuple1("every body")))
      .typedSink(TypedText.tsv[String]("output")) { outBuf =>
        "correctly flatten" in {
          outBuf.toSet shouldBe Set("you", "all", "every", "body")
        }
      }
      .runHadoop
      .finish
  }
}
/** Merges a pipe with itself and with a mapped copy of itself. */
class TypedMergeJob(args: Args) extends Job(args) {
  val tp = TypedPipe.from(TypedText.tsv[String]("input"))
  // This exercise a self merge
  (tp ++ tp)
    .write(TypedText.tsv[String]("output"))
  (tp ++ (tp.map(_.reverse)))
    .write(TypedText.tsv[String]("output2"))
}
/** Verifies TypedMergeJob: a self-merge and a merge with a reversed copy. */
class TypedMergeTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedMergeJob" should {
    var idx = 0
    JobTest(new TypedMergeJob(_))
      .source(TypedText.tsv[String]("input"), List(Tuple1("you all"), Tuple1("every body")))
      .typedSink(TypedText.tsv[String]("output")) { outBuf =>
        // Fixed: test names previously said "correctly flatten" (copy/paste error);
        // these assertions check the merge output. Set comparison ignores the
        // duplicate records produced by the self-merge.
        (idx + ": correctly merge") in {
          outBuf.toSet shouldBe Set("you all", "every body")
        }
        idx += 1
      }
      .typedSink(TypedText.tsv[String]("output2")) { outBuf =>
        (idx + ": correctly merge") in {
          val correct = Set("you all", "every body")
          outBuf.toSet shouldBe (correct ++ correct.map(_.reverse))
        }
        idx += 1
      }
      .runHadoop
      .finish
  }
}
/** Merges the input with an empty pipe and a one-element pipe, then shards to 10 parts. */
class TypedShardJob(args: Args) extends Job(args) {
  (TypedPipe.from(TypedText.tsv[String]("input")) ++
    (TypedPipe.empty.map { _ => "hey" }) ++
    TypedPipe.from(List("item")))
    .shard(10)
    .write(TypedText.tsv[String]("output"))
}
/** Verifies sharding preserves all records: input plus the single "item" record. */
class TypedShardTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedShardJob" should {
    val genList = Gen.listOf(Gen.identifier)
    // Take one random sample; the self-reference retries until sample is defined
    lazy val mk: List[String] = genList.sample.getOrElse(mk)
    JobTest(new TypedShardJob(_))
      .source(TypedText.tsv[String]("input"), mk)
      .typedSink(TypedText.tsv[String]("output")) { outBuf =>
        "correctly flatten" in {
          // the empty pipe contributes nothing; "item" adds exactly one record
          outBuf should have size (mk.size + 1)
          outBuf.toSet shouldBe (mk.toSet + "item")
        }
      }
      .run
      .finish
  }
}
/** Sums (word, 1L) pairs with sumByLocalKeys (map-side combining, no global group). */
class TypedLocalSumJob(args: Args) extends Job(args) {
  TypedPipe.from(TypedText.tsv[String]("input"))
    .flatMap { s => s.split(" ").map((_, 1L)) }
    .sumByLocalKeys
    .write(TypedText.tsv[(String, Long)]("output"))
}
/** Verifies sumByLocalKeys never expands the data and preserves the total per-key sums. */
class TypedLocalSumTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedLocalSumJob" should {
    // idx distinguishes the assertions generated by the .run and .runHadoop passes
    var idx = 0
    val genList = Gen.listOf(Gen.identifier)
    // Take one random sample; the self-reference retries until sample is defined
    lazy val mk: List[String] = genList.sample.getOrElse(mk)
    JobTest(new TypedLocalSumJob(_))
      .source(TypedText.tsv[String]("input"), mk)
      .typedSink(TypedText.tsv[(String, Long)]("output")) { outBuf =>
        s"$idx: not expand and have correct total sum" in {
          import com.twitter.algebird.MapAlgebra.sumByKey
          val lres = outBuf.toList
          val fmapped = mk.flatMap { s => s.split(" ").map((_, 1L)) }
          // local combining can only shrink (or keep) the record count
          lres.size should be <= (fmapped.size)
          sumByKey(lres) shouldBe (sumByKey(fmapped))
        }
        idx += 1
      }
      .run
      .runHadoop
      .finish
  }
}
/** Keeps the first value seen per key via group.head. */
class TypedHeadJob(args: Args) extends Job(args) {
  TypedPipe.from(TypedText.tsv[(Int, Int)]("input"))
    .group
    .head
    .write(TypedText.tsv[(Int, Int)]("output"))
}
/** Verifies group.head keeps the first value per key over random input. */
class TypedHeadTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedHeadJob" should {
    val rng = new java.util.Random
    val COUNT = 10000
    val KEYS = 100
    // note: nextInt % KEYS ranges over (-KEYS, KEYS)
    val mk = (1 to COUNT).map { _ => (rng.nextInt % KEYS, rng.nextInt) }
    JobTest(new TypedHeadJob(_))
      .source(TypedText.tsv[(Int, Int)]("input"), mk)
      .typedSink(TypedText.tsv[(Int, Int)]("output")) { outBuf =>
        "correctly take the first" in {
          val correct = mk.groupBy(_._1).mapValues(_.head._2)
          outBuf should have size (correct.size)
          outBuf.toMap shouldBe correct
        }
      }
      .run
      .finish
  }
}
/** Takes the top-5 values per key two ways: sortedReverseTake and sorted.reverse.bufferedTake. */
class TypedSortWithTakeJob(args: Args) extends Job(args) {
  val in = TypedPipe.from(TypedText.tsv[(Int, Int)]("input"))
  in
    .group
    .sortedReverseTake(5)
    .flattenValues
    .write(TypedText.tsv[(Int, Int)]("output"))
  in
    .group
    .sorted
    .reverse
    .bufferedTake(5)
    .write(TypedText.tsv[(Int, Int)]("output2"))
}
/** Verifies both top-5-per-key variants against an in-memory sorted-take model. */
class TypedSortWithTakeTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedSortWithTakeJob" should {
    val rng = new java.util.Random
    val COUNT = 10000
    val KEYS = 100
    val mk = (1 to COUNT).map { _ => (rng.nextInt % KEYS, rng.nextInt) }
    JobTest(new TypedSortWithTakeJob(_))
      .source(TypedText.tsv[(Int, Int)]("input"), mk)
      .sink[(Int, Int)](TypedText.tsv[(Int, Int)]("output")) { outBuf =>
        "correctly take the first" in {
          // top 5 values per key, descending
          val correct = mk.groupBy(_._1).mapValues(_.map(i => i._2).sorted.reverse.take(5).toSet)
          outBuf.groupBy(_._1).mapValues(_.map { case (k, v) => v }.toSet) shouldBe correct
        }
      }
      .sink[(Int, Int)](TypedText.tsv[(Int, Int)]("output2")) { outBuf =>
        "correctly take the first using sorted.reverse.take" in {
          val correct = mk.groupBy(_._1).mapValues(_.map(i => i._2).sorted.reverse.take(5).toSet)
          outBuf.groupBy(_._1).mapValues(_.map { case (k, v) => v }.toSet) shouldBe correct
        }
      }
      .run
      .finish
  }
}
/** hashLookup of keys against a grouped (Int, String) table; misses become "". */
class TypedLookupJob(args: Args) extends Job(args) {
  TypedPipe.from(TypedText.tsv[Int]("input0"))
    .hashLookup(TypedPipe.from(TypedText.tsv[(Int, String)]("input1")).group)
    .mapValues { o: Option[String] => o.getOrElse("") }
    .write(TypedText.tsv[(Int, String)]("output"))
}
/** Verifies hashLookup emits every matching value per key, and "" for misses. */
class TypedLookupJobTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedLookupJob" should {
    val rng = new java.util.Random
    val COUNT = 10000
    val KEYS = 100
    // note: nextInt % KEYS ranges over (-KEYS, KEYS)
    val mk = (1 to COUNT).map { _ => (rng.nextInt % KEYS, rng.nextInt.toString) }
    JobTest(new TypedLookupJob(_))
      .source(TypedText.tsv[Int]("input0"), (-1 to 100))
      .source(TypedText.tsv[(Int, String)]("input1"), mk)
      .typedSink(TypedText.tsv[(Int, String)]("output")) { outBuf =>
        "correctly TypedPipe.hashLookup" in {
          val data = mk.groupBy(_._1)
          // keys absent from the table produce a single (k, "") record
          val correct = (-1 to 100).flatMap { k =>
            data.get(k).getOrElse(List((k, "")))
          }.toList.sorted
          outBuf should have size (correct.size)
          outBuf.toList.sorted shouldBe correct
        }
      }(implicitly[TypeDescriptor[(Int, String)]].converter)
      .run
      .finish
  }
}
/** hashLookup against a table reduced to the max String value per key; misses become "". */
class TypedLookupReduceJob(args: Args) extends Job(args) {
  TypedPipe.from(TypedText.tsv[Int]("input0"))
    .hashLookup(TypedPipe.from(TypedText.tsv[(Int, String)]("input1")).group.max)
    .mapValues { o: Option[String] => o.getOrElse("") }
    .write(TypedText.tsv[(Int, String)]("output"))
}
/** Verifies TypedLookupReduceJob: hashLookup against a max-reduced grouped table. */
class TypedLookupReduceJobTest extends WordSpec with Matchers {
  import Dsl._
  // Fixed: the description previously said "A TypedLookupJob" (copy/paste error);
  // this spec exercises TypedLookupReduceJob.
  "A TypedLookupReduceJob" should {
    val rng = new java.util.Random
    val COUNT = 10000
    val KEYS = 100
    val mk = (1 to COUNT).map { _ => (rng.nextInt % KEYS, rng.nextInt.toString) }
    JobTest(new TypedLookupReduceJob(_))
      .source(TypedText.tsv[Int]("input0"), (-1 to 100))
      .source(TypedText.tsv[(Int, String)]("input1"), mk)
      .typedSink(TypedText.tsv[(Int, String)]("output")) { outBuf =>
        "correctly TypedPipe.hashLookup" in {
          // mirror the job's .group.max: keep the max String value per key
          val data = mk.groupBy(_._1)
            .mapValues { kvs =>
              val (k, v) = kvs.maxBy(_._2)
              (k, v)
            }
          val correct = (-1 to 100).map { k =>
            data.get(k).getOrElse((k, ""))
          }.toList.sorted
          outBuf should have size (correct.size)
          outBuf.toList.sorted shouldBe correct
        }
      }(implicitly[TypeDescriptor[(Int, String)]].converter)
      .run
      .finish
  }
}
/** Keeps values > 50, then drops the even ones via filterNot. */
class TypedFilterJob(args: Args) extends Job(args) {
  TypedPipe.from(TypedText.tsv[Int]("input"))
    .filter { _ > 50 }
    .filterNot { _ % 2 == 0 }
    .write(TypedText.tsv[Int]("output"))
}
/** Verifies filter/filterNot against the equivalent in-memory collection operations. */
class TypedFilterTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedPipe" should {
    "filter and filterNot elements" in {
      val input = -1 to 100
      val isEven = (i: Int) => i % 2 == 0
      val expectedOutput = input filter { _ > 50 } filterNot isEven
      TUtil.printStack {
        JobTest(new com.twitter.scalding.TypedFilterJob(_))
          .source(TypedText.tsv[Int]("input"), input)
          .typedSink(TypedText.tsv[Int]("output")) { outBuf =>
            outBuf.toList shouldBe expectedOutput
          }
          .run
          .runHadoop
          .finish
      }
    }
  }
}
/** Splits the input into two pipes by the predicate _ > 50 and writes each side. */
class TypedPartitionJob(args: Args) extends Job(args) {
  val (p1, p2) = TypedPipe.from(TypedText.tsv[Int]("input")).partition { _ > 50 }
  p1.write(TypedText.tsv[Int]("output1"))
  p2.write(TypedText.tsv[Int]("output2"))
}
/** Verifies partition matches scala.collection partition on the same predicate. */
class TypedPartitionTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedPipe" should {
    "partition elements" in {
      val input = -1 to 100
      val (expected1, expected2) = input partition { _ > 50 }
      TUtil.printStack {
        JobTest(new com.twitter.scalding.TypedPartitionJob(_))
          .source(TypedText.tsv[Int]("input"), input)
          .typedSink(TypedText.tsv[Int]("output1")) { outBuf =>
            outBuf.toList shouldBe expected1
          }
          .typedSink(TypedText.tsv[Int]("output2")) { outBuf =>
            outBuf.toList shouldBe expected2
          }
          .run
          .runHadoop
          .finish
      }
    }
  }
}
/** MultiJoin of three distinct pipes (no self joins), flattened to 4-tuples. */
class TypedMultiJoinJob(args: Args) extends Job(args) {
  val zero = TypedPipe.from(TypedText.tsv[(Int, Int)]("input0"))
  val one = TypedPipe.from(TypedText.tsv[(Int, Int)]("input1"))
  val two = TypedPipe.from(TypedText.tsv[(Int, Int)]("input2"))
  val cogroup = MultiJoin(zero, one.group.max, two.group.max)
  // make sure this is indeed a case with no self joins
  // distinct by mapped
  val distinct = cogroup.inputs.groupBy(identity).map(_._2.head).toList
  assert(distinct.size == cogroup.inputs.size)
  cogroup
    .map { case (k, (v0, v1, v2)) => (k, v0, v1, v2) }
    .write(TypedText.tsv[(Int, Int, Int, Int)]("output"))
}
/** Verifies the three-way MultiJoin against an in-memory group/max join model. */
class TypedMultiJoinJobTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedMultiJoinJob" should {
    val rng = new java.util.Random
    val COUNT = 100 * 100
    val KEYS = 10
    // fresh random data for each of the three inputs
    def mk = (1 to COUNT).map { _ => (rng.nextInt % KEYS, rng.nextInt) }
    val mk0 = mk
    val mk1 = mk
    val mk2 = mk
    JobTest(new TypedMultiJoinJob(_))
      .source(TypedText.tsv[(Int, Int)]("input0"), mk0)
      .source(TypedText.tsv[(Int, Int)]("input1"), mk1)
      .source(TypedText.tsv[(Int, Int)]("input2"), mk2)
      .typedSink(TypedText.tsv[(Int, Int, Int, Int)]("output")) { outBuf =>
        "correctly do a multi-join" in {
          // max value per key, mirroring .group.max in the job
          def groupMax(it: Seq[(Int, Int)]): Map[Int, Int] =
            it.groupBy(_._1).mapValues { kvs =>
              val (k, v) = kvs.maxBy(_._2)
              v
            }.toMap
          val d0 = mk0.groupBy(_._1).mapValues(_.map { case (_, v) => v })
          val d1 = groupMax(mk1)
          val d2 = groupMax(mk2)
          // inner-join semantics: a key must appear in all three sides
          val correct = (d0.keySet ++ d1.keySet ++ d2.keySet).toList
            .flatMap { k =>
              (for {
                v0s <- d0.get(k)
                v1 <- d1.get(k)
                v2 <- d2.get(k)
              } yield (v0s, (k, v1, v2)))
            }
            .flatMap {
              case (v0s, (k, v1, v2)) =>
                v0s.map { (k, _, v1, v2) }
            }
            .toList.sorted
          outBuf should have size (correct.size)
          outBuf.toList.sorted shouldBe correct
        }
      }
      .runHadoop
      .finish
  }
}
/** Joins a pipe with the max and min of the SAME grouped pipe (deliberate self joins). */
class TypedMultiSelfJoinJob(args: Args) extends Job(args) {
  val zero = TypedPipe.from(TypedText.tsv[(Int, Int)]("input0"))
  val one = TypedPipe.from(TypedText.tsv[(Int, Int)]("input1"))
    // forceToReducers makes sure the first and the second part of
    // the self join below are realized on the reduce side
    .group.forceToReducers
  val cogroup = zero.group
    .join(one.max)
    .join(one.min)
  // make sure this is indeed a case with some self joins
  // distinct by mapped
  val distinct = cogroup.inputs.groupBy(identity).map(_._2.head).toList
  assert(distinct.size < cogroup.inputs.size)
  cogroup
    .map { case (k, ((v0, v1), v2)) => (k, v0, v1, v2) }
    .write(TypedText.tsv[(Int, Int, Int, Int)]("output"))
}
/** Verifies the self-join against an in-memory model of max/min per key. */
class TypedMultiSelfJoinJobTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedMultiSelfJoinJob" should {
    val rng = new java.util.Random
    val COUNT = 10000
    val KEYS = 100
    // fresh random data for each input
    def mk = (1 to COUNT).map { _ => (rng.nextInt % KEYS, rng.nextInt) }
    val mk0 = mk
    val mk1 = mk
    JobTest(new TypedMultiSelfJoinJob(_))
      .source(TypedText.tsv[(Int, Int)]("input0"), mk0)
      .source(TypedText.tsv[(Int, Int)]("input1"), mk1)
      .typedSink(TypedText.tsv[(Int, Int, Int, Int)]("output")) { outBuf =>
        "correctly do a multi-self-join" in {
          // reduce values per key with the given op (max or min below)
          def group(it: Seq[(Int, Int)])(red: (Int, Int) => Int): Map[Int, Int] =
            it.groupBy(_._1).mapValues { kvs =>
              kvs.map(_._2).reduce(red)
            }.toMap
          val d0 = mk0.groupBy(_._1).mapValues(_.map { case (_, v) => v })
          val d1 = group(mk1)(_ max _)
          val d2 = group(mk1)(_ min _)
          // inner-join semantics: a key must appear in all three sides
          val correct = (d0.keySet ++ d1.keySet ++ d2.keySet).toList
            .flatMap { k =>
              (for {
                v0s <- d0.get(k)
                v1 <- d1.get(k)
                v2 <- d2.get(k)
              } yield (v0s, (k, v1, v2)))
            }
            .flatMap {
              case (v0s, (k, v1, v2)) =>
                v0s.map { (k, _, v1, v2) }
            }
            .toList.sorted
          outBuf should have size (correct.size)
          outBuf.toList.sorted shouldBe correct
        }
      }
      .runHadoop
      .finish
  }
}
/** Multiplies each value by its key inside mapGroup, then keeps the per-key max. */
class TypedMapGroup(args: Args) extends Job(args) {
  TypedPipe.from(TypedText.tsv[(Int, Int)]("input"))
    .group
    .mapGroup { (k, iters) => iters.map(_ * k) }
    .max
    .write(TypedText.tsv[(Int, Int)]("output"))
}
/** Verifies mapGroup + max against an in-memory max of k*v per key. */
class TypedMapGroupTest extends WordSpec with Matchers {
  import Dsl._
  "A TypedMapGroup" should {
    val rng = new java.util.Random
    val COUNT = 10000
    val KEYS = 100
    val mk = (1 to COUNT).map { _ => (rng.nextInt % KEYS, rng.nextInt) }
    JobTest(new TypedMapGroup(_))
      .source(TypedText.tsv[(Int, Int)]("input"), mk)
      .typedSink(TypedText.tsv[(Int, Int)]("output")) { outBuf =>
        "correctly do a mapGroup" in {
          def mapGroup(it: Seq[(Int, Int)]): Map[Int, Int] =
            it.groupBy(_._1).mapValues { kvs =>
              kvs.map { case (k, v) => k * v }.max
            }.toMap
          val correct = mapGroup(mk).toList.sorted
          outBuf should have size (correct.size)
          outBuf.toList.sorted shouldBe correct
        }
      }
      .runHadoop
      .finish
  }
}
/** Crosses a pipe with its own global sum (a single-record pipe). */
class TypedSelfCrossJob(args: Args) extends Job(args) {
  val pipe = TypedPipe.from(TypedText.tsv[Int]("input"))
  pipe
    .cross(pipe.groupAll.sum.values)
    .write(TypedText.tsv[(Int, Int)]("output"))
}
/** Checks that crossing a pipe with its own global sum keeps one row per input element. */
class TypedSelfCrossTest extends WordSpec with Matchers {
  import Dsl._

  val input = (1 to 100).toList

  "A TypedSelfCrossJob" should {
    // Bumped once per run mode (local, then hadoop) so the registered test names stay unique.
    var runIndex = 0
    JobTest(new TypedSelfCrossJob(_))
      .source(TypedText.tsv[Int]("input"), input)
      .typedSink(TypedText.tsv[(Int, Int)]("output")) { outBuf =>
        s"$runIndex: not change the length of the input" in {
          outBuf.size shouldBe input.size
        }
        runIndex += 1
      }
      .run
      .runHadoop
      .finish
  }
}
/** Pairs every input element with Some(total sum of the pipe) via leftCross. */
class TypedSelfLeftCrossJob(args: Args) extends Job(args) {
  val pipe = TypedPipe.from(TypedText.tsv[Int]("input"))
  // leftCross against the pipe's own sum yields (value, Option[sum]) pairs.
  val summed = pipe.leftCross(pipe.sum)
  summed.write(TypedText.tsv[(Int, Option[Int])]("output"))
}
/** Verifies TypedSelfLeftCrossJob attaches the total sum to every input element. */
class TypedSelfLeftCrossTest extends WordSpec with Matchers {
  import Dsl._

  val input = (1 to 100).toList

  "A TypedSelfLeftCrossJob" should {
    // Bumped once per run mode so the two registered test names stay unique.
    var runIndex = 0
    JobTest(new TypedSelfLeftCrossJob(_))
      .source(TypedText.tsv[Int]("input"), input)
      .typedSink(TypedText.tsv[(Int, Option[Int])]("output")) { outBuf =>
        s"$runIndex: attach the sum of all values correctly" in {
          outBuf.size shouldBe input.size
          val total = if (input.isEmpty) None else Some(input.sum)
          val expected = input.sorted.map(v => (v, total))
          // Compare via toString to deal with our hadoop testing jank.
          outBuf.toList.sortBy(_._1).toString shouldBe expected.toString
        }
        runIndex += 1
      }(implicitly[TypeDescriptor[(Int, Option[Int])]].converter)
      .run
      .runHadoop
      .finish
  }
}
/** Joins two small pipes on key and maps each joined group to a single "a" marker row. */
class JoinMapGroupJob(args: Args) extends Job(args) {
  def r1 = TypedPipe.from(Seq((1, 10)))
  def r2 = TypedPipe.from(Seq((1, 1), (2, 2), (3, 3)))
  val joined = r1.groupBy(_._1).join(r2.groupBy(_._1))
  // Each key surviving the inner join contributes exactly one "a" row.
  joined.mapGroup { (_, _) => Iterator("a") }.write(TypedText.tsv("output"))
}
/** Ensures JoinMapGroupJob emits one row per joined key rather than duplicating keys. */
class JoinMapGroupJobTest extends WordSpec with Matchers {
  import Dsl._

  "A JoinMapGroupJob" should {
    JobTest(new JoinMapGroupJob(_))
      .typedSink(TypedText.tsv[(Int, String)]("output")) { outBuf =>
        "not duplicate keys" in {
          // Only key 1 appears on both sides of the join.
          val expected = List((1, "a"))
          outBuf.toList shouldBe expected
        }
      }
      .run
      .finish
  }
}
/** Replaces each key's value stream with its size, then left-joins against extra keys. */
class MapValueStreamNonEmptyIteratorJob(args: Args) extends Job(args) {
  val input = TypedPipe.from[(Int, String)](Seq((1, "a"), (1, "b"), (3, "a")))
  val extraKeys = TypedPipe.from[(Int, String)](Seq((4, "a")))

  // Per key, emit exactly one element: the size of the value stream.
  val sizes = input.groupBy(_._1).mapValueStream(values => Iterator(values.size))
  sizes
    .leftJoin(extraKeys.group)
    .toTypedPipe
    .map { case (key, (iteratorSize, _)) => (key, iteratorSize) }
    .write(TypedText.tsv[(Int, Int)]("output"))
}
/** Checks that mapValueStream never observes an empty (size-0) value iterator. */
class MapValueStreamNonEmptyIteratorTest extends WordSpec with Matchers {
  "A MapValueStreamNonEmptyIteratorJob" should {
    JobTest(new MapValueStreamNonEmptyIteratorJob(_))
      .sink[(Int, Int)](TypedText.tsv[(Int, Int)]("output")) { outBuf =>
        "not have iterators of size 0" in {
          // A zero size would mean an empty iterator leaked through the join.
          assert(!outBuf.exists(_._2 == 0))
        }
      }
      .run
      .finish
  }
}
/** Streams 0..100 into the null sink, recording every element in the shared buffer. */
class NullSinkJob(args: Args, m: scala.collection.mutable.Buffer[Int]) extends Job(args) {
  TypedPipe.from(0 to 100)
    .map { i =>
      // Deliberate side effect: the test observes this buffer, not the sink.
      m.append(i)
      i
    }
    .write(source.NullSink)
}
/** Verifies the NullSinkJob's map side effect fires for every element despite the null sink. */
class NullSinkJobTest extends WordSpec with Matchers {
  "A NullSinkJob" should {
    val seen = scala.collection.mutable.Buffer[Int]()
    JobTest(new NullSinkJob(_, seen))
      .typedSink[Any](source.NullSink) { _ =>
        "have a side effect" in {
          // Every element of the range must have passed through the map.
          assert(seen.toSet === (0 to 100).toSet)
        }
      }
      .run
      .finish
  }
}
/** Runs the same inner join twice: skew-resistant sketch join vs. plain group join. */
class TypedSketchJoinJob(args: Args) extends Job(args) {
  val zero = TypedPipe.from(TypedText.tsv[(Int, Int)]("input0"))
  val one = TypedPipe.from(TypedText.tsv[(Int, Int)]("input1"))
  // Sketch joins need the key rendered as bytes.
  implicit def serialize(k: Int) = k.toString.getBytes

  // Flattens a joined row into the output schema.
  val flatten: ((Int, (Int, Int))) => (Int, Int, Int) = {
    case (k, (v0, v1)) => (k, v0, v1)
  }

  zero.sketch(args("reducers").toInt).join(one).map(flatten)
    .write(TypedText.tsv[(Int, Int, Int)]("output-sketch"))

  zero.group.join(one.group).map(flatten)
    .write(TypedText.tsv[(Int, Int, Int)]("output-join"))
}
/** Runs the same left join twice: skew-resistant sketch join vs. plain group join. */
class TypedSketchLeftJoinJob(args: Args) extends Job(args) {
  val zero = TypedPipe.from(TypedText.tsv[(Int, Int)]("input0"))
  val one = TypedPipe.from(TypedText.tsv[(Int, Int)]("input1"))
  // Sketch joins need the key rendered as bytes.
  implicit def serialize(k: Int) = k.toString.getBytes

  // Missing right-side values become -1 so both outputs share one schema.
  val flatten: ((Int, (Int, Option[Int]))) => (Int, Int, Int) = {
    case (k, (v0, v1)) => (k, v0, v1.getOrElse(-1))
  }

  zero.sketch(args("reducers").toInt).leftJoin(one).map(flatten)
    .write(TypedText.tsv[(Int, Int, Int)]("output-sketch"))

  zero.group.leftJoin(one.group).map(flatten)
    .write(TypedText.tsv[(Int, Int, Int)]("output-join"))
}
/** Shared fixture for the sketch-join specs: random input generation and job execution. */
object TypedSketchJoinTestHelper {
  import Dsl._

  val rng = new java.util.Random

  /** Builds (key, value) pairs; dist(key) controls how many rows each drawn key contributes. */
  def generateInput(size: Int, max: Int, dist: (Int) => Int): List[(Int, Int)] = {
    def next: Int = rng.nextInt(max)
    (0 to size).toList.flatMap { _ =>
      val key = next
      // List.fill evaluates its element by name, so each row draws a fresh value.
      List.fill(dist(key))((key, next))
    }
  }

  /** Runs the job locally and on hadoop; returns (sketch output, plain join output), sorted. */
  def runJobWithArguments(fn: (Args) => Job, reducers: Int, dist: (Int) => Int): (List[(Int, Int, Int)], List[(Int, Int, Int)]) = {
    val sketchResult = Buffer[(Int, Int, Int)]()
    val innerResult = Buffer[(Int, Int, Int)]()
    JobTest(fn)
      .arg("reducers", reducers.toString)
      .source(TypedText.tsv[(Int, Int)]("input0"), generateInput(1000, 100, dist))
      .source(TypedText.tsv[(Int, Int)]("input1"), generateInput(100, 100, _ => 1))
      .typedSink(TypedText.tsv[(Int, Int, Int)]("output-sketch")) { sketchResult ++= _ }
      .typedSink(TypedText.tsv[(Int, Int, Int)]("output-join")) { innerResult ++= _ }
      .run
      .runHadoop
      .finish

    (sketchResult.toList.sorted, innerResult.toList.sorted)
  }
}
/** The sketch inner join must agree with the reference group join under every distribution. */
class TypedSketchJoinJobTest extends WordSpec with Matchers {
  import Dsl._
  import TypedSketchJoinTestHelper._

  "A TypedSketchJoinJob" should {
    // Runs both joins and demands identical sorted output.
    def check(reducers: Int, dist: Int => Int): Unit = {
      val (sketched, inner) = runJobWithArguments(new TypedSketchJoinJob(_), reducers, dist)
      sketched shouldBe inner
    }

    "get the same result as an inner join" in { check(10, _ => 1) }
    "get the same result when half the left keys are missing" in { check(10, x => if (x < 50) 0 else 1) }
    "get the same result with a massive skew to one key" in { check(10, x => if (x == 50) 1000 else 1) }
    "still work with only one reducer" in { check(1, _ => 1) }
    "still work with massive skew and only one reducer" in { check(1, x => if (x == 50) 1000 else 1) }
  }
}
/**
 * The sketch-based left join must agree with the reference group-based left join
 * under every key distribution.
 *
 * BUG FIX: four of the five cases previously instantiated TypedSketchJoinJob (the
 * inner-join job), so they merely duplicated TypedSketchJoinJobTest and never
 * exercised the left join. All cases now run TypedSketchLeftJoinJob.
 */
class TypedSketchLeftJoinJobTest extends WordSpec with Matchers {
  import Dsl._
  import TypedSketchJoinTestHelper._

  "A TypedSketchLeftJoinJob" should {
    // Runs both left joins and demands identical sorted output.
    def check(reducers: Int, dist: Int => Int): Unit = {
      val (sk, left) = runJobWithArguments(new TypedSketchLeftJoinJob(_), reducers, dist)
      sk shouldBe left
    }

    "get the same result as a left join" in { check(10, x => 1) }
    "get the same result when half the left keys are missing" in { check(10, x => if (x < 50) 0 else 1) }
    "get the same result with a massive skew to one key" in { check(10, x => if (x == 50) 1000 else 1) }
    "still work with only one reducer" in { check(1, x => 1) }
    "still work with massive skew and only one reducer" in { check(1, x => if (x == 50) 1000 else 1) }
  }
}
| tglstory/scalding | scalding-core/src/test/scala/com/twitter/scalding/TypedPipeTest.scala | Scala | apache-2.0 | 47,479 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.source
import com.twitter.bijection.Injection
import java.io.Serializable
/**
 * Handles the error checking for Injection inversion. If the check fails, it throws an
 * unrecoverable exception, stopping the job.
 * TODO: probably belongs in Bijection.
 */
trait CheckedInversion[T, U] extends Serializable {
  // The underlying injection whose inversion is being checked.
  def injection: Injection[T, U]
  // Attempts to invert `input`. Implementations decide whether a failed
  // inversion yields None or aborts the job with an exception.
  def apply(input: U): Option[T]
}
| twitter/scalding | scalding-core/src/main/scala/com/twitter/scalding/source/CheckedInversion.scala | Scala | apache-2.0 | 958 |
package tu.model.knowledge.domain
import tu.model.knowledge._
import annotator.AnnotatedPhrase
import tu.model.knowledge.semanticnetwork.SemanticNetwork
import org.slf4j.LoggerFactory
import tu.exception.UnexpectedException
import collection.mutable.ListBuffer
import tu.model.knowledge.helper.ModelHelper
/**
* @author max talanov
* date 2012-06-17
* time: 10:41 PM
*/
//TODO refactor nodes and links to Set and make it val
/**
 * A semantic network of Concept nodes and ConceptLink edges, with lookup helpers
 * and KB persistence.
 *
 * FIX: getNodeByGeneralisationName previously compared a KnowledgeURI to a String
 * (cross-type ==, never true); it now compares the uri's name, matching the
 * companion object's variant.
 */
case class ConceptNetwork(var _nodes: List[Concept] = List[Concept](),
                          var _links: List[ConceptLink] = List[ConceptLink](),
                          override val _uri: KnowledgeURI,
                          override val _probability: Probability = new Probability())
  extends SemanticNetwork(_nodes, _uri, _probability) {

  val log = LoggerFactory.getLogger(this.getClass)

  /** Restores an empty network (uri + probability only) from a serialized property map. */
  def this(map: Map[String, String]) = {
    this(
      List[Concept](),
      List[ConceptLink](),
      new KnowledgeURI(map),
      new Probability(map)
    )
  }

  /** Creates an empty network with the given uri. */
  def this(uri: KnowledgeURI) = {
    this(List[Concept](), List[ConceptLink](), uri)
  }

  // Phrases that could not be resolved to any known concept.
  private var _notKnownPhrases: List[AnnotatedPhrase] = List[AnnotatedPhrase]()

  def notKnownPhrases = _notKnownPhrases

  def notKnownPhrases_=(aVal: List[AnnotatedPhrase]) = _notKnownPhrases = aVal

  def nodes = _nodes

  def nodes_=(aNodes: List[Concept]) {
    _nodes = aNodes
  }

  def links = _links

  override def rootNodes = _nodes

  /**
   * Returns nodes with specified name, ignoring any instance-identifier suffix.
   * @param name String parameter of filter
   * @return List[Concept] that has uri-s with specified name.
   */
  def getNodeByName(name: String): List[Concept] = {
    if (_nodes != null) {
      _nodes.filter {
        concept: Concept => {
          reduceInstanceIdentifier(concept.uri.name) == reduceInstanceIdentifier(name)
        }
      }
    } else {
      List[Concept]()
    }
  }

  /**
   * Returns nodes with specified uri.
   * @param aUri to search node with.
   * @return List of found Concepts with specified aUri; empty when none found.
   */
  def getNodeByURI(aUri: KnowledgeURI): List[Concept] = {
    if (_nodes != null) {
      _nodes.filter {
        concept: Concept => {
          concept.uri == aUri
        }
      }
    } else {
      List[Concept]()
    }
  }

  /**
   * Strips the instance-identifier suffix (everything from the UID delimiter on)
   * from a concept name; returns the name unchanged when no delimiter is present.
   */
  def reduceInstanceIdentifier(name: String): String = {
    val indexOfUIDDelimiter = name indexOf tu.model.knowledge.Constant.UID_INSTANCE_DELIMITER
    if (indexOfUIDDelimiter > -1) {
      name.substring(0, indexOfUIDDelimiter)
    } else {
      name
    }
  }

  /**
   * Returns nodes whose name starts with the given prefix.
   * A blank prefix yields an empty result rather than all nodes.
   */
  def getNodeByStartOfName(start: String): List[Concept] = {
    if (start.trim.size > 0) {
      _nodes.filter {
        concept: Concept => {
          concept.uri.name.startsWith(start)
        }
      }
    } else {
      List[Concept]()
    }
  }

  /**
   * Returns links with specified name.
   * @param name String parameter of filter
   * @return List[ConceptLink] that has uri-s with specified name.
   */
  def getLinkByName(name: String): List[ConceptLink] = {
    _links.filter {
      link: ConceptLink => {
        link.uri.name == name
      }
    }
  }

  /**
   * Returns nodes that have a direct generalisation with the specified name.
   * @param name String parameter of filter
   * @return List[Concept] whose generalisation uri-s carry the specified name.
   */
  def getNodeByGeneralisationName(name: String): List[Concept] = {
    _nodes.filter {
      concept: Concept => {
        val gens: Map[KnowledgeURI, Concept] = concept.generalisations.frames.filter {
          uriConcept: Pair[KnowledgeURI, Concept] => {
            // Fixed: compare the uri's *name* to the String. The previous
            // `uriConcept._1 == name` compared KnowledgeURI to String and
            // could never match.
            uriConcept._1.name == name
          }
        }
        gens.nonEmpty
      }
    }
  }

  /**
   * Returns nodes whose rendered content equals the phrase text (case-insensitive).
   * @param aPhrase AnnotatedPhrase parameter of filter.
   */
  def getNodeByPhrase(aPhrase: AnnotatedPhrase): List[Concept] = {
    if (this.nodes != null) {
      val res = this.nodes.filter {
        concept: Concept => {
          concept.content.toString.toLowerCase == aPhrase.text.toLowerCase
          /*val phrases: Map[KnowledgeURI, AnnotatedPhrase] = concept.phrases.frames.filter {
            uriPhrase: Pair[KnowledgeURI, AnnotatedPhrase] => {
              uriPhrase._2.text.trim == aPhrase.text.trim
            }
          }
          phrases.size > 0 */
        }
      }
      res
    } else {
      List[Concept]()
    }
  }

  override def toString = {
    this.getClass.getName + " [" + nodes.toString + "][" + links + "]@" + uri.toString
  }

  /**
   * Renders the network as text: one line per leaf concept, each followed by its
   * chain of generalisations ("leaf <- parent <- ...") and adjacent links.
   */
  def toText = {
    // True when `what` is reachable from `where` by walking up the first generalisation chain.
    def searchToUp(where: Concept, what: Concept): Boolean = {
      if (where.uri.name == what.uri.name)
        return true
      val up = where.generalisationsList
      if (up.size == 0)
        return false
      searchToUp(up.head, what)
    }
    // Leafs are nodes no other node generalises up into.
    val leafs = nodes.filter(i => nodes.filter(j => j.uri.name != i.uri.name && searchToUp(j, i)).isEmpty)
    // Renders one link relative to concept x (self-loop, incoming, or outgoing).
    def oneLink(x: Concept, l: ConceptLink): String = {
      val lString = l.uri.name
      if (l.source.toString == l.destination.toString)
        "<" + lString + ">"
      else if (x.toString == l.destination.toString)
        "[" + l.source.toString + " <" + lString + ">]"
      else if (l.source.toString == x.toString)
        "[<" + lString + "> " + l.destination.toString + "]"
      else
        ""
    }
    def listLinks(x: Concept): String = {
      _links.map(l => oneLink(x, l)).mkString("")
    }
    // Concepts that are not part of this network are rendered in parentheses.
    def oneConcept(x: Concept): String = {
      if (_nodes.contains(x))
        x.__content.toString + listLinks(x)
      else
        "(" + x.__content.toString + ")"
    }
    // Renders a leaf together with its generalisation chain.
    def oneLeaf(x: Concept): String = {
      val up = x.generalisationsList
      if (up.size == 0)
        return x.toString
      oneConcept(x) + " <- " + oneLeaf(up.head)
    }
    leafs.map(oneLeaf).mkString("\n")
  }

  /**
   * Persists this network with its nodes, root nodes and links into the KB.
   * @return true when every component saved successfully.
   */
  override def save(kb: KB, parent: KBNodeId, key: String, linkType: String, saved: ListBuffer[String] = new ListBuffer[String]()): Boolean = {
    // Guard against re-saving a network already handled in this pass.
    if (ModelHelper.checkIfSaved(kb, parent, key, linkType, saved, KBNodeId(this), this.uri)) return true
    var res = kb.saveResource(this, parent, key, linkType)
    ModelHelper.appendToSave(this.uri, saved)
    if (nodes != null) {
      for (x: Resource <- nodes) {
        res &= x.save(kb, KBNodeId(this), x.uri.toString, Constant.NODES_LINK_NAME, saved)
      }
    }
    if (_rootNodes != null) {
      for (x: Resource <- _rootNodes) {
        res &= x.save(kb, KBNodeId(this), x.uri.toString, Constant.ROOT_NODES_LINK_NAME, saved)
      }
    }
    if (links != null) {
      for (y: Resource <- links) {
        res &= y.save(kb, KBNodeId(this), y.uri.toString, Constant.LINKS_LINK_NAME, saved)
      }
    }
    res
  }
}
/** Companion: KB loading, factory methods, and static lookup helpers. */
object ConceptNetwork {

  val log = LoggerFactory.getLogger(this.getClass)

  /**
   * Loads a ConceptNetwork persisted under the given parent/link from the KB,
   * using the KBMap cache when the node was already materialised.
   * @throws UnexpectedException when no node exists for the given link.
   */
  def load(kb: KB, parent: KBNodeId, key: String, linkType: String, alreadyLoaded: ListBuffer[String] = new ListBuffer[String]()): ConceptNetwork = {
    val selfMap = kb.loadChild(parent, key, linkType)
    if (selfMap.isEmpty) {
      log.error("Concept not loaded for link {}/{} for {}", List(key, linkType, parent.ID.toString))
      throw new UnexpectedException("Concept not loaded for link " + key + "/" + linkType + " for " + parent.ID.toString)
    }
    // Try to load from cache first to avoid re-materialising the same network.
    val cached = KBMap.loadFromCache(new KnowledgeURI(selfMap))
    if (!cached.isEmpty) return cached.get.asInstanceOf[ConceptNetwork]
    val ID = new KBNodeId(selfMap)
    val res = new ConceptNetwork(null,
      null,
      new KnowledgeURI(selfMap),
      new Probability(selfMap)
    )
    // Register before loading children so cyclic references resolve via the cache.
    KBMap.register(res, ID.ID)
    def oneList(items: Map[String, Map[String, String]], linkName: String): Map[KnowledgeURI, Concept] = {
      items.keys.foldLeft(Map[KnowledgeURI, Concept]()) {
        (acc, uri) => acc + Pair(KnowledgeURI(uri, true), Concept.load(kb, ID, uri, linkName)) //new Concept(items(uri)))
      }
    }
    val concepts: List[Concept] = oneList(kb.loadChildrenMap(ID, Constant.NODES_LINK_NAME), Constant.NODES_LINK_NAME).map {
      pair: Pair[KnowledgeURI, Resource] => pair._2.asInstanceOf[Concept]
    }.toList
    // Load each concept link.
    // NOTE(review): source and destination maps are loaded from the same link
    // name (LINKS_LINK_NAME), so each ConceptLink gets identical source and
    // destination data -- confirm whether a dedicated destination link name
    // was intended here.
    val linksSourceMap = kb.loadChildrenMap(ID, Constant.LINKS_LINK_NAME)
    val linksDestinationMap = kb.loadChildrenMap(ID, Constant.LINKS_LINK_NAME)
    val conceptLinkList: List[ConceptLink] =
      linksSourceMap.keys.foldLeft(List[ConceptLink]()) {
        (acc, uri) => ConceptLink(new Concept(linksSourceMap(uri)), new Concept(linksDestinationMap(uri)), uri) :: acc
      }
    res._nodes = concepts
    res._links = conceptLinkList
    res
  }

  /**
   * Creates the "not known concepts" holder network carrying unresolved phrases.
   * @param notKnownConcept phrases that could not be matched to known concepts.
   */
  def apply(notKnownConcept: List[AnnotatedPhrase]): ConceptNetwork = {
    val uri = KnowledgeURI("NotKnownConceptsHolder")
    val re = new ConceptNetwork(List(), List(), uri)
    re.notKnownPhrases = notKnownConcept
    re
  }

  def apply(nodes: List[Concept], links: List[ConceptLink], name: String) = {
    val uri = KnowledgeURI(name)
    new ConceptNetwork(nodes, links, uri)
  }

  /** Builds a network from nodes, deriving the link list from each node's own links. */
  def apply(nodes: List[Concept], name: String) = {
    val uri = KnowledgeURI(name)
    // Idiom: flatMap instead of map{...}.flatten.
    val links: List[ConceptLink] = nodes.flatMap {
      node: Concept => {
        node.links
      }
    }
    new ConceptNetwork(nodes, links, uri)
  }

  /**
   * Returns nodes that have a *direct* generalisation with the specified name.
   * @param nodes List[Concept] to filter.
   * @param name String parameter of filter.
   */
  def getNodeByGeneralisationName(nodes: List[Concept], name: String): List[Concept] = {
    nodes.filter {
      concept: Concept => {
        // Idiom: exists instead of filter(...).size > 0 (no intermediate map built).
        concept.generalisations.frames.exists {
          uriConcept: Pair[KnowledgeURI, Concept] => {
            uriConcept._1.name == name
          }
        }
      }
    }
  }

  /**
   * Runs recursively through the generalisation hierarchy; returns nodes that
   * have a generalisation with the specified name at any depth.
   * @param nodes List[Concept] to filter.
   * @param name String parameter of filter.
   */
  def getNodeByGeneralisationNameRec(nodes: List[Concept], name: String): List[Concept] = {
    nodes.filter {
      concept: Concept => {
        filterGeneralisations(concept.generalisations, name).nonEmpty
      }
    }
  }

  // Recursive filter over the generalisation tree.
  // NOTE(review): does not track visited nodes, so a cyclic generalisation
  // graph would recurse forever -- confirm acyclicity is guaranteed.
  private def filterGeneralisations(generalisations: TypedKLine[Concept], name: String): Map[KnowledgeURI, Concept] = {
    generalisations.frames.filter {
      uriConcept: Pair[KnowledgeURI, Concept] => {
        if (uriConcept._1.name == name) {
          true
        } else {
          filterGeneralisations(uriConcept._2.generalisations, name).nonEmpty
        }
      }
    }
  }

  /** True when the network contains a concept with the given uri. */
  def containsConceptByURI(conceptNetWork: ConceptNetwork, uri: KnowledgeURI): Boolean = {
    // Idiom: exists instead of filter(...).size > 0.
    conceptNetWork.nodes.exists {
      c: Concept => {
        c.uri.equals(uri)
      }
    }
  }

  /** Finds the first concept with the given uri, if any. */
  def findConceptByURI(conceptNetWork: ConceptNetwork, uri: KnowledgeURI): Option[Concept] = {
    conceptNetWork.nodes.find {
      c: Concept => {
        c.uri.equals(uri)
      }
    }
  }
}
} | keskival/2 | model.knowledge/src/main/scala/tu/model/knowledge/domain/ConceptNetwork.scala | Scala | gpl-3.0 | 11,638 |
package bifrost.state
import java.util.UUID
import bifrost.BifrostNodeViewHolder.{HIS, MP, MS, VL}
import bifrost.forging.ForgingSettings
import bifrost.transaction.box.StateBox
import bifrost.transaction.box.proposition.PublicKey25519Proposition
import bifrost.{BifrostGenerators, BifrostNodeViewHolder, ValidGenerators}
import com.google.common.primitives.Ints
import io.circe
import io.circe.syntax._
import org.scalatest.{BeforeAndAfterAll, Matchers, PropSpec}
import org.scalatest.prop.{GeneratorDrivenPropertyChecks, PropertyChecks}
import scorex.crypto.encode.Base58
import scorex.crypto.signatures.Curve25519
import scala.reflect.io.Path
import scala.util.Try
// NOTE(review): "ProgramBoxe" looks like a typo for "ProgramBox"; the name is
// kept unchanged so existing references to this spec keep working.
class ProgramBoxeRegistrySpec extends PropSpec
  with PropertyChecks
  with GeneratorDrivenPropertyChecks
  with Matchers
  with BeforeAndAfterAll
  with BifrostGenerators
  with ValidGenerators {

  // Scratch directory for the test chain data; wiped before the suite starts.
  val path: Path = Path("/tmp/bifrost/test-data")
  Try(path.deleteRecursively())

  val settingsFilename = "testSettings.json"
  lazy val testSettings: ForgingSettings = new ForgingSettings {
    override val settingsJSON: Map[String, circe.Json] = settingsFromFile(settingsFilename)
  }

  // Genesis history/state/wallet/mempool shared by all properties in this spec.
  val gs: (HIS, MS, VL, MP) = BifrostNodeViewHolder.initializeGenesis(testSettings)
  val history: HIS = gs._1
  var genesisState: MS = gs._2
  var gw: VL = gs._3

  val pubKey: PublicKey25519Proposition = PublicKey25519Proposition(Base58.decode("6sYyiTguyQ455w2dGEaNbrwkAWAEYV1Zk6FtZMknWDKQ").get)

  // Two distinct program states that will be stored under the same UUID.
  val stateOne =
    s"""
       |{ "a": "0" }
     """.stripMargin.asJson

  val stateTwo =
    s"""
       |{"b": "1" }
     """.stripMargin.asJson

  // Boxes are first built without a UUID so the UUID can be derived from the box id.
  val sboxOneWithoutUUID: StateBox = StateBox(pubKey, 0L, null, stateOne)
  val sboxTwoWithoutUUID: StateBox = StateBox(pubKey, 1L, null, stateTwo)

  val uuid: UUID = UUID.nameUUIDFromBytes(sboxOneWithoutUUID.id)

  val sboxOne: StateBox = StateBox(pubKey, 0L, uuid, stateOne)
  val sboxTwo: StateBox = StateBox(pubKey, 1L, uuid, stateTwo)

  // NOTE(review): mutated by the first property and read by the second, so the
  // properties below are order-dependent and must run in declaration order.
  var newState_1: BifrostState = null

  assert(sboxOne.value == uuid)

  property("BifrostState should update programBoxRegistry with state box and rollback correctly") {
    // Apply sboxOne, replace it with sboxTwo under the same uuid, then roll back one version.
    val changes_1: BifrostStateChanges = BifrostStateChanges(Set(), Set(sboxOne), 0L)
    newState_1 = genesisState.applyChanges(changes_1, Ints.toByteArray(1)).get
    assert(newState_1.pbr.getBoxId(uuid).get sameElements sboxOne.id)
    assert(newState_1.pbr.getBox(uuid).get.bytes sameElements sboxOne.bytes)
    val changes_2: BifrostStateChanges = BifrostStateChanges(Set(sboxOne.id), Set(sboxTwo), 0L)
    val newState_2 = newState_1.applyChanges(changes_2, Ints.toByteArray(2)).get
    assert(newState_2.pbr.getBoxId(uuid).get sameElements sboxTwo.id)
    assert(newState_2.pbr.getBox(uuid).get.bytes sameElements sboxTwo.bytes)
    // Rollback should restore the uuid -> sboxOne mapping.
    val oldState = newState_2.rollbackTo(newState_1.version).get
    assert(oldState.pbr.getBoxId(sboxOne.value).get sameElements sboxOne.id)
  }

  property("BifrostState should tombstone uuid in programBoxRegistry correctly") {
    // Removing the backing box without a replacement should drop the uuid entirely.
    val changes_2: BifrostStateChanges = BifrostStateChanges(Set(sboxOne.id), Set(), 0L)
    val newState_2 = newState_1.applyChanges(changes_2, Ints.toByteArray(3)).get
    assert(newState_2.pbr.getBoxId(sboxOne.value).isEmpty)
  }

  // Release the underlying storage once all properties have run.
  override def afterAll() {
    history.storage.storage.close
  }
}
| Topl/Project-Bifrost | src/test/scala/bifrost/state/SBRSpec.scala | Scala | mpl-2.0 | 3,301 |
// Copyright 2013 Christopher Swenson.
// Author: Christopher Swenson (chris@caswenson.com)
package com.caswenson.pizza.data
import com.caswenson.pizza.{ LatLon, Location }
import com.google.common.base.CharMatcher
import com.vividsolutions.jts.geom.impl.CoordinateArraySequenceFactory
import com.vividsolutions.jts.geom.{ Coordinate,
Envelope,
GeometryFactory,
LineSegment,
LineString,
MultiLineString,
Point }
import com.vividsolutions.jts.index.strtree.STRtree
import java.io.File
import org.geotools.data.FileDataStoreFinder
import org.geotools.data.shapefile.ShapefileDataStore
import scala.collection.JavaConversions._
import scala.collection.mutable
import scala.math.min
import grizzled.slf4j.Logging
/**
* Handles geocoding and reverse geocoding queries.
*/
/**
 * Handles geocoding and reverse geocoding queries over TIGER-derived street data.
 *
 * Fixes in this revision:
 *  - geocode(address): street/city candidates are built from nonStateParts
 *    (previously nonZipParts, leaking the state token and leaving nonStateParts unused).
 *  - geocode(number, ...): the zip lookup now prefers the map matching the house
 *    number's parity and falls back to the other side (previously only oddZipMap
 *    was consulted whenever the zip appeared there).
 *  - pointIn/interpolate: hardened against single-point arrays and zero address
 *    spans (which produced NaN coordinates).
 */
class Pizza(cities: Cities,
            reverseTree: STRtree,
            oddZipMap: mutable.OpenHashMap[String, mutable.OpenHashMap[String, Array[(Double, Point)]]],
            evenZipMap: mutable.OpenHashMap[String, mutable.OpenHashMap[String, Array[(Double, Point)]]]) extends Logging {
  // 5-digit ZIP with optional +4 extension.
  val zipMatch = """\d{5}(-\d{4})?""".r
  val directions = Set("nw", "ne", "se", "sw")

  /**
   * Tries to geocode the given address, guessing which parts belong to which pieces.
   * This requires that the City, State, and ZIP are present somehow currently.
   */
  def geocode(address: String): Option[LatLon] = {
    // Delete commas, convert to lower case.
    val parts = address.replace(",", "").toLowerCase.split(" ")
    val numberString = parts(0)
    // First part has to be a number.
    if (!CharMatcher.JAVA_DIGIT.matchesAllOf(numberString)) {
      debug("No numeric address found in " + numberString)
      return None
    }
    val number = numberString.toInt
    // Take out the US / USA part.
    val nonUsParts = if (parts.last == "usa" || parts.last == "us") {
      parts.dropRight(1).drop(1)
    } else {
      parts.drop(1)
    }
    // Check for ZIP.
    val (zip, nonZipParts) = zipMatch.findFirstIn(nonUsParts.last) match {
      case Some(z) => {
        (Some(z.substring(0, 5)), nonUsParts.dropRight(1))
      }
      case None => {
        (None, nonUsParts)
      }
    }
    val state = nonZipParts.last.toUpperCase
    if (!AnsiStates.stateNames.contains(state)) {
      debug("Unknown state " + state)
      return None
    }
    val nonStateParts = nonZipParts.dropRight(1)
    // Nothing between the house number and the state: no street to look up.
    if (nonStateParts.isEmpty) {
      return None
    }
    // Split the rest up between street and city every which way we can.
    // Fixed: these previously read from nonZipParts, so the state token leaked
    // into every candidate city and nonStateParts was never used.
    val streetStart = nonStateParts(0)
    val cityEnd = nonStateParts.last
    // Parts that could belong to either the street or the city.
    val either = nonStateParts.drop(1).dropRight(1)
    0.to(either.size).flatMap { i =>
      val street = (streetStart + " " + either.slice(0, i).mkString(" ")).trim
      val city = (either.slice(i, either.size).mkString(" ") + " " + cityEnd).trim
      geocode(number, street, city, state, zip)
    }.headOption
  }

  /**
   * Geocodes a given location to a latitude, longitude pair, if the given address
   * makes any sense. Only ZIP-based lookup is implemented; city/state are
   * currently unused (TODO: ZCTA fallback when no ZIP is supplied).
   */
  def geocode(number: Int, rawStreet: String, rawCity: String, rawState: String, zipOption: Option[String]): Option[LatLon] = {
    val street = rawStreet.toLowerCase
    zipOption match {
      case Some(zip) => {
        // Prefer the street side matching the house-number parity, then fall
        // back to the other side. Fixed: the previous lookup consulted only
        // oddZipMap whenever the zip appeared there, wrongly returning None
        // for streets recorded only in evenZipMap.
        val (preferred, other) =
          if ((number & 1) == 1) (oddZipMap, evenZipMap) else (evenZipMap, oddZipMap)
        def lookup(zips: mutable.OpenHashMap[String, mutable.OpenHashMap[String, Array[(Double, Point)]]]): Option[LatLon] =
          zips.get(zip).flatMap { streetMap => streetMap.get(street) }.map { coords => pointIn(number, coords) }
        lookup(preferred).orElse(lookup(other))
      }
      case None => {
        // No ZIP supplied.
        // TODO(swenson): Use the ZCTAs to determine the proper zip.
        None
      }
    }
  }

  /**
   * Uses binary search and interpolation to find the closest geometric point to
   * the given address.
   * @param number Address number
   * @param points (address number, point) pairs sorted by address number.
   */
  def pointIn(number: Int, points: Array[(Double, Point)]): LatLon = {
    // Clamp to the endpoints. Using <= / >= (the previous strict comparisons
    // let number == endpoint fall into the search below) also makes degenerate
    // single-point arrays safe: interpolation needs two points.
    if (number <= points.head._1) {
      pointFor(points.head._2)
    } else if (number >= points.last._1) {
      pointFor(points.last._2)
    } else {
      var l = 0
      var c = points.size / 2
      var r = points.size - 1
      while (true) {
        if (c - l <= 1) {
          return interpolate(number, l, points)
        } else if (r - c <= 1) {
          return interpolate(number, c, points)
        }
        if (number < points(c)._1) {
          r = c
          c = (l + r) / 2
        } else {
          l = c
          c = (l + r) / 2
        }
      }
      null // unreachable, but scala compiler complains otherwise
    }
  }

  /**
   * Interpolates the address between index and index + 1 of the given array.
   */
  def interpolate(number: Int, index: Int, points: Array[(Double, Point)]): LatLon = {
    val (addr1, point1) = points(index)
    val (addr2, point2) = points(index + 1)
    val addrDelta = addr2 - addr1
    // Guard against a zero address span (duplicate survey addresses): the
    // unguarded division produced NaN/Infinity coordinates.
    if (addrDelta == 0) {
      pointFor(point1)
    } else {
      val line = new LineSegment(point1.getCoordinate, point2.getCoordinate)
      val lengthAlong = (number - addr1) / addrDelta
      val point = line.pointAlong(lengthAlong)
      LatLon(point.y, point.x)
    }
  }

  /** Converts a JTS Point to a LatLon (y is latitude, x is longitude). */
  def pointFor(point: Point): LatLon = {
    LatLon(point.getY, point.getX)
  }

  /** Capitalizes road parts, and upper-cases compass directions (NW/NE/SE/SW). */
  def capitalize(road: String): String = {
    road.split(" ").map { piece =>
      if (directions.contains(piece)) {
        piece.toUpperCase
      } else {
        piece.capitalize
      }
    }.mkString(" ")
  }

  /**
   * Reverse geocodes the given point to the nearest address it can find,
   * growing the search envelope until the spatial index returns candidates.
   */
  def reverseGeocode(latLon: LatLon): Location = {
    val coord = new Coordinate(latLon.lon, latLon.lat)
    val point = new Point(CoordinateArraySequenceFactory.instance().create(Array(coord)), Pizza.geometryFactory)
    val search = new Envelope(coord)
    while (true) {
      // Expansive search.
      val results = reverseTree.query(search).asInstanceOf[java.util.List[(LineString, Int, Int, String, String, String)]].toIndexedSeq
      if (results.size > 0) {
        // Closest road segment wins.
        val (lineString, minAddr, maxAddr, road, state, zip) = results.minBy { case (line, _, _, _, _, _) =>
          point.distance(line)
        }
        val capRoad = capitalize(road)
        val addrDelta = maxAddr - minAddr
        // Convert the line to line segments.
        val lineSegments = constructLineSegments(lineString.getCoordinates)
        // Find the nearest line segment.
        val lineIndex = lineSegments.indices.minBy { li => lineSegments(li).distance(coord) }
        // Find the closest point on the line segment.
        val nearestPoint = lineSegments(lineIndex).project(coord)
        // Use the distance from that point on the line to compute the address.
        val lineDistance = lineSegments(lineIndex).getCoordinate(0).distance(nearestPoint)
        val otherDistances = 0.until(lineIndex).map { i => lineSegments(i).getLength }.sum
        val address = ((otherDistances + lineDistance) / lineString.getLength * addrDelta + minAddr).toInt
        val city = cities.city(state, point).capitalize
        // TODO(swenson): fill in TZ?
        return Location(street = Some("%d %s".format(address, capRoad)), city = Some(city), state = Some(state), zip = Some(zip),
          country = Some("USA"), lat = Some(latLon.lat), lon = Some(latLon.lon))
      } else {
        // Try again with a slightly larger envelope (~1e-4 degrees per side).
        search.expandBy(1 / 10000.0)
      }
    }
    null // unreachable, but scala compiler complains otherwise
  }

  /** Converts a sequence of coordinates into consecutive line segments. */
  def constructLineSegments(coords: Array[Coordinate]): Array[LineSegment] = {
    coords.sliding(2, 1).map { x => new LineSegment(x(0), x(1)) }.toArray
  }

  // Unimplemented stub: always returns the empty string.
  // NOTE(review): presumably meant to resolve a city from state + zip -- confirm before relying on it.
  def cityFor(state: String, zip: String): String = {
    ""
  }
}
object Pizza extends Logging {
  // Shared JTS factory for constructing points.
  val geometryFactory = new GeometryFactory
  // Matcher for "all characters are digits" checks on address-range fields.
  val digits = CharMatcher.JAVA_DIGIT

  /**
   * Construct a new geocoder and reverse geocoder from the given data for the specified
   * states.
   */
  def apply(cities: Cities, dataDir: String, states: Set[String] = AnsiStates.states.values.toSet): Pizza = {
    var segmentsLoaded = 0
    // The left-hand and right-hand sides of the road may be in different zipcodes.
    // We sort them by even-numbered addresses and odd-numbered.
    val oddZipMap = mutable.OpenHashMap[String, mutable.OpenHashMap[String, Array[(Double, Point)]]]()
    val evenZipMap = mutable.OpenHashMap[String, mutable.OpenHashMap[String, Array[(Double, Point)]]]()
    // The reverse geocoding STRtree
    val reverseTree = new STRtree()
    // Scan for data files.
    val dir = new File(dataDir)
    val files = dir.listFiles()
      .filter { _.getName.endsWith(".shp") }
      // Only keep states we were told to process.
      // (The state code is parsed out of the shapefile's name.)
      .filter { file =>
        states.contains(AnsiStates.states(file.getName.split("_")(2).substring(0, 2).toInt))
      }
    files.foreach { file =>
      val stateId = file.getName.split("_")(2).substring(0, 2).toInt
      val state = AnsiStates.states(stateId)
      val dataStore = FileDataStoreFinder.getDataStore(file).asInstanceOf[ShapefileDataStore]
      info("Loading %s %s".format(state, file.getName))
      val featureReader = dataStore.getFeatureReader
      while (featureReader.hasNext) {
        val feature = featureReader.next()
        // Road geometry plus address-range attributes for both street sides.
        val line = feature.getAttribute("the_geom").asInstanceOf[MultiLineString]
        val fullName = feature.getAttribute("FULLNAME").toString.toLowerCase
        val leftFrom = feature.getAttribute("LFROMHN").toString
        val leftTo = feature.getAttribute("LTOHN").toString
        val rightFrom = feature.getAttribute("RFROMHN").toString
        val rightTo = feature.getAttribute("RTOHN").toString
        val zipLeft = feature.getAttribute("ZIPL").toString
        val zipRight = feature.getAttribute("ZIPR").toString
        // These might be useful in the future, though they aren't ever-present.
        //val zip9Left = feature.getAttribute("PLUS4L").toString
        //val zip9Right = feature.getAttribute("PLUS4R").toString
        // Only continue if everything is all digits.
        if (digits.matchesAllOf(leftFrom) &&
          digits.matchesAllOf(leftTo) &&
          digits.matchesAllOf(rightFrom) &&
          digits.matchesAllOf(rightTo)) {
          require(line.getNumGeometries == 1)
          val line0 = line.getGeometryN(0).asInstanceOf[LineString]
          // Build a map from this line to each house number, evenly distributed
          val numPoints = line0.getNumPoints
          val totalLength = line0.getLength
          // NOTE(review): the smaller of the left/right endpoints is used for
          // both sides -- assumes the two ranges run roughly parallel; confirm.
          val startPoint = if (leftFrom == "") {
            rightFrom.toInt
          } else if (rightFrom == "") {
            leftFrom.toInt
          } else {
            min(leftFrom.toInt, rightFrom.toInt)
          }
          val endPoint = if (leftTo == "") {
            rightTo.toInt
          } else if (rightTo == "") {
            leftTo.toInt
          } else {
            min(leftTo.toInt, rightTo.toInt)
          }
          val addressDiff = endPoint - startPoint
          var currPoint = startPoint.toDouble
          // Reverse geocoding info
          if (zipLeft != "") {
            reverseTree.insert(line0.getEnvelopeInternal, (line0, startPoint, endPoint, fullName, state, zipLeft))
          }
          if (zipRight != "" && zipLeft != zipRight) {
            reverseTree.insert(line0.getEnvelopeInternal, (line0, startPoint, endPoint, fullName, state, zipRight))
          }
          // Geocoding info: distribute address numbers along the line,
          // proportionally to each segment's share of the total length.
          val points = (0.until(numPoints - 1).map { i: Int =>
            val point1 = line0.getPointN(i)
            val point2 = line0.getPointN(i + 1)
            val length = point2.distance(point1)
            val delta = length / totalLength * addressDiff
            currPoint += delta
            (currPoint - delta, point1)
          } ++ Seq((currPoint, line0.getEndPoint))).toArray
          // Populate the odd and even address number zip codes.
          if (zipLeft != "") {
            val toUpdate = if ((leftFrom.toInt & 1) == 1) {
              oddZipMap
            } else {
              evenZipMap
            }
            val zipData = toUpdate.getOrElseUpdate(zipLeft, mutable.OpenHashMap[String, Array[(Double, Point)]]())
            val streetData = zipData.getOrElse(fullName, Array[(Double, Point)]())
            // Merge with any existing points for the street, dedupe, keep sorted by address.
            zipData += (fullName -> (streetData ++ points).toSet.toArray.sortBy(_._1))
            // NOTE(review): only this left-zip branch bumps the counter, so
            // right-only segments are never counted -- confirm intended.
            segmentsLoaded += 1
          }
          if (zipRight != "") {
            val toUpdate = if ((rightFrom.toInt & 1) == 1) {
              oddZipMap
            } else {
              evenZipMap
            }
            val zipData = toUpdate.getOrElseUpdate(zipRight, mutable.OpenHashMap[String, Array[(Double, Point)]]())
            val streetData = zipData.getOrElse(fullName, Array[(Double, Point)]())
            zipData += (fullName -> (streetData ++ points).toSet.toArray.sortBy(_._1))
          }
        }
      }
      try {
        featureReader.close()
      } catch {
        case e: IllegalArgumentException => // Ignore buggy FeatureReader
      }
    }
    info("Segments loaded: %d".format(segmentsLoaded))
    new Pizza(cities, reverseTree, oddZipMap, evenZipMap)
  }
}
| swenson/pizza | src/main/scala/com/caswenson/pizza/data/Pizza.scala | Scala | mit | 13,611 |
Subsets and Splits

Filtered Scala Code Snippets

This query filters the dataset and retrieves a sample of Scala code snippets matching specific criteria, offering a basic overview of the dataset's contents without revealing deeper insights.