code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.artclod.mathml.scalar.apply
import com.artclod.mathml._
import com.artclod.mathml.scalar._
import com.artclod.mathml.scalar.concept.Constant
import scala.util._
// MathML <apply><times/> ...</apply>: the product of `values`.
case class ApplyTimes(val values: MathMLElem*)
extends MathMLElem(MathML.h.prefix, "apply", MathML.h.attributes, MathML.h.scope, false, (Seq[MathMLElem](Times) ++ values): _*) {
// Evaluates all operands under the given bindings; returns the first Failure
// unchanged, otherwise the product of all evaluated values.
def eval(boundVariables: Map[String, Double]): Try[Double] = {
val tryVals = values.map(_.eval(boundVariables))
tryVals.find(_.isFailure) match {
case Some(failure) => failure
case None => product(tryVals.map(_.get))
}
}
// Multiplies the evaluated operands. A product that is exactly 0.0 while no
// operand is zero indicates floating point underflow, reported as a Failure.
private def product(vals: Seq[Double]): Try[Double] = {
val product = vals.reduceLeft(_ * _)
val hasNoZeroes = vals.find(_ == 0d).isEmpty
if (product == 0d && hasNoZeroes) {
Failure(new IllegalStateException("times returned 0 when there were no 0 elements " + this))
} else {
Success(product)
}
}
// Constant folding: defined when every operand is a constant, or when any
// operand is the constant zero (then the whole product is zero).
def constant: Option[Constant] = if (values.forall(_.c.nonEmpty)) {
Some(values.map(_.c.get).reduce(_ * _))
} else if (values.map(_.c).contains(Some(`0`))) {
Some(`0`)
} else {
None
}
// One simplification step: folds the constant operands together, drops a
// resulting factor of one, and keeps the non-constant operands.
// NOTE(review): the first case also matches two empty sequences, where
// `cns.reduce` would throw — confirm an ApplyTimes can never have zero operands.
def simplifyStep() =
(cns, flattenedMathMLElems) match {
case (Seq(cns @ _*), Seq()) => cns.reduce(_ * _)
case (Seq(), Seq(elem)) => elem
case (Seq(), Seq(elems @ _*)) => ApplyTimes(elems: _*)
case (Seq(cns @ _*), Seq(elems @ _*)) => ApplyTimes(Seq(cns.reduce(_ * _)).filterNot(_.isOne) ++ elems: _*)
}
// The constant operands (in order).
private def cns = values.map(_.c).filter(_.nonEmpty).map(_.get)
// The simplified non-constant operands, with any nested ApplyTimes flattened
// one level into this product (multiplication is associative).
private def flattenedMathMLElems: Seq[MathMLElem] = values.filter(_.c.isEmpty).map(_.s)
.flatMap(_ match {
case v: ApplyTimes => v.values
case v: MathMLElem => Seq(v)
})
// Union of the free variables of all operands.
def variables: Set[String] = values.foldLeft(Set[String]())(_ ++ _.variables)
// Derivative via the product rule: sum over i of the product with the i-th
// operand replaced by its derivative.
// NOTE(review): for a single operand this returns the operand itself rather
// than its derivative — looks suspicious; confirm intended behavior.
def derivative(x: String): MathMLElem =
if (values.size == 1) {
values(0)
} else {
ApplyPlus((0 until values.size).map(i => derivativeForPos(x, i)): _*)
}
// One product-rule term: differentiate the operand at `pos`, simplify the rest.
private def derivativeForPos(x: String, pos: Int) = {
val items = for (i <- 0 until values.size) yield { if (i == pos) { values(i).d(x) } else { values(i).s } }
ApplyTimes(items: _*)
}
// Renders as a MathJS expression, e.g. "(a * b * c)".
def toMathJS = values.map(_.toMathJS).mkString("(", " * " ,")")
}
| kristiankime/web-education-games | app/com/artclod/mathml/scalar/apply/ApplyTimes.scala | Scala | mit | 2,181 |
package net.machinemuse.powersuits.powermodule.tool
import net.machinemuse.api.IModularItem
import net.machinemuse.powersuits.powermodule.PowerModuleBase
import net.machinemuse.utils.{MuseItemUtils, MuseCommonStrings}
import net.machinemuse.api.moduletrigger.IRightClickModule
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.world.World
import net.minecraft.item.ItemStack
import net.machinemuse.powersuits.common.ModularPowersuits
import net.machinemuse.powersuits.item.ItemComponent
import net.minecraft.util.StatCollector
/**
* Created with IntelliJ IDEA.
* User: Claire2
* Date: 4/30/13
* Time: 3:14 PM
* To change this template use File | Settings | File Templates.
*/
/**
 * Field Tinker power module: a right-click module that opens the in-field
 * tinkering GUI (gui id 2) at the player's current position.
 */
class FieldTinkerModule(list: java.util.List[IModularItem]) extends PowerModuleBase(list) with IRightClickModule {
  // Installation cost: one control circuit and two servo motors.
  addInstallCost(MuseItemUtils.copyAndResize(ItemComponent.controlCircuit, 1))
  addInstallCost(MuseItemUtils.copyAndResize(ItemComponent.servoMotor, 2))

  def getCategory: String = MuseCommonStrings.CATEGORY_SPECIAL

  def getDataName: String = "Field Tinker Module"

  def getLocalizedName: String = StatCollector.translateToLocal("module.fieldTinkerer.name")
  // If lang fails to encode trademark symbol, it's "\u2122"

  def getDescription: String = StatCollector.translateToLocal("module.fieldTinkerer.desc")

  def getTextureFile: String = "transparentarmor"

  // The item-use hooks are deliberate no-ops: this module reacts only to a plain right-click.
  def onItemUse(itemStack: ItemStack, player: EntityPlayer, world: World, x: Int, y: Int, z: Int, side: Int, hitX: Float, hitY: Float, hitZ: Float) {}

  def onItemUseFirst(itemStack: ItemStack, player: EntityPlayer, world: World, x: Int, y: Int, z: Int, side: Int, hitX: Float, hitY: Float, hitZ: Float): Boolean = false

  def onPlayerStoppedUsing(itemStack: ItemStack, world: World, player: EntityPlayer, par4: Int) {}

  // Opens the field-tinkering GUI at the player's (integer-truncated) coordinates.
  def onRightClick(player: EntityPlayer, world: World, item: ItemStack) {
    val px = player.posX.toInt
    val py = player.posY.toInt
    val pz = player.posZ.toInt
    player.openGui(ModularPowersuits, 2, world, px, py, pz)
  }
}
| QMXTech/MachineMusePowersuits | src/main/scala/net/machinemuse/powersuits/powermodule/tool/FieldTinkerModule.scala | Scala | bsd-3-clause | 1,994 |
package org.ferrit.core.util
import scala.util.matching.Regex
object TextUtil {

  /** Platform-specific line separator. */
  final val Ls: String = System.getProperty("line.separator")

  // Matches a "key: value" line, case-insensitively; the key may contain
  // letters, whitespace and hyphens.
  // Fixed: the pattern previously used "\\s", which inside a triple-quoted
  // string is the regex escape for a LITERAL backslash — so ":\\s*" demanded a
  // backslash after the colon and no real "key: value" line could ever match
  // (every line then fell through to the IllegalArgumentException below).
  private val KeyVal = """(?i)^([a-z\s-]+):\s*(.*)""".r

  /**
   * Parse a sequence of lines where duplicate keys are allowed.
   * Example:
   *
   *   accept: http://site.net
   *   reject: http://other.site.net
   *
   * Blank lines and lines starting with '#' are skipped. Each remaining line
   * must be "directive: value" with the directive in `directives`; its key and
   * trimmed value are passed to `bindFn`.
   *
   * @throws IllegalArgumentException on a line whose directive is unrecognised
   */
  def parseKeyValueLines[T](
    directives: Seq[String],
    lines: Seq[String],
    bindFn: (String, String) => T): Seq[T] = {

    // Alternation of the allowed directive names, e.g. "accept|reject".
    val Directives = directives.mkString("|")
    lines
      .filterNot(l => l.trim.isEmpty || l.startsWith("#"))
      .map({ line =>
        KeyVal.findFirstMatchIn(line) match {
          case Some(m) if m.group(1) != null && m.group(1).matches(Directives) =>
            bindFn(m.group(1), m.group(2).trim)
          case m =>
            throw new IllegalArgumentException(
              s"Unrecognised directive on line [${line}]. " +
                "Directives should be one of [%s]".format(directives.mkString(","))
            )
        }
      })
  }
}
package io.abacus.tallyho.rollingcounter
import org.scalatest.WordSpec
import org.scalatest.ShouldMatchers
import org.scalatest.ParallelTestExecution
/** Behavioural spec for RollingCounter: counting, bucket advancement and wrap-around. */
class RollingCounterSpec extends WordSpec with ShouldMatchers with ParallelTestExecution {
  "A Windowed Counter" should {
    "return 0 when no objects are in the counter" in {
      val counter = new RollingCounter[String](2)
      counter.count("hello") should be (0)
    }
    "increment the count by 1" in {
      val counter = new RollingCounter[String](2)
      counter.increment("a")
      counter.count("a") should be (1)
    }
    "return a map of counts for objects" in {
      val counter = new RollingCounter[String](2)
      counter.increment("a")
      counter.increment("b")
      counter.increment("a")
      counter.counts should be (Map("a" -> 2, "b" -> 1))
    }
    "reset the count for a key and bucket" in {
      // Only checks that the reset call does not blow up.
      val counter = new RollingCounter[String](2)
      counter.increment("a")
      counter.resetCountForBucket("a", 0)
    }
    "advance the current bucket by 1 with 2 total buckets" in {
      // Counts from both buckets are still visible within the window.
      val counter = new RollingCounter[String](2)
      counter.increment("a")
      counter.advanceBucket
      counter.increment("a")
      counter.count("a") should be (2)
    }
    "advance the current bucket by 2 and wrap around" in {
      // After wrapping, the oldest bucket's increments have been evicted.
      val counter = new RollingCounter[String](2)
      counter.increment("a")
      counter.increment("b")
      counter.advanceBucket
      counter.increment("a")
      counter.advanceBucket
      counter.counts should be (Map("a" -> 1, "b" -> 0))
    }
  }
}
| non/tallyho | src/test/scala/io/abacus/tallyho/rollingcounter/RollingCounterSpec.scala | Scala | apache-2.0 | 1,423 |
package kata.scala
import scala.collection.mutable
/**
 * A mutable undirected graph over Int vertices, stored as adjacency lists.
 * Parallel edges are kept (adding the same edge twice records it twice) and a
 * self-loop adds the vertex to its own adjacency list twice — both behaviours
 * match the original implementation. Not thread-safe.
 */
class Graph {
  // vertex -> neighbours; a vertex appears only once an edge touches it
  private val edges: mutable.Map[Int, mutable.ArrayBuffer[Int]] = mutable.Map.empty

  /** Neighbours of `v`. Throws NoSuchElementException if no edge touches `v`. */
  def adjacentTo(v: Int): Iterable[Int] = {
    edges(v)
  }

  /** True while no edge has been added. */
  def isEmpty: Boolean = edges.isEmpty

  /** All vertices touched by at least one edge. */
  def vertices(): Iterable[Int] = edges.keys

  /** Adds the undirected edge (v, w): each endpoint becomes the other's neighbour. */
  def addEdge(v: Int, w: Int) = {
    // getOrElseUpdate collapses the duplicated contains/put branches of the
    // original into a single lookup per endpoint.
    edges.getOrElseUpdate(v, new mutable.ArrayBuffer[Int]()) += w
    edges.getOrElseUpdate(w, new mutable.ArrayBuffer[Int]()) += v
  }
}
| Alex-Diez/Scala-TDD-Katas | old-katas/graph-search-kata/day-1/src/main/scala/kata/scala/Graph.scala | Scala | mit | 772 |
/*
Copyright 2014 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.typed
/**
* This type is used to implement .andThen on a function in a way
* that will never blow up the stack. This is done to prevent
* deep scalding TypedPipe pipelines from blowing the stack
*
* This may be slow, but is used in scalding at planning time
*/
sealed trait NoStackAndThen[-A, +B] extends java.io.Serializable {
// Applies the composed function chain to `a`.
def apply(a: A): B
// Composes with a plain function. Composition is defunctionalized as a
// NoStackMore node, so the chain is heap data rather than nested closures.
def andThen[C](fn: B => C): NoStackAndThen[A, C] = NoStackAndThen.NoStackMore(this, fn)
// Composes with another chain: `that`'s functions are pushed one by one onto
// `this` via an explicit ReversedStack, keeping the traversal tail-recursive.
def andThen[C](that: NoStackAndThen[B, C]): NoStackAndThen[A, C] = {
import NoStackAndThen._
@annotation.tailrec
def push(front: NoStackAndThen[A, Any],
next: NoStackAndThen[Any, Any],
toAndThen: ReversedStack[Any, C]): NoStackAndThen[A, C] =
(next, toAndThen) match {
case (NoStackWrap(fn), EmptyStack(fn2)) => NoStackMore(front, fn).andThen(fn2)
case (NoStackWrap(fn), NonEmpty(h, tail)) => push(NoStackMore(front, fn), NoStackAndThen.NoStackWrap(h), tail)
case (NoStackMore(first, tail), _) => push(front, first, NonEmpty(tail, toAndThen))
// WithStackTrace is unwrapped in the outer match below, so it never reaches push
case (WithStackTrace(_, _), _) => sys.error("should be unreachable")
}
that match {
case NoStackWrap(fn) => andThen(fn)
case NoStackMore(head, tail) =>
// casts needed for the tailrec, they can't cause runtime errors
push(this, head.asInstanceOf[NoStackAndThen[Any, Any]], EmptyStack(tail))
case WithStackTrace(inner, stack) => WithStackTrace(andThen(inner), stack)
}
}
}
object NoStackAndThen {
// Captures the caller's stack at wrap/composition time so planning-time errors
// can be traced even though the chain itself is heap-allocated.
private[typed] def buildStackEntry: Array[StackTraceElement] = Thread.currentThread().getStackTrace
def apply[A, B](fn: A => B): NoStackAndThen[A, B] = WithStackTrace(NoStackWrap(fn), buildStackEntry)
// A reversed stack of functions (head is applied first) used to traverse the
// chain tail-recursively.
private sealed trait ReversedStack[-A, +B]
private final case class EmptyStack[-A, +B](fn: A => B) extends ReversedStack[A, B]
private final case class NonEmpty[-A, B, +C](head: A => B, rest: ReversedStack[B, C]) extends ReversedStack[A, C]
// Decorates a chain with the stack traces recorded at construction/composition;
// application just delegates to `inner`.
private[scalding] final case class WithStackTrace[A, B](inner: NoStackAndThen[A, B], stackEntry: Array[StackTraceElement]) extends NoStackAndThen[A, B] {
override def apply(a: A): B = inner(a)
override def andThen[C](fn: B => C): NoStackAndThen[A, C] =
WithStackTrace[A, C](inner.andThen(fn), stackEntry ++ buildStackEntry)
override def andThen[C](that: NoStackAndThen[B, C]): NoStackAndThen[A, C] =
WithStackTrace[A, C](inner.andThen(that), stackEntry ++ buildStackEntry)
}
// Just wraps a function
private final case class NoStackWrap[A, B](fn: A => B) extends NoStackAndThen[A, B] {
def apply(a: A) = fn(a)
}
// This is the defunctionalized andThen
private final case class NoStackMore[A, B, C](first: NoStackAndThen[A, B], andThenFn: (B) => C) extends NoStackAndThen[A, C] {
/*
* scala cannot optimize tail calls if the types change.
* Any call that changes types, we replace that type with Any. These casts
* can never fail, due to the structure above.
*/
@annotation.tailrec
private def reversed(toPush: NoStackAndThen[A, Any], rest: ReversedStack[Any, C]): ReversedStack[A, C] =
toPush match {
case NoStackWrap(fn) => NonEmpty(fn, rest)
case NoStackMore(more, fn) => reversed(more, NonEmpty(fn, rest))
case WithStackTrace(_, _) => sys.error("should be unreachable")
}
// Applies the reversed stack left-to-right in constant stack space.
@annotation.tailrec
private def call(arg: Any, revstack: ReversedStack[Any, C]): C = revstack match {
case EmptyStack(last) => last(arg)
case NonEmpty(head, rest) => call(head(arg), rest)
}
// Built once, lazily, then reused on every apply.
private lazy val revStack: ReversedStack[Any, C] =
// casts needed for the tailrec, they can't cause runtime errors
reversed(first, EmptyStack(andThenFn.asInstanceOf[(Any) => (C)]))
.asInstanceOf[ReversedStack[Any, C]]
def apply(a: A): C = call(a, revStack)
}
}
| tdyas/scalding | scalding-core/src/main/scala/com/twitter/scalding/typed/NoStackAndThen.scala | Scala | apache-2.0 | 4,436 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.fts
import java.net.InetAddress
import java.util
import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, TimeUnit, TimeoutException}
import akka.NotUsed
import akka.stream.scaladsl.Source
import cmwell.common.formats.JsonSerializer
import cmwell.domain._
import cmwell.util.jmx._
import cmwell.common.exception._
import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.{LazyLogging, Logger}
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse
import org.elasticsearch.action.bulk.{BulkItemResponse, BulkResponse}
import org.elasticsearch.action.delete.DeleteResponse
import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse
import org.elasticsearch.action.get.GetResponse
import org.elasticsearch.action.index.{IndexRequest, IndexResponse}
import org.elasticsearch.action.search.{SearchRequestBuilder, SearchResponse, SearchType}
import org.elasticsearch.action.update.UpdateRequest
import org.elasticsearch.action.{ActionListener, ActionRequest, WriteConsistencyLevel}
import org.elasticsearch.client.Client
import org.elasticsearch.client.transport.TransportClient
import org.elasticsearch.common.netty.util.{HashedWheelTimer, Timeout, TimerTask}
import org.elasticsearch.common.settings.ImmutableSettings
import org.elasticsearch.common.transport.InetSocketTransportAddress
import org.elasticsearch.common.unit.TimeValue
import org.elasticsearch.index.VersionType
import org.elasticsearch.index.query.{BoolFilterBuilder, BoolQueryBuilder, QueryBuilder, QueryBuilders}
import org.elasticsearch.index.query.FilterBuilders._
import org.elasticsearch.index.query.QueryBuilders._
import org.elasticsearch.node.Node
import org.elasticsearch.node.NodeBuilder._
import org.elasticsearch.search.SearchHit
import org.elasticsearch.search.aggregations._
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram
import org.elasticsearch.search.aggregations.bucket.significant.InternalSignificantTerms
import org.elasticsearch.search.aggregations.bucket.terms.InternalTerms
import org.elasticsearch.search.aggregations.metrics.cardinality.InternalCardinality
import org.elasticsearch.search.aggregations.metrics.stats.InternalStats
import org.elasticsearch.search.sort.SortBuilders._
import org.elasticsearch.search.sort.SortOrder
import org.joda.time.DateTime
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._
import scala.compat.Platform._
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util._
import scala.language.implicitConversions
/**
* User: Israel
* Date: 11/5/12
* Time: 10:19 AM
*/
object FTSServiceES {
// Factory: builds an FTSServiceES from the given classpath ES config file,
// optionally blocking until the cluster reports green health.
def getOne(classPathConfigFile: String, waitForGreen: Boolean = true) =
new FTSServiceES(classPathConfigFile, waitForGreen)
}
// Typesafe-config backed settings for the FTS (ElasticSearch) service.
object Settings {
val config = ConfigFactory.load()
// true -> connect as a remote TransportClient; false -> start an embedded node
val isTransportClient = config.getBoolean("ftsService.isTransportClient")
val transportAddress = config.getString("ftsService.transportAddress")
val transportPort = config.getInt("ftsService.transportPort")
val defPartition = config.getString("ftsService.defaultPartition")
val clusterName = config.getString("ftsService.clusterName")
// scroll (cursor) time-to-live and page size for scroll searches
val scrollTTL = config.getLong("ftsService.scrollTTL")
val scrollLength = config.getInt("ftsService.scrollLength")
val dataCenter = config.getString("dataCenter.id")
}
class FTSServiceES private (classPathConfigFile: String, waitForGreen: Boolean)
extends FTSServiceOps
with FTSServiceESMBean {
import cmwell.fts.Settings._
override val defaultPartition = defPartition
override val defaultScrollTTL = scrollTTL
// ES client handle; assigned once in the initialization below
var client: Client = _
// embedded node; stays null when running as a transport client
var node: Node = null
// JMX-visible indexing counters (see the getters further down)
@volatile var totalRequestedToIndex: Long = 0
val totalIndexed = new AtomicLong(0)
val totalFailedToIndex = new AtomicLong(0)
jmxRegister(this, "cmwell.indexer:type=FTSService")
// Connect either as a remote transport client or as an embedded in-JVM node,
// according to configuration.
if (isTransportClient) {
val esSettings = ImmutableSettings.settingsBuilder.put("cluster.name", clusterName).build
// if(transportAddress=="localhost") InetAddress.getLocalHost.getHostName else
val actualTransportAddress = transportAddress
client = new TransportClient(esSettings)
.addTransportAddress(new InetSocketTransportAddress(actualTransportAddress, transportPort))
loger.info(s"starting es transport client [/$actualTransportAddress:$transportPort]")
} else {
val esSettings = ImmutableSettings.settingsBuilder().loadFromClasspath(classPathConfigFile)
node = nodeBuilder().settings(esSettings).node()
client = node.client()
}
val localHostName = InetAddress.getLocalHost.getHostName
loger.info(s"localhostname: $localHostName")
val nodesInfo = client.admin().cluster().prepareNodesInfo().execute().actionGet()
loger.info(s"nodesInfo: $nodesInfo")
// Id of the data node running on this host (computed lazily).
// NOTE(review): `.head` throws if no local data node is found — confirm this
// is only evaluated on data-node hosts.
lazy val localNodeId = client
.admin()
.cluster()
.prepareNodesInfo()
.execute()
.actionGet()
.getNodesMap
.asScala
.filter {
case (id, node) =>
node.getHostname.equals(localHostName) && node.getNode.isDataNode
}
.map(_._1)
.head
// One client per data node: the local node reuses `client`; each remote node
// gets a dedicated TransportClient with a slimmed-down connection pool.
val clients: Map[String, Client] = isTransportClient match {
case true =>
nodesInfo.getNodes
.filterNot(n => !n.getNode.dataNode())
.map { node =>
val nodeId = node.getNode.getId
val nodeHostName = node.getNode.getHostName
val clint = nodeHostName.equalsIgnoreCase(localHostName) match {
case true => client
case false =>
val transportAddress = node.getNode.getAddress
val settings = ImmutableSettings.settingsBuilder
.put("cluster.name", node.getSettings.get("cluster.name"))
.put("transport.netty.worker_count", 3)
.put("transport.connections_per_node.recovery", 1)
.put("transport.connections_per_node.bulk", 1)
.put("transport.connections_per_node.reg", 2)
.put("transport.connections_per_node.state", 1)
.put("transport.connections_per_node.ping", 1)
.build
new TransportClient(settings).addTransportAddress(transportAddress)
}
(nodeId, clint)
}
.toMap
case false =>
Map(localNodeId -> client)
}
// Block startup until the cluster is usable (green = all replicas allocated,
// yellow = primaries allocated), with a 5 minute timeout.
if (waitForGreen) {
loger.info("waiting for ES green status")
// wait for green status
client
.admin()
.cluster()
.prepareHealth()
.setWaitForGreenStatus()
.setTimeout(TimeValue.timeValueMinutes(5))
.execute()
.actionGet()
loger.info("got green light from ES")
} else {
loger.info("waiting for ES yellow status")
// wait for yellow status
client
.admin()
.cluster()
.prepareHealth()
.setWaitForYellowStatus()
.setTimeout(TimeValue.timeValueMinutes(5))
.execute()
.actionGet()
loger.info("got yellow light from ES")
}
// JMX counter accessors (exposed via FTSServiceESMBean)
def getTotalRequestedToIndex(): Long = totalRequestedToIndex
def getTotalIndexed(): Long = totalIndexed.get()
def getTotalFailedToIndex(): Long = totalFailedToIndex.get()
// Shuts down the primary ES client (and the embedded node, if any) and
// unregisters the JMX bean.
// NOTE(review): the per-node TransportClients held in `clients` are not closed
// here — possible resource leak on shutdown; confirm.
def close() {
if (client != null)
client.close()
if (node != null && !node.isClosed)
node.close()
jmxUnRegister("cmwell.indexer:type=FTSService")
}
/**
* Add given Infoton to Current index. If previous version of this Infoton passed, it will
* be added to History index
*
* @param infoton
* @param previousInfoton previous version of this infoton
* @param partition logical name of partition. Used for targeting a specific index
* @return future that completes when the current write (and, if a previous
*         version was given, the history write and current delete) finished
*/
def index(infoton: Infoton, previousInfoton: Option[Infoton] = None, partition: String = defaultPartition)(
implicit executionContext: ExecutionContext
): Future[Unit] = {
loger.debug("indexing current: " + infoton.uuid)
totalRequestedToIndex += 1
// NOTE(review): infotonToIndex is computed but never used — dead work;
// encodeInfoton is invoked again in setSource below.
val infotonToIndex = new String(JsonSerializer.encodeInfoton(infoton, toEs = true), "utf-8")
// Write the new version to the current index, requiring all shard copies to ack.
val currentFuture = injectFuture[IndexResponse](
client
.prepareIndex(partition + "_current", "infoclone", infoton.uuid)
.setSource(JsonSerializer.encodeInfoton(infoton, toEs = true))
.setConsistencyLevel(WriteConsistencyLevel.ALL)
.execute(_)
).map { _ =>
}
// Logging/metrics side effects only; does not alter the returned future.
currentFuture.andThen {
case Failure(t) =>
loger.debug(
"failed to index infoton uuid: " + infoton.uuid + "\\n" + t.getLocalizedMessage + "\\n" + t
.getStackTrace()
.mkString("", EOL, EOL)
)
case Success(v) => loger.debug("successfully indexed infoton uuid: " + infoton.uuid); totalIndexed.addAndGet(1)
}
if (previousInfoton.isDefined) {
totalRequestedToIndex += 1
// Move the previous version: write it to history, then remove it from current.
val previousWriteFuture = injectFuture[IndexResponse](
client
.prepareIndex(partition + "_history", "infoclone", previousInfoton.get.uuid)
.setSource(JsonSerializer.encodeInfoton(previousInfoton.get, toEs = true))
.setConsistencyLevel(WriteConsistencyLevel.ALL)
.execute(_)
).map { _ =>
}
val previousDeleteFuture = injectFuture[DeleteResponse](
client
.prepareDelete(partition + "_current", "infoclone", previousInfoton.get.uuid)
.setConsistencyLevel(WriteConsistencyLevel.ALL)
.execute(_)
).map { _.isFound }
.andThen {
case Failure(t) =>
loger.debug(
"failed to delete infoton uuid: " + infoton.uuid + "\\n" + t.getLocalizedMessage + "\\n" + t
.getStackTrace()
.mkString("", EOL, EOL)
); case Success(v) => loger.debug("successfully deleted infoton uuid: " + infoton.uuid)
}
.map { _ =>
}
// All three ES calls are already in flight; flatMap only sequences completion.
currentFuture.flatMap(_ => previousWriteFuture).flatMap(_ => previousDeleteFuture)
} else {
currentFuture.map { _ =>
}
}
}
// Single-threaded scheduler.
// NOTE(review): not referenced anywhere in the visible code — confirm usage.
val scheduler = Executors.newSingleThreadScheduledExecutor()
// Fetches the document with this uuid from `index` and extracts its source via
// the EsSourceExtractor type class, returning (extracted value, ES version).
def extractSource[T: EsSourceExtractor](uuid: String, index: String)(implicit executionContext: ExecutionContext) = {
injectFuture[GetResponse](client.prepareGet(index, "infoclone", uuid).execute(_)).map { hit =>
implicitly[EsSourceExtractor[T]].extract(hit) -> hit.getVersion
}
}
// Sends the given write requests as a single ES bulk request. Outcomes are
// logged and folded into the JMX counters; the raw BulkResponse future is
// returned to the caller.
// NOTE(review): `+=` on a @volatile var is not atomic; concurrent callers could
// lose counter updates — confirm this is invoked from a single thread.
def executeBulkActionRequests(
actionRequests: Iterable[ActionRequest[_ <: ActionRequest[_ <: AnyRef]]]
)(implicit executionContext: ExecutionContext, logger: Logger = loger) = {
val requestedToIndexSize = actionRequests.size
totalRequestedToIndex += actionRequests.size
val bulkRequest = client.prepareBulk()
bulkRequest.request().add(actionRequests.asJava)
val response = injectFuture[BulkResponse](bulkRequest.execute(_))
response.onComplete {
case Success(response) if !response.hasFailures =>
logger.debug(s"successfully indexed ${requestedToIndexSize} infotons")
totalIndexed.addAndGet(actionRequests.size)
// a non-delete item with version > 1 means the same uuid was written before
response.getItems.foreach { r =>
if (!r.getOpType.equalsIgnoreCase("delete") && r.getVersion > 1) {
logger.error(s"just wrote duplicate infoton: ${r.getId}")
}
}
case Success(response) =>
try {
val failed = response.getItems.filter(_.isFailed).map(_.getId)
// here we get if got response that has failures
logger.error(s"failed to index ${failed.size} out of $requestedToIndexSize infotons: ${failed.mkString(",")}")
totalFailedToIndex.addAndGet(failed.size)
totalIndexed.addAndGet(requestedToIndexSize - failed.size)
response.getItems.foreach { r =>
if (!r.getOpType.equalsIgnoreCase("delete") && r.getVersion > 1) {
logger.debug(s"just wrote duplicate infoton: ${r.getId}")
}
if (r.isFailed) {
logger.debug(s"failed infoton: ${r.getId}")
}
}
} catch {
case t: Throwable =>
logger.error(s"exception while handling ES response errors:\\n ${getStackTrace(t)}")
}
case Failure(t) =>
logger.error(s"exception during bulk indexing\\n${getStackTrace(t)}")
totalFailedToIndex.addAndGet(requestedToIndexSize)
}
response
}
// Bulk indexing with retries — not implemented in this ES implementation.
// NOTE(review): body is `???`; any call throws scala.NotImplementedError.
def executeBulkIndexRequests(
indexRequests: Iterable[ESIndexRequest],
numOfRetries: Int = 15,
waitBetweenRetries: Long = 3000
)(implicit executionContext: ExecutionContext, logger: Logger = loger): Future[SuccessfulBulkIndexResult] = ???
// Returns "fieldName:esType" strings for every field mapped under the
// "infoclone" type, across all indices whose name starts with "cm".
// NOTE(review): the withHistory and partition parameters are ignored by this
// implementation — confirm intent.
def getMappings(withHistory: Boolean = false, partition: String = defaultPartition)(
implicit executionContext: ExecutionContext
): Future[Set[String]] = {
import org.elasticsearch.cluster.ClusterState
// Helpers for digging through the untyped LinkedHashMap structure that ES
// returns for mapping metadata.
implicit class AsLinkedHashMap[K](lhm: Option[AnyRef]) {
def extract(k: K) = lhm match {
case Some(m) => Option(m.asInstanceOf[java.util.LinkedHashMap[K, AnyRef]].get(k))
case None => None
}
def extractKeys: Set[K] =
lhm.map(_.asInstanceOf[java.util.LinkedHashMap[K, Any]].keySet().asScala.toSet).getOrElse(Set.empty[K])
def extractOneValueBy[V](selector: K): Map[K, V] =
lhm
.map(_.asInstanceOf[java.util.LinkedHashMap[K, Any]].asScala.map {
case (k, vs) => k -> vs.asInstanceOf[java.util.LinkedHashMap[K, V]].get(selector)
}.toMap)
.getOrElse(Map[K, V]())
}
val req = client.admin().cluster().prepareState()
val f = injectFuture[ClusterStateResponse](req.execute)
val csf: Future[ClusterState] = f.map(_.getState)
csf.map(
_.getMetaData.iterator.asScala
.filter(_.index().startsWith("cm"))
.map { imd =>
// mapping source: { "fields": { "properties": { <name>: { "type": ... }}}}
val nested = Some(imd.mapping("infoclone").getSourceAsMap.get("properties"))
val flds = nested.extract("fields").extract("properties")
flds.extractOneValueBy[String]("type").map { case (k, v) => s"$k:$v" }
}
.flatten
.toSet
)
}
// Indexes current infotons (tombstones go straight to history) and moves each
// previous version from the current index to the history index, all in one
// bulk request.
def bulkIndex(currentInfotons: Seq[Infoton],
previousInfotons: Seq[Infoton] = Nil,
partition: String = defaultPartition)(implicit executionContext: ExecutionContext) = {
val bulkRequest = client.prepareBulk()
currentInfotons.foreach { current =>
// DeletedInfoton is a tombstone: it belongs in the history index
val indexSuffix = if (current.isInstanceOf[DeletedInfoton]) "_history" else "_current"
bulkRequest.add(
client
.prepareIndex(partition + indexSuffix, "infoclone", current.uuid)
.setSource(JsonSerializer.encodeInfoton(current, toEs = true))
)
}
previousInfotons.foreach { previous =>
// write to history, then remove from current
bulkRequest.add(
client
.prepareIndex(partition + "_history", "infoclone", previous.uuid)
.setSource(JsonSerializer.encodeInfoton(previous, toEs = true))
)
bulkRequest.add(client.prepareDelete(partition + "_current", "infoclone", previous.uuid))
}
injectFuture[BulkResponse](bulkRequest.execute)
}
/**
* Deletes given infoton from Current index, save it to the history index and put a tomb stone
* to mark it is deleted
*
* @param deletedInfoton Deleted Infoton (tombstone) to index in history index
* @param previousInfoton last version of this infoton to index in history and remove from current
* @param partition logical name of partition. Used for targeting a specific index
* @return always true once all three operations completed successfully
*/
def delete(deletedInfoton: Infoton, previousInfoton: Infoton, partition: String = defaultPartition)(
implicit executionContext: ExecutionContext
): Future[Boolean] = {
// The for-comprehension sequences the three ES calls one after another.
for {
// index previous Infoton in history index
prev <- injectFuture[IndexResponse](
client
.prepareIndex(partition + "_history", "infoclone", previousInfoton.uuid)
.setSource(JsonSerializer.encodeInfoton(previousInfoton, toEs = true))
.execute(_)
)
// index tomb stone in history index
tomb <- injectFuture[IndexResponse](
client
.prepareIndex(partition + "_history", "infoclone", deletedInfoton.uuid)
.setSource(JsonSerializer.encodeInfoton(deletedInfoton, toEs = true))
.execute(_)
)
// delete from current index
deleted <- injectFuture[DeleteResponse](
client.prepareDelete(partition + "_current", "infoclone", previousInfoton.uuid).execute(_)
)
} yield true
}
/**
* Completely erase !! infoton of given UUID from History index.
*
* @param uuid
* @param partition logical name of partition. Used for targeting a specific index
* @return always true once the delete request completed (even if the uuid was absent)
*/
def purge(uuid: String,
partition: String = defaultPartition)(implicit executionContext: ExecutionContext): Future[Boolean] = {
injectFuture[DeleteResponse](client.prepareDelete(partition + "_history", "infoclone", uuid).execute(_))
.map(x => true)
}
// Purges the given history uuids from all history indices and, if supplied, the
// current uuid from all current indices, in one bulk request. Deletes use
// VersionType.FORCE with version 1 to bypass optimistic-concurrency checks.
def purgeByUuids(historyUuids: Seq[String], currentUuid: Option[String], partition: String = defaultPartition)(
implicit executionContext: ExecutionContext
): Future[BulkResponse] = {
// empty request -> empty response
if (historyUuids.isEmpty && currentUuid.isEmpty)
Future.successful(new BulkResponse(Array[BulkItemResponse](), 0))
else {
val currentIndices = getIndicesNamesByType("current", partition)
val historyIndices = getIndicesNamesByType("history", partition)
val bulkRequest = client.prepareBulk()
for (uuid <- historyUuids; index <- historyIndices) {
bulkRequest.add(
client
.prepareDelete(index, "infoclone", uuid)
.setVersionType(VersionType.FORCE)
.setVersion(1L)
)
}
for (uuid <- currentUuid; index <- currentIndices) {
bulkRequest.add(
client
.prepareDelete(index, "infoclone", uuid)
.setVersionType(VersionType.FORCE)
.setVersion(1L)
)
}
injectFuture[BulkResponse](bulkRequest.execute(_))
}
}
// Purges each (uuid, index) pair in one bulk request; VersionType.FORCE with
// version 1 bypasses optimistic-concurrency checks.
def purgeByUuidsAndIndexes(uuidsAtIndexes: Vector[(String, String)], partition: String = defaultPartition)(
implicit executionContext: ExecutionContext
): Future[BulkResponse] = {
// empty request -> empty response
if (uuidsAtIndexes.isEmpty)
Future.successful(new BulkResponse(Array[BulkItemResponse](), 0))
else {
val bulkRequest = client.prepareBulk()
uuidsAtIndexes.foreach {
case (uuid, index) =>
bulkRequest.add(
client
.prepareDelete(index, "infoclone", uuid)
.setVersionType(VersionType.FORCE)
.setVersion(1L)
)
}
injectFuture[BulkResponse](bulkRequest.execute(_))
}
}
// Purges each uuid from every current and history index of the partition, in
// one bulk request; VersionType.FORCE with version 1 bypasses version checks.
def purgeByUuidsFromAllIndexes(uuids: Vector[String], partition: String = defaultPartition)(
implicit executionContext: ExecutionContext
): Future[BulkResponse] = {
// empty request -> empty response
if (uuids.isEmpty)
Future.successful(new BulkResponse(Array[BulkItemResponse](), 0))
else {
val currentIndices = getIndicesNamesByType("current", partition)
val historyIndices = getIndicesNamesByType("history", partition)
val bulkRequest = client.prepareBulk()
for (uuid <- uuids; index <- historyIndices) {
bulkRequest.add(
client
.prepareDelete(index, "infoclone", uuid)
.setVersionType(VersionType.FORCE)
.setVersion(1L)
)
}
for (uuid <- uuids; index <- currentIndices) {
bulkRequest.add(
client
.prepareDelete(index, "infoclone", uuid)
.setVersionType(VersionType.FORCE)
.setVersion(1L)
)
}
injectFuture[BulkResponse](bulkRequest.execute(_))
}
}
/**
*
* ONLY USE THIS IF YOU HAVE NO CHOICE, IF YOU CAN SOMEHOW GET UUIDS - PURGING BY UUIDS HAS BETTER PERFORMANCE
*
* Completely erase all versions of infoton with given path
*
* @param path
* @param isRecursive whether to erase all children of this infoton of all versions (found by path)
* @param partition logical name of partition. Used for targeting a specific index
*/
def purgeAll(path: String, isRecursive: Boolean = true, partition: String = defaultPartition)(
implicit executionContext: ExecutionContext
): Future[Boolean] = {
// NOTE(review): isRecursive is ignored — termQuery("path", path) matches the
// exact path only, so children are not purged despite the parameter's doc.
val indices = getIndicesNamesByType("history") ++ getIndicesNamesByType("current")
injectFuture[DeleteByQueryResponse](
client
.prepareDeleteByQuery((indices): _*)
.setTypes("infoclone")
.setQuery(termQuery("path", path))
.execute(_)
).map(x => true)
}
/**
*
* ONLY USE THIS IF YOU HAVE NO CHOICE, IF YOU CAN SOMEHOW GET UUIDS - PURGING BY UUIDS HAS BETTER PERFORMANCE
*
*
* Completely erase the current one, but none of historical versions of infoton with given path
* This makes no sense unless you re-index the current one right away. Currently the only usage is by x-fix-dc
*
* @param path
* @param isRecursive whether to erase all children of this infoton of all versions (found by path)
* @param partition logical name of partition. Used for targeting a specific index
*/
def purgeCurrent(path: String, isRecursive: Boolean = true, partition: String = defaultPartition)(
implicit executionContext: ExecutionContext
): Future[Boolean] = {
// NOTE(review): isRecursive is ignored — the term query matches the exact path only.
val indices = getIndicesNamesByType("current")
injectFuture[DeleteByQueryResponse](
client
.prepareDeleteByQuery((indices): _*)
.setTypes("infoclone")
.setQuery(termQuery("path", path))
.execute(_)
).map(x => true)
}
/**
*
* ONLY USE THIS IF YOU HAVE NO CHOICE, IF YOU CAN SOMEHOW GET UUIDS - PURGING BY UUIDS HAS BETTER PERFORMANCE
*
*
* Completely erase all historical versions of infoton with given path, but not the current one
*
* @param path
* @param isRecursive whether to erase all children of this infoton of all versions (found by path)
* @param partition logical name of partition. Used for targeting a specific index
*/
def purgeHistory(path: String, isRecursive: Boolean = true, partition: String = defaultPartition)(
implicit executionContext: ExecutionContext
): Future[Boolean] = {
// NOTE(review): isRecursive is ignored — the term query matches the exact path only.
val indices = getIndicesNamesByType("history")
injectFuture[DeleteByQueryResponse](
client
.prepareDeleteByQuery((indices): _*)
.setTypes("infoclone")
.setQuery(termQuery("path", path))
.execute(_)
).map(x => true)
}
/** Resolves the concrete index names behind the alias "&lt;partition&gt;_&lt;suffix&gt;" (e.g. "cmwell_current"). */
def getIndicesNamesByType(suffix: String, partition: String = defaultPartition) = {
  val aliasResponse = client.admin
    .indices()
    .prepareGetAliases(s"${partition}_${suffix}")
    .execute()
    .actionGet()
  aliasResponse.getAliases.keysIt().asScala.toSeq
}
/** Reads a typed stored-field value from a search hit, capturing any failure (missing field, bad cast) in a Try. */
private def getValueAs[T](hit: SearchHit, fieldName: String): Try[T] =
  Try(hit.field(fieldName).getValue[T])
/**
  * Reads a numeric stored field that may have been indexed either as Long (newer indices) or
  * as Int (older indices): first tries Long, then falls back to tryInt, which itself falls
  * back to `default`.
  *
  * @param f          applied to the successfully extracted value
  * @param default    returned (by tryInt) when the field can be read neither as Long nor as Int
  * @param uuid       infoton uuid, used only for log correlation
  * @param pathForLog infoton path, used only for log correlation
  */
private def tryLongThenInt[V](hit: SearchHit,
                              fieldName: String,
                              f: Long => V,
                              default: V,
                              uuid: String,
                              pathForLog: String): V =
  try {
    getValueAs[Long](hit, fieldName) match {
      case Success(l) => f(l)
      case Failure(e) => {
        e.setStackTrace(Array.empty) // no need to fill the logs with redundant stack trace
        loger.trace(s"$fieldName not Long (outer), uuid = $uuid, path = $pathForLog", e)
        tryInt(hit, fieldName, f, default, uuid)
      }
    }
  } catch {
    // NOTE(review): catching Throwable also swallows fatal errors (OOM, interrupts);
    // scala.util.control.NonFatal would be safer - confirm before changing behavior
    case e: Throwable => {
      loger.trace(s"$fieldName not Long (inner), uuid = $uuid", e)
      tryInt(hit, fieldName, f, default, uuid)
    }
  }
/**
  * Second stage of tryLongThenInt: attempts to read the field as Int (old-index layout),
  * widening to Long before applying `f`; logs and returns `default` on any failure.
  */
private def tryInt[V](hit: SearchHit, fieldName: String, f: Long => V, default: V, uuid: String): V =
  try {
    getValueAs[Int](hit, fieldName) match {
      case Success(i) => f(i.toLong)
      case Failure(e) => {
        loger.error(s"$fieldName not Int (outer), uuid = $uuid", e)
        default
      }
    }
  } catch {
    // NOTE(review): catching Throwable also swallows fatal errors; consider NonFatal
    case e: Throwable => {
      loger.error(s"$fieldName not Int (inner), uuid = $uuid", e)
      default
    }
  }
/**
  * Converts an ES search response into lightweight FTSThinInfoton records
  * (path / uuid / lastModified / indexTime, plus the relevance score when requested).
  * Returns an empty Seq when the response carries no hits.
  */
private def esResponseToThinInfotons(
  esResponse: org.elasticsearch.action.search.SearchResponse,
  includeScore: Boolean
)(implicit executionContext: ExecutionContext): Seq[FTSThinInfoton] = {
  val hits = esResponse.getHits().hits()
  if (hits.isEmpty) Seq.empty
  else
    hits.toSeq.map { hit =>
      def stringField(name: String) = hit.field(name).value.asInstanceOf[String]
      val path = stringField("system.path")
      val uuid = stringField("system.uuid")
      // indexTime may be stored as Long or Int depending on index age
      val indexTime = tryLongThenInt[Long](hit, "system.indexTime", identity, -1L, uuid, path)
      val score = if (includeScore) Some(hit.score()) else None
      FTSThinInfoton(path, uuid, stringField("system.lastModified"), indexTime, score)
    }
}
/**
  * Converts an ES search response into fully-typed Infoton instances, dispatching on the
  * stored "type" field (Object/File/Link/Deleted).
  *
  * @param esResponse   raw ES response whose hits carry the stored system.* / content.* fields
  * @param includeScore when true, the relevance score is exposed as a virtual "$score" field
  * @return the hits mapped to Infotons, or an empty vector when there are no hits
  * @throws IllegalArgumentException when a hit carries an unknown "type" value
  */
private def esResponseToInfotons(esResponse: org.elasticsearch.action.search.SearchResponse,
                                 includeScore: Boolean): Vector[Infoton] = {
  if (esResponse.getHits().hits().nonEmpty) {
    val hits = esResponse.getHits().hits()
    hits.map { hit =>
      val path = hit.field("system.path").getValue.asInstanceOf[String]
      val lastModified = new DateTime(hit.field("system.lastModified").getValue.asInstanceOf[String])
      val id = hit.field("system.uuid").getValue.asInstanceOf[String]
      // old documents may lack system.dc - fall back to this cluster's data center
      val dc = Try(hit.field("system.dc").getValue.asInstanceOf[String]).getOrElse(Settings.dataCenter)
      // indexTime may be stored as Long or Int (old indices); None when absent/unreadable
      val indexTime = tryLongThenInt[Option[Long]](hit, "system.indexTime", Some.apply[Long], None, id, path)
      val score: Option[Map[String, Set[FieldValue]]] =
        if (includeScore) Some(Map("$score" -> Set(FExtra(hit.score(), sysQuad)))) else None
      hit.field("type").getValue.asInstanceOf[String] match {
        case "ObjectInfoton" =>
          new ObjectInfoton(path, dc, indexTime, lastModified, score) {
            override def uuid = id // keep the indexed uuid rather than recomputing it
            override def kind = "ObjectInfoton"
          }
        case "FileInfoton" =>
          val contentLength = tryLongThenInt[Long](hit, "content.length", identity, -1L, id, path)
          new FileInfoton(
            path,
            dc,
            indexTime,
            lastModified,
            score,
            Some(FileContent(hit.field("content.mimeType").getValue.asInstanceOf[String], contentLength))
          ) {
            override def uuid = id
            override def kind = "FileInfoton"
          }
        case "LinkInfoton" =>
          new LinkInfoton(path,
                          dc,
                          indexTime,
                          lastModified,
                          score,
                          hit.field("linkTo").getValue.asInstanceOf[String],
                          hit.field("linkType").getValue[Int]) {
            override def uuid = id
            override def kind = "LinkInfoton"
          }
        case "DeletedInfoton" =>
          new DeletedInfoton(path, dc, indexTime, lastModified) {
            override def uuid = id
            override def kind = "DeletedInfoton"
          }
        case unknown =>
          throw new IllegalArgumentException(s"content returned from elasticsearch is illegal [$unknown]") // TODO change to our appropriate exception
      }
    }.toVector
  } else {
    Vector.empty
  }
}
/**
  * Lists the current-version children of the infoton at the given path by delegating to
  * [[search]] with a PathFilter.
  *
  * @param path        parent path whose children are listed
  * @param offset      pagination offset (default 0)
  * @param length      page size (default 20)
  * @param descendants when true, all descendants are returned, not only direct children
  * @param partition   logical name of partition. NOTE(review): not forwarded to search here,
  *                    so the default partition is always used - confirm this is intended
  * @return a Future of the matching infotons wrapped in an FTSSearchResponse
  */
def listChildren(
  path: String,
  offset: Int = 0,
  length: Int = 20,
  descendants: Boolean = false,
  partition: String = defaultPartition
)(implicit executionContext: ExecutionContext): Future[FTSSearchResponse] = {
  search(pathFilter = Some(PathFilter(path, descendants)), paginationParams = PaginationParams(offset, length))
}
/**
  * Classification of a CM-Well field for ES purposes; `unmapped` is the string passed to ES
  * as the "unmapped_type" when sorting on a field that may be absent from some indices.
  */
trait FieldType {
  def unmapped: String
}
case object DateType extends FieldType {
  override val unmapped = "date"
}
case object IntType extends FieldType {
  override val unmapped = "integer"
}
case object LongType extends FieldType {
  override val unmapped = "long"
}
case object FloatType extends FieldType {
  override val unmapped = "float"
}
case object DoubleType extends FieldType {
  override val unmapped = "double"
}
case object BooleanType extends FieldType {
  override val unmapped = "boolean"
}
case object StringType extends FieldType {
  override val unmapped = "string"
}
/**
  * Infers the [[FieldType]] of a field from its name: well-known system/content fields are
  * mapped explicitly, user fields are classified by their two-character type prefix
  * (e.g. "i$" for integer), and anything else defaults to StringType.
  */
private def fieldType(fieldName: String) = {
  // user-field type prefixes -> field types
  val prefixToType = Map(
    "d$" -> DateType,
    "i$" -> IntType,
    "l$" -> LongType,
    "f$" -> FloatType,
    "w$" -> DoubleType,
    "b$" -> BooleanType
  )
  fieldName match {
    case "system.lastModified" => DateType
    case "content.length" | "system.indexTime" => LongType
    case "type" | "system.parent" | "system.path" | "system.uuid" | "system.dc" | "system.quad" | "content.data" |
        "content.base64-data" | "content.mimeType" =>
      StringType
    case other => prefixToType.getOrElse(other.take(2), StringType)
  }
}
/**
  * Mutates the given search request with query/filter clauses built from the supplied
  * CM-Well filters: path and date constraints become an ES bool filter, field filters become
  * a (possibly nested) bool query, and the final query shape is chosen by which of the two
  * builders ended up with clauses (see the match at the bottom).
  *
  * @param preferFilter when only field filters exist, wrap them in a query-filter instead of
  *                     using them as the query directly
  * NOTE(review): withHistory / withDeleted are accepted but never referenced in this body -
  * confirm whether history/deleted narrowing was meant to happen here.
  */
private def applyFiltersToRequest(request: SearchRequestBuilder,
                                  pathFilter: Option[PathFilter] = None,
                                  fieldFilterOpt: Option[FieldFilter] = None,
                                  datesFilter: Option[DatesFilter] = None,
                                  withHistory: Boolean = false,
                                  withDeleted: Boolean = false,
                                  preferFilter: Boolean = false) = {
  val boolFilterBuilder: BoolFilterBuilder = boolFilter()
  // path constraint: root ("/") with descendants adds no clause at all (match everything);
  // otherwise match "parent" (direct children) or "parent_hierarchy" (all descendants)
  pathFilter.foreach { pf =>
    if (pf.path.equals("/")) {
      if (!pf.descendants) {
        boolFilterBuilder.must(termFilter("parent", "/"))
      }
    } else {
      boolFilterBuilder.must(termFilter(if (pf.descendants) "parent_hierarchy" else "parent", pf.path))
    }
  }
  // date range over lastModified; open-ended bounds are passed as null
  datesFilter.foreach { df =>
    boolFilterBuilder.must(
      rangeFilter("lastModified")
        .from(df.from.map[Any](_.getMillis).orNull)
        .to(df.to.map[Any](_.getMillis).orNull)
    )
  }
  val fieldsOuterQueryBuilder = boolQuery()
  fieldFilterOpt.foreach { ff =>
    applyFieldFilter(ff, fieldsOuterQueryBuilder)
  }
  // recursively translates a (possibly nested) FieldFilter tree into bool-query clauses
  def applyFieldFilter(fieldFilter: FieldFilter, outerQueryBuilder: BoolQueryBuilder): Unit = {
    fieldFilter match {
      case SingleFieldFilter(fieldOperator, valueOperator, name, valueOpt) =>
        if (valueOpt.isDefined) {
          val value = valueOpt.get
          // Equals/range comparisons must target the non-analyzed "%exact" sub-field of user fields
          val exactFieldName = fieldType(name) match {
            case StringType if (!name.startsWith("system")) => s"fields.${name}.%exact"
            case _ => name
          }
          val valueQuery = valueOperator match {
            case Contains => matchPhraseQuery(name, value)
            case Equals => termQuery(exactFieldName, value)
            case GreaterThan => rangeQuery(exactFieldName).gt(value)
            case GreaterThanOrEquals => rangeQuery(exactFieldName).gte(value)
            case LessThan => rangeQuery(exactFieldName).lt(value)
            case LessThanOrEquals => rangeQuery(exactFieldName).lte(value)
            case Like => fuzzyLikeThisFieldQuery(name).likeText(value)
          }
          fieldOperator match {
            case Must => outerQueryBuilder.must(valueQuery)
            case MustNot => outerQueryBuilder.mustNot(valueQuery)
            case Should => outerQueryBuilder.should(valueQuery)
          }
        } else {
          // no value given: test for existence (or absence, for MustNot) of the field
          fieldOperator match {
            case Must => outerQueryBuilder.must(filteredQuery(matchAllQuery(), existsFilter(name)))
            case MustNot => outerQueryBuilder.must(filteredQuery(matchAllQuery(), missingFilter(name)))
            case _ => outerQueryBuilder.should(filteredQuery(matchAllQuery(), existsFilter(name)))
          }
        }
      case MultiFieldFilter(fieldOperator, filters) =>
        val innerQueryBuilder = boolQuery()
        filters.foreach { ff =>
          applyFieldFilter(ff, innerQueryBuilder)
        }
        fieldOperator match {
          case Must => outerQueryBuilder.must(innerQueryBuilder)
          case MustNot => outerQueryBuilder.mustNot(innerQueryBuilder)
          case Should => outerQueryBuilder.should(innerQueryBuilder)
        }
    }
  }
  // NOTE(review): superseded by the match below; kept for reference
  //    val preferFilter = false
  //
  //    (fieldFilterOpt.nonEmpty, pathFilter.isDefined || datesFilter.isDefined, preferFilter) match {
  //      case (true,_, false) =>
  //        val query = filteredQuery(fieldsOuterQueryBuilder, boolFilterBuilder)
  //        request.setQuery(query)
  //      case (true, _, true) =>
  //        val query = filteredQuery(matchAllQuery(), andFilter(queryFilter(fieldsOuterQueryBuilder), boolFilterBuilder))
  //        request.setQuery(query)
  //      case (false, true, _) =>
  //        request.setQuery(filteredQuery(matchAllQuery(), boolFilterBuilder))
  //      case (false, false, _) => // this option is not possible due to the validation at the beginning of the method
  //    }
  // pick the final query shape based on which builders actually received clauses
  val query = (fieldsOuterQueryBuilder.hasClauses, boolFilterBuilder.hasClauses) match {
    case (true, true) =>
      filteredQuery(fieldsOuterQueryBuilder, boolFilterBuilder)
    case (false, true) =>
      filteredQuery(matchAllQuery(), boolFilterBuilder)
    case (true, false) =>
      if (preferFilter)
        filteredQuery(matchAllQuery(), queryFilter(fieldsOuterQueryBuilder))
      else
        fieldsOuterQueryBuilder
    case _ => matchAllQuery()
  }
  request.setQuery(query)
}
/** Implicitly converts CM-Well's FieldSortOrder into the corresponding ES SortOrder. */
implicit def sortOrder2SortOrder(fieldSortOrder: FieldSortOrder): SortOrder =
  fieldSortOrder match {
    case Asc  => SortOrder.ASC
    case Desc => SortOrder.DESC
  }
/**
  * Runs an aggregations-only search (SearchType.COUNT, i.e. no hits returned) built from the
  * given aggregation filters, optionally narrowed by path/field/date filters, and converts
  * the resulting ES aggregation tree back into CM-Well's AggregationsResponse model.
  *
  * @param aggregationFilters the aggregations to compute; bucket aggregations may nest sub-filters
  * @param withHistory        when true, historical indices are searched in addition to current
  * @param debugInfo          when true, the serialized ES request is attached to the response
  */
def aggregate(
  pathFilter: Option[PathFilter] = None,
  fieldFilter: Option[FieldFilter],
  datesFilter: Option[DatesFilter] = None,
  paginationParams: PaginationParams = DefaultPaginationParams,
  aggregationFilters: Seq[AggregationFilter],
  withHistory: Boolean = false,
  partition: String = defaultPartition,
  debugInfo: Boolean = false
)(implicit executionContext: ExecutionContext): Future[AggregationsResponse] = {
  val indices = (partition + "_current") :: (withHistory match {
    case true => partition + "_history" :: Nil
    case false => Nil
  })
  val request = client
    .prepareSearch(indices: _*)
    .setTypes("infoclone")
    .setFrom(paginationParams.offset)
    .setSize(paginationParams.length)
    .setSearchType(SearchType.COUNT) // aggregation results only - hits are not fetched
  if (pathFilter.isDefined || fieldFilter.nonEmpty || datesFilter.isDefined) {
    applyFiltersToRequest(request, pathFilter, fieldFilter, datesFilter)
  }
  // each ES aggregation gets a unique name ("<filterName>_<counter>"); filtersMap maps the
  // generated name back to the originating filter so responses can be correlated
  var counter = 0
  val filtersMap: collection.mutable.Map[String, AggregationFilter] = collection.mutable.Map.empty
  // recursively translates a CM-Well AggregationFilter into an ES aggregation builder
  def filterToBuilder(filter: AggregationFilter): AbstractAggregationBuilder = {
    // NonAnalyzed fields must target the non-analyzed "%exact" sub-field
    implicit def fieldValueToValue(fieldValue: Field) = fieldValue.operator match {
      case AnalyzedField => fieldValue.value
      case NonAnalyzedField => s"infoclone.fields.${fieldValue.value}.%exact"
    }
    val name = filter.name + "_" + counter
    counter += 1
    filtersMap.put(name, filter)
    val aggBuilder = filter match {
      case TermAggregationFilter(_, field, size, _) =>
        AggregationBuilders.terms(name).field(field).size(size)
      case StatsAggregationFilter(_, field) =>
        AggregationBuilders.stats(name).field(field)
      case HistogramAggregationFilter(_, field, interval, minDocCount, extMin, extMax, _) =>
        // ES expects nullable boxed Longs for the optional extended bounds
        val eMin: java.lang.Long = extMin.getOrElse(null).asInstanceOf[java.lang.Long]
        val eMax: java.lang.Long = extMax.getOrElse(null).asInstanceOf[java.lang.Long]
        AggregationBuilders
          .histogram(name)
          .field(field)
          .interval(interval)
          .minDocCount(minDocCount)
          .extendedBounds(eMin, eMax)
      case SignificantTermsAggregationFilter(_, field, backGroundTermOpt, minDocCount, size, _) =>
        val sigTermsBuilder =
          AggregationBuilders.significantTerms(name).field(field).minDocCount(minDocCount).size(size)
        backGroundTermOpt.foreach { backGroundTerm =>
          sigTermsBuilder.backgroundFilter(termFilter(backGroundTerm._1, backGroundTerm._2))
        }
        sigTermsBuilder
      case CardinalityAggregationFilter(_, field, precisionThresholdOpt) =>
        val cardinalityAggBuilder = AggregationBuilders.cardinality(name).field(field)
        precisionThresholdOpt.foreach { precisionThreshold =>
          cardinalityAggBuilder.precisionThreshold(precisionThreshold)
        }
        cardinalityAggBuilder
    }
    // bucket aggregations may carry nested sub-aggregations
    if (filter.isInstanceOf[BucketAggregationFilter]) {
      filter.asInstanceOf[BucketAggregationFilter].subFilters.foreach { subFilter =>
        aggBuilder
          .asInstanceOf[AggregationBuilder[_ <: AggregationBuilder[_ <: Any]]]
          .subAggregation(filterToBuilder(subFilter))
      }
    }
    aggBuilder
  }
  aggregationFilters.foreach { filter =>
    request.addAggregation(filterToBuilder(filter))
  }
  val searchQueryStr = if (debugInfo) Some(request.toString) else None
  val resFuture = injectFuture[SearchResponse](request.execute(_))
  // converts the ES aggregation results (recursively, including sub-aggregations) back into
  // the CM-Well response model, using filtersMap to recover each originating filter
  def esAggsToOurAggs(aggregations: Aggregations, debugInfo: Option[String] = None): AggregationsResponse = {
    AggregationsResponse(
      aggregations.asScala.map {
        case ta: InternalTerms =>
          TermsAggregationResponse(
            filtersMap.get(ta.getName).get.asInstanceOf[TermAggregationFilter],
            ta.getBuckets.asScala.map { b =>
              val subAggregations: Option[AggregationsResponse] =
                b.asInstanceOf[HasAggregations].getAggregations match {
                  case null => None
                  case subAggs => if (subAggs.asList().size() > 0) Some(esAggsToOurAggs(subAggs)) else None
                }
              Bucket(FieldValue(b.getKey), b.getDocCount, subAggregations)
            }.toSeq
          )
        case sa: InternalStats =>
          StatsAggregationResponse(
            filtersMap.get(sa.getName).get.asInstanceOf[StatsAggregationFilter],
            sa.getCount,
            sa.getMin,
            sa.getMax,
            sa.getAvg,
            sa.getSum
          )
        case ca: InternalCardinality =>
          CardinalityAggregationResponse(filtersMap.get(ca.getName).get.asInstanceOf[CardinalityAggregationFilter],
                                         ca.getValue)
        case ha: Histogram =>
          HistogramAggregationResponse(
            filtersMap.get(ha.getName).get.asInstanceOf[HistogramAggregationFilter],
            ha.getBuckets.asScala.map { b =>
              val subAggregations: Option[AggregationsResponse] =
                b.asInstanceOf[HasAggregations].getAggregations match {
                  case null => None
                  case subAggs => Some(esAggsToOurAggs(subAggs))
                }
              Bucket(FieldValue(b.getKeyAsNumber.longValue()), b.getDocCount, subAggregations)
            }.toSeq
          )
        case sta: InternalSignificantTerms =>
          val buckets = sta.getBuckets.asScala.toSeq
          SignificantTermsAggregationResponse(
            filtersMap.get(sta.getName).get.asInstanceOf[SignificantTermsAggregationFilter],
            if (!buckets.isEmpty) buckets(0).getSubsetSize else 0,
            buckets.map { b =>
              val subAggregations: Option[AggregationsResponse] =
                b.asInstanceOf[HasAggregations].getAggregations match {
                  case null => None
                  case subAggs => Some(esAggsToOurAggs(subAggs))
                }
              SignificantTermsBucket(FieldValue(b.getKey),
                                     b.getDocCount,
                                     b.getSignificanceScore,
                                     b.getSubsetDf,
                                     subAggregations)
            }.toSeq
          )
        case _ => ??? // unsupported aggregation type - deliberately fails fast
      }.toSeq,
      debugInfo
    )
  }
  resFuture.map { searchResponse =>
    esAggsToOurAggs(searchResponse.getAggregations, searchQueryStr)
  }
}
/**
  * Searches for infotons matching the given filters, with pagination and optional sorting,
  * and materializes the hits as full Infotons.
  *
  * @throws IllegalArgumentException when no path/field/date filter is supplied
  * @param sortParams NullSortParam disables sorting (ES relevance order); an empty
  *                   FieldSortParams sorts by lastModified descending
  * @param timeout    optional per-request timeout applied to the injected future
  */
def search(
  pathFilter: Option[PathFilter] = None,
  fieldsFilter: Option[FieldFilter] = None,
  datesFilter: Option[DatesFilter] = None,
  paginationParams: PaginationParams = DefaultPaginationParams,
  sortParams: SortParam = SortParam.empty,
  withHistory: Boolean = false,
  withDeleted: Boolean = false,
  partition: String = defaultPartition,
  debugInfo: Boolean = false,
  timeout: Option[Duration] = None
)(implicit executionContext: ExecutionContext, logger: Logger = loger): Future[FTSSearchResponse] = {
  logger.debug(
    s"Search request: $pathFilter, $fieldsFilter, $datesFilter, $paginationParams, $sortParams, $withHistory, $partition, $debugInfo"
  )
  if (pathFilter.isEmpty && fieldsFilter.isEmpty && datesFilter.isEmpty) {
    throw new IllegalArgumentException("at least one of the filters is needed in order to search")
  }
  val indices = (partition + "_current") :: (if (withHistory) partition + "_history" :: Nil else Nil)
  // stored fields needed by esResponseToInfotons to rebuild each infoton type
  val fields = "type" :: "system.path" :: "system.uuid" :: "system.lastModified" :: "content.length" ::
    "content.mimeType" :: "linkTo" :: "linkType" :: "system.dc" :: "system.indexTime" :: "system.quad" :: Nil
  val request = client
    .prepareSearch(indices: _*)
    .setTypes("infoclone")
    .addFields(fields: _*)
    .setFrom(paginationParams.offset)
    .setSize(paginationParams.length)
  sortParams match {
    case NullSortParam => // don't sort.
    case FieldSortParams(fsp) if fsp.isEmpty => request.addSort("system.lastModified", SortOrder.DESC)
    case FieldSortParams(fsp) =>
      fsp.foreach {
        case (name, order) => {
          // compute the ES "unmapped_type" so sorting works on indices lacking the field
          val unmapped = name match {
            // newly added sys fields should be stated explicitly since not existing in old indices
            case "system.indexTime" => "long"
            case "system.dc" => "string"
            case "system.quad" => "string"
            case _ => {
              if (name.startsWith("system.") || name.startsWith("content.")) null
              else
                name.take(2) match {
                  case "d$" => "date"
                  case "i$" => "integer"
                  case "l$" => "long"
                  case "f$" => "float"
                  case "w$" => "double"
                  case "b$" => "boolean"
                  case _ => "string"
                }
            }
          }
          // string user-fields are sorted on their non-analyzed "%exact" sub-field
          val uname = if (unmapped == "string" && name != "type") s"fields.${name}.%exact" else name
          request.addSort(fieldSort(uname).order(order).unmappedType(unmapped))
        }
      }
  }
  applyFiltersToRequest(request, pathFilter, fieldsFilter, datesFilter, withHistory, withDeleted)
  val searchQueryStr = if (debugInfo) Some(request.toString) else None
  logger.debug(s"^^^^^^^(**********************\\n\\n request: ${request.toString}\\n\\n")
  val resFuture = timeout match {
    case Some(t) => injectFuture[SearchResponse](request.execute, t)
    case None => injectFuture[SearchResponse](request.execute)
  }
  resFuture.map { response =>
    FTSSearchResponse(
      response.getHits.getTotalHits,
      paginationParams.offset,
      response.getHits.getHits.size,
      // the relevance score is only meaningful when no explicit sort was applied
      esResponseToInfotons(response, sortParams eq NullSortParam),
      searchQueryStr
    )
  }
}
/**
  * Like [[search]], but returns lightweight FTSThinInfoton results
  * (path/uuid/lastModified/indexTime/score) instead of fully-materialized infotons.
  *
  * @throws IllegalArgumentException when no path/field/date filter is supplied
  * @param withDeleted forwarded to applyFiltersToRequest. NOTE(review): that method currently
  *                    ignores it - confirm intended semantics
  */
def thinSearch(
  pathFilter: Option[PathFilter] = None,
  fieldsFilter: Option[FieldFilter] = None,
  datesFilter: Option[DatesFilter] = None,
  paginationParams: PaginationParams = DefaultPaginationParams,
  sortParams: SortParam = SortParam.empty,
  withHistory: Boolean = false,
  withDeleted: Boolean,
  partition: String = defaultPartition,
  debugInfo: Boolean = false,
  timeout: Option[Duration] = None
)(implicit executionContext: ExecutionContext, logger: Logger = loger): Future[FTSThinSearchResponse] = {
  logger.debug(
    s"Search request: $pathFilter, $fieldsFilter, $datesFilter, $paginationParams, $sortParams, $withHistory, $partition, $debugInfo"
  )
  if (pathFilter.isEmpty && fieldsFilter.isEmpty && datesFilter.isEmpty) {
    throw new IllegalArgumentException("at least one of the filters is needed in order to search")
  }
  val indices = (partition + "_current") :: (withHistory match {
    case true => partition + "_history" :: Nil
    case false => Nil
  })
  // only the stored fields a thin infoton needs
  val fields = "system.path" :: "system.uuid" :: "system.lastModified" :: "system.indexTime" :: Nil
  val request = client
    .prepareSearch(indices: _*)
    .setTypes("infoclone")
    .addFields(fields: _*)
    .setFrom(paginationParams.offset)
    .setSize(paginationParams.length)
  sortParams match {
    case NullSortParam => // don't sort.
    case FieldSortParams(fsp) if fsp.isEmpty => request.addSort("system.lastModified", SortOrder.DESC)
    case FieldSortParams(fsp) =>
      fsp.foreach {
        case (name, order) => {
          // compute the ES "unmapped_type" so sorting works on indices lacking the field
          val unmapped = name match {
            // newly added sys fields should be stated explicitly since not existing in old indices
            case "system.indexTime" => "long"
            case "system.dc" => "string"
            case "system.quad" => "string"
            case _ => {
              if (name.startsWith("system.") || name.startsWith("content.")) null
              else
                name.take(2) match {
                  case "d$" => "date"
                  case "i$" => "integer"
                  case "l$" => "long"
                  case "f$" => "float"
                  case "w$" => "double"
                  case "b$" => "boolean"
                  case _ => "string"
                }
            }
          }
          request.addSort(fieldSort(name).order(order).unmappedType(unmapped))
        }
      }
  }
  applyFiltersToRequest(request, pathFilter, fieldsFilter, datesFilter, withHistory, withDeleted)
  // timestamp captured for debug-log correlation between request and response
  var oldTimestamp = 0L
  if (debugInfo) {
    oldTimestamp = System.currentTimeMillis()
    logger.debug(s"thinSearch debugInfo request ($oldTimestamp): ${request.toString}")
  }
  val resFuture = timeout match {
    case Some(t) => injectFuture[SearchResponse](request.execute, t)
    case None => injectFuture[SearchResponse](request.execute)
  }
  val searchQueryStr = if (debugInfo) Some(request.toString) else None
  resFuture
    .map { response =>
      if (debugInfo)
        logger.debug(
          s"thinSearch debugInfo response: ($oldTimestamp - ${System.currentTimeMillis()}): ${response.toString}"
        )
      FTSThinSearchResponse(
        response.getHits.getTotalHits,
        paginationParams.offset,
        response.getHits.getHits.size,
        // the relevance score is only meaningful when no explicit sort was applied
        esResponseToThinInfotons(response, sortParams eq NullSortParam),
        searchQueryStr = searchQueryStr
      )
    }
    .andThen {
      case Failure(err) =>
        logger.error(
          s"thinSearch failed, time took: [$oldTimestamp - ${System.currentTimeMillis()}], request:\\n${request.toString}"
        )
    }
}
/**
  * Returns the highest system.indexTime of any infoton in the given data center, excluding
  * everything under /meta/; None when no matching document exists.
  *
  * @param dc           data center whose latest index time is requested
  * @param withHistory  when true, historical indices are searched as well
  * @param fieldFilters optional extra filter prepended to the built-in dc/meta filters
  */
override def getLastIndexTimeFor(
  dc: String,
  withHistory: Boolean,
  partition: String = defaultPartition,
  fieldFilters: Option[FieldFilter]
)(implicit executionContext: ExecutionContext): Future[Option[Long]] = {
  val partitionsToSearch =
    if (withHistory) List(partition + "_current", partition + "_history")
    else List(partition + "_current")
  // a single hit sorted by indexTime descending yields the maximum
  val request = client
    .prepareSearch(partitionsToSearch: _*)
    .setTypes("infoclone")
    .addFields("system.indexTime")
    .setSize(1)
    .addSort("system.indexTime", SortOrder.DESC)
  val filtersSeq: List[FieldFilter] = List(
    SingleFieldFilter(Must, Equals, "system.dc", Some(dc)), //ONLY DC
    SingleFieldFilter(MustNot, Contains, "system.parent.parent_hierarchy", Some("/meta/")) //NO META
  )
  applyFiltersToRequest(
    request,
    None,
    Some(MultiFieldFilter(Must, fieldFilters.fold(filtersSeq)(filtersSeq.::))),
    None,
    withHistory = withHistory
  )
  injectFuture[SearchResponse](request.execute).map { sr =>
    val hits = sr.getHits().hits()
    if (hits.length < 1) None
    else {
      hits.headOption.map(_.field("system.indexTime").getValue.asInstanceOf[Long])
    }
  }
}
/**
  * Starts a scan-scroll restricted to a single shard on a specific node (via the ES
  * "_shards/_only_node" preference), preferring that node's own client connection when one
  * is available. Used by startSuperScroll to parallelize scrolling per primary shard.
  *
  * @param scrollTTL how long (seconds) ES keeps the scroll context alive between fetches
  * @param nodeId    ES node hosting the shard; also recorded in the returned scroll response
  * @param shard     shard number to scroll within the given index
  */
private def startShardScroll(
  pathFilter: Option[PathFilter] = None,
  fieldsFilter: Option[FieldFilter] = None,
  datesFilter: Option[DatesFilter] = None,
  withHistory: Boolean,
  withDeleted: Boolean,
  offset: Int,
  length: Int,
  scrollTTL: Long = scrollTTL,
  index: String,
  nodeId: String,
  shard: Int
)(implicit executionContext: ExecutionContext): Future[FTSStartScrollResponse] = {
  // stored fields needed later to rebuild infotons from the scrolled hits
  val fields = "type" :: "system.path" :: "system.uuid" :: "system.lastModified" :: "content.length" ::
    "content.mimeType" :: "linkTo" :: "linkType" :: "system.dc" :: "system.indexTime" :: "system.quad" :: Nil
  val request = clients
    .get(nodeId)
    .getOrElse(client) // fall back to the default client when no per-node client exists
    .prepareSearch(index)
    .setTypes("infoclone")
    .addFields(fields: _*)
    .setSearchType(SearchType.SCAN)
    .setScroll(TimeValue.timeValueSeconds(scrollTTL))
    .setSize(length)
    .setFrom(offset)
    .setPreference(s"_shards:$shard;_only_node:$nodeId")
  if (!pathFilter.isDefined && fieldsFilter.isEmpty && !datesFilter.isDefined) {
    // no filters at all: scroll everything in the shard
    request.setPostFilter(matchAllFilter())
  } else {
    applyFiltersToRequest(request, pathFilter, fieldsFilter, datesFilter, withHistory, withDeleted)
  }
  val scrollResponseFuture = injectFuture[SearchResponse](request.execute(_))
  scrollResponseFuture.map { scrollResponse =>
    FTSStartScrollResponse(scrollResponse.getHits.totalHits, scrollResponse.getScrollId, Some(nodeId))
  }
}
/**
  * Prepares one deferred scroll per primary shard of the cmwell_current (and optionally
  * cmwell_history) aliases, so callers can start them all in parallel. Each returned
  * thunk triggers startShardScroll for its (index, node, shard) triple when invoked.
  *
  * @return one zero-argument scroll starter per primary shard
  */
def startSuperScroll(
  pathFilter: Option[PathFilter] = None,
  fieldsFilter: Option[FieldFilter] = None,
  datesFilter: Option[DatesFilter] = None,
  paginationParams: PaginationParams = DefaultPaginationParams,
  scrollTTL: Long = scrollTTL,
  withHistory: Boolean,
  withDeleted: Boolean
)(implicit executionContext: ExecutionContext): Seq[() => Future[FTSStartScrollResponse]] = {
  val aliases = if (withHistory) List("cmwell_current", "cmwell_history") else List("cmwell_current")
  // ask the cluster which shards serve these aliases, then keep primaries only
  val ssr = client.admin().cluster().prepareSearchShards(aliases: _*).setTypes("infoclone").execute().actionGet()
  val targetedShards = ssr.getGroups.flatMap { shardGroup =>
    shardGroup.getShards.filter(_.primary()).map { shard =>
      (shard.index(), shard.currentNodeId(), shard.id())
    }
  }
  targetedShards.map {
    case (index, node, shard) =>
      () =>
        startShardScroll(pathFilter,
                         fieldsFilter,
                         datesFilter,
                         withHistory,
                         withDeleted,
                         paginationParams.offset,
                         paginationParams.length,
                         scrollTTL,
                         index,
                         node,
                         shard)
  }
}
/**
  * Starts a scan-scroll over the given (or default) indices, sizing the per-shard scroll
  * batch so that roughly paginationParams.length documents are returned per fetch.
  *
  * @param pathFilter / fieldsFilter / datesFilter optional narrowing filters
  * @param paginationParams length is the TOTAL desired batch size; it is divided by the
  *                         number of successful shards, since ES scroll size is per shard
  * @param scrollTTL   how long (seconds) ES keeps the scroll context alive between fetches
  * @param withHistory when true (and indexNames is empty), cmwell_history is included
  * @param indexNames  indices to search on; empty means the cmwell_current/history aliases
  * @param onlyNode    ES NodeID to restrict search to ("local" means local node), or None
  * @param partition   ignored by this old implementation (a warning is logged)
  * @return a Future of the initial scroll handle (total hits + scroll id)
  */
def startScroll(
  pathFilter: Option[PathFilter] = None,
  fieldsFilter: Option[FieldFilter] = None,
  datesFilter: Option[DatesFilter] = None,
  paginationParams: PaginationParams = DefaultPaginationParams,
  scrollTTL: Long = scrollTTL,
  withHistory: Boolean = false,
  withDeleted: Boolean,
  indexNames: Seq[String] = Seq.empty,
  onlyNode: Option[String] = None,
  partition: String,
  debugInfo: Boolean
)(implicit executionContext: ExecutionContext, logger: Logger = loger): Future[FTSStartScrollResponse] = {
  logger.debug(s"StartScroll request: $pathFilter, $fieldsFilter, $datesFilter, $paginationParams, $withHistory")
  if (partition != defaultPartition) {
    logger.warn("old implementation ignores partition parameter")
  }
  val indices = {
    if (indexNames.nonEmpty) indexNames
    else
      "cmwell_current" :: (withHistory match {
        case true => "cmwell_history" :: Nil
        case false => Nil
      })
  }
  val fields = "type" :: "system.path" :: "system.uuid" :: "system.lastModified" :: "content.length" ::
    "content.mimeType" :: "linkTo" :: "linkType" :: "system.dc" :: "system.indexTime" :: "system.quad" :: Nil
  // since in ES scroll API, size is per shard, we need to convert our paginationParams.length parameter to be per shard
  // We need to find how many shards are relevant for this query. For that we'll issue a fake search request
  val fakeRequest = client.prepareSearch(indices: _*).setTypes("infoclone").addFields(fields: _*)
  if (pathFilter.isEmpty && fieldsFilter.isEmpty && datesFilter.isEmpty) {
    fakeRequest.setPostFilter(matchAllFilter())
  } else {
    applyFiltersToRequest(fakeRequest, pathFilter, fieldsFilter, datesFilter)
  }
  // NOTE(review): this blocks the calling thread inside an async method - consider chaining
  // on the future (as rInfo does) instead of .get()
  val fakeResponse = fakeRequest.execute().get()
  val relevantShards = fakeResponse.getSuccessfulShards
  // rounded to lowest multiplacations of shardsperindex or to mimimum of 1
  val infotonsPerShard = (paginationParams.length / relevantShards).max(1)
  val request = client
    .prepareSearch(indices: _*)
    .setTypes("infoclone")
    .addFields(fields: _*)
    .setSearchType(SearchType.SCAN)
    .setScroll(TimeValue.timeValueSeconds(scrollTTL))
    .setSize(infotonsPerShard)
    .setFrom(paginationParams.offset)
  if (onlyNode.isDefined) {
    request.setPreference(s"_only_node_primary:${onlyNode.map { case "local" => localNodeId; case n => n }.get}")
  }
  if (pathFilter.isEmpty && fieldsFilter.isEmpty && datesFilter.isEmpty) {
    request.setPostFilter(matchAllFilter())
  } else {
    applyFiltersToRequest(request, pathFilter, fieldsFilter, datesFilter, withHistory, withDeleted)
  }
  val scrollResponseFuture = injectFuture[SearchResponse](request.execute(_))
  val searchQueryStr = if (debugInfo) Some(request.toString) else None
  scrollResponseFuture.map { scrollResponse =>
    FTSStartScrollResponse(scrollResponse.getHits.totalHits,
                           scrollResponse.getScrollId,
                           searchQueryStr = searchQueryStr)
  }
}
/**
  * Starts one scroll per (index, data-node) pair so that a "super" multi-scroll can consume
  * every index from every data node in parallel.
  *
  * @param withHistory when true, the history indices are scrolled as well
  * @param partition   ignored by this old implementation (a warning is logged)
  * @return one scroll-start Future per (index, data node) combination
  */
def startSuperMultiScroll(
  pathFilter: Option[PathFilter] = None,
  fieldsFilter: Option[FieldFilter] = None,
  datesFilter: Option[DatesFilter] = None,
  paginationParams: PaginationParams = DefaultPaginationParams,
  scrollTTL: Long = scrollTTL,
  withHistory: Boolean = false,
  withDeleted: Boolean,
  partition: String
)(implicit executionContext: ExecutionContext, logger: Logger): Seq[Future[FTSStartScrollResponse]] = {
  // fixed: message previously said "StartMultiScroll" (copy-paste from startMultiScroll)
  logger.debug(
    s"StartSuperMultiScroll request: $pathFilter, $fieldsFilter, $datesFilter, $paginationParams, $withHistory"
  )
  if (partition != defaultPartition) {
    logger.warn("old implementation ignores partition parameter")
  }
  // resolves the concrete index names behind the given alias
  def indicesNames(indexName: String): Seq[String] = {
    val currentAliasRes = client.admin.indices().prepareGetAliases(indexName).execute().actionGet()
    currentAliasRes.getAliases.keysIt().asScala.toSeq
  }
  // ids of all data nodes currently in the cluster
  def dataNodeIDs = {
    client
      .admin()
      .cluster()
      .prepareNodesInfo()
      .execute()
      .actionGet()
      .getNodesMap
      .asScala
      .collect {
        case (id, node) if node.getNode.isDataNode => id
      }
      .toSeq
  }
  // fixed: the history alias was "_history", which matches no index; use "cmwell_history"
  // for consistency with startSuperScroll / startScroll
  val indices = indicesNames("cmwell_current") ++ (if (withHistory) indicesNames("cmwell_history") else Nil)
  indices.flatMap { indexName =>
    dataNodeIDs.map { nodeId =>
      startScroll(pathFilter,
                  fieldsFilter,
                  datesFilter,
                  paginationParams,
                  scrollTTL,
                  withHistory,
                  withDeleted,
                  Seq(indexName),
                  Some(nodeId))
    }
  }
}
/**
  * Starts one scroll per concrete index behind the cmwell_current (and optionally
  * cmwell_history) aliases, allowing a multi-scroll to consume indices in parallel.
  *
  * @param withHistory when true, the history indices are scrolled as well
  * @param partition   ignored by this old implementation (a warning is logged)
  * @return one scroll-start Future per index
  */
def startMultiScroll(
  pathFilter: Option[PathFilter] = None,
  fieldsFilter: Option[FieldFilter] = None,
  datesFilter: Option[DatesFilter] = None,
  paginationParams: PaginationParams = DefaultPaginationParams,
  scrollTTL: Long = scrollTTL,
  withHistory: Boolean = false,
  withDeleted: Boolean,
  partition: String
)(implicit executionContext: ExecutionContext, logger: Logger = loger): Seq[Future[FTSStartScrollResponse]] = {
  logger.debug(s"StartMultiScroll request: $pathFilter, $fieldsFilter, $datesFilter, $paginationParams, $withHistory")
  if (partition != defaultPartition) {
    logger.warn("old implementation ignores partition parameter")
  }
  // resolves the concrete index names behind the given alias
  def indicesNames(indexName: String): Seq[String] = {
    val currentAliasRes = client.admin.indices().prepareGetAliases(indexName).execute().actionGet()
    currentAliasRes.getAliases.keysIt().asScala.toSeq
  }
  // fixed: the history alias was "_history", which matches no index; use "cmwell_history"
  // for consistency with startSuperScroll / startScroll
  val indices = indicesNames("cmwell_current") ++ (if (withHistory) indicesNames("cmwell_history") else Nil)
  indices.map { indexName =>
    startScroll(pathFilter,
                fieldsFilter,
                datesFilter,
                paginationParams,
                scrollTTL,
                withHistory,
                withDeleted,
                Seq(indexName))
  }
}
/**
  * Fetches the next batch of a previously started scroll, renewing its TTL.
  * Responses with HTTP status >= 400 fail the returned future; a non-200 success status is
  * only logged as a warning.
  *
  * @param scrollId  handle returned by a startScroll variant (or a previous scroll call)
  * @param scrollTTL how long (seconds) ES keeps the scroll context alive after this fetch
  * @param nodeId    when given, the request goes through that node's dedicated client
  */
def scroll(scrollId: String, scrollTTL: Long, nodeId: Option[String])(
  implicit executionContext: ExecutionContext,
  logger: Logger = loger
): Future[FTSScrollResponse] = {
  logger.debug(s"Scroll request: $scrollId, $scrollTTL")
  // use the node-specific client when a node id was supplied, otherwise the default one
  val targetClient = nodeId.fold(client)(clients(_))
  injectFuture[SearchResponse](
    targetClient.prepareSearchScroll(scrollId).setScroll(TimeValue.timeValueSeconds(scrollTTL)).execute(_)
  ).flatMap { scrollResponse =>
    val status = scrollResponse.status().getStatus
    if (status >= 400) Future.failed(new Exception(s"bad scroll response: $scrollResponse"))
    else {
      if (status != 200)
        logger.warn(s"scroll($scrollId, $scrollTTL, $nodeId) resulted with status[$status] != 200: $scrollResponse")
      // materializing the infotons may throw; capture that in the future rather than the caller
      Future.fromTry(Try(esResponseToInfotons(scrollResponse, includeScore = false)).map { infotons =>
        FTSScrollResponse(scrollResponse.getHits.getTotalHits, scrollResponse.getScrollId, infotons)
      })
    }
  }
}
/**
  * Streams (lastModifiedMillis, uuid, indexName) triples for every version of the infoton at
  * the given path, as an Akka Source backed by an ES scan-scroll. The stream completes when a
  * scroll fetch returns no more hits.
  *
  * @param scrollTTL how long (seconds) ES keeps the scroll context alive between fetches
  */
def rInfo(
  path: String,
  scrollTTL: Long = scrollTTL,
  paginationParams: PaginationParams = DefaultPaginationParams,
  withHistory: Boolean = false,
  partition: String = defaultPartition
)(implicit executionContext: ExecutionContext): Future[Source[Vector[(Long, String, String)], NotUsed]] = {
  val indices = (partition + "_current") :: (if (withHistory) partition + "_history" :: Nil else Nil)
  val fields = "system.uuid" :: "system.lastModified" :: Nil // "system.indexTime" :: Nil // TODO: fix should add indexTime, so why not pull it now?
  // since in ES scroll API, size is per shard, we need to convert our paginationParams.length parameter to be per shard
  // We need to find how many shards are relevant for this query. For that we'll issue a fake search request
  val fakeRequest = client.prepareSearch(indices: _*).setTypes("infoclone").addFields(fields: _*)
  fakeRequest.setQuery(QueryBuilders.matchQuery("path", path))
  injectFuture[SearchResponse](al => fakeRequest.execute(al)).flatMap { fakeResponse =>
    val relevantShards = fakeResponse.getSuccessfulShards
    // rounded to lowest multiplacations of shardsperindex or to mimimum of 1
    val infotonsPerShard = (paginationParams.length / relevantShards).max(1)
    val request = client
      .prepareSearch(indices: _*)
      .setTypes("infoclone")
      .addFields(fields: _*)
      .setSearchType(SearchType.SCAN)
      .setScroll(TimeValue.timeValueSeconds(scrollTTL))
      .setSize(infotonsPerShard)
      .setQuery(QueryBuilders.matchQuery("path", path))
    val scrollResponseFuture = injectFuture[SearchResponse](al => request.execute(al))
    scrollResponseFuture.map { scrollResponse =>
      if (scrollResponse.getHits.totalHits == 0) Source.empty[Vector[(Long, String, String)]]
      else
        // unfold: keep fetching scroll pages until one comes back empty
        Source.unfoldAsync(scrollResponse.getScrollId) { scrollID =>
          injectFuture[SearchResponse]({ al =>
            client
              .prepareSearchScroll(scrollID)
              .setScroll(TimeValue.timeValueSeconds(scrollTTL))
              .execute(al)
          }, FiniteDuration(30, SECONDS)).map { scrollResponse =>
            val info = rExtractInfo(scrollResponse)
            if (info.isEmpty) None
            else Some(scrollResponse.getScrollId -> info)
          }
        }
    }
  }
}
// not supported by this (old) FTS implementation - invoking it throws NotImplementedError
override def latestIndexNameAndCount(prefix: String): Option[(String, Long)] = ???
/**
 * Extracts one (lastModified-millis, uuid, index-name) triple per hit of an ES response.
 *
 * Reads the stored fields "system.uuid" and "system.lastModified" from every hit; the
 * callers request exactly those fields on the search, so a missing field here would
 * surface as a NullPointerException.
 *
 * @param esResponse the search/scroll response to read hits from
 * @return a Vector of triples, empty when the response carries no hits
 */
private def rExtractInfo(
  esResponse: org.elasticsearch.action.search.SearchResponse
): Vector[(Long, String, String)] = {
  val sHits = esResponse.getHits.hits()
  if (sHits.isEmpty) Vector.empty
  else {
    // Fix: previously re-fetched esResponse.getHits.hits() a second time; reuse the array we already hold.
    sHits.map { hit =>
      val uuid = hit.field("system.uuid").getValue.asInstanceOf[String]
      val lastModified = new DateTime(hit.field("system.lastModified").getValue.asInstanceOf[String]).getMillis
      val index = hit.getIndex
      (lastModified, uuid, index)
    }(collection.breakOut)
  }
}
/**
 * Returns (uuid, index-name) pairs for infotons whose "path" field matches `path`,
 * paged via `paginationParams` (offset/length).
 * Searches the "<partition>_current" index, plus "<partition>_history" when `withHistory` is set.
 */
def info(
  path: String,
  paginationParams: PaginationParams = DefaultPaginationParams,
  withHistory: Boolean = false,
  partition: String = defaultPartition
)(implicit executionContext: ExecutionContext): Future[Vector[(String, String)]] = {
  val indices = (partition + "_current") :: (if (withHistory) partition + "_history" :: Nil else Nil)
  // val fields = "system.path" :: "system.uuid" :: "system.lastModified" :: Nil
  // Only "system.uuid" is fetched; the index name comes from the hit metadata in extractInfo.
  val request = client
    .prepareSearch(indices: _*)
    .setTypes("infoclone")
    .addFields("system.uuid")
    .setFrom(paginationParams.offset)
    .setSize(paginationParams.length)
  val qb: QueryBuilder = QueryBuilders.matchQuery("path", path)
  request.setQuery(qb)
  val resFuture = injectFuture[SearchResponse](request.execute)
  resFuture.map { response =>
    extractInfo(response)
  }
}
// Pre-computed CanBuildFrom used by uinfo to build a Vector directly while mapping over the hits array.
val bo = collection.breakOut[Array[SearchHit], (String, Long, String), Vector[(String, Long, String)]]
/**
 * Looks up an infoton by uuid in both the current and history indices of `partition`
 * and returns one (index-name, version, source-string) triple per matching hit.
 * Fetches the full _source and the document version (setFetchSource/setVersion).
 */
def uinfo(uuid: String,
          partition: String)(implicit executionContext: ExecutionContext): Future[Vector[(String, Long, String)]] = {
  val indices = (partition + "_current") :: (partition + "_history") :: Nil
  val request = client.prepareSearch(indices: _*).setTypes("infoclone").setFetchSource(true).setVersion(true)
  val qb: QueryBuilder = QueryBuilders.matchQuery("uuid", uuid)
  request.setQuery(qb)
  injectFuture[SearchResponse](request.execute).map { response =>
    val hits = response.getHits.hits()
    hits.map { hit =>
      (hit.getIndex, hit.getVersion, hit.getSourceAsString)
    }(bo)
  }
}
/**
 * Maps every hit of an ES response to a (uuid, index-name) pair.
 * Assumes the stored field "system.uuid" was requested on the search (see info()).
 */
private def extractInfo(esResponse: org.elasticsearch.action.search.SearchResponse): Vector[(String, String)] = {
  val hits = esResponse.getHits.hits()
  if (hits.isEmpty) Vector.empty
  else
    hits.iterator
      .map(hit => (hit.field("system.uuid").getValue.asInstanceOf[String], hit.getIndex))
      .toVector
}
/**
 * Bridges an ElasticSearch ActionListener callback into a Scala Future.
 * A watchdog timer is scheduled that tryFailure-s the promise after `timeout`;
 * whichever of response / failure / timeout happens first wins — the try* promise
 * methods make that race safe. The timer task is cancelled on any listener callback.
 */
private def injectFuture[A](f: ActionListener[A] => Unit, timeout: Duration = FiniteDuration(10, SECONDS))(
  implicit executionContext: ExecutionContext
) = {
  val p = Promise[A]()
  val timeoutTask = TimeoutScheduler.tryScheduleTimeout(p, timeout)
  f(new ActionListener[A] {
    def onFailure(t: Throwable): Unit = {
      timeoutTask.cancel()
      loger.error(
        "Exception from ElasticSearch. %s\\n%s".format(t.getLocalizedMessage, t.getStackTrace().mkString("", EOL, EOL))
      )
      p.tryFailure(t)
    }
    def onResponse(res: A): Unit = {
      timeoutTask.cancel()
      loger.debug("Response from ElasticSearch:\\n%s".format(res.toString))
      p.trySuccess(res)
    }
  })
  p.future
}
/**
 * Collects, per cluster node, the number of currently open search contexts,
 * sorted by hostname. NOTE: blocks the calling thread (`.execute().get()`).
 */
def countSearchOpenContexts(): Array[(String, Long)] = {
  val response = client.admin().cluster().prepareNodesStats().setIndices(true).execute().get()
  response.getNodes
    .map { nodeStats =>
      nodeStats.getHostname -> nodeStats.getIndices.getSearch.getOpenContexts
    }
    .sortBy(_._1)
}
}
object TimeoutScheduler {
  // Single shared wheel timer (10 ms tick resolution) used for all scheduled timeouts.
  val timer = new HashedWheelTimer(10, TimeUnit.MILLISECONDS)

  /**
   * Fails `promise` with a TimeoutException after `after`.
   * NOTE(review): uses Promise.failure, which throws IllegalStateException if the
   * promise is already completed when the timer fires; prefer tryScheduleTimeout
   * unless the promise is guaranteed to still be pending.
   * @return the Timeout handle, which callers can cancel.
   */
  def scheduleTimeout(promise: Promise[_], after: Duration) = {
    timer.newTimeout(
      new TimerTask {
        override def run(timeout: Timeout) = {
          promise.failure(new TimeoutException("Operation timed out after " + after.toMillis + " millis"))
        }
      },
      after.toNanos,
      TimeUnit.NANOSECONDS
    )
  }

  /** Like scheduleTimeout, but uses tryFailure so an already-completed promise is left untouched. */
  def tryScheduleTimeout[T](promise: Promise[T], after: Duration) = {
    timer.newTimeout(
      new TimerTask {
        override def run(timeout: Timeout) = {
          promise.tryFailure(new TimeoutException("Operation timed out after " + after.toMillis + " millis"))
        }
      },
      after.toNanos,
      TimeUnit.NANOSECONDS
    )
  }
}
object TimeoutFuture {
  /**
   * Wraps `fut` so the returned future completes with `fut`'s result, or fails
   * with a TimeoutException once `after` has elapsed — whichever comes first.
   */
  def withTimeout[T](fut: Future[T], after: Duration)(implicit executionContext: ExecutionContext) = {
    val timeoutPromise = Promise[T]()
    val timerHandle = TimeoutScheduler.scheduleTimeout(timeoutPromise, after)
    // Stop the timer as soon as the real future finishes, success or failure.
    fut.onComplete(_ => timerHandle.cancel())
    Future.firstCompletedOf(Seq(fut, timeoutPromise.future))
  }
}
// Marker hierarchy tagging how an infoton should be indexed (current / previous / deleted).
sealed abstract class InfotonToIndex(val infoton: Infoton)
case class CurrentInfotonToIndex(override val infoton: Infoton) extends InfotonToIndex(infoton)
case class PreviousInfotonToIndex(override val infoton: Infoton) extends InfotonToIndex(infoton)
case class DeletedInfotonToIndex(override val infoton: Infoton) extends InfotonToIndex(infoton)
// Sort direction for field sorting (see FieldSortParams).
sealed abstract class FieldSortOrder
case object Desc extends FieldSortOrder
case object Asc extends FieldSortOrder
// Sorting specification for searches: either an ordered list of (field, order) pairs or no sorting.
sealed abstract class SortParam
object SortParam {
  type FieldSortParam = (String, FieldSortOrder)
  /** No sort fields at all. */
  val empty = FieldSortParams(Nil)
  def apply(sortParam: (String, FieldSortOrder)*) = new FieldSortParams(sortParam.toList)
  val indexTimeAscending = new FieldSortParams(List("system.indexTime" -> Asc))
  // BUG FIX: was built with Asc, making "descending" behave identically to ascending.
  val indexTimeDescending = new FieldSortParams(List("system.indexTime" -> Desc))
}
// Concrete sort spec: ordered list of (field name, direction) pairs.
case class FieldSortParams(fieldSortParams: List[SortParam.FieldSortParam]) extends SortParam
// Explicit "no sorting" marker.
case object NullSortParam extends SortParam
// Boolean combinator applied to a filter's (soft) result; see Must / Should / MustNot.
sealed abstract class FieldOperator {
  def applyTo(softBoolean: SoftBoolean): SoftBoolean
}
case object Must extends FieldOperator {
  /** Hardens a soft false: under Must, SoftFalse becomes a definite False; True and False pass through unchanged. */
  override def applyTo(softBoolean: SoftBoolean): SoftBoolean =
    if (softBoolean == SoftFalse) False else softBoolean
}
case object Should extends FieldOperator {
  /** Softens a hard false: under Should, False becomes SoftFalse; SoftFalse and True pass through unchanged. */
  override def applyTo(softBoolean: SoftBoolean): SoftBoolean =
    if (softBoolean == False) SoftFalse else softBoolean
}
case object MustNot extends FieldOperator {
  // Negation: True flips to False, everything else to True.
  // NOTE(review): SoftFalse also maps to True here — i.e. an uncommitted false is
  // treated as a pass under negation; confirm that this is the intended semantics.
  override def applyTo(softBoolean: SoftBoolean): SoftBoolean = softBoolean match {
    case True => False
    case _ => True
  }
}
// Comparison operator applied between a stored field value and a query value
// (see SingleFieldFilter.filter for the concrete semantics of each case).
sealed abstract class ValueOperator
case object Contains extends ValueOperator
case object Equals extends ValueOperator
case object GreaterThan extends ValueOperator
case object GreaterThanOrEquals extends ValueOperator
case object LessThan extends ValueOperator
case object LessThanOrEquals extends ValueOperator
case object Like extends ValueOperator
// Path restriction: `descendants` controls whether sub-paths are included.
case class PathFilter(path: String, descendants: Boolean)
// A predicate over infotons producing a three-valued (soft) boolean result.
sealed trait FieldFilter {
  def fieldOperator: FieldOperator
  def filter(i: Infoton): SoftBoolean
}
/**
 * Filters an infoton on a single field `name` against an optional `value`,
 * using `valueOperator` for the comparison and `fieldOperator` to decide the
 * hardness of the resulting SoftBoolean. `Like` is explicitly unsupported.
 */
case class SingleFieldFilter(override val fieldOperator: FieldOperator = Must,
                             valueOperator: ValueOperator,
                             name: String,
                             value: Option[String])
    extends FieldFilter {
  def filter(i: Infoton): SoftBoolean = {
    require(valueOperator != Like, s"unsupported ValueOperator: $valueOperator")
    // Comparison function between a stored field value and the query string.
    // compareToString returns an Option[Int]; a failed comparison counts as false.
    val valOp: (FieldValue, String) => Boolean = valueOperator match {
      case Contains =>
        (infotonValue, inputValue) =>
          infotonValue.value.toString.contains(inputValue)
      case Equals =>
        (infotonValue, inputValue) =>
          infotonValue.compareToString(inputValue).map(0.==).getOrElse(false)
      case GreaterThan =>
        (infotonValue, inputValue) =>
          infotonValue.compareToString(inputValue).map(0.<).getOrElse(false)
      case GreaterThanOrEquals =>
        (infotonValue, inputValue) =>
          infotonValue.compareToString(inputValue).map(0.<=).getOrElse(false)
      case LessThan =>
        (infotonValue, inputValue) =>
          infotonValue.compareToString(inputValue).map(0.>).getOrElse(false)
      case LessThanOrEquals =>
        (infotonValue, inputValue) =>
          infotonValue.compareToString(inputValue).map(0.>=).getOrElse(false)
      case Like => ??? // unreachable: excluded by the require above
    }
    // Strip a 2-char type-mangling prefix of the form "x$" from the field name, if present.
    val unmangled = if (name.length > 2 && name.charAt(1) == '$') name.drop(2) else name
    fieldOperator match {
      // Must: any matching value => hard True, else hard False; missing field => False.
      case Must =>
        i.fields
          .flatMap(_.get(unmangled).map(_.exists(fv => value.forall(v => valOp(fv, v)))))
          .fold[SoftBoolean](False)(SoftBoolean.hard)
      // Should: like Must, but false results stay soft (non-failing).
      case Should =>
        i.fields
          .flatMap(_.get(unmangled).map(_.exists(fv => value.forall(v => valOp(fv, v)))))
          .fold[SoftBoolean](SoftFalse)(SoftBoolean.soft)
      // MustNot: all values must fail the comparison; missing field => True.
      case MustNot =>
        i.fields
          .flatMap(_.get(unmangled).map(_.forall(fv => !value.exists(v => valOp(fv, v)))))
          .fold[SoftBoolean](True)(SoftBoolean.hard)
    }
  }
}
/**
* SoftBoolean is a 3-state "boolean" where we need a 2-way mapping
* between regular booleans and this 3-state booleans.
*
* `true` is mapped to `True`
* `false` is mapped to either `False` or `SoftFalse`, depending on business logic.
*
* `True` is mapped to `true`
* `False` & `SoftFalse` are both mapped to `false`.
*
 * You may think of `SoftFalse` as an uncommitted false,
* where we don't "fail fast" an expression upon `SoftFalse`,
* and may still succeed with `True` up ahead.
*/
object SoftBoolean {
  /** Strict mapping: a plain false becomes the hard, fail-fast False. */
  def hard(b: Boolean): SoftBoolean = b match {
    case true  => True
    case false => False
  }
  /** Lenient mapping: a plain false becomes the soft, non-failing SoftFalse. */
  def soft(b: Boolean): SoftBoolean = b match {
    case true  => True
    case false => SoftFalse
  }
  /** Neutral element for SoftBoolean.combine. */
  def zero: SoftBoolean = SoftFalse
}
sealed trait SoftBoolean {
  def value: Boolean
  /**
   * Merges two filter results: a hard False absorbs everything,
   * SoftFalse is the neutral element, and True wins over SoftFalse.
   */
  def combine(that: SoftBoolean): SoftBoolean = (this, that) match {
    case (False, _)     => False
    case (_, False)     => False
    case (SoftFalse, x) => x
    case (True, _)      => True
  }
}
// The three SoftBoolean states; SoftFalse and False both map back to a plain false.
case object True extends SoftBoolean { override val value = true }
case object False extends SoftBoolean { override val value = false }
case object SoftFalse extends SoftBoolean { override val value = false }
/** Conjunction of sub-filters: combines each sub-result, then applies this filter's operator. */
case class MultiFieldFilter(override val fieldOperator: FieldOperator = Must, filters: Seq[FieldFilter])
    extends FieldFilter {
  def filter(i: Infoton): SoftBoolean = {
    val merged = filters.map(_.filter(i)).foldLeft(SoftBoolean.zero)(_ combine _)
    fieldOperator.applyTo(merged)
  }
}
object FieldFilter {
  /** Convenience constructor for a single-field filter with a mandatory value. */
  def apply(fieldOperator: FieldOperator, valueOperator: ValueOperator, name: String, value: String) =
    new SingleFieldFilter(fieldOperator, valueOperator, name, Some(value))
}
// Optional time-range restriction on results.
case class DatesFilter(from: Option[DateTime], to: Option[DateTime])
// offset = first result index, length = page size.
case class PaginationParams(offset: Int, length: Int)
// NOTE(review): extending a case class from an object is discouraged in Scala;
// a `val DefaultPaginationParams = PaginationParams(0, 100)` would be safer.
object DefaultPaginationParams extends PaginationParams(0, 100)
// Result envelope for a full search (materialized infotons).
case class FTSSearchResponse(total: Long,
                             offset: Long,
                             length: Long,
                             infotons: Seq[Infoton],
                             searchQueryStr: Option[String] = None)
// First response of a scroll session: carries the scroll id to continue with.
case class FTSStartScrollResponse(total: Long,
                                  scrollId: String,
                                  nodeId: Option[String] = None,
                                  searchQueryStr: Option[String] = None)
// Subsequent scroll page with materialized infotons.
case class FTSScrollResponse(total: Long, scrollId: String, infotons: Seq[Infoton], nodeId: Option[String] = None)
// Scroll page carrying only thin (metadata-only) infotons.
case class FTSScrollThinResponse(total: Long,
                                 scrollId: String,
                                 thinInfotons: Seq[FTSThinInfoton],
                                 nodeId: Option[String] = None)
// Message used to signal a search timeout.
case object FTSTimeout
// Lightweight infoton projection: metadata only, no field data.
case class FTSThinInfoton(path: String, uuid: String, lastModified: String, indexTime: Long, score: Option[Float])
// Thin-search result envelope.
case class FTSThinSearchResponse(total: Long,
                                 offset: Long,
                                 length: Long,
                                 thinInfotons: Seq[FTSThinInfoton],
                                 searchQueryStr: Option[String] = None)
| hochgi/CM-Well | server/cmwell-fts/src/main/scala/cmwell/fts/FTSServiceES.scala | Scala | apache-2.0 | 75,466 |
// /////////////////////////////////////////// //
// Fureteur - https://github.com/gip/fureteur //
// /////////////////////////////////////////// //
package fureteur.collection
import scala.collection.mutable.Queue
// A mutable FIFO with optional alert callback
//
/**
 * A mutable FIFO queue with an optional low-water alert callback.
 *
 * @param x optional (threshold, callback) pair: whenever the queue size drops to
 *          or below the threshold, the callback is invoked once (with the current
 *          size) until new elements re-arm it. With None, no callback ever fires.
 */
class FIFO[T](x: Option[(Int, Int => Unit)]) {
  val queue = new Queue[T]
  // true once the callback has fired for the current low-water episode;
  // reset whenever new elements arrive (push / pushn).
  var requested = false
  // when false, check() never fires the callback
  var enabled = true
  // th: low-water threshold; f: alert callback.
  // With None, the threshold is -1 (never reached, since size >= 0) and f is a no-op.
  val (th, f) = x match {
    case Some((i, g)) => (i, g)
    case None         => (-1, (_: Int) => ()) // FIX: was `(x:Int) => Unit`, which returned the Unit companion object
  }
  /** Current number of queued elements. */
  def length() = { queue.size }
  /** Appends one element; re-arms the alert and re-checks the threshold. */
  def push(e: T) = { queue += e; requested = false; check() }
  /** Appends several elements; re-arms the alert and re-checks the threshold. */
  def pushn(l: List[T]) = { queue ++= l; requested = false; check() }
  /** Removes and returns the head element (throws NoSuchElementException when empty). */
  def pop(): T = { check(); val v = queue.dequeue; check(); v }
  /** Fires the alert callback at most once per low-water episode when size <= threshold. */
  def check(): Unit = {
    val l = queue.size
    if (enabled && !requested && l <= th) { f(l); requested = true }
  }
  /** Enables/disables the alert callback. */
  def setEnabled(e: Boolean): Unit = {
    enabled = e
  }
  def isEmpty(): Boolean = {
    queue.isEmpty
  }
  /** Performs an initial threshold check (e.g. to request the first fill). */
  def init() = { check() }
}
} | gip/fureteur | src/main/scala/collection.scala | Scala | mit | 998 |
/**
* @(#) AutoBrand.scala 2015年3月4日
* TURBO CRAWLER高性能网络爬虫
*/
package turbo.crawler.sample
import java.util.Date
import javax.persistence.Column
import javax.persistence.Entity
import javax.persistence.GeneratedValue
import javax.persistence.Id
import javax.persistence.Table
import javax.persistence.GenerationType
import javax.persistence.OneToMany
import turbo.crawler.Fetchable
import javax.persistence.CascadeType
/**
* @author Administrator
*
*/
@Entity
@Table(name = "AUTO_BRAND")
// JPA entity for a car brand scraped by the crawler; Java-bean style accessors
// are provided for the persistence layer.
class AutoBrand extends Fetchable {
  // Auto-generated primary key.
  @Id
  @GeneratedValue(strategy = GenerationType.IDENTITY)
  private var id = 0
  def setId(id: Int) = this.id = id
  def getId = id
  // Brand display name.
  @Column(name = "NAME", nullable = false)
  private var name = ""
  def setName(name: String) = this.name = name
  def getName = name
  // Source URL the brand was fetched from.
  @Column(name = "URL", nullable = false)
  private var url = ""
  def setUrl(url: String) = this.url = url
  def getUrl = this.url
  // Timestamp of the crawl that produced this record.
  @Column(name = "FETCHED_AT", nullable = false)
  private var fetchedAt: Date = null
  def setFetchedAt(date: Date) = this.fetchedAt = date
  def getFetchedAt = fetchedAt
  // Cars of this brand; all operations cascade.
  // NOTE(review): `Array[CascadeType] { CascadeType.ALL }` is block-argument syntax
  // for Array(CascadeType.ALL) — unusual but equivalent.
  @OneToMany(targetEntity = classOf[AutoCar], cascade = Array[CascadeType] { CascadeType.ALL })
  private var autos: java.util.List[AutoCar] = new java.util.ArrayList[AutoCar]()
  def setAutos(autos: java.util.List[AutoCar]) = this.autos = autos
  def getAutos = this.autos
  // No direct (detail) URL for a brand entity.
  override def getDirectUrl = ""
}
package com.stulsoft.ysps.ptraits
/**
* @author Yuriy Stul
* @since 4/1/2018
*/
/** Minimal concrete implementation of TraitWithLog; adds no members of its own. */
class TraitWithLogImpl1 extends TraitWithLog {
}
| ysden123/ysps | src/main/scala/com/stulsoft/ysps/ptraits/TraitWithLogImpl1.scala | Scala | mit | 137 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.dstream
import java.io._
import java.net.{ConnectException, Socket}
import java.nio.charset.StandardCharsets
import scala.reflect.ClassTag
import scala.util.control.NonFatal
import org.apache.spark.internal.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.util.NextIterator
/**
 * Input DStream that reads objects from a TCP socket, decoding the raw byte
 * stream into records via `bytesToObjects`. Each batch's receiver is created
 * by getReceiver().
 */
private[streaming]
class SocketInputDStream[T: ClassTag](
    _ssc: StreamingContext,
    host: String,
    port: Int,
    bytesToObjects: InputStream => Iterator[T],
    storageLevel: StorageLevel
  ) extends ReceiverInputDStream[T](_ssc) {

  def getReceiver(): Receiver[T] = {
    new SocketReceiver(host, port, bytesToObjects, storageLevel)
  }
}
/**
 * Receiver that connects to host:port, decodes the socket's input stream with
 * `bytesToObjects`, and stores each decoded record. Connection failures and a
 * prematurely-closed stream trigger restart() so the framework reconnects.
 */
private[streaming]
class SocketReceiver[T: ClassTag](
    host: String,
    port: Int,
    bytesToObjects: InputStream => Iterator[T],
    storageLevel: StorageLevel
  ) extends Receiver[T](storageLevel) with Logging {

  // The live connection; guarded by `this` in onStop so it is closed exactly once.
  private var socket: Socket = _

  def onStart() {
    logInfo(s"Connecting to $host:$port")
    try {
      socket = new Socket(host, port)
    } catch {
      case e: ConnectException =>
        // Could not connect: ask the framework to retry later.
        restart(s"Error connecting to $host:$port", e)
        return
    }
    logInfo(s"Connected to $host:$port")
    // Start the thread that receives data over a connection
    new Thread("Socket Receiver") {
      setDaemon(true)
      override def run() { receive() }
    }.start()
  }

  def onStop() {
    // in case restart thread close it twice
    synchronized {
      if (socket != null) {
        socket.close()
        socket = null
        logInfo(s"Closed socket to $host:$port")
      }
    }
  }

  /** Create a socket connection and receive data until receiver is stopped */
  def receive() {
    try {
      val iterator = bytesToObjects(socket.getInputStream())
      while(!isStopped && iterator.hasNext) {
        store(iterator.next())
      }
      // Stream ended on its own: restart unless we were asked to stop.
      if (!isStopped()) {
        restart("Socket data stream had no more data")
      } else {
        logInfo("Stopped receiving")
      }
    } catch {
      case NonFatal(e) =>
        logWarning("Error receiving data", e)
        restart("Error receiving data", e)
    } finally {
      onStop()
    }
  }
}
private[streaming]
object SocketReceiver  {

  /**
   * This methods translates the data from an inputstream (say, from a socket)
   * to '\\n' delimited strings and returns an iterator to access the strings.
   * End of stream (readLine returning null) marks the iterator finished.
   */
  def bytesToLines(inputStream: InputStream): Iterator[String] = {
    val dataInputStream = new BufferedReader(
      new InputStreamReader(inputStream, StandardCharsets.UTF_8))
    new NextIterator[String] {
      protected override def getNext() = {
        val nextValue = dataInputStream.readLine()
        if (nextValue == null) {
          finished = true
        }
        nextValue
      }

      protected override def close() {
        dataInputStream.close()
      }
    }
  }
}
| aokolnychyi/spark | streaming/src/main/scala/org/apache/spark/streaming/dstream/SocketInputDStream.scala | Scala | apache-2.0 | 3,798 |
package aima.core.agent
/**
* @author Shawn Garner
*/
/**
 * Agent program that maintains an internal state, formulates a goal + problem
 * when it has no pending actions, searches for a plan, and then executes the
 * planned actions one per percept (AIMA "simple problem-solving agent").
 */
trait SimpleProblemSolvingAgentProgram[PERCEPT, ACTION, STATE, GOAL, PROBLEM] extends AgentProgram[PERCEPT, ACTION] {
  def initialState: STATE
  def noAction: ACTION

  // Remaining planned actions, consumed one per call to agentFunction.
  var actions = List.empty[ACTION]
  // NOTE(review): `state` is initialized from the abstract `initialState` at trait
  // initialization time — implementors must define initialState so it is available
  // then (e.g. as a def or early-initialized val), or state may start as null.
  var state = initialState

  def agentFunction: AgentFunction = { percept =>
    state = updateState(state, percept)
    // Plan only when the previous plan is exhausted.
    if (actions.isEmpty) {
      val goal    = formulateGoal(state)
      val problem = formulateProblem(state, goal)
      actions = search(problem)
    }
    // Pop the next action; fall back to noAction when the plan is empty.
    val (firstAction, restOfActions) = actions match {
      case Nil           => (noAction, Nil)
      case first :: rest => (first, rest)
    }
    actions = restOfActions
    firstAction
  }

  def updateState(state: STATE, percept: PERCEPT): STATE
  def formulateGoal(state: STATE): GOAL
  def formulateProblem(state: STATE, goal: GOAL): PROBLEM
  def search(problem: PROBLEM): List[ACTION]
}
| aimacode/aima-scala | core/src/main/scala/aima/core/agent/SimpleProblemSolvingAgentProgram.scala | Scala | mit | 939 |
package net.benchmark.akka.http.world
import slick.jdbc.PostgresProfile.api._
object WorldTable {
  // Shared Slick query entry point for the World table.
  val worldTableQuery: TableQuery[WorldTable] = TableQuery[WorldTable]
}

/** Slick mapping for the benchmark "World" table: (id PK, randomnumber). */
class WorldTable(tag: Tag) extends Table[World](tag, "World") {
  def id = column[Int]("id", O.PrimaryKey)
  def randomNumber = column[Int]("randomnumber")
  // Projection to/from the World case class.
  def * = (id, randomNumber).<>(World.tupled, World.unapply)
}
| treefrogframework/FrameworkBenchmarks | frameworks/Scala/akka-http/akka-http-slick-postgres/src/main/scala/net/benchmark/akka/http/world/WorldTable.scala | Scala | bsd-3-clause | 398 |
package colossus.metrics.senders
import akka.actor._
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import java.net._
import colossus.metrics.logging.ColossusLogging
import colossus.metrics.{MetricFragment, MetricSender, OpenTsdbFormatter}
/**
 * Watchdog actor guarding a single send over the OpenTSDB socket: when a send
 * (StartSend..EndSend window) exceeds `timeout`, it force-closes the socket and
 * kills itself. A stale CheckTimeout (whose start time no longer matches) is ignored.
 */
class OpenTsdbWatchdog(socket: Socket, timeout: FiniteDuration) extends Actor with ColossusLogging {
  import OpenTsdbWatchdog._
  // NOTE(review): lastPing is never read or written in this class — dead field?
  private var lastPing = 0

  def receive = idle

  def idle: Receive = {
    case StartSend => {
      val now = System.currentTimeMillis
      context.system.scheduler.scheduleOnce(timeout, self, CheckTimeout(now))
      context.become(timing(now))
    }
  }

  def timing(start: Long): Receive = {
    case EndSend => context.become(idle)
    // Only fire for the send that scheduled this check (time == start).
    case CheckTimeout(time) if (time == start) => {
      warn("TSDB sender has timed out, force closing socket")
      socket.close()
      self ! PoisonPill
    }
  }
}

object OpenTsdbWatchdog {
  // Protocol messages for the watchdog actor.
  case object StartSend
  case object EndSend
  case class CheckTimeout(time: Long)
}
//TODO : OH jeez don't use raw socket
class OpenTsdbSenderActor(val host: String, val port: Int, timeout: FiniteDuration)
extends Actor
with ColossusLogging
with MetricsLogger {
import OpenTsdbWatchdog._
val address = new InetSocketAddress(host, port)
case object Initialize
val socket = new Socket
val watchdog = context.actorOf(Props(classOf[OpenTsdbWatchdog], socket, timeout))
override def postStop(): Unit = {
socket.close()
watchdog ! PoisonPill
}
def put(stats: Seq[MetricFragment], ts: Long) {
watchdog ! StartSend
val os = socket.getOutputStream
val now = ts / 1000
stats.foreach { stat =>
os.write(OpenTsdbFormatter.format(stat, now).toCharArray.map { _.toByte })
}
os.flush()
info(s"Sent ${stats.size} stats to OpenTSDB")
watchdog ! EndSend
}
def receive = {
case Initialize => {
info("Initializing new stats sender")
socket.connect(address, 500)
context.become(accepting)
}
}
def accepting: Receive = {
case s: MetricSender.Send => {
logMetrics(s)
put(s.fragments, s.timestamp)
}
}
override def postRestart(reason: Throwable) {
context.system.scheduler.scheduleOnce(5 seconds, self, Initialize)
}
override def preStart() {
self ! Initialize
}
}
/** MetricSender factory for OpenTSDB; the actor runs on the dedicated "opentsdb-dispatcher". */
case class OpenTsdbSender(host: String, port: Int) extends MetricSender {
  // Per-send watchdog timeout passed to the actor.
  val defaultTimeout: FiniteDuration = 1.minute
  val name = "tsdb"

  def props = Props(classOf[OpenTsdbSenderActor], host, port, defaultTimeout).withDispatcher("opentsdb-dispatcher")
}
| tumblr/colossus | colossus-metrics/src/main/scala/colossus/metrics/senders/OpenTsdbSender.scala | Scala | apache-2.0 | 2,675 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cs.ucla.edu.bwaspark.datatype
/**
 * Mutable holder for the pre-computed inputs of an SW alignment step
 * (presumably Smith-Waterman — confirm with callers). Every field simply
 * mirrors the constructor argument of the same base name and stays mutable.
 */
class SWPreResultType(rmax_c: Array[Long],
                      srt_c: Array[SRTType],
                      rseq_c: Array[Byte],
                      rlen_l: Long) {
  var rmax: Array[Long] = rmax_c
  var srt: Array[SRTType] = srt_c
  var rseq: Array[Byte] = rseq_c
  var rlen: Long = rlen_l
}
| ytchen0323/cloud-scale-bwamem | src/main/scala/cs/ucla/edu/bwaspark/datatype/SWPreResultType.scala | Scala | apache-2.0 | 1,067 |
package com.ytsebro.nlp
import java.io.InputStream
import org.apache.commons.io.IOUtils
import org.jsoup.Jsoup
import org.junit.{Test}
import org.junit.Assert._
/**
* Created by yegor on 8/9/16.
*/
/** JUnit tests for ParagraphService paragraph extraction. */
class ParagraphServiceTest {
  val service: ParagraphService = new ParagraphService()

  // Reads a classpath test resource fully into a String.
  // NOTE(review): the InputStream is never closed (IOUtils.toString does not close it).
  def readTestFile(filename: String): String ={
    val is = getTestStream(filename)
    IOUtils.toString(is)
  }

  // Opens a classpath resource relative to this test class.
  def getTestStream(filename: String): InputStream ={
    this.getClass.getResourceAsStream(filename)
  }

  // Numeric headings ("1.", "2.2", ...) should form a two-root tree with nested children.
  @Test
  def testExtract1(): Unit ={
    val p1 = service.extract(readTestFile("/paragraph/text1.txt"), "(?m)^(\\\\d+[.]?)+", false)
    assertEquals(p1.children.length, 2)
    assertEquals(p1.children(0).name, "2")
    assertEquals(p1.children(0).children(1).name, "2.2")
    println(p1)
  }

  @Test
  def testExtract2(): Unit ={
    val p1 = service.extract(readTestFile("/paragraph/text2.txt"), "(?m)^(\\\\d+[.]?)+", false)
    assertEquals(p1.children.length, 2)
    assertEquals(p1.children(1).name, "1.")
    assertEquals(p1.children(1).children(1).name, "2.2")
    println(p1)
  }

  // Wiki-style "[edit]" section markers; result only printed, not asserted.
  @Test
  def testExtract3(): Unit ={
    val p1 = service.extract(readTestFile("/paragraph/text3.txt"), "(?m)^.*?\\\\[edit\\\\]", true)
    println(p1)
  }

  @Test
  def testExtract4(): Unit ={
    val p1 = service.extract(readTestFile("/paragraph/text4.txt"), "(?m)^Scala$", true)
    println(p1)
  }

  // NOTE(review): performs a live HTTP request — this test depends on network
  // availability and the remote site's content; consider using a local fixture.
  @Test
  def testJsoup(): Unit ={
    val doc = Jsoup.connect("https://apacheignite.readme.io/docs/getting-started").get();
    val text: String = doc.body.text
    println(text)
  }
}
| egorsz/textview | src/test/scala/com/ytsebro/nlp/ParagraphServiceTest.scala | Scala | gpl-3.0 | 1,588 |
package epic.parser
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import epic.constraints.ChartConstraints
import epic.lexicon.Lexicon
import epic.trees._
/**
* A Parser produces a syntactic representation of a sentence, called a [[epic.trees.Tree]], which
* has internal nodes that demarcate syntactic functions
*
* @author dlwh
*/
@SerialVersionUID(2L)
final case class Parser[L,W](topology: RuleTopology[L],
                             lexicon: Lexicon[L, W],
                             constraintsFactory: ChartConstraints.Factory[L, W],
                             marginalFactory: ParseMarginal.Factory[L, W],
                             decoder: ChartDecoder[L, W] = ChartDecoder[L, W]())
                            (implicit val debinarizer: Debinarizer[L]) extends (IndexedSeq[W]=>Tree[L]) {

  /**
   * Returns the best parse (calls bestParse) for the sentence.
   *
   * @param s the sentence
   */
  def apply(s: IndexedSeq[W]): Tree[L] = debinarizer(bestBinarizedTree(s))

  /**
   * Returns the best parse for the sentence without debinarizing
   * @param s sentence
   */
  def bestBinarizedTree(s: IndexedSeq[W]):BinarizedTree[L] = {
    decoder.extractBestParse(marginal(s))
  }

  // Computes the parse marginal under the pruning constraints; if that yields no
  // parse, retries once without any sparsity constraints.
  // NOTE(review): a NoParseException on the unconstrained retry hits `???` and
  // throws NotImplementedError; the outer `ex` is also shadowed by the inner one.
  def marginal(w: IndexedSeq[W]): ParseMarginal[L, W] = try {
    marginalFactory.apply(w, constraintsFactory.constraints(w))
  } catch {
    case ex: NoParseException =>
      try {
        marginalFactory.apply(w, ChartConstraints.noSparsity)
      } catch {
        case ex: NoParseException =>
          ???
      }
  }
}
object Parser {

  /** Builds a parser from a grammar with no pruning constraints and the default decoder. */
  def apply[L, W](grammar: Grammar[L, W])(implicit deb: Debinarizer[L]): Parser[L, W]= {
    Parser(grammar.topology, grammar.lexicon, ChartConstraints.Factory.noSparsity, StandardChartFactory(grammar), ChartDecoder())
  }

  /** Builds a parser with a custom decoder and no pruning constraints. */
  def apply[L, W](refined: Grammar[L, W], decoder: ChartDecoder[L, W])(implicit deb: Debinarizer[L]): Parser[L, W] = {
    apply(ChartConstraints.Factory.noSparsity, refined, decoder)
  }

  /** Builds a parser over a SimpleGrammar, using the decoder's max-marginal preference for the chart factory. */
  def apply[L, L2, W](refinedGrammar: SimpleGrammar[L, L2, W], decoder: ChartDecoder[L, W])(implicit deb: Debinarizer[L]): Parser[L, W] = {
    new Parser(refinedGrammar.topology, refinedGrammar.lexicon, ChartConstraints.Factory.noSparsity[L, W], new SimpleChartMarginal.SimpleChartFactory(refinedGrammar, decoder.wantsMaxMarginal), decoder)
  }

  /** Builds a parser with explicit pruning constraints and decoder. */
  def apply[L, W](core: ChartConstraints.Factory[L, W], grammar: Grammar[L, W], decoder: ChartDecoder[L, W])(implicit deb: Debinarizer[L]): Parser[L, W] = {
    Parser(grammar.topology, grammar.lexicon, core, StandardChartFactory(grammar, decoder.wantsMaxMarginal), decoder)
  }

  /** Builds a parser with explicit pruning constraints and the max-constituent decoder. */
  def apply[L, W](core: ChartConstraints.Factory[L, W], refinedGrammar: Grammar[L, W])(implicit deb: Debinarizer[L]): Parser[L, W] = {
    apply(core, refinedGrammar, new MaxConstituentDecoder)
  }
}
| langkilde/epic | src/main/scala/epic/parser/Parser.scala | Scala | apache-2.0 | 3,313 |
package org.sbuild.plugins.http
import java.io.FileInputStream
import org.sbuild.SBuildException
import java.net.URL
import java.text.DecimalFormat
import java.io.FileOutputStream
import java.io.FileNotFoundException
import java.io.BufferedInputStream
import java.io.BufferedOutputStream
import org.sbuild.CmdlineMonitor
import java.io.IOException
import java.io.File
import org.sbuild.NoopCmdlineMonitor
import java.net.Proxy
import java.net.InetSocketAddress
import org.sbuild.Project
import scala.util.Try
import org.sbuild.Logger
object HttpSupport {

  /**
   * Downloads the resource at `url` into the local file `target`.
   *
   * If `target` already exists, nothing is fetched. Otherwise the content is
   * streamed into a temporary file in the target directory and moved into place
   * on success. Interrupted transfers are resumed with HTTP Range requests up
   * to `retryCount` consecutive no-progress retries.
   *
   * @param url        the resource to fetch
   * @param target     destination file path
   * @param monitor    progress/diagnostics sink
   * @param userAgent  optional User-Agent header value
   * @param retryCount max consecutive retries without progress before giving up
   * @return None on success, Some(error) on failure (never throws)
   */
  def download(url: String, target: String, monitor: CmdlineMonitor = NoopCmdlineMonitor, userAgent: Option[String], retryCount: Int = 5): Option[Throwable] = {

    try {

      val targetFile = new File(target)

      targetFile.exists match {
        case true => { // File already exists
          monitor.info(CmdlineMonitor.Verbose, "File '" + target + "' already downloaded")
          true
        }
        case false => { // File needs download

          monitor.info(CmdlineMonitor.Default, s"Downloading ${url}")

          // copy into temp file
          val dir = targetFile.getAbsoluteFile.getParentFile
          dir.mkdirs
          val downloadTargetFile = File.createTempFile(".~" + targetFile.getName, "", dir)
          // Best-effort removal of the temp file on any failure path.
          def cleanup =
            if (downloadTargetFile.exists)
              if (!downloadTargetFile.delete)
                downloadTargetFile.deleteOnExit

          val outStream = new BufferedOutputStream(new FileOutputStream(downloadTargetFile))
          try {

            var lastContentLength: Option[Long] = None
            var len = 0
            var lastRetryLen = 0
            var retry = true
            var retries = 0

            while (retry) {
              retry = false

              val connection = new URL(url).openConnection
              userAgent.map { agent => connection.setRequestProperty("User-Agent", agent) }
              if (len > 0) {
                // Resume from where the previous attempt stopped.
                // TODO: also check http header Accept-Ranges
                connection.setRequestProperty("Range", s"bytes=${len}-")
              }
              val inStream = new BufferedInputStream(connection.getInputStream())
              // TODO: evaluate status code, e.g. 404

              // connection opened
              // Only read content-length once; on resumed requests the header
              // would report the remaining bytes, not the full size.
              val contentLength = lastContentLength.getOrElse {
                val cl = connection.getHeaderField("content-length") match {
                  case null => -1
                  case length => try { length.toLong } catch { case _: Exception => -1 }
                }
                lastContentLength = Some(cl)
                cl
              }

              var last = System.currentTimeMillis
              var break = false
              var alreadyLogged = false
              val forceLogAfter = 5000
              val bufferSize = 1024

              val format = new DecimalFormat("#,##0.#")
              def formatLength(length: Long): String = format.format(length / 1024)

              def logProgress = if (contentLength > 0) {
                monitor.info(CmdlineMonitor.Default, s"Downloaded ${formatLength(len)} of ${formatLength(contentLength)} kb (${format.format((len.toDouble * 1000 / contentLength.toDouble).toLong.toDouble / 10)}%) from ${url}")
              } else {
                monitor.info(CmdlineMonitor.Default, s"Downloaded ${formatLength(len)} kb from ${url}")
              }

              var buffer = new Array[Byte](bufferSize)

              while (!break) {
                val now = System.currentTimeMillis
                // Log progress at most every forceLogAfter millis.
                if (len > 0 && now > last + forceLogAfter) {
                  alreadyLogged = true
                  logProgress
                  last = now;
                }
                inStream.read(buffer, 0, bufferSize) match {
                  case x if x < 0 => break = true
                  case count => {
                    len = len + count
                    outStream.write(buffer, 0, count)
                  }
                }
              }

              if (alreadyLogged && len > 0) {
                logProgress
              }

              // TODO: if no resume is supported, retry n times before giving up
              // TODO: implement a timeout, to avoid ever-blocking downloads

              contentLength match {
                case l if l < 0 => // cannot use contentLength to verify result
                case l if len == l => // download size is equal to expected size => good
                case l if len < l =>
                  // stream closed to early
                  monitor.info(CmdlineMonitor.Default, s"Download stream closed before download was complete from ${url}")
                  if (retries < retryCount) {
                    monitor.info(CmdlineMonitor.Default, s"Resuming download from ${url}")
                    retry = true
                    alreadyLogged = true
                    // Reset the retry counter whenever the attempt made progress.
                    retries = if (len > lastRetryLen) 0 else (retries + 1)
                    lastRetryLen = len
                  } else {
                    outStream.close
                    cleanup
                    // FIX: message previously read "To many failed retries (s${retries})"
                    throw new SBuildException(s"Too many failed retries (${retries}). Cannot download from ${url}");
                  }
                case _ =>
                  outStream.close
                  cleanup
                  throw new SBuildException(s"Size of downloaded file does not match expected size: ${url}");
              }

              inStream.close
            }
          } catch {
            case e: FileNotFoundException =>
              outStream.close
              cleanup
              // FIX: message previously read "does not exists"
              throw new SBuildException("Download resource does not exist: " + url, e);
            case e: IOException =>
              outStream.close
              cleanup
              throw new SBuildException("Error while downloading file: " + url, e);
          } finally {
            outStream.close
          }

          val renameSuccess = downloadTargetFile.renameTo(targetFile)
          if (!renameSuccess) {
            // move temp file to dest file
            val out = new FileOutputStream(targetFile)
            val in = new FileInputStream(downloadTargetFile)
            try {
              out.getChannel.transferFrom(in.getChannel, 0, Long.MaxValue)
            } finally {
              out.close
              in.close
            }
            cleanup
          }

        }
      }

      None
    } catch {
      case x: Throwable => Some(x)
    }
  }

}
package it.nerdammer.spark.hbase.conversion
import org.apache.hadoop.hbase.util.Bytes
// Serializes a value of type T into the HBase byte representation (HBaseData).
trait FieldWriter[T] extends FieldMapper {

  def map(data: T): HBaseData
}
/**
* Utility class used to simplify the creation of custom mappers.
 * FieldWriterProxies can reuse predefined FieldWriters.
*/
// Writer for T that first converts T to an intermediate type P and then
// delegates serialization to the implicit FieldWriter[P].
abstract class FieldWriterProxy[T, P](implicit writer: FieldWriter[P]) extends FieldWriter[T] {

  override def map(data: T): HBaseData = writer.map(convert(data))

  // How to turn the domain value into the delegate's input type.
  def convert(data: T): P
}
// Convenience for writers producing exactly one HBase column value.
trait SingleColumnFieldWriter[T] extends FieldWriter[T] {
  override def map(data: T): HBaseData = Seq(mapColumn(data))

  // None encodes an absent/null column value.
  def mapColumn(data: T): Option[Array[Byte]]
}
/** Implicit `FieldWriter` instances for primitives, `String`, `BigDecimal`,
  * `Option`s and tuples up to arity 15.
  *
  * Primitive encodings delegate to HBase's `Bytes.toBytes`. Tuple writers
  * concatenate the `HBaseData` of each component in order, so a tuple maps to
  * one column per element (or more, if an element itself spans columns).
  * Extends `Serializable` so instances can be captured in Spark closures.
  */
trait FieldWriterConversions extends Serializable {
  // --- Primitive and simple types -------------------------------------------
  implicit def intWriter: FieldWriter[Int] = new SingleColumnFieldWriter[Int] {
    override def mapColumn(data: Int): Option[Array[Byte]] = Some(Bytes.toBytes(data))
  }
  implicit def longWriter: FieldWriter[Long] = new SingleColumnFieldWriter[Long] {
    override def mapColumn(data: Long): Option[Array[Byte]] = Some(Bytes.toBytes(data))
  }
  implicit def shortWriter: FieldWriter[Short] = new SingleColumnFieldWriter[Short] {
    override def mapColumn(data: Short): Option[Array[Byte]] = Some(Bytes.toBytes(data))
  }
  implicit def doubleWriter: FieldWriter[Double] = new SingleColumnFieldWriter[Double] {
    override def mapColumn(data: Double): Option[Array[Byte]] = Some(Bytes.toBytes(data))
  }
  implicit def floatWriter: FieldWriter[Float] = new SingleColumnFieldWriter[Float] {
    override def mapColumn(data: Float): Option[Array[Byte]] = Some(Bytes.toBytes(data))
  }
  implicit def booleanWriter: FieldWriter[Boolean] = new SingleColumnFieldWriter[Boolean] {
    override def mapColumn(data: Boolean): Option[Array[Byte]] = Some(Bytes.toBytes(data))
  }
  implicit def bigDecimalWriter: FieldWriter[BigDecimal] = new SingleColumnFieldWriter[BigDecimal] {
    // Bytes.toBytes expects java.math.BigDecimal, hence the .bigDecimal unwrap.
    override def mapColumn(data: BigDecimal): Option[Array[Byte]] = Some(Bytes.toBytes(data.bigDecimal))
  }
  implicit def stringWriter: FieldWriter[String] = new SingleColumnFieldWriter[String] {
    override def mapColumn(data: String): Option[Array[Byte]] = Some(Bytes.toBytes(data))
  }
  // Options
  // An empty Option still occupies one column slot (Seq(None)) so that tuple
  // layouts stay positionally stable regardless of which fields are present.
  implicit def optionWriter[T](implicit c: FieldWriter[T]): FieldWriter[Option[T]] = new FieldWriter[Option[T]] {
    override def map(data: Option[T]): HBaseData = if(data.nonEmpty) c.map(data.get) else Seq(None)
  }
  // Tuples
  // Each tupleWriterN concatenates the component writers' output in order.
  implicit def tupleWriter2[T1, T2](implicit c1: FieldWriter[T1], c2: FieldWriter[T2]): FieldWriter[(T1, T2)] = new FieldWriter[(T1, T2)] {
    override def map(data: (T1, T2)): HBaseData = c1.map(data._1) ++ c2.map(data._2)
  }
  implicit def tupleWriter3[T1, T2, T3](implicit c1: FieldWriter[T1], c2: FieldWriter[T2], c3: FieldWriter[T3]): FieldWriter[(T1, T2, T3)] = new FieldWriter[(T1, T2, T3)] {
    override def map(data: (T1, T2, T3)): HBaseData = c1.map(data._1) ++ c2.map(data._2) ++ c3.map(data._3)
  }
  implicit def tupleWriter4[T1, T2, T3, T4](implicit c1: FieldWriter[T1], c2: FieldWriter[T2], c3: FieldWriter[T3], c4: FieldWriter[T4]): FieldWriter[(T1, T2, T3, T4)] = new FieldWriter[(T1, T2, T3, T4)] {
    override def map(data: (T1, T2, T3, T4)): HBaseData = c1.map(data._1) ++ c2.map(data._2) ++ c3.map(data._3) ++ c4.map(data._4)
  }
  implicit def tupleWriter5[T1, T2, T3, T4, T5](implicit c1: FieldWriter[T1], c2: FieldWriter[T2], c3: FieldWriter[T3], c4: FieldWriter[T4], c5: FieldWriter[T5]): FieldWriter[(T1, T2, T3, T4, T5)] = new FieldWriter[(T1, T2, T3, T4, T5)] {
    override def map(data: (T1, T2, T3, T4, T5)): HBaseData = c1.map(data._1) ++ c2.map(data._2) ++ c3.map(data._3) ++ c4.map(data._4) ++ c5.map(data._5)
  }
  implicit def tupleWriter6[T1, T2, T3, T4, T5, T6](implicit c1: FieldWriter[T1], c2: FieldWriter[T2], c3: FieldWriter[T3], c4: FieldWriter[T4], c5: FieldWriter[T5], c6: FieldWriter[T6]): FieldWriter[(T1, T2, T3, T4, T5, T6)] = new FieldWriter[(T1, T2, T3, T4, T5, T6)] {
    override def map(data: (T1, T2, T3, T4, T5, T6)): HBaseData = c1.map(data._1) ++ c2.map(data._2) ++ c3.map(data._3) ++ c4.map(data._4) ++ c5.map(data._5) ++ c6.map(data._6)
  }
  implicit def tupleWriter7[T1, T2, T3, T4, T5, T6, T7](implicit c1: FieldWriter[T1], c2: FieldWriter[T2], c3: FieldWriter[T3], c4: FieldWriter[T4], c5: FieldWriter[T5], c6: FieldWriter[T6], c7: FieldWriter[T7]): FieldWriter[(T1, T2, T3, T4, T5, T6, T7)] = new FieldWriter[(T1, T2, T3, T4, T5, T6, T7)] {
    override def map(data: (T1, T2, T3, T4, T5, T6, T7)): HBaseData = c1.map(data._1) ++ c2.map(data._2) ++ c3.map(data._3) ++ c4.map(data._4) ++ c5.map(data._5) ++ c6.map(data._6) ++ c7.map(data._7)
  }
  implicit def tupleWriter8[T1, T2, T3, T4, T5, T6, T7, T8](implicit c1: FieldWriter[T1], c2: FieldWriter[T2], c3: FieldWriter[T3], c4: FieldWriter[T4], c5: FieldWriter[T5], c6: FieldWriter[T6], c7: FieldWriter[T7], c8: FieldWriter[T8]): FieldWriter[(T1, T2, T3, T4, T5, T6, T7, T8)] = new FieldWriter[(T1, T2, T3, T4, T5, T6, T7, T8)] {
    override def map(data: (T1, T2, T3, T4, T5, T6, T7, T8)): HBaseData = c1.map(data._1) ++ c2.map(data._2) ++ c3.map(data._3) ++ c4.map(data._4) ++ c5.map(data._5) ++ c6.map(data._6) ++ c7.map(data._7) ++ c8.map(data._8)
  }
  implicit def tupleWriter9[T1, T2, T3, T4, T5, T6, T7, T8, T9](implicit c1: FieldWriter[T1], c2: FieldWriter[T2], c3: FieldWriter[T3], c4: FieldWriter[T4], c5: FieldWriter[T5], c6: FieldWriter[T6], c7: FieldWriter[T7], c8: FieldWriter[T8], c9: FieldWriter[T9]): FieldWriter[(T1, T2, T3, T4, T5, T6, T7, T8, T9)] = new FieldWriter[(T1, T2, T3, T4, T5, T6, T7, T8, T9)] {
    override def map(data: (T1, T2, T3, T4, T5, T6, T7, T8, T9)): HBaseData = c1.map(data._1) ++ c2.map(data._2) ++ c3.map(data._3) ++ c4.map(data._4) ++ c5.map(data._5) ++ c6.map(data._6) ++ c7.map(data._7) ++ c8.map(data._8) ++ c9.map(data._9)
  }
  implicit def tupleWriter10[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10](implicit c1: FieldWriter[T1], c2: FieldWriter[T2], c3: FieldWriter[T3], c4: FieldWriter[T4], c5: FieldWriter[T5], c6: FieldWriter[T6], c7: FieldWriter[T7], c8: FieldWriter[T8], c9: FieldWriter[T9], c10: FieldWriter[T10]): FieldWriter[(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10)] = new FieldWriter[(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10)] {
    override def map(data: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10)): HBaseData = c1.map(data._1) ++ c2.map(data._2) ++ c3.map(data._3) ++ c4.map(data._4) ++ c5.map(data._5) ++ c6.map(data._6) ++ c7.map(data._7) ++ c8.map(data._8) ++ c9.map(data._9) ++ c10.map(data._10)
  }
  implicit def tupleWriter11[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11](implicit c1: FieldWriter[T1], c2: FieldWriter[T2], c3: FieldWriter[T3], c4: FieldWriter[T4], c5: FieldWriter[T5], c6: FieldWriter[T6], c7: FieldWriter[T7], c8: FieldWriter[T8], c9: FieldWriter[T9], c10: FieldWriter[T10], c11: FieldWriter[T11]): FieldWriter[(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11)] = new FieldWriter[(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11)] {
    override def map(data: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11)): HBaseData = c1.map(data._1) ++ c2.map(data._2) ++ c3.map(data._3) ++ c4.map(data._4) ++ c5.map(data._5) ++ c6.map(data._6) ++ c7.map(data._7) ++ c8.map(data._8) ++ c9.map(data._9) ++ c10.map(data._10) ++ c11.map(data._11)
  }
  implicit def tupleWriter12[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12](implicit c1: FieldWriter[T1], c2: FieldWriter[T2], c3: FieldWriter[T3], c4: FieldWriter[T4], c5: FieldWriter[T5], c6: FieldWriter[T6], c7: FieldWriter[T7], c8: FieldWriter[T8], c9: FieldWriter[T9], c10: FieldWriter[T10], c11: FieldWriter[T11], c12: FieldWriter[T12]): FieldWriter[(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12)] = new FieldWriter[(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12)] {
    override def map(data: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12)): HBaseData = c1.map(data._1) ++ c2.map(data._2) ++ c3.map(data._3) ++ c4.map(data._4) ++ c5.map(data._5) ++ c6.map(data._6) ++ c7.map(data._7) ++ c8.map(data._8) ++ c9.map(data._9) ++ c10.map(data._10) ++ c11.map(data._11) ++ c12.map(data._12)
  }
  implicit def tupleWriter13[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13](implicit c1: FieldWriter[T1], c2: FieldWriter[T2], c3: FieldWriter[T3], c4: FieldWriter[T4], c5: FieldWriter[T5], c6: FieldWriter[T6], c7: FieldWriter[T7], c8: FieldWriter[T8], c9: FieldWriter[T9], c10: FieldWriter[T10], c11: FieldWriter[T11], c12: FieldWriter[T12], c13: FieldWriter[T13]): FieldWriter[(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13)] = new FieldWriter[(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13)] {
    override def map(data: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13)): HBaseData = c1.map(data._1) ++ c2.map(data._2) ++ c3.map(data._3) ++ c4.map(data._4) ++ c5.map(data._5) ++ c6.map(data._6) ++ c7.map(data._7) ++ c8.map(data._8) ++ c9.map(data._9) ++ c10.map(data._10) ++ c11.map(data._11) ++ c12.map(data._12) ++ c13.map(data._13)
  }
  implicit def tupleWriter14[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14](implicit c1: FieldWriter[T1], c2: FieldWriter[T2], c3: FieldWriter[T3], c4: FieldWriter[T4], c5: FieldWriter[T5], c6: FieldWriter[T6], c7: FieldWriter[T7], c8: FieldWriter[T8], c9: FieldWriter[T9], c10: FieldWriter[T10], c11: FieldWriter[T11], c12: FieldWriter[T12], c13: FieldWriter[T13], c14: FieldWriter[T14]): FieldWriter[(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14)] = new FieldWriter[(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14)] {
    override def map(data: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14)): HBaseData = c1.map(data._1) ++ c2.map(data._2) ++ c3.map(data._3) ++ c4.map(data._4) ++ c5.map(data._5) ++ c6.map(data._6) ++ c7.map(data._7) ++ c8.map(data._8) ++ c9.map(data._9) ++ c10.map(data._10) ++ c11.map(data._11) ++ c12.map(data._12) ++ c13.map(data._13) ++ c14.map(data._14)
  }
  implicit def tupleWriter15[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15](implicit c1: FieldWriter[T1], c2: FieldWriter[T2], c3: FieldWriter[T3], c4: FieldWriter[T4], c5: FieldWriter[T5], c6: FieldWriter[T6], c7: FieldWriter[T7], c8: FieldWriter[T8], c9: FieldWriter[T9], c10: FieldWriter[T10], c11: FieldWriter[T11], c12: FieldWriter[T12], c13: FieldWriter[T13], c14: FieldWriter[T14], c15: FieldWriter[T15]): FieldWriter[(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15)] = new FieldWriter[(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15)] {
    override def map(data: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15)): HBaseData = c1.map(data._1) ++ c2.map(data._2) ++ c3.map(data._3) ++ c4.map(data._4) ++ c5.map(data._5) ++ c6.map(data._6) ++ c7.map(data._7) ++ c8.map(data._8) ++ c9.map(data._9) ++ c10.map(data._10) ++ c11.map(data._11) ++ c12.map(data._12) ++ c13.map(data._13) ++ c14.map(data._14) ++ c15.map(data._15)
  }
} | lgscofield/spark-hbase-connector | src/main/scala/it/nerdammer/spark/hbase/conversion/FieldWriter.scala | Scala | apache-2.0 | 10,929 |
package org.helianto.ingress.repository
import org.helianto.ingress.domain.Moderator
import org.springframework.data.jpa.repository.JpaRepository
/** Spring Data JPA repository for [[Moderator]] entities (String primary key). */
trait ModeratorRepository extends JpaRepository[Moderator, String] {
  // Derived query: Spring Data builds it from the method name, i.e. it matches
  // on the entity's `userId` property; the parameter NAME is irrelevant to the
  // query. NOTE(review): the parameter is called `token` — presumably the
  // token value doubles as the user id here; confirm with callers.
  def findByUserId(token: String): Moderator
}
| iservport/helianto-spring | src/main/scala/org/helianto/ingress/repository/ModeratorRepository.scala | Scala | apache-2.0 | 266 |
package example
import underscoreio.holes._
// Demonstrates the `underscoreio.holes` compiler plugin: each `hole` makes the
// compiler report the type expected at that position (akin to typed holes in
// Haskell). The commented-out lines record the observed compiler messages.
object Example {
  // CORRECT ERRORS:
  // Hole found which needs to be filled with type: Int
  //val x: Int = hole
  // recursive value x needs type
  //val x = hole
  // Hole found which needs to be filled with type: Int
  //def x: Int = hole
  // Hole found which needs to be filled with type: A => C
  //def compose[A,B,C](g: B => C, f: A => B): A => C = hole
  // INCORRECT ERRORS:
  // Hole found which needs to be filled with type: <notype>
  // (we want: C)
  def compose[A,B,C](g: B => C, f: A => B): A => C = a => hole
} | d6y/holes | app/src/main/scala/main.scala | Scala | apache-2.0 | 585 |
package dotty.tools.dotc
package transform
import dotty.tools.dotc.ast.{Trees, tpd, untpd}
import scala.collection.mutable
import core._
import dotty.tools.dotc.typer.Checking
import dotty.tools.dotc.typer.Inliner
import dotty.tools.dotc.typer.VarianceChecker
import Types._, Contexts._, Names._, Flags._, DenotTransformers._, Phases._
import SymDenotations._, StdNames._, Annotations._, Trees._, Scopes._
import Decorators._
import Symbols._, SymUtils._
import ContextFunctionResults.annotateContextResults
import config.Printers.typr
import reporting.messages._
/** Holds the phase-name constant returned by [[PostTyper.phaseName]]. */
object PostTyper {
  val name: String = "posttyper"
}
/** A macro transform that runs immediately after typer and that performs the following functions:
*
* (1) Add super accessors (@see SuperAccessors)
*
* (2) Convert parameter fields that have the same name as a corresponding
* public parameter field in a superclass to a forwarder to the superclass
* field (corresponding = super class field is initialized with subclass field)
* @see forwardParamAccessors.
*
* (3) Add synthetic members (@see SyntheticMembers)
*
* (4) Check that `New` nodes can be instantiated, and that annotations are valid
*
* (5) Convert all trees representing types to TypeTrees.
*
* (6) Check the bounds of AppliedTypeTrees
*
* (7) Insert `.package` for selections of package object members
*
* (8) Replaces self references by name with `this`
*
* (9) Adds SourceFile annotations to all top-level classes and objects
*
* (10) Adds Child annotations to all sealed classes
*
* (11) Minimizes `call` fields of `Inlined` nodes to just point to the toplevel
* class from which code was inlined.
*
* The reason for making this a macro transform is that some functions (in particular
* super and protected accessors and instantiation checks) are naturally top-down and
* don't lend themselves to the bottom-up approach of a mini phase. The other two functions
* (forwarding param accessors and synthetic methods) only apply to templates and fit
* mini-phase or subfunction of a macro phase equally well. But taken by themselves
* they do not warrant their own group of miniphases before pickling.
*/
class PostTyper extends MacroTransform with IdentityDenotTransformer { thisPhase =>
  import tpd._
  /** the following two members override abstract members in Transform */
  override def phaseName: String = PostTyper.name
  // Sanity check: after this phase every val/def symbol must have a fully
  // computed signature.
  override def checkPostCondition(tree: tpd.Tree)(implicit ctx: Context): Unit = tree match {
    case tree: ValOrDefDef =>
      assert(!tree.symbol.signature.isUnderDefined)
    case _ =>
  }
  override def changesMembers: Boolean = true // the phase adds super accessors and synthetic members
  override def transformPhase(implicit ctx: Context): Phase = thisPhase.next
  protected def newTransformer(implicit ctx: Context): Transformer =
    new PostTyperTransformer
  // Sub-transformers shared by all transformer instances of this phase.
  val superAcc: SuperAccessors = new SuperAccessors(thisPhase)
  val synthMbr: SyntheticMembers = new SyntheticMembers(thisPhase)
  // Extracts the `New` node from a constructor-call tree, if there is one.
  private def newPart(tree: Tree): Option[New] = methPart(tree) match {
    case Select(nu: New, _) => Some(nu)
    case _ => None
  }
  // Placeholder: validation of Java annotation arguments is not implemented yet.
  private def checkValidJavaAnnotation(annot: Tree)(implicit ctx: Context): Unit = {
    // TODO fill in
  }
  class PostTyperTransformer extends Transformer {
    // True while transforming the argument tree of a Java-defined annotation.
    private var inJavaAnnot: Boolean = false
    // `New` nodes whose instantiability was already checked (or must be skipped);
    // see isCheckable/withNoCheckNews.
    private var noCheckNews: Set[New] = Set()
    // Runs `op` with the given `New` nodes temporarily added to the skip set.
    def withNoCheckNews[T](ts: List[New])(op: => T): T = {
      val saved = noCheckNews
      noCheckNews ++= ts
      try op finally noCheckNews = saved
    }
    def isCheckable(t: New): Boolean = !inJavaAnnot && !noCheckNews.contains(t)
    /** Mark parameter accessors that are aliases of like-named parameters
     * in their superclass with SuperParamAlias.
     * This info is used in phase ParamForwarding
     */
    private def forwardParamAccessors(impl: Template)(using Context): Unit = impl.parents match
      case superCall @ Apply(fn, superArgs) :: _ if superArgs.nonEmpty =>
        fn.tpe.widen match
          case MethodType(superParamNames) =>
            for case stat: ValDef <- impl.body do
              val sym = stat.symbol
              if sym.isAllOf(PrivateParamAccessor, butNot = Mutable)
                 && !sym.info.isInstanceOf[ExprType] // val-parameters cannot be call-by name, so no need to try to forward to them
              then
                val idx = superArgs.indexWhere(_.symbol == sym)
                if idx >= 0 && superParamNames(idx) == stat.name then
                  // Supercall to like-named parameter.
                  // Having it have the same name is needed to maintain correctness in presence of subclassing
                  // if you would use parent param-name `a` to implement param-field `b`
                  // overriding field `b` will actually override field `a`, that is wrong!
                  typr.println(i"super alias: ${sym.showLocated}")
                  sym.setFlagFrom(thisPhase, SuperParamAlias)
          case _ =>
      case _ =>
    // Transforms an annotation tree, tracking whether we are inside a
    // Java-defined annotation (which disables some checks, see isCheckable).
    private def transformAnnot(annot: Tree)(implicit ctx: Context): Tree = {
      val saved = inJavaAnnot
      inJavaAnnot = annot.symbol.is(JavaDefined)
      if (inJavaAnnot) checkValidJavaAnnotation(annot)
      try transform(annot)
      finally inJavaAnnot = saved
    }
    private def transformAnnot(annot: Annotation)(implicit ctx: Context): Annotation =
      annot.derivedAnnotation(transformAnnot(annot.tree))
    // Common post-processing for member definitions: operator-name checks,
    // annotation transformation, and linking the symbol back to its tree.
    private def processMemberDef(tree: Tree)(implicit ctx: Context): tree.type = {
      val sym = tree.symbol
      Checking.checkValidOperator(sym)
      sym.transformAnnotations(transformAnnot)
      sym.defTree = tree
      tree
    }
    private def processValOrDefDef(tree: Tree)(using Context): tree.type =
      tree match
        case tree: ValOrDefDef if !tree.symbol.is(Synthetic) =>
          checkInferredWellFormed(tree.tpt)
        case _ =>
      processMemberDef(tree)
    private def checkInferredWellFormed(tree: Tree)(using Context): Unit = tree match
      case tree: TypeTree
      if tree.span.isZeroExtent
         // don't check TypeTrees with non-zero extent;
         // these are derived from explicit types
         && !ctx.reporter.errorsReported
         // don't check if errors were already reported; this avoids follow-on errors
         // for inferred types if explicit types are already ill-formed
        => Checking.checkAppliedTypesIn(tree)
      case _ =>
    // Rewrites selections of package-object members to go through the package
    // object, then delegates to the super-accessor machinery. Constant-folds
    // where possible (item (7) in the phase documentation above).
    private def transformSelect(tree: Select, targs: List[Tree])(implicit ctx: Context): Tree = {
      val qual = tree.qualifier
      qual.symbol.moduleClass.denot match {
        case pkg: PackageClassDenotation =>
          val pobj = pkg.packageObjFor(tree.symbol)
          if (pobj.exists)
            return transformSelect(cpy.Select(tree)(qual.select(pobj).withSpan(qual.span), tree.name), targs)
        case _ =>
      }
      val tree1 = super.transform(tree)
      constToLiteral(tree1) match {
        case _: Literal => tree1
        case _ => superAcc.transformSelect(tree1, targs)
      }
    }
    // Puts named type arguments back into declaration order once all type
    // arguments of a (possibly curried) TypeApply are known.
    private def normalizeTypeArgs(tree: TypeApply)(implicit ctx: Context): TypeApply = tree.tpe match {
      case pt: PolyType => // wait for more arguments coming
        tree
      case _ =>
        def decompose(tree: TypeApply): (Tree, List[Tree]) = tree.fun match {
          case fun: TypeApply =>
            val (tycon, args) = decompose(fun)
            (tycon, args ++ tree.args)
          case _ =>
            (tree.fun, tree.args)
        }
        def reorderArgs(pnames: List[Name], namedArgs: List[NamedArg], otherArgs: List[Tree]): List[Tree] = pnames match {
          case pname :: pnames1 =>
            namedArgs.partition(_.name == pname) match {
              case (NamedArg(_, arg) :: _, namedArgs1) =>
                arg :: reorderArgs(pnames1, namedArgs1, otherArgs)
              case _ =>
                val otherArg :: otherArgs1 = otherArgs
                otherArg :: reorderArgs(pnames1, namedArgs, otherArgs1)
            }
          case nil =>
            assert(namedArgs.isEmpty && otherArgs.isEmpty)
            Nil
        }
        val (tycon, args) = decompose(tree)
        tycon.tpe.widen match {
          case tp: PolyType if args.exists(isNamedArg) =>
            val (namedArgs, otherArgs) = args.partition(isNamedArg)
            val args1 = reorderArgs(tp.paramNames, namedArgs.asInstanceOf[List[NamedArg]], otherArgs)
            TypeApply(tycon, args1).withSpan(tree.span).withType(tree.tpe)
          case _ =>
            tree
        }
    }
    // Replaces Inlined subtrees with their result type's default value; used to
    // shrink trees that are known to be erased later (see normalizeErasedRhs).
    private object dropInlines extends TreeMap {
      override def transform(tree: Tree)(implicit ctx: Context): Tree = tree match {
        case Inlined(call, _, _) =>
          cpy.Inlined(tree)(call, Nil, Typed(ref(defn.Predef_undefined), TypeTree(tree.tpe)).withSpan(tree.span))
        case _ => super.transform(tree)
      }
    }
    // Main dispatch: applies the per-tree-kind transformations and checks
    // described in the phase documentation above.
    override def transform(tree: Tree)(implicit ctx: Context): Tree =
      try tree match {
        case tree: Ident if !tree.isType =>
          tree.tpe match {
            case tpe: ThisType => This(tpe.cls).withSpan(tree.span)
            case _ => tree
          }
        case tree @ Select(qual, name) =>
          if (name.isTypeName) {
            Checking.checkRealizable(qual.tpe, qual.posd)
            super.transform(tree)(using ctx.addMode(Mode.Type))
          }
          else
            transformSelect(tree, Nil)
        case tree: Apply =>
          val methType = tree.fun.tpe.widen
          val app =
            if (methType.isErasedMethod)
              tpd.cpy.Apply(tree)(
                tree.fun,
                tree.args.mapConserve(arg =>
                  if (methType.isImplicitMethod && arg.span.isSynthetic) ref(defn.Predef_undefined)
                  else dropInlines.transform(arg)))
            else
              tree
          def app1 =
            // reverse order of transforming args and fun. This way, we get a chance to see other
            // well-formedness errors before reporting errors in possible inferred type args of fun.
            val args1 = transform(app.args)
            cpy.Apply(app)(transform(app.fun), args1)
          methPart(app) match
            case Select(nu: New, nme.CONSTRUCTOR) if isCheckable(nu) =>
              // need to check instantiability here, because the type of the New itself
              // might be a type constructor.
              Checking.checkInstantiable(tree.tpe, nu.posd)
              withNoCheckNews(nu :: Nil)(app1)
            case _ =>
              app1
        case UnApply(fun, implicits, patterns) =>
          // Reverse transform order for the same reason as in `app1` above.
          val patterns1 = transform(patterns)
          cpy.UnApply(tree)(transform(fun), transform(implicits), patterns1)
        case tree: TypeApply =>
          val tree1 @ TypeApply(fn, args) = normalizeTypeArgs(tree)
          args.foreach(checkInferredWellFormed)
          if (fn.symbol != defn.ChildAnnot.primaryConstructor)
            // Make an exception for ChildAnnot, which should really have AnyKind bounds
            Checking.checkBounds(args, fn.tpe.widen.asInstanceOf[PolyType])
          fn match {
            case sel: Select =>
              val args1 = transform(args)
              val sel1 = transformSelect(sel, args1)
              cpy.TypeApply(tree1)(sel1, args1)
            case _ =>
              super.transform(tree1)
          }
        case Inlined(call, bindings, expansion) if !call.isEmpty =>
          // Minimize `call` to a trace pointing at the top-level inlined symbol
          // (item (11) in the phase documentation).
          val pos = call.sourcePos
          val callTrace = Inliner.inlineCallTrace(call.symbol, pos)(using ctx.withSource(pos.source))
          cpy.Inlined(tree)(callTrace, transformSub(bindings), transform(expansion)(using inlineContext(call)))
        case templ: Template =>
          withNoCheckNews(templ.parents.flatMap(newPart)) {
            Checking.checkEnumParentOK(templ.symbol.owner)
            forwardParamAccessors(templ)
            synthMbr.addSyntheticMembers(
              superAcc.wrapTemplate(templ)(
                super.transform(_).asInstanceOf[Template]))
          }
        case tree: ValDef =>
          val tree1 = cpy.ValDef(tree)(rhs = normalizeErasedRhs(tree.rhs, tree.symbol))
          processValOrDefDef(super.transform(tree1))
        case tree: DefDef =>
          annotateContextResults(tree)
          val tree1 = cpy.DefDef(tree)(rhs = normalizeErasedRhs(tree.rhs, tree.symbol))
          processValOrDefDef(superAcc.wrapDefDef(tree1)(super.transform(tree1).asInstanceOf[DefDef]))
        case tree: TypeDef =>
          val sym = tree.symbol
          if (sym.isClass)
            VarianceChecker.check(tree)
            // Add SourceFile annotation to top-level classes
            if sym.owner.is(Package)
               && ctx.compilationUnit.source.exists
               && sym != defn.SourceFileAnnot
            then
              sym.addAnnotation(Annotation.makeSourceFile(ctx.compilationUnit.source.file.path))
          else (tree.rhs, sym.info) match
            case (rhs: LambdaTypeTree, bounds: TypeBounds) =>
              VarianceChecker.checkLambda(rhs, bounds)
            case _ =>
          processMemberDef(super.transform(tree))
        case tree: New if isCheckable(tree) =>
          Checking.checkInstantiable(tree.tpe, tree.posd)
          super.transform(tree)
        case tree: Closure if !tree.tpt.isEmpty =>
          Checking.checkRealizable(tree.tpt.tpe, tree.posd, "SAM type")
          super.transform(tree)
        case tree @ Annotated(annotated, annot) =>
          cpy.Annotated(tree)(transform(annotated), transformAnnot(annot))
        case tree: AppliedTypeTree =>
          if (tree.tpt.symbol == defn.andType)
            Checking.checkNonCyclicInherited(tree.tpe, tree.args.tpes, EmptyScope, tree.posd)
            // Ideally, this should be done by Typer, but we run into cyclic references
            // when trying to typecheck self types which are intersections.
          else if (tree.tpt.symbol == defn.orType)
            () // nothing to do
          else
            Checking.checkAppliedType(tree)
          super.transform(tree)
        case SingletonTypeTree(ref) =>
          Checking.checkRealizable(ref.tpe, ref.posd)
          super.transform(tree)
        case tree: TypeTree =>
          tree.withType(
            tree.tpe match {
              case AnnotatedType(tpe, annot) => AnnotatedType(tpe, transformAnnot(annot))
              case tpe => tpe
            }
          )
        case Import(expr, selectors) =>
          // Check that every non-wildcard selector names an existing member and
          // is not renamed twice within the same import clause.
          val exprTpe = expr.tpe
          val seen = mutable.Set.empty[Name]
          def checkIdent(sel: untpd.ImportSelector): Unit =
            if !exprTpe.member(sel.name).exists && !exprTpe.member(sel.name.toTypeName).exists then
              ctx.error(NotAMember(exprTpe, sel.name, "value"), sel.imported.sourcePos)
            if seen.contains(sel.name) then
              ctx.error(ImportRenamedTwice(sel.imported), sel.imported.sourcePos)
            seen += sel.name
          for sel <- selectors do
            if !sel.isWildcard then checkIdent(sel)
          super.transform(tree)
        case Typed(Ident(nme.WILDCARD), _) =>
          super.transform(tree)(using ctx.addMode(Mode.Pattern))
            // The added mode signals that bounds in a pattern need not
            // conform to selector bounds. I.e. assume
            //     type Tree[T >: Null <: Type]
            // One is still allowed to write
            //     case x: Tree[?]
            // (which translates to)
            //     case x: (_: Tree[?])
        case m @ MatchTypeTree(bounds, selector, cases) =>
          // Analog to the case above for match types
          def tranformIgnoringBoundsCheck(x: CaseDef): CaseDef =
            super.transform(x)(using ctx.addMode(Mode.Pattern)).asInstanceOf[CaseDef]
          cpy.MatchTypeTree(tree)(
            super.transform(bounds),
            super.transform(selector),
            cases.mapConserve(tranformIgnoringBoundsCheck)
          )
        case tree =>
          super.transform(tree)
      }
      catch {
        case ex : AssertionError =>
          println(i"error while transforming $tree")
          throw ex
      }
    /** Transforms the rhs tree into a its default tree if it is in an `erased` val/def.
     *  Performed to shrink the tree that is known to be erased later.
     */
    private def normalizeErasedRhs(rhs: Tree, sym: Symbol)(implicit ctx: Context) =
      if (sym.isEffectivelyErased) dropInlines.transform(rhs) else rhs
  }
}
| som-snytt/dotty | compiler/src/dotty/tools/dotc/transform/PostTyper.scala | Scala | apache-2.0 | 16,606 |
package org.jetbrains.plugins.scala.lang.autoImport
import com.intellij.psi.PsiElement
import org.intellij.lang.annotations.Language
import org.jetbrains.plugins.scala.ScalaFileType
import org.jetbrains.plugins.scala.autoImport.ImportOrderings._
import org.jetbrains.plugins.scala.base.ScalaLightCodeInsightFixtureTestAdapter
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReference
import org.jetbrains.plugins.scala.util.PsiSelectionUtil
/** Verifies the orderings used to rank candidate imports during auto-import:
  * plain alphabetical ordering and the distance-to-local-imports ordering.
  */
class AutoImportSortingTest extends ScalaLightCodeInsightFixtureTestAdapter with PsiSelectionUtil {
  import org.junit.Assert._
  // Configures `code` in the fixture, resolves the reference at `refPath`,
  // sorts the (deliberately reversed) candidate list with `localOrdering`, and
  // asserts the sorted result matches `possibilities` exactly.
  def check(@Language("Scala") code: String, refPath: NamedElementPath, localOrdering: PsiElement => Ordering[String], possibilities: Seq[String]): Unit = {
    val file = myFixture.configureByText(ScalaFileType.INSTANCE, code)
    val ref = selectElement[ScReference](file, refPath)
    // reverse them to make the input different from the result
    val imports = possibilities.reverse
    val ordering = localOrdering(ref)
    val result = imports.sorted(ordering)
    assertEquals(possibilities.mkString("\n"), result.mkString("\n"))
  }
  // Pure alphabetical ordering by fully-qualified package name.
  val alphabeticalSort: PsiElement => Ordering[String] = _ => orderingByPackageName
  // Primary key: distance to imports local to the reference; ties broken by
  // special-package rules, then alphabetically.
  val packageDistSort: PsiElement => Ordering[String] = e => orderingByDistanceToLocalImports(e) orElse specialPackageOrdering orElse orderingByPackageName
  def test_alphabetical_sorting(): Unit = check(
    """
      |new Ref
    """.stripMargin,
    path("Ref"),
    alphabeticalSort,
    Seq(
      "com.test.Ref",
      "com.test.inner.Ref",
      "com.xxx.Ref",
      "com.xxx.y.Ref"
    )
  )
  def test_package_dist_sorting(): Unit = check(
    """
      |package com.my.here
      |
      |import abc.test.last.SomethingElse
      |
      |object Obj {
      |  import zzz.zzz.SomethingElse2
      |
      |  new Ref
      |}
    """.stripMargin,
    path("Obj", "Ref"),
    packageDistSort,
    Seq(
      "com.my.here.Ref",            // dist 0 to com.my.here and is inner+curpack
      "com.my.here.inner.Ref",      // dist 1 to com.my.here and is inner+curpack
      "com.my.Ref",                 // dist 1 to com.my.here and is curpack
      "abc.test.last.Ref",          // dist 0 to abc.test.last
      "zzz.zzz.Ref",                // dist 0 to zzz.zzz
      "abc.test.last.innerA.Ref",   // dist 1 to abc.test.last and is inner
      "abc.test.last.innerB.Ref",   // dist 1 to abc.test.last and is inner
      "zzz.zzz.a.Ref",              // dist 1 to zzz.zzz and is inner
      "abc.test.Ref",               // dist 1 to abc.test.last
      "zzz.zzz.a.b.Ref",            // dist 2 to zzz.zzz and is inner
      "abc.test.innerA.Ref",        // dist 2 to abc.test.last
      "abc.test.innerB.Ref",        // dist 2 to abc.test.last
      "zzz.zzz.a.b.c.Ref",          // dist 3 to zzz.zzz
      "abc.testa.Ref",              // unrelated
      "abc.testb.Ref",              // unrelated
      "abc.testc.Ref",              // unrelated
      "abc.unrelated.Ref"           // unrelated
    )
  )
} | JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/autoImport/AutoImportSortingTest.scala | Scala | apache-2.0 | 2,993 |
//
// Scaled - a scalable editor extensible via JVM languages
// http://github.com/scaled/scaled/blob/master/LICENSE
package scaled.impl
import org.junit.Assert._
import org.junit._
import scaled._
import scaled.util.Properties
/** Exercises reading (properties -> config values) and writing
  * (config values -> properties text) of [[ConfigImpl]].
  */
class ConfigImplTest {
  // Sample config definitions with defaults used by the tests below.
  object TestConfig extends Config.Defs(true) {
    @Var("The default width of editor views, in characters.")
    val viewWidth = key(100)
    @Var("The default height of editor views, in characters.")
    val viewHeight = key(40)
    @Var("The number of entries retained by the kill ring.")
    val killRingSize = key(40)
  }
  // Parsing a properties file should override the declared defaults.
  @Test def testReadInit () {
    val impl = new ConfigImpl("editor", TestConfig :: Nil, None)
    val props = Seq("# Scaled editor config",
                    "", "# View width", "view-width: 15",
                    "", "# View height", "view-height: 25")
    Properties.read(TestData.log, "test", props)(impl.init(TestData.log))
    assertEquals(15, impl(TestConfig.viewWidth))
    assertEquals(25, impl(TestConfig.viewHeight))
  }
  // Serialization: unchanged values are written as commented-out defaults,
  // explicitly set values as active (uncommented) lines.
  @Test def testWrite () {
    val impl = new ConfigImpl("editor", TestConfig :: Nil, None)
    val allDefaults = Seq(
      "", "# The number of entries retained by the kill ring.", "# kill-ring-size: 40",
      "", "# The default height of editor views, in characters.", "# view-height: 40",
      "", "# The default width of editor views, in characters.", "# view-width: 100")
    assertTrue(impl.toProperties containsSlice allDefaults)
    impl(TestConfig.viewWidth) = 15
    val viewWidthChanged = Seq(
      "", "# The number of entries retained by the kill ring.", "# kill-ring-size: 40",
      "", "# The default height of editor views, in characters.", "# view-height: 40",
      "", "# The default width of editor views, in characters.", "view-width: 15")
    assertTrue(impl.toProperties containsSlice viewWidthChanged)
  }
}
| swhgoon/scaled | editor/src/test/scala/scaled/impl/ConfigImplTest.scala | Scala | bsd-3-clause | 1,859 |
package org.jetbrains.plugins.scala
package codeInsight
package intention
package booleans
import com.intellij.testFramework.EditorTestUtil
/**
* @author Ksenia.Sautina
* @since 4/20/12
*/
class ReplaceEqualsOrEqualityInInfixExprIntentionTest extends intentions.ScalaIntentionTestBase {
  import EditorTestUtil.{CARET_TAG => CARET}
  override def familyName = ScalaCodeInsightBundle.message("family.name.replace.equals.or.equality.in.infix.expression")
  // `==` at the caret is converted to `equals` (caret preserved before the call).
  def testReplaceQuality(): Unit = {
    val text = s"if (a ==$CARET b) return"
    val resultText = s"if (a ${CARET}equals b) return"
    doTest(text, resultText)
  }
  // The inverse direction: `equals` at the caret becomes `==`.
  def testReplaceQuality2(): Unit = {
    val text = s"if (a ${CARET}equals false) return"
    val resultText = s"if (a $CARET== false) return"
    doTest(text, resultText)
  }
}
| JetBrains/intellij-scala | scala/codeInsight/test/org/jetbrains/plugins/scala/codeInsight/intention/booleans/ReplaceEqualsOrEqualityInInfixExprIntentionTest.scala | Scala | apache-2.0 | 812 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import scala.collection.JavaConverters._
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.hadoop.hive.ql.exec.Utilities
import org.apache.hadoop.hive.ql.io.{HiveFileFormatUtils, HiveOutputFormat}
import org.apache.hadoop.hive.serde2.Serializer
import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspectorUtils, StructObjectInspector}
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption
import org.apache.hadoop.io.Writable
import org.apache.hadoop.mapred.{JobConf, Reporter}
import org.apache.hadoop.mapreduce.{Job, TaskAttemptContext}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.datasources.{FileFormat, OutputWriter, OutputWriterFactory}
import org.apache.spark.sql.hive.{HiveInspectors, HiveTableUtil}
import org.apache.spark.sql.hive.HiveShim.{ShimFileSinkDesc => FileSinkDesc}
import org.apache.spark.sql.sources.DataSourceRegister
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.SerializableJobConf
/**
* `FileFormat` for writing Hive tables.
*
* TODO: implement the read logic.
*/
class HiveFileFormat(fileSinkConf: FileSinkDesc)
  extends FileFormat with DataSourceRegister with Logging {

  // No-arg constructor so the format can be instantiated by short name through
  // the DataSource registry; fileSinkConf is only needed on the write path.
  def this() = this(null)

  override def shortName(): String = "hive"

  override def inferSchema(
      sparkSession: SparkSession,
      options: Map[String, String],
      files: Seq[FileStatus]): Option[StructType] = {
    // Read path is not implemented yet (see class-level TODO).
    throw new UnsupportedOperationException("inferSchema is not supported for hive data source.")
  }

  /**
   * Configures the Hadoop job for writing to the Hive table described by
   * `fileSinkConf` and returns a factory that produces [[HiveOutputWriter]]s.
   */
  override def prepareWrite(
      sparkSession: SparkSession,
      job: Job,
      options: Map[String, String],
      dataSchema: StructType): OutputWriterFactory = {
    val conf = job.getConfiguration
    val tableDesc = fileSinkConf.getTableInfo
    conf.set("mapred.output.format.class", tableDesc.getOutputFileFormatClassName)

    // When speculation is on and the output committer class name contains "Direct", warn
    // users that they may lose data if they are using a direct output committer.
    val speculationEnabled = sparkSession.sparkContext.conf.getBoolean("spark.speculation", false)
    val outputCommitterClass = conf.get("mapred.output.committer.class", "")
    if (speculationEnabled && outputCommitterClass.contains("Direct")) {
      val warningMessage =
        s"$outputCommitterClass may be an output committer that writes data directly to " +
          "the final location. Because speculation is enabled, this output committer may " +
          "cause data loss (see the case in SPARK-10063). If possible, please use an output " +
          "committer that does not have this behavior (e.g. FileOutputCommitter)."
      logWarning(warningMessage)
    }

    // Add table properties from storage handler to hadoopConf, so any custom storage
    // handler settings can be set to hadoopConf
    HiveTableUtil.configureJobPropertiesForStorageHandler(tableDesc, conf, false)
    Utilities.copyTableJobPropertiesToConf(tableDesc, conf)

    // Capture the field in a local so the serialized factory below does not
    // reference the outer (non-serializable) HiveFileFormat instance.
    val fileSinkConfSer = fileSinkConf
    new OutputWriterFactory {
      private val jobConf = new SerializableJobConf(new JobConf(conf))
      @transient private lazy val outputFormat =
        jobConf.value.getOutputFormat.asInstanceOf[HiveOutputFormat[AnyRef, Writable]]

      override def getFileExtension(context: TaskAttemptContext): String = {
        Utilities.getFileExtension(jobConf.value, fileSinkConfSer.getCompressed, outputFormat)
      }

      override def newInstance(
          path: String,
          dataSchema: StructType,
          context: TaskAttemptContext): OutputWriter = {
        new HiveOutputWriter(path, fileSinkConfSer, jobConf.value, dataSchema)
      }
    }
  }
}
/**
 * Writes Spark [[InternalRow]]s to a Hive table file using the table's own
 * SerDe and record writer. One instance is created per output file/task.
 */
class HiveOutputWriter(
    path: String,
    fileSinkConf: FileSinkDesc,
    jobConf: JobConf,
    dataSchema: StructType) extends OutputWriter with HiveInspectors {

  private def tableDesc = fileSinkConf.getTableInfo

  // Hive serializer instantiated from the table's SerDe class and configured
  // with the table properties before first use.
  private val serializer = {
    val serializer = tableDesc.getDeserializerClass.newInstance().asInstanceOf[Serializer]
    serializer.initialize(jobConf, tableDesc.getProperties)
    serializer
  }

  // Underlying Hive record writer bound to the target file path.
  private val hiveWriter = HiveFileFormatUtils.getHiveRecordWriter(
    jobConf,
    tableDesc,
    serializer.getSerializedClass,
    fileSinkConf,
    new Path(path),
    Reporter.NULL)

  // Standard (Java-object) inspector derived from the table's deserializer;
  // used both to wrap Catalyst values and to serialize the assembled row.
  private val standardOI = ObjectInspectorUtils
    .getStandardObjectInspector(
      tableDesc.getDeserializer(jobConf).getObjectInspector,
      ObjectInspectorCopyOption.JAVA)
    .asInstanceOf[StructObjectInspector]

  private val fieldOIs =
    standardOI.getAllStructFieldRefs.asScala.map(_.getFieldObjectInspector).toArray
  private val dataTypes = dataSchema.map(_.dataType).toArray
  // Per-field converters from Catalyst values to Hive-inspectable objects.
  private val wrappers = fieldOIs.zip(dataTypes).map { case (f, dt) => wrapperFor(f, dt) }
  // Reused buffer for the converted row; avoids per-row allocation.
  private val outputData = new Array[Any](fieldOIs.length)

  // Converts each column (null stays null), then hands the row to Hive.
  // NOTE(review): assumes dataSchema's field order matches the table's
  // struct fields — confirm against the caller.
  override def write(row: InternalRow): Unit = {
    var i = 0
    while (i < fieldOIs.length) {
      outputData(i) = if (row.isNullAt(i)) null else wrappers(i)(row.get(i, dataTypes(i)))
      i += 1
    }
    hiveWriter.write(serializer.serialize(outputData, standardOI))
  }

  override def close(): Unit = {
    // Seems the boolean value passed into close does not matter.
    hiveWriter.close(false)
  }
}
| lxsmnv/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveFileFormat.scala | Scala | apache-2.0 | 6,289 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.optimizer
import java.util
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.optimizer.Optimizer
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.RunnableCommand
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.types.{IntegerType, StringType}
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.carbon.querystatistics.QueryStatistic
import org.apache.carbondata.core.util.CarbonTimeStatisticsFactory
import org.apache.carbondata.spark.CarbonFilters
/**
* Carbon Optimizer to add dictionary decoder.
*/
/**
 * Entry points for wiring Carbon's dictionary-decoder rewrite into the
 * Spark optimizer pipeline.
 */
object CarbonOptimizer {

  /** Builds the optimizer via the code-generation factory. `version` is currently unused. */
  def optimizer(optimizer: Optimizer, conf: CarbonSQLConf, version: String): Optimizer =
    CodeGenerateFactory.getInstance().optimizerFactory.createOptimizer(optimizer, conf)

  /**
   * Runs the standard optimizer, then applies the dictionary-decoder rewrite
   * when the original plan references any Carbon relation.
   */
  def execute(plan: LogicalPlan, optimizer: Optimizer): LogicalPlan = {
    val optimized: LogicalPlan = optimizer.execute(plan)
    collectCarbonRelation(plan) match {
      case Seq() =>
        optimized
      case carbonRelations =>
        new ResolveCarbonFunctions(carbonRelations).apply(optimized)
    }
  }

  /** Collects every Carbon datasource relation referenced by the plan. */
  def collectCarbonRelation(plan: LogicalPlan): Seq[CarbonDecoderRelation] =
    plan.collect {
      case relation: LogicalRelation if relation.relation.isInstanceOf[CarbonDatasourceRelation] =>
        CarbonDecoderRelation(
          relation.attributeMap,
          relation.relation.asInstanceOf[CarbonDatasourceRelation])
    }
}
/**
* It does two jobs. 1. Change the datatype for dictionary encoded column 2. Add the dictionary
* decoder plan.
*/
class ResolveCarbonFunctions(relations: Seq[CarbonDecoderRelation])
  extends Rule[LogicalPlan] with PredicateHelper {
  val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)

  /**
   * Applies the decoder rewrite once per plan: skipped when there are no
   * Carbon relations or the plan already contains a catalyst decoder.
   * Timing of the rewrite is recorded via the query-statistics recorder.
   */
  def apply(plan: LogicalPlan): LogicalPlan = {
    if (relations.nonEmpty && !isOptimized(plan)) {
      LOGGER.info("Starting to optimize plan")
      val recorder = CarbonTimeStatisticsFactory.createExecutorRecorder("");
      val queryStatistic = new QueryStatistic()
      val result = transformCarbonPlan(plan, relations)
      queryStatistic.addStatistics("Time taken for Carbon Optimizer to optimize: ",
        System.currentTimeMillis)
      recorder.recordStatistics(queryStatistic)
      recorder.logStatistics()
      result
    } else {
      LOGGER.info("Skip CarbonOptimizer")
      plan
    }
  }

  /** True when the plan already contains a [[CarbonDictionaryCatalystDecoder]]. */
  def isOptimized(plan: LogicalPlan): Boolean = {
    plan find {
      case cd: CarbonDictionaryCatalystDecoder => true
      case other => false
    } isDefined
  }

  // Mutable marker noting whether a subtree contains a Carbon relation.
  case class ExtraNodeInfo(var hasCarbonRelation: Boolean)

  /**
   * Recursively determines, for each node, whether its subtree touches a
   * Carbon relation. Only Carbon-free subtrees are recorded in the map so
   * the rewrite can skip them later.
   */
  def fillNodeInfo(
      plan: LogicalPlan,
      extraNodeInfos: java.util.HashMap[LogicalPlan, ExtraNodeInfo]): ExtraNodeInfo = {
    plan match {
      case l: LogicalRelation if l.relation.isInstanceOf[CarbonDatasourceRelation] =>
        val extraNodeInfo = ExtraNodeInfo(true)
        extraNodeInfo
      case others =>
        val extraNodeInfo = ExtraNodeInfo(false)
        others.children.foreach { childPlan =>
          val childExtraNodeInfo = fillNodeInfo(childPlan, extraNodeInfos)
          if (childExtraNodeInfo.hasCarbonRelation) {
            extraNodeInfo.hasCarbonRelation = true
          }
        }
        // Only record plans whose subtree contains no Carbon relation.
        if (!extraNodeInfo.hasCarbonRelation) {
          extraNodeInfos.put(plan, extraNodeInfo)
        }
        extraNodeInfo
    }
  }

  /**
   * Steps for changing the plan.
   * 1. It finds out the join condition columns and dimension aggregate columns which are need to
   * be decoded just before that plan executes.
   * 2. Plan starts transform by adding the decoder to the plan where it needs the decoded data
   * like dimension aggregate columns decoder under aggregator and join condition decoder under
   * join children.
   */
  def transformCarbonPlan(plan: LogicalPlan,
      relations: Seq[CarbonDecoderRelation]): LogicalPlan = {
    // Commands (DDL etc.) are returned untouched.
    if (plan.isInstanceOf[RunnableCommand]) {
      return plan
    }
    // Flips to true once the single outer decoder has been placed; all
    // subsequent insertions are inner (non-outer) decoders.
    var decoder = false
    val mapOfNonCarbonPlanNodes = new java.util.HashMap[LogicalPlan, ExtraNodeInfo]
    fillNodeInfo(plan, mapOfNonCarbonPlanNodes)
    val aliasMap = CarbonAliasDecoderRelation()
    // collect alias information before hand.
    collectInformationOnAttributes(plan, aliasMap)

    // A node absent from the map is (or contains) a Carbon relation.
    def hasCarbonRelation(currentPlan: LogicalPlan): Boolean = {
      val extraNodeInfo = mapOfNonCarbonPlanNodes.get(currentPlan)
      if (extraNodeInfo == null) {
        true
      } else {
        extraNodeInfo.hasCarbonRelation
      }
    }

    val attrMap = new util.HashMap[AttributeReferenceWrapper, CarbonDecoderRelation]()
    relations.foreach(_.fillAttributeMap(attrMap))

    // Inserts CarbonDictionaryTempDecoder nodes beneath operators that need
    // decoded (non-dictionary) values: sort keys, union outputs, dimension
    // aggregates, expand projections, filter conditions, join keys, project
    // lists and window expressions.
    def addTempDecoder(currentPlan: LogicalPlan): LogicalPlan = {
      currentPlan match {
        case sort: Sort if !sort.child.isInstanceOf[CarbonDictionaryTempDecoder] =>
          val attrsOnSort = new util.HashSet[AttributeReferenceWrapper]()
          // NOTE(review): .map is used purely for its side effect here (and in
          // the other cases below) — foreach would express the intent better.
          sort.order.map { s =>
            s.collect {
              case attr: AttributeReference
                if isDictionaryEncoded(attr, attrMap, aliasMap) =>
                attrsOnSort.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
            }
          }
          var child = sort.child
          if (attrsOnSort.size() > 0 && !child.isInstanceOf[Sort]) {
            child = CarbonDictionaryTempDecoder(attrsOnSort,
              new util.HashSet[AttributeReferenceWrapper](), sort.child)
          }
          if (!decoder) {
            decoder = true
            CarbonDictionaryTempDecoder(new util.HashSet[AttributeReferenceWrapper](),
              new util.HashSet[AttributeReferenceWrapper](),
              Sort(sort.order, sort.global, child),
              isOuter = true)
          } else {
            Sort(sort.order, sort.global, child)
          }
        case union: Union
          if !(union.left.isInstanceOf[CarbonDictionaryTempDecoder] ||
               union.right.isInstanceOf[CarbonDictionaryTempDecoder]) =>
          // Both sides of a union must be fully decoded so rows are comparable.
          val leftCondAttrs = new util.HashSet[AttributeReferenceWrapper]
          val rightCondAttrs = new util.HashSet[AttributeReferenceWrapper]
          union.left.output.foreach(attr =>
            leftCondAttrs.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr))))
          union.right.output.foreach(attr =>
            rightCondAttrs.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr))))
          var leftPlan = union.left
          var rightPlan = union.right
          if (hasCarbonRelation(leftPlan) && leftCondAttrs.size() > 0 &&
              !leftPlan.isInstanceOf[CarbonDictionaryCatalystDecoder]) {
            leftPlan = CarbonDictionaryTempDecoder(leftCondAttrs,
              new util.HashSet[AttributeReferenceWrapper](),
              union.left)
          }
          if (hasCarbonRelation(rightPlan) && rightCondAttrs.size() > 0 &&
              !rightPlan.isInstanceOf[CarbonDictionaryCatalystDecoder]) {
            rightPlan = CarbonDictionaryTempDecoder(rightCondAttrs,
              new util.HashSet[AttributeReferenceWrapper](),
              union.right)
          }
          if (!decoder) {
            decoder = true
            CarbonDictionaryTempDecoder(new util.HashSet[AttributeReferenceWrapper](),
              new util.HashSet[AttributeReferenceWrapper](),
              Union(leftPlan, rightPlan),
              isOuter = true)
          } else {
            Union(leftPlan, rightPlan)
          }
        case agg: Aggregate if !agg.child.isInstanceOf[CarbonDictionaryTempDecoder] =>
          val attrsOndimAggs = new util.HashSet[AttributeReferenceWrapper]
          // Bare AttributeReference / Alias cases are intentional no-ops:
          // plain column references can stay dictionary-encoded under an
          // aggregate; only dimension values inside aggregate functions and
          // other expressions need decoding.
          agg.aggregateExpressions.map {
            case attr: AttributeReference =>
            case a@Alias(attr: AttributeReference, name) =>
            case aggExp: AggregateExpression =>
              aggExp.transform {
                case aggExp: AggregateExpression =>
                  collectDimensionAggregates(aggExp, attrsOndimAggs, aliasMap, attrMap)
                  aggExp
              }
            case others =>
              others.collect {
                case attr: AttributeReference
                  if isDictionaryEncoded(attr, attrMap, aliasMap) =>
                  attrsOndimAggs.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
              }
          }
          var child = agg.child
          // If the child is itself an aggregate, push the decoder down to it.
          // NOTE(review): `!child.equals(agg)` compares the child against this
          // node, which looks always-true — confirm the intended condition.
          if (attrsOndimAggs.size() > 0 && !child.equals(agg)) {
            child = CarbonDictionaryTempDecoder(attrsOndimAggs,
              new util.HashSet[AttributeReferenceWrapper](),
              agg.child)
          }
          if (!decoder) {
            decoder = true
            CarbonDictionaryTempDecoder(new util.HashSet[AttributeReferenceWrapper](),
              new util.HashSet[AttributeReferenceWrapper](),
              Aggregate(agg.groupingExpressions, agg.aggregateExpressions, child),
              isOuter = true)
          } else {
            Aggregate(agg.groupingExpressions, agg.aggregateExpressions, child)
          }
        case expand: Expand if !expand.child.isInstanceOf[CarbonDictionaryTempDecoder] =>
          val attrsOnExpand = new util.HashSet[AttributeReferenceWrapper]
          expand.projections.map {s =>
            s.map {
              case attr: AttributeReference =>
              case a@Alias(attr: AttributeReference, name) =>
              case others =>
                others.collect {
                  case attr: AttributeReference
                    if isDictionaryEncoded(attr, attrMap, aliasMap) =>
                    attrsOnExpand.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
                }
            }
          }
          var child = expand.child
          if (attrsOnExpand.size() > 0 && !child.isInstanceOf[Expand]) {
            child = CarbonDictionaryTempDecoder(attrsOnExpand,
              new util.HashSet[AttributeReferenceWrapper](),
              expand.child)
          }
          if (!decoder) {
            decoder = true
            CarbonDictionaryTempDecoder(new util.HashSet[AttributeReferenceWrapper](),
              new util.HashSet[AttributeReferenceWrapper](),
              CodeGenerateFactory.getInstance().expandFactory.createExpand(expand, child),
              isOuter = true)
          } else {
            CodeGenerateFactory.getInstance().expandFactory.createExpand(expand, child)
          }
        case filter: Filter if !filter.child.isInstanceOf[CarbonDictionaryTempDecoder] =>
          val attrsOnConds = new util.HashSet[AttributeReferenceWrapper]
          // In case the child is join then we cannot push down the filters so decode them earlier
          if (filter.child.isInstanceOf[Join] || filter.child.isInstanceOf[Sort]) {
            filter.condition.collect {
              case attr: AttributeReference =>
                attrsOnConds.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
            }
          } else {
            // Otherwise let CarbonFilters decide which predicates need
            // decoded values (some can be evaluated on dictionary keys).
            CarbonFilters
              .selectFilters(splitConjunctivePredicates(filter.condition), attrsOnConds, aliasMap)
          }
          var child = filter.child
          if (attrsOnConds.size() > 0 && !child.isInstanceOf[Filter]) {
            child = CarbonDictionaryTempDecoder(attrsOnConds,
              new util.HashSet[AttributeReferenceWrapper](),
              filter.child)
          }
          if (!decoder) {
            decoder = true
            CarbonDictionaryTempDecoder(new util.HashSet[AttributeReferenceWrapper](),
              new util.HashSet[AttributeReferenceWrapper](),
              Filter(filter.condition, child),
              isOuter = true)
          } else {
            Filter(filter.condition, child)
          }
        case j: Join
          if !(j.left.isInstanceOf[CarbonDictionaryTempDecoder] ||
               j.right.isInstanceOf[CarbonDictionaryTempDecoder]) =>
          // Dictionary-encoded join keys must be decoded on whichever side(s)
          // actually produce them before the join compares values.
          val attrsOnJoin = new util.HashSet[Attribute]
          j.condition match {
            case Some(expression) =>
              expression.collect {
                case attr: AttributeReference
                  if isDictionaryEncoded(attr, attrMap, aliasMap) =>
                  attrsOnJoin.add(aliasMap.getOrElse(attr, attr))
              }
            case _ =>
          }
          val leftCondAttrs = new util.HashSet[AttributeReferenceWrapper]
          val rightCondAttrs = new util.HashSet[AttributeReferenceWrapper]
          if (attrsOnJoin.size() > 0) {
            attrsOnJoin.asScala.map { attr =>
              if (qualifierPresence(j.left, attr)) {
                leftCondAttrs.add(AttributeReferenceWrapper(attr))
              }
              if (qualifierPresence(j.right, attr)) {
                rightCondAttrs.add(AttributeReferenceWrapper(attr))
              }
            }
            var leftPlan = j.left
            var rightPlan = j.right
            if (leftCondAttrs.size() > 0 &&
                !leftPlan.isInstanceOf[CarbonDictionaryCatalystDecoder]) {
              leftPlan = CarbonDictionaryTempDecoder(leftCondAttrs,
                new util.HashSet[AttributeReferenceWrapper](),
                j.left)
            }
            if (rightCondAttrs.size() > 0 &&
                !rightPlan.isInstanceOf[CarbonDictionaryCatalystDecoder]) {
              rightPlan = CarbonDictionaryTempDecoder(rightCondAttrs,
                new util.HashSet[AttributeReferenceWrapper](),
                j.right)
            }
            if (!decoder) {
              decoder = true
              CarbonDictionaryTempDecoder(new util.HashSet[AttributeReferenceWrapper](),
                new util.HashSet[AttributeReferenceWrapper](),
                Join(leftPlan, rightPlan, j.joinType, j.condition),
                isOuter = true)
            } else {
              Join(leftPlan, rightPlan, j.joinType, j.condition)
            }
          } else {
            j
          }
        case p: Project
          if relations.nonEmpty && !p.child.isInstanceOf[CarbonDictionaryTempDecoder] =>
          val attrsOnProjects = new util.HashSet[AttributeReferenceWrapper]
          p.projectList.map {
            case attr: AttributeReference =>
            case a@Alias(attr: AttributeReference, name) =>
            case others =>
              others.collect {
                case attr: AttributeReference
                  if isDictionaryEncoded(attr, attrMap, aliasMap) =>
                  attrsOnProjects.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
              }
          }
          var child = p.child
          if (attrsOnProjects.size() > 0 && !child.isInstanceOf[Project]) {
            child = CarbonDictionaryTempDecoder(attrsOnProjects,
              new util.HashSet[AttributeReferenceWrapper](),
              p.child)
          }
          if (!decoder) {
            decoder = true
            CarbonDictionaryTempDecoder(new util.HashSet[AttributeReferenceWrapper](),
              new util.HashSet[AttributeReferenceWrapper](),
              Project(p.projectList, child),
              isOuter = true)
          } else {
            Project(p.projectList, child)
          }
        case wd: Window if !wd.child.isInstanceOf[CarbonDictionaryTempDecoder] =>
          val attrsOnProjects = new util.HashSet[AttributeReferenceWrapper]
          wd.projectList.map {
            case attr: AttributeReference =>
            case others =>
              others.collect {
                case attr: AttributeReference
                  if isDictionaryEncoded(attr, attrMap, aliasMap) =>
                  attrsOnProjects.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
              }
          }
          wd.windowExpressions.map {
            case others =>
              others.collect {
                case attr: AttributeReference
                  if isDictionaryEncoded(attr, attrMap, aliasMap) =>
                  attrsOnProjects.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
              }
          }
          wd.partitionSpec.map{
            case attr: AttributeReference =>
            case others =>
              others.collect {
                case attr: AttributeReference
                  if isDictionaryEncoded(attr, attrMap, aliasMap) =>
                  attrsOnProjects.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
              }
          }
          wd.orderSpec.map { s =>
            s.collect {
              case attr: AttributeReference
                if isDictionaryEncoded(attr, attrMap, aliasMap) =>
                attrsOnProjects.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
            }
          }
          // NOTE(review): partitionSpec is traversed a second time here; the
          // second pass looks redundant (results go into the same set).
          wd.partitionSpec.map { s =>
            s.collect {
              case attr: AttributeReference
                if isDictionaryEncoded(attr, attrMap, aliasMap) =>
                attrsOnProjects.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
            }
          }
          var child = wd.child
          if (attrsOnProjects.size() > 0 && !child.isInstanceOf[Project]) {
            child = CarbonDictionaryTempDecoder(attrsOnProjects,
              new util.HashSet[AttributeReferenceWrapper](),
              wd.child)
          }
          if (!decoder) {
            decoder = true
            CarbonDictionaryTempDecoder(new util.HashSet[AttributeReferenceWrapper](),
              new util.HashSet[AttributeReferenceWrapper](),
              Window(wd.projectList, wd.windowExpressions, wd.partitionSpec, wd.orderSpec, child),
              isOuter = true)
          } else {
            Window(wd.projectList, wd.windowExpressions, wd.partitionSpec, wd.orderSpec, child)
          }
        case l: LogicalRelation if l.relation.isInstanceOf[CarbonDatasourceRelation] =>
          // A bare Carbon scan with no decoding operator above it still needs
          // one outer decoder so the final output is decoded.
          if (!decoder) {
            decoder = true
            CarbonDictionaryTempDecoder(new util.HashSet[AttributeReferenceWrapper](),
              new util.HashSet[AttributeReferenceWrapper](), l, isOuter = true)
          } else {
            l
          }
        case others => others
      }
    }

    // Top-down pass: Carbon-free subtrees are left untouched.
    val transFormedPlan =
      plan transformDown {
        case cd: CarbonDictionaryTempDecoder if cd.isOuter =>
          decoder = true
          cd
        case currentPlan =>
          hasCarbonRelation(currentPlan) match {
            case true => addTempDecoder(currentPlan)
            case false => currentPlan
          }
      }
    val processor = new CarbonDecoderProcessor
    processor.updateDecoders(processor.getDecoderList(transFormedPlan))
    updateProjection(updateTempDecoder(transFormedPlan, aliasMap, attrMap))
  }

  /**
   * Replaces the temporary decoders with real [[CarbonDictionaryCatalystDecoder]]s
   * and rewrites dictionary-encoded attribute references to IntegerType
   * wherever the value has not yet been decoded.
   */
  private def updateTempDecoder(plan: LogicalPlan,
      aliasMap: CarbonAliasDecoderRelation,
      attrMap: java.util.HashMap[AttributeReferenceWrapper, CarbonDecoderRelation]):
  LogicalPlan = {
    // Tracks, during the top-down walk, which attributes remain encoded at
    // the current position; the marker stack restores state across joins.
    var allAttrsNotDecode: util.Set[AttributeReferenceWrapper] =
      new util.HashSet[AttributeReferenceWrapper]()
    val marker = new CarbonPlanMarker
    plan transformDown {
      case cd: CarbonDictionaryTempDecoder if !cd.processed =>
        cd.processed = true
        allAttrsNotDecode = cd.attrsNotDecode
        marker.pushMarker(allAttrsNotDecode)
        if (cd.isOuter) {
          CarbonDictionaryCatalystDecoder(relations,
            ExcludeProfile(cd.getAttrsNotDecode.asScala.toSeq),
            aliasMap,
            isOuter = true,
            cd.child)
        } else {
          CarbonDictionaryCatalystDecoder(relations,
            IncludeProfile(cd.getAttrList.asScala.toSeq),
            aliasMap,
            isOuter = false,
            cd.child)
        }
      case cd: CarbonDictionaryCatalystDecoder =>
        cd
      case sort: Sort =>
        val sortExprs = sort.order.map { s =>
          s.transform {
            case attr: AttributeReference =>
              updateDataType(attr, attrMap, allAttrsNotDecode, aliasMap)
          }.asInstanceOf[SortOrder]
        }
        Sort(sortExprs, sort.global, sort.child)
      case agg: Aggregate if !agg.child.isInstanceOf[CarbonDictionaryCatalystDecoder] =>
        val aggExps = agg.aggregateExpressions.map { aggExp =>
          aggExp.transform {
            case attr: AttributeReference =>
              updateDataType(attr, attrMap, allAttrsNotDecode, aliasMap)
          }
        }.asInstanceOf[Seq[NamedExpression]]
        val grpExps = agg.groupingExpressions.map { gexp =>
          gexp.transform {
            case attr: AttributeReference =>
              updateDataType(attr, attrMap, allAttrsNotDecode, aliasMap)
          }
        }
        Aggregate(grpExps, aggExps, agg.child)
      case expand: Expand =>
        expand.transformExpressions {
          case attr: AttributeReference =>
            updateDataType(attr, attrMap, allAttrsNotDecode, aliasMap)
        }
      case filter: Filter =>
        val filterExps = filter.condition transform {
          case attr: AttributeReference =>
            updateDataType(attr, attrMap, allAttrsNotDecode, aliasMap)
        }
        Filter(filterExps, filter.child)
      case j: Join =>
        marker.pushBinaryMarker(allAttrsNotDecode)
        j
      case u: Union =>
        marker.pushBinaryMarker(allAttrsNotDecode)
        u
      case p: Project if relations.nonEmpty =>
        val prExps = p.projectList.map { prExp =>
          prExp.transform {
            case attr: AttributeReference =>
              updateDataType(attr, attrMap, allAttrsNotDecode, aliasMap)
          }
        }.asInstanceOf[Seq[NamedExpression]]
        Project(prExps, p.child)
      case wd: Window if relations.nonEmpty =>
        val prExps = wd.projectList.map { prExp =>
          prExp.transform {
            case attr: AttributeReference =>
              updateDataType(attr, attrMap, allAttrsNotDecode, aliasMap)
          }
        }.asInstanceOf[Seq[Attribute]]
        val wdExps = wd.windowExpressions.map { gexp =>
          gexp.transform {
            case attr: AttributeReference =>
              updateDataType(attr, attrMap, allAttrsNotDecode, aliasMap)
          }
        }.asInstanceOf[Seq[NamedExpression]]
        val partitionSpec = wd.partitionSpec.map{ exp =>
          exp.transform {
            case attr: AttributeReference =>
              updateDataType(attr, attrMap, allAttrsNotDecode, aliasMap)
          }
        }
        val orderSpec = wd.orderSpec.map { exp =>
          exp.transform {
            case attr: AttributeReference =>
              updateDataType(attr, attrMap, allAttrsNotDecode, aliasMap)
          }
        }.asInstanceOf[Seq[SortOrder]]
        Window(prExps, wdExps, partitionSpec, orderSpec, wd.child)
      case l: LogicalRelation if l.relation.isInstanceOf[CarbonDatasourceRelation] =>
        // Reaching a leaf relation pops the marker state pushed at the join.
        allAttrsNotDecode = marker.revokeJoin()
        l
      case others => others
    }
  }

  /**
   * Pulls projects/filters through decoders where safe, then drops decoders
   * whose include profile is empty (they would decode nothing).
   */
  private def updateProjection(plan: LogicalPlan): LogicalPlan = {
    val transFormedPlan = plan transform {
      case p@Project(projectList: Seq[NamedExpression], cd: CarbonDictionaryCatalystDecoder) =>
        if (cd.child.isInstanceOf[Filter] || cd.child.isInstanceOf[LogicalRelation]) {
          Project(projectList: Seq[NamedExpression], cd.child)
        } else {
          p
        }
      case f@Filter(condition: Expression, cd: CarbonDictionaryCatalystDecoder) =>
        if (cd.child.isInstanceOf[Project] || cd.child.isInstanceOf[LogicalRelation]) {
          Filter(condition, cd.child)
        } else {
          f
        }
    }
    // Remove unnecessary decoders
    val finalPlan = transFormedPlan transform {
      case CarbonDictionaryCatalystDecoder(_, profile, _, false, child)
        if profile.isInstanceOf[IncludeProfile] && profile.isEmpty =>
        child
    }
    finalPlan
  }

  /**
   * Records alias -> underlying attribute mappings for the whole plan, plus
   * mappings from Expand outputs to the attributes they project.
   */
  private def collectInformationOnAttributes(plan: LogicalPlan,
      aliasMap: CarbonAliasDecoderRelation) {
    plan transformAllExpressions {
      case a@Alias(exp, name) =>
        exp match {
          case attr: Attribute => aliasMap.put(a.toAttribute, attr)
          // Non-attribute alias targets are mapped to a dummy reference so
          // lookups terminate rather than chase a computed expression.
          case _ => aliasMap.put(a.toAttribute, new AttributeReference("", StringType)())
        }
        a
    }
    // collect the output of expand and add projections attributes as alias to it.
    plan.collect {
      case expand: Expand =>
        expand.projections.foreach {s =>
          s.zipWithIndex.foreach { f =>
            f._1 match {
              case attr: AttributeReference =>
                aliasMap.put(expand.output(f._2).toAttribute, attr)
              case a@Alias(attr: AttributeReference, name) =>
                aliasMap.put(expand.output(f._2).toAttribute, attr)
              case others =>
            }
          }
        }
    }
  }

  // Collect aggregates on dimensions so that we can add decoder to it.
  private def collectDimensionAggregates(aggExp: AggregateExpression,
      attrsOndimAggs: util.HashSet[AttributeReferenceWrapper],
      aliasMap: CarbonAliasDecoderRelation,
      attrMap: java.util.HashMap[AttributeReferenceWrapper, CarbonDecoderRelation]) {
    aggExp collect {
      case attr: AttributeReference if isDictionaryEncoded(attr, attrMap, aliasMap) =>
        attrsOndimAggs.add(AttributeReferenceWrapper(aliasMap.getOrElse(attr, attr)))
    }
  }

  /**
   * Update the attribute datatype with [IntegerType] if the carbon column is encoded with
   * dictionary.
   *
   */
  private def updateDataType(attr: Attribute,
      attrMap: java.util.HashMap[AttributeReferenceWrapper, CarbonDecoderRelation],
      allAttrsNotDecode: java.util.Set[AttributeReferenceWrapper],
      aliasMap: CarbonAliasDecoderRelation): Attribute = {
    val uAttr = aliasMap.getOrElse(attr, attr)
    val relation = Option(attrMap.get(AttributeReferenceWrapper(uAttr)))
    if (relation.isDefined) {
      relation.get.dictionaryMap.get(uAttr.name) match {
        case Some(true)
          if !allAttrsNotDecode.contains(AttributeReferenceWrapper(uAttr)) =>
          // Encoded and not excluded: the column carries its dictionary key,
          // so the attribute's type becomes IntegerType (exprId preserved).
          val newAttr = AttributeReference(attr.name,
            IntegerType,
            attr.nullable,
            attr.metadata)(attr.exprId, attr.qualifiers)
          newAttr
        case _ => attr
      }
    } else {
      attr
    }
  }

  /** True when the (alias-resolved) attribute is a dictionary-encoded Carbon column. */
  private def isDictionaryEncoded(attr: Attribute,
      attrMap: java.util.HashMap[AttributeReferenceWrapper, CarbonDecoderRelation],
      aliasMap: CarbonAliasDecoderRelation): Boolean = {
    val uAttr = aliasMap.getOrElse(attr, attr)
    val relation = Option(attrMap.get(AttributeReferenceWrapper(uAttr)))
    if (relation.isDefined) {
      relation.get.dictionaryMap.get(uAttr.name) match {
        case Some(true) => true
        case _ => false
      }
    } else {
      false
    }
  }

  /** True when some relation under `plan` produces `attr`. */
  def qualifierPresence(plan: LogicalPlan, attr: Attribute): Boolean = {
    var present = false
    plan collect {
      case l: LogicalRelation if l.attributeMap.contains(attr) =>
        present = true
    }
    present
  }
}
/**
 * Pairs a Carbon datasource relation with the attributes it produces, plus
 * any extra attributes registered after construction.
 */
case class CarbonDecoderRelation(
    attributeMap: AttributeMap[AttributeReference],
    carbonRelation: CarbonDatasourceRelation) {

  val extraAttrs = new ArrayBuffer[Attribute]()

  /** Registers an additional attribute as belonging to this relation. */
  def addAttribute(attr: Attribute): Unit = {
    extraAttrs += attr
  }

  /** True when `attr` matches (case-insensitive name + exprId) any known attribute. */
  def contains(attr: Attribute): Boolean = {
    def sameAttribute(candidate: Attribute): Boolean =
      candidate.name.equalsIgnoreCase(attr.name) && candidate.exprId.equals(attr.exprId)

    attributeMap.exists(entry => sameAttribute(entry._1)) ||
      extraAttrs.exists(candidate => sameAttribute(candidate))
  }

  /** Indexes every attribute of this relation into the shared lookup map. */
  def fillAttributeMap(attrMap: java.util.HashMap[AttributeReferenceWrapper,
      CarbonDecoderRelation]): Unit = {
    attributeMap.foreach { entry =>
      attrMap.put(AttributeReferenceWrapper(entry._1), this)
    }
  }

  /** Column name -> dictionary-encoded flag, from the table metadata. */
  lazy val dictionaryMap = carbonRelation.carbonRelation.metaData.dictionaryMap
}
/**
 * Maps alias attributes to the attributes they stand for. Lookups follow
 * alias chains until a self-mapping (or missing entry) terminates them.
 */
case class CarbonAliasDecoderRelation() {

  val attrMap = new java.util.HashMap[AttributeReferenceWrapper, Attribute]

  /** Records that `key` is an alias for `value`. */
  def put(key: Attribute, value: Attribute): Unit = {
    attrMap.put(AttributeReferenceWrapper(key), value)
  }

  /**
   * Resolves `key` through the alias chain, returning `default` when the key
   * has no mapping. A self-referential entry ends the chain.
   */
  def getOrElse(key: Attribute, default: Attribute): Attribute = {
    Option(attrMap.get(AttributeReferenceWrapper(key))) match {
      case None =>
        default
      case Some(resolved) if resolved.equals(key) =>
        resolved
      case Some(resolved) =>
        getOrElse(resolved, resolved)
    }
  }
}
| bill1208/incubator-carbondata | integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonOptimizer.scala | Scala | apache-2.0 | 29,576 |
/*
* Copyright (c) 2012, The Broad Institute
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
package org.broadinstitute.sting.queue.engine
import org.broadinstitute.sting.queue.function.QFunction
import java.io.{StringWriter, PrintWriter}
import org.broadinstitute.sting.queue.util.Logging
import org.broadinstitute.sting.utils.io.IOUtils
import org.apache.commons.io.FileUtils
import org.apache.commons.lang.StringUtils
/**
* An edge in the QGraph that runs a QFunction.
* The edge is created first to determine inter-node dependencies,
* and then the runner is specified later when the time comes to
* execute the function in the edge.
*/
class FunctionEdge(val function: QFunction, val inputs: QNode, val outputs: QNode) extends QEdge with Logging {
var runner: JobRunner[_] =_
/**
* The number of times this edge has been run.
*/
var retries = 0
/**
* The depth of this edge in the graph.
*/
var depth = -1
val myRunInfo: JobRunInfo = JobRunInfo.default // purely for dryRun testing
/**
* When using reset status this variable tracks the old status
*/
var resetFromStatus: RunnerStatus.Value = null
/**
* Initializes with the current status of the function.
*/
private var currentStatus = {
if (function.isFail)
RunnerStatus.FAILED
else if (function.isDone)
RunnerStatus.DONE
else
RunnerStatus.PENDING
}
def start() {
try {
if (logger.isDebugEnabled) {
logger.debug("Starting: " + function.commandDirectory + " > " + function.description)
} else {
logger.info("Starting: " + function.description)
}
logger.info("Output written to " + function.jobOutputFile)
if (function.jobErrorFile != null)
logger.info("Errors written to " + function.jobErrorFile)
function.deleteLogs()
function.deleteOutputs()
function.mkOutputDirectories()
runner.init()
runner.start()
} catch {
case e =>
currentStatus = RunnerStatus.FAILED
try {
runner.cleanup()
function.failOutputs.foreach(_.createNewFile())
writeStackTrace(e)
} catch {
case _ => /* ignore errors in the exception handler */
}
logger.error("Error: " + function.description, e)
}
}
/**
* Returns the current status of the edge.
*/
def status = {
if (currentStatus == RunnerStatus.PENDING || currentStatus == RunnerStatus.RUNNING) {
if (runner != null) {
try {
currentStatus = runner.status
if (currentStatus == RunnerStatus.FAILED) {
try {
runner.cleanup()
function.failOutputs.foreach(_.createNewFile())
} catch {
case _ => /* ignore errors in the error handler */
}
logger.error("Error: " + function.description)
tailError()
} else if (currentStatus == RunnerStatus.DONE) {
try {
runner.cleanup()
function.doneOutputs.foreach(_.createNewFile())
} catch {
case _ => /* ignore errors in the done handler */
}
logger.info("Done: " + function.description)
}
} catch {
case e =>
currentStatus = RunnerStatus.FAILED
try {
runner.cleanup()
function.failOutputs.foreach(_.createNewFile())
writeStackTrace(e)
} catch {
case _ => /* ignore errors in the exception handler */
}
logger.error("Error retrieving status: " + function.description, e)
}
}
}
currentStatus
}
/**
 * Explicitly sets the status of the runner to done.
 */
def markAsDone() {
currentStatus = RunnerStatus.DONE
}
/**
 * Marks this edge as skipped as it is not needed for the current run.
 */
def markAsSkipped() {
currentStatus = RunnerStatus.SKIPPED
}
/**
 * Resets the edge to pending status.
 * Remembers the pre-reset status in resetFromStatus (only the first reset is
 * recorded), optionally deletes the function's outputs, and discards the
 * runner so a fresh one must be created before the next start().
 */
def resetToPending(cleanOutputs: Boolean) {
if (resetFromStatus == null)
resetFromStatus = currentStatus
currentStatus = RunnerStatus.PENDING
if (cleanOutputs)
function.deleteOutputs()
runner = null
}
// Delegates the short description to the wrapped function.
override def shortDescription = function.shortDescription
/**
 * Returns the path to the file to use for logging errors.
 * Falls back to the job output file when no dedicated error file is set.
 * @return the path to the file to use for logging errors.
 */
private def functionErrorFile = if (function.jobErrorFile != null) function.jobErrorFile else function.jobOutputFile
/**
 * Logs the tail of the function's error output, waiting up to two minutes
 * for the log file to become accessible before giving up.
 */
private def tailError() {
  val log = functionErrorFile
  if (!IOUtils.waitFor(log, 120)) {
    logger.error("Unable to access log file: %s".format(log))
  } else {
    val maxLines = 100
    val lines = IOUtils.tail(log, maxLines)
    val lineSeparator = "%n".format()
    val heading =
      if (lines.size > maxLines) "Last %d lines".format(maxLines)
      else "Contents"
    logger.error("%s of %s:%n%s".format(heading, log, StringUtils.join(lines, lineSeparator)))
  }
}
/**
 * Captures the function description followed by the throwable's stack trace
 * and writes the combined text to the function's error file.
 */
private def writeStackTrace(e: Throwable) {
  val buffer = new StringWriter
  val writer = new PrintWriter(buffer)
  writer.println(function.description)
  e.printStackTrace(writer)
  writer.close()
  FileUtils.writeStringToFile(functionErrorFile, buffer.toString)
}
// Returns run information from the live runner when one exists; otherwise
// falls back to myRunInfo (declared elsewhere in this class, e.g. after
// resetToPending() has nulled the runner).
def getRunInfo = {
if ( runner == null ) myRunInfo else runner.getRunInfo
}
}
| iontorrent/Torrent-Variant-Caller-stable | public/scala/src/org/broadinstitute/sting/queue/engine/FunctionEdge.scala | Scala | mit | 6,661 |
//
// Copyright (c) 2015 IronCore Labs
//
package com.ironcorelabs.davenport.tagobjects
import org.scalatest.Tag
object RequiresCouch extends Tag("com.ironcorelabs.davenport.tags.RequiresCouch")
| BobWall23/davenport | src/test/scala/com/ironcorelabs/davenport/Tags.scala | Scala | mit | 198 |
package com.twitter.server.util
import com.twitter.finagle.stats.MetricBuilder.{CounterType, GaugeType, HistogramType}
import com.twitter.finagle.stats._
import com.twitter.finagle.stats.exp.{ExpressionSchema, ExpressionSchemaKey}
import org.scalatest.funsuite.AnyFunSuite
/**
 * Tests for MetricSchemaSource: verifies latched-counter reporting, schema
 * lookup, listing, membership and key-set behavior against both populated
 * and empty schema registries.
 */
class MetricSchemaSourceTest extends AnyFunSuite {

  // Fixture schemas: a counter, a gauge and a histogram, keyed by metric name.
  private val schemaMap: Map[String, MetricBuilder] = Map(
    "my/cool/counter" ->
      MetricBuilder(
        keyIndicator = true,
        description = "Counts how many cools are seen",
        units = Requests,
        role = Server,
        verbosity = Verbosity.Default,
        sourceClass = Some("finagle.stats.cool"),
        name = Seq("my", "cool", "counter"),
        processPath = Some("dc/role/zone/service"),
        percentiles = IndexedSeq(0.5, 0.9, 0.95, 0.99, 0.999, 0.9999),
        metricType = CounterType,
        statsReceiver = null
      ),
    "your/fine/gauge" ->
      MetricBuilder(
        keyIndicator = false,
        description = "Measures how fine the downstream system is",
        units = Percentage,
        role = Client,
        verbosity = Verbosity.Debug,
        sourceClass = Some("finagle.stats.your"),
        name = Seq("your", "fine", "gauge"),
        processPath = Some("dc/your_role/zone/your_service"),
        percentiles = IndexedSeq(0.5, 0.9, 0.95, 0.99, 0.999, 0.9999),
        metricType = GaugeType,
        statsReceiver = null
      ),
    "my/only/histo" ->
      MetricBuilder(
        name = Seq("my", "only", "histo"),
        percentiles = IndexedSeq(0.5, 0.9, 0.95, 0.99, 0.999, 0.9999),
        metricType = HistogramType,
        statsReceiver = null
      )
  )

  // Registry reporting latched counters.
  private val latchedPopulatedRegistry: SchemaRegistry = new SchemaRegistry {
    def hasLatchedCounters: Boolean = true
    def schemas(): Map[String, MetricBuilder] = schemaMap
    def expressions(): Map[ExpressionSchemaKey, ExpressionSchema] = Map.empty
  }

  // Registry reporting unlatched counters.
  private val unlatchedPopulatedRegistry: SchemaRegistry = new SchemaRegistry {
    def hasLatchedCounters: Boolean = false
    def schemas(): Map[String, MetricBuilder] = schemaMap
    def expressions(): Map[ExpressionSchemaKey, ExpressionSchema] = Map.empty
  }

  private val metricSchemaSource = new MetricSchemaSource(Seq(latchedPopulatedRegistry))
  private val unlatchedMetricSchemaSource = new MetricSchemaSource(Seq(unlatchedPopulatedRegistry))
  private val emptyMetricSchemaSource = new MetricSchemaSource(Seq())

  test("hasLatchedCounters asserts if there is no SchemaRegistry") {
    assertThrows[AssertionError](emptyMetricSchemaSource.hasLatchedCounters)
  }

  test("hasLatchedCounters returns the underlying SchemaRegistry's hasLatchedCounters value") {
    assert(metricSchemaSource.hasLatchedCounters)
    assert(!unlatchedMetricSchemaSource.hasLatchedCounters)
  }

  test("getSchema returns the appropriate MetricSchema when there is one") {
    assert(metricSchemaSource.getSchema("my/cool/counter") == schemaMap.get("my/cool/counter"))
  }

  test("getSchema returns the None when absent") {
    // Idiomatic emptiness check instead of comparing an Option against None.
    assert(metricSchemaSource.getSchema("my/dull/counter").isEmpty)
  }

  test("schemaList returns the full list of MetricSchemas") {
    assert(metricSchemaSource.schemaList() == schemaMap.values.toVector)
  }

  test("schemaList returns empty list if there is no registry") {
    // Idiomatic emptiness check instead of comparing against Seq().
    assert(emptyMetricSchemaSource.schemaList().isEmpty)
  }

  test(
    "contains accurately reflect the presence or absence of a Metric from the MetricSchema map") {
    assert(metricSchemaSource.contains("my/cool/counter"))
    assert(!metricSchemaSource.contains("my/dull/counter"))
    assert(metricSchemaSource.contains("my/only/histo"))
    assert(!emptyMetricSchemaSource.contains("my/cool/counter"))
    assert(!emptyMetricSchemaSource.contains("my/dull/counter"))
    assert(!emptyMetricSchemaSource.contains("my/only/histo"))
  }

  test("keySet returns Set of Metric names (key portion of the schema map)") {
    assert(metricSchemaSource.keySet == schemaMap.keySet)
  }

  test("keySet returns empty Set if there is no registry") {
    // Idiomatic emptiness check instead of comparing against Set().
    assert(emptyMetricSchemaSource.keySet.isEmpty)
  }
}
| twitter/twitter-server | server/src/test/scala/com/twitter/server/util/MetricSchemaSourceTest.scala | Scala | apache-2.0 | 4,123 |
package de.zalando.model
import de.zalando.apifirst.Application._
import de.zalando.apifirst.Domain._
import de.zalando.apifirst.ParameterPlace
import de.zalando.apifirst.naming._
import de.zalando.apifirst.Hypermedia._
import de.zalando.apifirst.Http._
import de.zalando.apifirst.Security
import java.net.URL
import Security._
//noinspection ScalaStyle
object basic_polymorphism_yaml extends WithModel {
def types = Map[Reference, Type](
Reference("⌿definitions⌿Cat") →
AllOf(Reference("⌿definitions⌿Cat⌿Cat"), TypeMeta(Some("Schemas: 2"), List()), Seq(
TypeDef(Reference("⌿definitions⌿Pet"),
Seq(
Field(Reference("⌿definitions⌿Pet⌿name"), Str(None, TypeMeta(None, List()))),
Field(Reference("⌿definitions⌿Pet⌿petType"), Str(None, TypeMeta(None, List())))
), TypeMeta(Some("Named types: 2"), List())),
TypeDef(Reference("⌿definitions⌿Cat"),
Seq(
Field(Reference("⌿definitions⌿Cat⌿huntingSkill"), EnumTrait(Str(None, TypeMeta(Some("The measured skill for hunting"), List("""enum("clueless,lazy,adventurous,aggressive")"""))), TypeMeta(Some("Enum type : 4"), List()),
Set(
EnumObject(Str(None, TypeMeta(Some("The measured skill for hunting"), List("""enum("clueless,lazy,adventurous,aggressive")"""))), "clueless", TypeMeta(Some("clueless"), List())),
EnumObject(Str(None, TypeMeta(Some("The measured skill for hunting"), List("""enum("clueless,lazy,adventurous,aggressive")"""))), "lazy", TypeMeta(Some("lazy"), List())),
EnumObject(Str(None, TypeMeta(Some("The measured skill for hunting"), List("""enum("clueless,lazy,adventurous,aggressive")"""))), "adventurous", TypeMeta(Some("adventurous"), List())),
EnumObject(Str(None, TypeMeta(Some("The measured skill for hunting"), List("""enum("clueless,lazy,adventurous,aggressive")"""))), "aggressive", TypeMeta(Some("aggressive"), List()))
)))
), TypeMeta(Some("Named types: 1"), List()))) , Some(Reference("⌿definitions⌿Pet⌿petType"))),
Reference("⌿definitions⌿Dog") →
AllOf(Reference("⌿definitions⌿Dog⌿Dog"), TypeMeta(Some("Schemas: 2"), List()), Seq(
TypeDef(Reference("⌿definitions⌿Pet"),
Seq(
Field(Reference("⌿definitions⌿Pet⌿name"), Str(None, TypeMeta(None, List()))),
Field(Reference("⌿definitions⌿Pet⌿petType"), Str(None, TypeMeta(None, List())))
), TypeMeta(Some("Named types: 2"), List())),
TypeDef(Reference("⌿definitions⌿Dog"),
Seq(
Field(Reference("⌿definitions⌿Dog⌿packSize"), Intgr(TypeMeta(Some("the size of the pack the dog is from"), List("min(0.toInt, false)"))))
), TypeMeta(Some("Named types: 1"), List()))) , Some(Reference("⌿definitions⌿Pet⌿petType"))),
Reference("⌿definitions⌿CatNDog") →
AllOf(Reference("⌿definitions⌿CatNDog⌿CatNDog"), TypeMeta(Some("Schemas: 2"), List()), Seq(
AllOf(Reference("⌿definitions⌿CatNDog⌿CatNDog"), TypeMeta(Some("Schemas: 2"), List()), Seq(
TypeDef(Reference("⌿definitions⌿Pet"),
Seq(
Field(Reference("⌿definitions⌿Pet⌿name"), Str(None, TypeMeta(None, List()))),
Field(Reference("⌿definitions⌿Pet⌿petType"), Str(None, TypeMeta(None, List())))
), TypeMeta(Some("Named types: 2"), List())),
TypeDef(Reference("⌿definitions⌿CatNDog"),
Seq(
Field(Reference("⌿definitions⌿CatNDog⌿packSize"), Intgr(TypeMeta(Some("the size of the pack the dog is from"), List("min(0.toInt, false)"))))
), TypeMeta(Some("Named types: 1"), List()))) , Some(Reference("⌿definitions⌿Pet⌿petType"))),
AllOf(Reference("⌿definitions⌿CatNDog⌿CatNDog"), TypeMeta(Some("Schemas: 2"), List()), Seq(
TypeDef(Reference("⌿definitions⌿Pet"),
Seq(
Field(Reference("⌿definitions⌿Pet⌿name"), Str(None, TypeMeta(None, List()))),
Field(Reference("⌿definitions⌿Pet⌿petType"), Str(None, TypeMeta(None, List())))
), TypeMeta(Some("Named types: 2"), List())),
TypeDef(Reference("⌿definitions⌿CatNDog"),
Seq(
Field(Reference("⌿definitions⌿CatNDog⌿huntingSkill"), EnumTrait(Str(None, TypeMeta(Some("The measured skill for hunting"), List("""enum("clueless,lazy,adventurous,aggressive")"""))), TypeMeta(Some("Enum type : 4"), List()),
Set(
EnumObject(Str(None, TypeMeta(Some("The measured skill for hunting"), List("""enum("clueless,lazy,adventurous,aggressive")"""))), "clueless", TypeMeta(Some("clueless"), List())),
EnumObject(Str(None, TypeMeta(Some("The measured skill for hunting"), List("""enum("clueless,lazy,adventurous,aggressive")"""))), "lazy", TypeMeta(Some("lazy"), List())),
EnumObject(Str(None, TypeMeta(Some("The measured skill for hunting"), List("""enum("clueless,lazy,adventurous,aggressive")"""))), "adventurous", TypeMeta(Some("adventurous"), List())),
EnumObject(Str(None, TypeMeta(Some("The measured skill for hunting"), List("""enum("clueless,lazy,adventurous,aggressive")"""))), "aggressive", TypeMeta(Some("aggressive"), List()))
)))
), TypeMeta(Some("Named types: 1"), List()))) , Some(Reference("⌿definitions⌿Pet⌿petType")))) , Some(Reference("⌿definitions⌿Pet⌿petType"))),
Reference("⌿definitions⌿Pet") →
TypeDef(Reference("⌿definitions⌿Pet"),
Seq(
Field(Reference("⌿definitions⌿Pet⌿name"), Str(None, TypeMeta(None, List()))),
Field(Reference("⌿definitions⌿Pet⌿petType"), Str(None, TypeMeta(None, List())))
), TypeMeta(Some("Named types: 2"), List())),
Reference("⌿definitions⌿Labrador") →
AllOf(Reference("⌿definitions⌿Labrador⌿Labrador"), TypeMeta(Some("Schemas: 2"), List()), Seq(
AllOf(Reference("⌿definitions⌿Labrador⌿Labrador"), TypeMeta(Some("Schemas: 2"), List()), Seq(
TypeDef(Reference("⌿definitions⌿Pet"),
Seq(
Field(Reference("⌿definitions⌿Pet⌿name"), Str(None, TypeMeta(None, List()))),
Field(Reference("⌿definitions⌿Pet⌿petType"), Str(None, TypeMeta(None, List())))
), TypeMeta(Some("Named types: 2"), List())),
TypeDef(Reference("⌿definitions⌿Labrador"),
Seq(
Field(Reference("⌿definitions⌿Labrador⌿packSize"), Intgr(TypeMeta(Some("the size of the pack the dog is from"), List("min(0.toInt, false)"))))
), TypeMeta(Some("Named types: 1"), List()))) , Some(Reference("⌿definitions⌿Pet⌿petType"))),
TypeDef(Reference("⌿definitions⌿Labrador"),
Seq(
Field(Reference("⌿definitions⌿Labrador⌿cuteness"), Intgr(TypeMeta(Some("the cuteness of the animal in percent"), List("min(0.toInt, false)"))))
), TypeMeta(Some("Named types: 1"), List()))) , Some(Reference("⌿definitions⌿Pet⌿petType")))
)
def parameters = Map[ParameterRef, Parameter](
)
def basePath: String =null
def discriminators: DiscriminatorLookupTable = Map[Reference, Reference](
Reference("⌿definitions⌿CatNDog") -> Reference("⌿definitions⌿Pet⌿petType"),
Reference("⌿definitions⌿Dog") -> Reference("⌿definitions⌿Pet⌿petType"),
Reference("⌿definitions⌿Cat") -> Reference("⌿definitions⌿Pet⌿petType"),
Reference("⌿definitions⌿Labrador") -> Reference("⌿definitions⌿Pet⌿petType"),
Reference("⌿definitions⌿Pet") -> Reference("⌿definitions⌿Pet⌿petType"))
def securityDefinitions: SecurityDefinitionsTable = Map[String, Security.Definition](
)
def stateTransitions: StateTransitionsTable = Map[State, Map[State, TransitionProperties]]()
def calls: Seq[ApiCall] = Seq()
def packageName: Option[String] = None
def model = new StrictModel(calls, types, parameters, discriminators, basePath, packageName, stateTransitions, securityDefinitions)
} | zalando/play-swagger | api-first-core/src/test/scala/model/resources.basic_polymorphism_yaml.scala | Scala | mit | 7,738 |
/**
* Copyright 2012-2013 greencheek.org (www.greencheek.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.greencheek.jms.yankeedo.structure.scenario
import concurrent.duration.Duration
import org.greencheek.jms.yankeedo.stats.{TimingServices, DefaultOutputStats, OutputStats}
/**
 * User: dominictootell
 * Date: 06/01/2013
 * Time: 15:39
 *
 * Interface for users to extend to provide their custom scenarios.
 * All mutable state is volatile, and the configuration methods return `this`
 * so calls can be chained fluently.
 */
trait ScenarioContainer {

  @volatile private var _outputStatsEnabled: Boolean = false
  @volatile private var _outputStatsOptions: Option[OutputStats] = None
  @volatile private var _totalDuration: Duration = Duration.Inf
  @volatile private var _scenarios: Seq[Scenario] = Nil
  @volatile private var _useNanoTimings: Boolean = true
  @volatile private var _recordFirstMessageTiming: Boolean = false

  /**
   * The number of defined scenarios
   * @return
   */
  final def size: Int = this.scenarios.size

  /** Sets the use-nano-timings flag; passed through to createTimingServices. */
  final def useNanoTiming(useNanoTiming: Boolean): ScenarioContainer = {
    _useNanoTimings = useNanoTiming
    this
  }

  /** Sets the record-first-message-timing flag; passed through to createTimingServices. */
  final def recordFirstMessageTiming(recordFirstMessageTiming: Boolean): ScenarioContainer = {
    _recordFirstMessageTiming = recordFirstMessageTiming
    this
  }

  /** Sets (or clears) the stats output options; stats are enabled iff an option is supplied. */
  final def outputStatsOptions(outputStatsOption: Option[OutputStats]): ScenarioContainer = {
    _outputStatsOptions = outputStatsOption
    // Idiomatic replacement for matching Some/None to produce a Boolean.
    _outputStatsEnabled = outputStatsOption.isDefined
    this
  }

  /**
   * output stats to the defaults (System.out)
   * @return
   */
  final def outputStats(): ScenarioContainer = {
    _outputStatsOptions = Some(new DefaultOutputStats)
    _outputStatsEnabled = true
    this
  }

  /**
   * The amount of time given for all scenarios to execute in, other wise the
   * app will terminate the scenarios, and shut down.
   * @param totalDuration
   */
  final def runFor(totalDuration: Duration): ScenarioContainer = {
    _totalDuration = totalDuration
    this
  }

  /**
   * Passes the list of scenarios to be run
   * @param scenariosToRun
   */
  final def withScenarios(scenariosToRun: Seq[Scenario]): ScenarioContainer = {
    _scenarios = scenariosToRun
    this
  }

  /**
   * Adds a scenario to the start of all existing scenarios to be run. The scenarios are started
   * in the order they appear in the list
   * @param scenario
   * @return
   */
  final def addScenario(scenario: Scenario): ScenarioContainer = {
    // BUG FIX: the original evaluated `scenario +: _scenarios` and discarded the
    // result — immutable Seq prepend returns a NEW sequence — so the scenario was
    // never actually added. Assign the new sequence back to the field.
    _scenarios = scenario +: _scenarios
    this
  }

  /**
   * Adds a scenario to the end of the list of existing scenarios to be run. The scenarios are
   * started in the order they appear in the list. This operation is slower than that of addScenario
   * @see http://www.scala-lang.org/docu/files/collections-api/collections_40.html
   *
   * @param scenario Scenario to add to the end of the list of scenarios
   */
  final def appendScenario(scenario: Scenario): ScenarioContainer = {
    _scenarios = _scenarios :+ scenario
    this
  }

  /** Total time budget for all scenarios (Duration.Inf by default). */
  final def totalDuration = {
    _totalDuration
  }

  /** The currently configured scenarios, in start order. */
  final def scenarios = {
    _scenarios
  }

  /** Whether statistics output has been enabled. */
  final def outputStatsEnabled: Boolean = {
    _outputStatsEnabled
  }

  /** The configured statistics output options, if any. */
  final def outputStatsOptions: Option[OutputStats] = {
    _outputStatsOptions
  }

  final def useNanoTiming: Boolean = {
    _useNanoTimings
  }

  final def recordFirstMessageTiming: Boolean = {
    _recordFirstMessageTiming
  }

  /** Builds a TimingServices configured from the two timing flags above. */
  final def createTimingServices: TimingServices = {
    new TimingServices(useNanoTiming, recordFirstMessageTiming)
  }
}
object ScenarioContainer {
// Convenience factory: wraps the supplied scenarios in a fresh anonymous
// ScenarioContainer (withScenarios returns the container itself).
def apply(scenarios : Scenario*) = {
val scenarioContainer = new Object with ScenarioContainer
scenarioContainer.withScenarios(scenarios)
}
}
| tootedom/yankeedo | yankeedo-core/src/main/scala/org/greencheek/jms/yankeedo/structure/scenario/ScenarioContainer.scala | Scala | apache-2.0 | 4,256 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTest, MLTestingUtils}
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.sql.Row
/**
 * Tests for RobustScaler / RobustScalerModel: checks centering and scaling
 * against values computed with scikit-learn's RobustScaler (see the embedded
 * Python snippets), plus NaN handling, sparse inputs, high-dimensional data,
 * and read/write persistence.
 */
class RobustScalerSuite extends MLTest with DefaultReadWriteTest {
import testImplicits._
// Input data and expected results, populated once in beforeAll().
@transient var data: Array[Vector] = _
@transient var resWithScaling: Array[Vector] = _
@transient var resWithCentering: Array[Vector] = _
@transient var resWithBoth: Array[Vector] = _
@transient var dataWithNaN: Array[Vector] = _
@transient var resWithNaN: Array[Vector] = _
@transient var highDimData: Array[Vector] = _
@transient var highDimRes: Array[Vector] = _
override def beforeAll(): Unit = {
super.beforeAll()
// median = [2.0, -2.0]
// 1st quartile = [1.0, -3.0]
// 3st quartile = [3.0, -1.0]
// quantile range = IQR = [2.0, 2.0]
data = Array(
Vectors.dense(0.0, 0.0),
Vectors.dense(1.0, -1.0),
Vectors.dense(2.0, -2.0),
Vectors.dense(3.0, -3.0),
Vectors.dense(4.0, -4.0)
)
/*
Using the following Python code to load the data and train the model using
scikit-learn package.
from sklearn.preprocessing import RobustScaler
import numpy as np
X = np.array([[0, 0], [1, -1], [2, -2], [3, -3], [4, -4]], dtype=np.float)
scaler = RobustScaler(with_centering=True, with_scaling=False).fit(X)
>>> scaler.center_
array([ 2., -2.])
>>> scaler.scale_
array([2., 2.])
>>> scaler.transform(X)
array([[-2., 2.],
[-1., 1.],
[ 0., 0.],
[ 1., -1.],
[ 2., -2.]])
*/
resWithCentering = Array(
Vectors.dense(-2.0, 2.0),
Vectors.dense(-1.0, 1.0),
Vectors.dense(0.0, 0.0),
Vectors.dense(1.0, -1.0),
Vectors.dense(2.0, -2.0)
)
/*
Python code:
scaler = RobustScaler(with_centering=False, with_scaling=True).fit(X)
>>> scaler.transform(X)
array([[ 0. , 0. ],
[ 0.5, -0.5],
[ 1. , -1. ],
[ 1.5, -1.5],
[ 2. , -2. ]])
*/
resWithScaling = Array(
Vectors.dense(0.0, 0.0),
Vectors.dense(0.5, -0.5),
Vectors.dense(1.0, -1.0),
Vectors.dense(1.5, -1.5),
Vectors.dense(2.0, -2.0)
)
/*
Python code:
scaler = RobustScaler(with_centering=True, with_scaling=True).fit(X)
>>> scaler.transform(X)
array([[-1. , 1. ],
[-0.5, 0.5],
[ 0. , 0. ],
[ 0.5, -0.5],
[ 1. , -1. ]])
*/
resWithBoth = Array(
Vectors.dense(-1.0, 1.0),
Vectors.dense(-0.5, 0.5),
Vectors.dense(0.0, 0.0),
Vectors.dense(0.5, -0.5),
Vectors.dense(1.0, -1.0)
)
// NaN entries must pass through unchanged and be excluded from the quantiles.
dataWithNaN = Array(
Vectors.dense(0.0, Double.NaN),
Vectors.dense(Double.NaN, 0.0),
Vectors.dense(1.0, -1.0),
Vectors.dense(2.0, -2.0),
Vectors.dense(3.0, -3.0),
Vectors.dense(4.0, -4.0)
)
resWithNaN = Array(
Vectors.dense(0.0, Double.NaN),
Vectors.dense(Double.NaN, 0.0),
Vectors.dense(0.5, -0.5),
Vectors.dense(1.0, -1.0),
Vectors.dense(1.5, -1.5),
Vectors.dense(2.0, -2.0)
)
// median = [2.0, ...]
// 1st quartile = [1.0, ...]
// 3st quartile = [3.0, ...]
// quantile range = IQR = [2.0, ...]
highDimData = Array(
Vectors.dense(Array.fill(2000)(0.0)),
Vectors.dense(Array.fill(2000)(1.0)),
Vectors.dense(Array.fill(2000)(2.0)),
Vectors.dense(Array.fill(2000)(3.0)),
Vectors.dense(Array.fill(2000)(4.0))
)
highDimRes = Array(
Vectors.dense(Array.fill(2000)(0.0)),
Vectors.dense(Array.fill(2000)(0.5)),
Vectors.dense(Array.fill(2000)(1.0)),
Vectors.dense(Array.fill(2000)(1.5)),
Vectors.dense(Array.fill(2000)(2.0))
)
}
// Row-wise check: transformed vector must match the expected vector within
// 1e-5 absolute tolerance.
private def assertResult: Row => Unit = {
case Row(vector1: Vector, vector2: Vector) =>
assert(vector1 ~== vector2 absTol 1E-5,
"The vector value is not correct after transformation.")
}
test("params") {
ParamsSuite.checkParams(new RobustScaler)
ParamsSuite.checkParams(new RobustScalerModel("empty",
Vectors.dense(1.0), Vectors.dense(2.0)))
}
test("Scaling with default parameter") {
val df0 = data.zip(resWithScaling).toSeq.toDF("features", "expected")
val robustScalerEst0 = new RobustScaler()
.setInputCol("features")
.setOutputCol("scaled_features")
val robustScaler0 = robustScalerEst0.fit(df0)
MLTestingUtils.checkCopyAndUids(robustScalerEst0, robustScaler0)
testTransformer[(Vector, Vector)](df0, robustScaler0, "scaled_features", "expected")(
assertResult)
}
test("Scaling with setter") {
val df1 = data.zip(resWithBoth).toSeq.toDF("features", "expected")
val df2 = data.zip(resWithCentering).toSeq.toDF("features", "expected")
val df3 = data.zip(data).toSeq.toDF("features", "expected")
val robustScaler1 = new RobustScaler()
.setInputCol("features")
.setOutputCol("scaled_features")
.setWithCentering(true)
.setWithScaling(true)
.fit(df1)
val robustScaler2 = new RobustScaler()
.setInputCol("features")
.setOutputCol("scaled_features")
.setWithCentering(true)
.setWithScaling(false)
.fit(df2)
val robustScaler3 = new RobustScaler()
.setInputCol("features")
.setOutputCol("scaled_features")
.setWithCentering(false)
.setWithScaling(false)
.fit(df3)
testTransformer[(Vector, Vector)](df1, robustScaler1, "scaled_features", "expected")(
assertResult)
testTransformer[(Vector, Vector)](df2, robustScaler2, "scaled_features", "expected")(
assertResult)
testTransformer[(Vector, Vector)](df3, robustScaler3, "scaled_features", "expected")(
assertResult)
}
test("sparse data and withCentering") {
// Alternate dense/sparse representations of the same values.
val someSparseData = data.zipWithIndex.map {
case (vec, i) => if (i % 2 == 0) vec.toSparse else vec
}
val df = someSparseData.zip(resWithCentering).toSeq.toDF("features", "expected")
val robustScaler = new RobustScaler()
.setInputCol("features")
.setOutputCol("scaled_features")
.setWithCentering(true)
.setWithScaling(false)
.fit(df)
testTransformer[(Vector, Vector)](df, robustScaler, "scaled_features", "expected")(
assertResult)
}
test("deal with NaN values") {
val df0 = dataWithNaN.zip(resWithNaN).toSeq.toDF("features", "expected")
val robustScalerEst0 = new RobustScaler()
.setInputCol("features")
.setOutputCol("scaled_features")
val robustScaler0 = robustScalerEst0.fit(df0)
MLTestingUtils.checkCopyAndUids(robustScalerEst0, robustScaler0)
testTransformer[(Vector, Vector)](df0, robustScaler0, "scaled_features", "expected")(
assertResult)
}
test("deal with high-dim dataset") {
val df0 = highDimData.zip(highDimRes).toSeq.toDF("features", "expected")
val robustScalerEst0 = new RobustScaler()
.setInputCol("features")
.setOutputCol("scaled_features")
val robustScaler0 = robustScalerEst0.fit(df0)
MLTestingUtils.checkCopyAndUids(robustScalerEst0, robustScaler0)
testTransformer[(Vector, Vector)](df0, robustScaler0, "scaled_features", "expected")(
assertResult)
}
test("RobustScaler read/write") {
val t = new RobustScaler()
.setInputCol("myInputCol")
.setOutputCol("myOutputCol")
.setWithCentering(false)
.setWithScaling(true)
testDefaultReadWrite(t)
}
test("RobustScalerModel read/write") {
val instance = new RobustScalerModel("myRobustScalerModel",
Vectors.dense(1.0, 2.0), Vectors.dense(3.0, 4.0))
val newInstance = testDefaultReadWrite(instance)
assert(newInstance.range === instance.range)
assert(newInstance.median === instance.median)
}
}
| shaneknapp/spark | mllib/src/test/scala/org/apache/spark/ml/feature/RobustScalerSuite.scala | Scala | apache-2.0 | 8,822 |
import _root_.io.gatling.core.scenario.Simulation
import ch.qos.logback.classic.{Level, LoggerContext}
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
/**
* Performance test for the News entity.
*/
class NewsGatlingTest extends Simulation {
val context: LoggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]
// Log all HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("TRACE"))
// Log failed HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("DEBUG"))
val baseURL = Option(System.getProperty("baseURL")) getOrElse """http://127.0.0.1:8080"""
val httpConf = http
.baseURL(baseURL)
.inferHtmlResources()
.acceptHeader("*/*")
.acceptEncodingHeader("gzip, deflate")
.acceptLanguageHeader("fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3")
.connection("keep-alive")
.userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:33.0) Gecko/20100101 Firefox/33.0")
val headers_http = Map(
"Accept" -> """application/json"""
)
val headers_http_authenticated = Map(
"Accept" -> """application/json""",
"X-CSRF-TOKEN" -> "${csrf_token}"
)
val scn = scenario("Test the News entity")
.exec(http("First unauthenticated request")
.get("/api/account")
.headers(headers_http)
.check(status.is(401))
.check(headerRegex("Set-Cookie", "CSRF-TOKEN=(.*); [P,p]ath=/").saveAs("csrf_token")))
.pause(10)
.exec(http("Authentication")
.post("/api/authentication")
.headers(headers_http_authenticated)
.formParam("j_username", "admin")
.formParam("j_password", "admin")
.formParam("_spring_security_remember_me", "true")
.formParam("submit", "Login"))
.pause(1)
.exec(http("Authenticated request")
.get("/api/account")
.headers(headers_http_authenticated)
.check(status.is(200))
.check(headerRegex("Set-Cookie", "CSRF-TOKEN=(.*); [P,p]ath=/").saveAs("csrf_token")))
.pause(10)
.repeat(2) {
exec(http("Get all newss")
.get("/api/newss")
.headers(headers_http_authenticated)
.check(status.is(200)))
.pause(10 seconds, 20 seconds)
.exec(http("Create new news")
.put("/api/newss")
.headers(headers_http_authenticated)
.body(StringBody("""{"id":null, "title":"SAMPLE_TEXT", "dateCreated":"2020-01-01T00:00:00.000Z", "active":null, "coverLink":"SAMPLE_TEXT", "text":"SAMPLE_TEXT"}""")).asJSON
.check(status.is(201))
.check(headerRegex("Location", "(.*)").saveAs("new_news_url")))
.pause(10)
.repeat(5) {
exec(http("Get created news")
.get("${new_news_url}")
.headers(headers_http_authenticated))
.pause(10)
}
.exec(http("Delete created news")
.delete("${new_news_url}")
.headers(headers_http_authenticated))
.pause(10)
}
val users = scenario("Users").exec(scn)
setUp(
users.inject(rampUsers(100) over (1 minutes))
).protocols(httpConf)
}
| mkorobeynikov/jhipster-radriges | src/test/gatling/simulations/NewsGatlingTest.scala | Scala | gpl-2.0 | 3,373 |
package com.sksamuel.elastic4s.testkit
import com.sksamuel.elastic4s.TcpClient
import org.scalatest.Matchers
import org.scalatest.matchers.{MatchResult, Matcher}
trait IndexMatchers extends Matchers {
import com.sksamuel.elastic4s.ElasticDsl._
import scala.concurrent.duration._
def haveCount(expectedCount: Int)
(implicit client: TcpClient,
timeout: FiniteDuration = 10.seconds): Matcher[String] = new Matcher[String] {
def apply(left: String): MatchResult = {
val count = client.execute(search(left).size(0)).await(timeout).totalHits
MatchResult(
count == expectedCount,
s"Index $left had count $count but expected $expectedCount",
s"Index $left had document count $expectedCount"
)
}
}
def containDoc(expectedId: Any)
(implicit client: TcpClient,
timeout: FiniteDuration = 10.seconds): Matcher[String] = new Matcher[String] {
override def apply(left: String): MatchResult = {
val exists = client.execute(get(expectedId).from(left)).await(timeout).exists
MatchResult(
exists,
s"Index $left did not contain expected document $expectedId",
s"Index $left contained document $expectedId"
)
}
}
def beCreated(implicit client: TcpClient,
timeout: FiniteDuration = 10.seconds): Matcher[String] = new Matcher[String] {
override def apply(left: String): MatchResult = {
val exists = client.execute(indexExists(left)).await(timeout).isExists
MatchResult(
exists,
s"Index $left did not exist",
s"Index $left exists"
)
}
}
def beEmpty(implicit client: TcpClient,
timeout: FiniteDuration = 10.seconds): Matcher[String] = new Matcher[String] {
override def apply(left: String): MatchResult = {
val count = client.execute(search(left).size(0)).await(timeout).totalHits
MatchResult(
count == 0,
s"Index $left was not empty",
s"Index $left was empty"
)
}
}
}
| FabienPennequin/elastic4s | elastic4s-testkit/src/main/scala/com/sksamuel/elastic4s/testkit/IndexMatchers.scala | Scala | apache-2.0 | 2,070 |
package debop4s.data.common.pools
import java.util
import javax.sql.DataSource
import debop4s.data.common.DataSources
import org.slf4j.LoggerFactory
/**
* DataSource 를 생성해주는 Factory입니다.
* @author Sunghyouk Bae
*/
abstract class AbstractDataSourceFactory {
protected lazy val log = LoggerFactory.getLogger(getClass)
/**
* DataSource를 생성합니다.
* @param dataSourceClassName dataSourceClassName
* ( 기존 driverClass 가 아닙니다 : mysql용은 com.mysql.jdbc.jdbc2.optional.MysqlDataSource 입니다 )
* @param driverClass jdbc driver class
* @param url jdbc url
* @param username 사용자 명
* @param passwd 사용자 패스워드
* @return [[javax.sql.DataSource]] 인스턴스
*/
def createDataSource(dataSourceClassName: String = "",
driverClass: String = "",
url: String = "jdbc:h2:mem:test",
username: String = "",
passwd: String = "",
props: util.Map[String, String] = new util.HashMap(),
maxPoolSize: Int = DataSources.MAX_POOL_SIZE): DataSource
}
| debop/debop4s | debop4s-data-common/src/main/scala/debop4s/data/common/pools/AbstractDataSourceFactory.scala | Scala | apache-2.0 | 1,209 |
/*
* Copyright © 2017 University of Texas at Arlington
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import edu.uta.diql._
import org.apache.flink.api.scala._
/**
 * Flink k-means example written in the DIQL query DSL: reads 2-D points from
 * "points.txt" (CSV "x,y" per line) and iterates 10 times, regrouping points
 * by their nearest centroid and recomputing each centroid as the group mean.
 */
object Test {
def main ( args: Array[String] ) {
val env = ExecutionEnvironment.getExecutionEnvironment
// Ask DIQL to print its query translation during macro expansion.
explain(true)
case class Point ( X: Double, Y: Double )
// Euclidean distance between two points.
def distance ( x: Point, y: Point ): Double
= Math.sqrt(Math.pow(x.X-y.X,2)+Math.pow(x.Y-y.Y,2))
q("""let points = env.readTextFile("points.txt")
.map( _.split(",") )
.map( p => Point(p(0).toDouble,p(1).toDouble) )
in repeat centroids = Array( Point(0,0), Point(10,0), Point(0,10), Point(10,10) )
step select Point( avg/x, avg/y )
from p@Point(x,y) <- points
group by k: ( select c
from c <- centroids
order by distance(c,p) ).head
limit 10
""").foreach(println)
}
}
| fegaras/DIQL | tests/flink/kmeans.scala | Scala | apache-2.0 | 1,523 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.cassandra.tools.commands
import com.beust.jcommander.Parameters
import org.locationtech.geomesa.cassandra.data.CassandraDataStore
import org.locationtech.geomesa.cassandra.tools.CassandraDataStoreCommand
import org.locationtech.geomesa.cassandra.tools.CassandraDataStoreCommand.CassandraDataStoreParams
import org.locationtech.geomesa.cassandra.tools.commands.CassandraRemoveSchemaCommand.CassandraRemoveSchemaParams
import org.locationtech.geomesa.tools.data.{RemoveSchemaCommand, RemoveSchemaParams}
/** CLI command that removes a schema (and its associated features) from a Cassandra-backed GeoMesa catalog. */
class CassandraRemoveSchemaCommand extends RemoveSchemaCommand[CassandraDataStore] with CassandraDataStoreCommand {
  override val params = new CassandraRemoveSchemaParams
}
object CassandraRemoveSchemaCommand {
  /** JCommander parameters: generic remove-schema options plus Cassandra connection options. */
  @Parameters(commandDescription = "Remove a schema and associated features from a GeoMesa catalog")
  class CassandraRemoveSchemaParams extends RemoveSchemaParams with CassandraDataStoreParams
}
| jahhulbert-ccri/geomesa | geomesa-cassandra/geomesa-cassandra-tools/src/main/scala/org/locationtech/geomesa/cassandra/tools/commands/CassandraRemoveSchemaCommand.scala | Scala | apache-2.0 | 1,412 |
package org.orbeon.oxf.xml
import org.orbeon.saxon.pattern.NodeKindTest
import org.orbeon.saxon.`type`.Type
import org.orbeon.saxon.om.{Axis, AxisIterator, EmptyIterator, NodeInfo}
import scala.annotation.tailrec
/**
* This Iterator returns a node's attributes and descendant nodes and attributes.
*
* It is based on the Saxon Navigator DescendantEnumeration, simplified and rewritten in Scala.
*/
class AttributesAndElementsIterator(start: NodeInfo, includeSelf: Boolean = true)
  extends Iterator[NodeInfo] {

  // Eagerly advance to the first node; null means the iterator is exhausted.
  // NOTE: `attributes`/`descendants`/`children` below are still JVM-default null
  // during this first findNext() call, which the null-checks in findNext() rely on.
  private var current = findNext()

  // NOTE(review): after exhaustion this returns null rather than throwing
  // NoSuchElementException; callers are expected to guard with hasNext.
  def next(): NodeInfo = {
    val result = current
    current = findNext()
    result
  }

  def hasNext: Boolean = current ne null

  // Attributes of the child currently being visited (or of `start` initially).
  private var attributes: AxisIterator = _
  // Recursive iterator over the current child's subtree.
  private var descendants: Iterator[NodeInfo] = _
  // Element children of `start`; initialized lazily on the first findNext() pass.
  private var children: AxisIterator = _

  // Advances to the next node in document order: attributes first, then the
  // subtree of the current child, then the next child of `start`.
  @tailrec
  private def findNext(): NodeInfo = {
    // Exhaust attributes if any
    if (attributes ne null) {
      val next = attributes.next().asInstanceOf[NodeInfo]
      if (next ne null)
        return next
      else
        attributes = null
    }
    // Exhaust descendants if any
    if (descendants ne null) {
      if (descendants.hasNext)
        return descendants.next()
      else
        descendants = null
    }
    // We have exhausted attributes and descendants
    if (children ne null) {
      // Move to next child
      val next = children.next().asInstanceOf[NodeInfo]
      if (next ne null) {
        attributes = next.iterateAxis(Axis.ATTRIBUTE)
        if (next.hasChildNodes)
          descendants = new AttributesAndElementsIterator(next, includeSelf = false)
        next
      } else
        null
    } else {
      // This is the start: set up start's attributes and element children,
      // then either yield `start` itself or recurse to the first real node.
      attributes = start.iterateAxis(Axis.ATTRIBUTE)
      children =
        if (start.hasChildNodes)
          start.iterateAxis(Axis.CHILD, NodeKindTest.makeNodeKindTest(Type.ELEMENT))
        else
          EmptyIterator.getInstance
      if (includeSelf)
        start
      else
        findNext()
    }
  }
}
object AttributesAndElementsIterator {
  /** Convenience factory mirroring the primary constructor. */
  def apply(start: NodeInfo, includeSelf: Boolean = true): AttributesAndElementsIterator =
    new AttributesAndElementsIterator(start, includeSelf)
}
//
// The contents of this file are subject to the Mozilla Public License Version 1.0 (the "License");
// you may not use this file except in compliance with the License. You may obtain a copy of the
// License at http://www.mozilla.org/MPL/
//
// Software distributed under the License is distributed on an "AS IS" basis,
// WITHOUT WARRANTY OF ANY KIND, either express or implied.
// See the License for the specific language governing rights and limitations under the License.
//
// The Original Code is: all this file.
//
// The Initial Developer of the Original Code is Michael H. Kay.
//
// Portions created by (your name) are Copyright (C) (your legal entity). All Rights Reserved.
//
// Contributor(s): none.
//
| orbeon/orbeon-forms | src/main/scala/org/orbeon/oxf/xml/AttributesAndElementsIterator.scala | Scala | lgpl-2.1 | 2,900 |
import sbt._
import Keys._
import sbtassembly.Plugin._
import AssemblyKeys._
import sbtrelease.ReleasePlugin._
import com.typesafe.sbt.SbtScalariform._
import ohnosequences.sbt.SbtS3Resolver.S3Resolver
import ohnosequences.sbt.SbtS3Resolver.{ s3, s3resolver }
import org.scalastyle.sbt.ScalastylePlugin.{ Settings => styleSettings }
import scalariform.formatter.preferences._
import sbtbuildinfo.Plugin._
import spray.revolver.RevolverPlugin.Revolver.{settings => revolverSettings}
object MarathonBuild extends Build {

  /** Root Marathon project: assembly, release, publishing, formatting, style and revolver settings. */
  lazy val root = Project(
    id = "marathon",
    base = file("."),
    settings = baseSettings ++
      asmSettings ++
      releaseSettings ++
      publishSettings ++
      formatSettings ++
      styleSettings ++
      revolverSettings ++
      Seq(
        libraryDependencies ++= Dependencies.root,
        parallelExecution in Test := false,
        fork in Test := true
      )
  )
    .configs(IntegrationTest)
    .settings(inConfig(IntegrationTest)(Defaults.testTasks): _*)
    // Plain `test` excludes integration-tagged tests; the integration config selects them.
    .settings(testOptions in Test := Seq(Tests.Argument("-l", "integration")))
    .settings(testOptions in IntegrationTest := Seq(Tests.Argument("-n", "integration")))

  lazy val testScalaStyle = taskKey[Unit]("testScalaStyle")

  // NOTE(review): the two Setting expressions below are evaluated in the Build object
  // body but the resulting Setting values are not added to any project's settings
  // sequence — verify they actually take effect in this sbt version.
  testScalaStyle := {
    org.scalastyle.sbt.PluginKeys.scalastyle.toTask("").value
  }

  (test in Test) <<= (test in Test) dependsOn testScalaStyle

  // "integration" extends Test so it reuses the Test classpath; selection is by ScalaTest tag.
  lazy val IntegrationTest = config("integration") extend Test

  lazy val baseSettings = Defaults.defaultSettings ++ buildInfoSettings ++ Seq (
    organization := "mesosphere",
    scalaVersion := "2.11.2",
    scalacOptions in Compile ++= Seq("-encoding", "UTF-8", "-target:jvm-1.6", "-deprecation", "-feature", "-unchecked", "-Xlog-reflective-calls", "-Xlint"),
    javacOptions in Compile ++= Seq("-encoding", "UTF-8", "-source", "1.6", "-target", "1.6", "-Xlint:unchecked", "-Xlint:deprecation"),
    // NOTE(review): plain-http resolvers — consider https mirrors if available.
    resolvers ++= Seq(
      "Mesosphere Public Repo" at "http://downloads.mesosphere.io/maven",
      "Twitter Maven2 Repository" at "http://maven.twttr.com/",
      "Spray Maven Repository" at "http://repo.spray.io/"
    ),
    // Generates mesosphere.marathon.BuildInfo with name/version/scalaVersion at compile time.
    sourceGenerators in Compile <+= buildInfo,
    fork in Test := true,
    buildInfoKeys := Seq[BuildInfoKey](name, version, scalaVersion),
    buildInfoPackage := "mesosphere.marathon"
  )

  // sbt-assembly: resolve duplicate resources when building the fat jar.
  lazy val asmSettings = assemblySettings ++ Seq(
    mergeStrategy in assembly <<= (mergeStrategy in assembly) { old =>
      {
        case "application.conf" => MergeStrategy.concat
        case "META-INF/jersey-module-version" => MergeStrategy.first
        case "log4j.properties" => MergeStrategy.concat
        case "org/apache/hadoop/yarn/util/package-info.class" => MergeStrategy.first
        case "org/apache/hadoop/yarn/factories/package-info.class" => MergeStrategy.first
        case "org/apache/hadoop/yarn/factory/providers/package-info.class" => MergeStrategy.first
        case x => old(x)
      }
    },
    // Jars excluded from the assembly (conflicting/duplicated transitive deps).
    excludedJars in assembly <<= (fullClasspath in assembly) map { cp =>
      val exclude = Set(
        "commons-beanutils-1.7.0.jar",
        "stax-api-1.0.1.jar",
        "commons-beanutils-core-1.8.0.jar",
        "servlet-api-2.5.jar",
        "jsp-api-2.1.jar"
      )
      cp filter { x => exclude(x.data.getName) }
    }
  )

  // Publish artifacts to the Mesosphere S3-backed Maven repository.
  lazy val publishSettings = S3Resolver.defaults ++ Seq(
    publishTo := Some(s3resolver.value(
      "Mesosphere Public Repo (S3)",
      s3("downloads.mesosphere.com/maven")
    ))
  )

  // Scalariform auto-formatting preferences enforced on compile.
  lazy val formatSettings = scalariformSettings ++ Seq(
    ScalariformKeys.preferences := FormattingPreferences()
      .setPreference(IndentWithTabs, false)
      .setPreference(IndentSpaces, 2)
      .setPreference(AlignParameters, true)
      .setPreference(DoubleIndentClassDeclaration, true)
      .setPreference(MultilineScaladocCommentsStartOnFirstLine, false)
      .setPreference(PlaceScaladocAsterisksBeneathSecondAsterisk, true)
      .setPreference(PreserveDanglingCloseParenthesis, true)
      .setPreference(CompactControlReadability, true) //MV: should be false!
      .setPreference(AlignSingleLineCaseStatements, true)
      .setPreference(PreserveSpaceBeforeArguments, true)
      .setPreference(SpaceBeforeColon, false)
      .setPreference(SpaceInsideBrackets, false)
      .setPreference(SpaceInsideParentheses, false)
      .setPreference(SpacesWithinPatternBinders, true)
      .setPreference(FormatXml, true)
  )
}
object Dependencies {
  import Dependency._

  /** Compile- and test-scope dependencies of the root project. */
  val root = Seq(
    // runtime
    akkaActor % "compile",
    akkaSlf4j % "compile",
    sprayClient % "compile",
    sprayHttpx % "compile",
    json4s % "compile",
    chaos % "compile",
    mesosUtils % "compile",
    jacksonCaseClass % "compile",
    twitterCommons % "compile",
    twitterZkClient % "compile",
    jodaTime % "compile",
    jodaConvert % "compile",
    jerseyServlet % "compile",
    jerseyMultiPart % "compile",
    uuidGenerator % "compile",
    jGraphT % "compile",
    hadoopHdfs % "compile",
    hadoopCommon % "compile",
    beanUtils % "compile",
    scallop % "compile",

    // test
    Test.scalatest % "test",
    Test.mockito % "test",
    Test.akkaTestKit % "test"
  )
}
object Dependency {
  // Version numbers are centralized here so related artifacts stay in lock-step.
  object V {
    // runtime deps versions
    val Chaos = "0.6.1"
    val JacksonCCM = "0.1.2"
    val MesosUtils = "0.20.1-1"
    val Akka = "2.3.6"
    val Spray = "1.3.1"
    val Json4s = "3.2.10"
    val TwitterCommons = "0.0.60"
    val TwitterZKClient = "0.0.60"
    val Jersey = "1.18.1"
    val JodaTime = "2.3"
    val JodaConvert = "1.6"
    val UUIDGenerator = "3.1.3"
    val JGraphT = "0.9.1"
    val Hadoop = "2.4.1"
    val Scallop = "0.9.5"

    // test deps versions
    val Mockito = "1.9.5"
    val ScalaTest = "2.1.7"
  }

  // Hadoop pulls in old Jetty/servlet jars that clash with the app server; exclude them.
  val excludeMortbayJetty = ExclusionRule(organization = "org.mortbay.jetty")
  val excludeJavaxServlet = ExclusionRule(organization = "javax.servlet")

  val akkaActor = "com.typesafe.akka" %% "akka-actor" % V.Akka
  val akkaSlf4j = "com.typesafe.akka" %% "akka-slf4j" % V.Akka
  val sprayClient = "io.spray" % "spray-client" % V.Spray
  val sprayHttpx = "io.spray" % "spray-httpx" % V.Spray
  val json4s = "org.json4s" %% "json4s-jackson" % V.Json4s
  val chaos = "mesosphere" %% "chaos" % V.Chaos
  val mesosUtils = "mesosphere" %% "mesos-utils" % V.MesosUtils
  val jacksonCaseClass = "mesosphere" %% "jackson-case-class-module" % V.JacksonCCM
  val jerseyServlet = "com.sun.jersey" % "jersey-servlet" % V.Jersey
  val jerseyMultiPart = "com.sun.jersey.contribs" % "jersey-multipart" % V.Jersey
  val jodaTime = "joda-time" % "joda-time" % V.JodaTime
  val jodaConvert = "org.joda" % "joda-convert" % V.JodaConvert
  val twitterCommons = "com.twitter.common.zookeeper" % "candidate" % V.TwitterCommons
  val twitterZkClient = "com.twitter.common.zookeeper" % "client" % V.TwitterZKClient
  val uuidGenerator = "com.fasterxml.uuid" % "java-uuid-generator" % V.UUIDGenerator
  val jGraphT = "org.javabits.jgrapht" % "jgrapht-core" % V.JGraphT
  val hadoopHdfs = "org.apache.hadoop" % "hadoop-hdfs" % V.Hadoop excludeAll(excludeMortbayJetty, excludeJavaxServlet)
  val hadoopCommon = "org.apache.hadoop" % "hadoop-common" % V.Hadoop excludeAll(excludeMortbayJetty, excludeJavaxServlet)
  val beanUtils = "commons-beanutils" % "commons-beanutils" % "1.9.2"
  val scallop = "org.rogach" %% "scallop" % V.Scallop

  object Test {
    val scalatest = "org.scalatest" %% "scalatest" % V.ScalaTest
    val mockito = "org.mockito" % "mockito-all" % V.Mockito
    val akkaTestKit = "com.typesafe.akka" %% "akka-testkit" % V.Akka
  }
}
| EvanKrall/marathon | project/build.scala | Scala | apache-2.0 | 7,814 |
/**
* Copyright 2012-2014 Jorge Aliss (jaliss at gmail dot com) - twitter: @jaliss
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package securesocial.core.providers.utils
import securesocial.core.SocialUser
import play.api.{ Play, Logger }
import Play.current
import play.api.libs.concurrent.Akka
import play.api.mvc.RequestHeader
import play.api.i18n.Messages
import play.twirl.api.{ Html, Txt }
import org.apache.commons.mail.{ DefaultAuthenticator, SimpleEmail, MultiPartEmail, EmailAttachment }
import java.io.File
import javax.mail.internet.InternetAddress
import service.PGP
/**
* A helper class to send email notifications
*/
object Mailer {
  // Sender address; fails fast at class-load time if smtp.from is not configured.
  val fromAddress = current.configuration.getString("smtp.from").get

  // Message keys for the localized subject lines.
  val WithdrawalConfirmSubject = "mails.sendWithdrawalConfirmEmail.subject"
  val AlreadyRegisteredSubject = "mails.sendAlreadyRegisteredEmail.subject"
  val SignUpEmailSubject = "mails.sendSignUpEmail.subject"
  val WelcomeEmailSubject = "mails.welcomeEmail.subject"
  val PasswordResetSubject = "mails.passwordResetEmail.subject"
  val PasswordResetOkSubject = "mails.passwordResetOk.subject"

  /** Emails confirm/reject links for a pending withdrawal; body is PGP-encrypted when a key is given. */
  def sendWithdrawalConfirmEmail(email: String, amount: String, currency: String, destination: String, id: Long, token: String, pgp: Option[String])(implicit messages: Messages) {
    val url_confirm = "%s/%s/%s".format(current.configuration.getString("url.withdrawal_confirm").getOrElse("http://localhost:9000/withdrawal_confirm"), id, token)
    val url_reject = "%s/%s/%s".format(current.configuration.getString("url.withdrawal_reject").getOrElse("http://localhost:9000/withdrawal_reject"), id, token)
    val txtAndHtml = (Some(views.txt.auth.mails.withdrawalConfirmEmail(email, amount, currency, destination, id, token, url_confirm, url_reject)), None)
    sendEmail(Messages(WithdrawalConfirmSubject), email, txtAndHtml, pgp)
  }

  /** Ops notification asking to top up a hot wallet that fell below its target balance. */
  def sendRefillWalletEmail(email: String, currency: String, nodeId: Int, balance: BigDecimal, balanceTarget: BigDecimal)(implicit messages: Messages) {
    val txtAndHtml = (Some(views.txt.auth.mails.refillWalletEmail(email, currency, nodeId, balance, balanceTarget)), None)
    sendEmail(s"Refill $currency wallet $nodeId", email, txtAndHtml)
  }

  /** Sent when someone signs up with an address that is already registered. */
  def sendAlreadyRegisteredEmail(email: String, pgp: Option[String])(implicit messages: Messages) {
    val url = current.configuration.getString("url.passwordreset").getOrElse("http://localhost:9000/reset")
    val txtAndHtml = (Some(views.txt.auth.mails.alreadyRegisteredEmail(email, url)), None)
    sendEmail(Messages(AlreadyRegisteredSubject), email, txtAndHtml, pgp)
  }

  /** Sends the sign-up confirmation link containing the one-time token. */
  def sendSignUpEmail(to: String, token: String)(implicit messages: Messages) {
    val url = current.configuration.getString("url.signup").getOrElse("http://localhost:9000/signup") + "/" + token
    val txtAndHtml = (Some(views.txt.auth.mails.signUpEmail(token, url)), None)
    sendEmail(Messages(SignUpEmailSubject), to, txtAndHtml)
  }

  /** Welcome email after a completed registration. */
  def sendWelcomeEmail(user: SocialUser)(implicit request: RequestHeader, messages: Messages) {
    val txtAndHtml = (Some(views.txt.auth.mails.welcomeEmail(user)), None)
    sendEmail(Messages(WelcomeEmailSubject), user.email, txtAndHtml, user.pgp)
  }

  /** Sends the password-reset link containing the one-time token. */
  def sendPasswordResetEmail(email: String, token: String, pgp: Option[String])(implicit messages: Messages) {
    val url = current.configuration.getString("url.passwordreset").getOrElse("http://localhost:9000/reset") + "/" + token
    val txtAndHtml = (Some(views.txt.auth.mails.passwordResetEmail(email, url)), None)
    sendEmail(Messages(PasswordResetSubject), email, txtAndHtml, pgp)
  }

  /** Notifies the user that their password was changed. */
  def sendPasswordChangedNotice(email: String, pgp: Option[String])(implicit request: RequestHeader, messages: Messages) {
    val txtAndHtml = (Some(views.txt.auth.mails.passwordChangedNotice(email)), None)
    sendEmail(Messages(PasswordResetOkSubject), email, txtAndHtml, pgp)
  }

  // Fire-and-forget SMTP send, scheduled 1s in the future on the default Akka dispatcher.
  // Only the text part of `body` is used; it is PGP-encrypted when `pgp` carries a key.
  private def sendEmail(subject: String, recipient: String, body: (Option[Txt], Option[Html]), pgp: Option[String] = None) {
    import scala.concurrent.duration._
    import play.api.libs.concurrent.Execution.Implicits._

    if (Logger.isDebugEnabled) {
      Logger.debug("[securesocial] sending email to %s".format(recipient))
    }

    val strBody = pgp match {
      case Some(pgp_key) => PGP.simple_encrypt(pgp_key, body._1.get.toString())
      case None => body._1.get.toString()
    }

    Akka.system.scheduler.scheduleOnce(1.seconds) {
      val smtpHost = Play.current.configuration.getString("smtp.host").getOrElse(throw new RuntimeException("smtp.host needs to be set in application.conf in order to use this plugin (or set smtp.mock to true)"))
      val smtpPort = Play.current.configuration.getInt("smtp.port").getOrElse(25)
      val smtpSsl = Play.current.configuration.getBoolean("smtp.ssl").getOrElse(false)
      val smtpUser = Play.current.configuration.getString("smtp.user").get
      val smtpPassword = Play.current.configuration.getString("smtp.password").get
      val smtpLocalhost = current.configuration.getString("smtp.localhost").get
      val email = new SimpleEmail()
      email.setMsg(strBody)
      // NOTE(review): setHostName is called twice — this value is overwritten by
      // setHostName(smtpHost) below; verify the first call is intentional.
      email.setHostName(smtpLocalhost)
      //TODO: move this somewhere better
      System.setProperty("mail.smtp.localhost", current.configuration.getString("smtp.localhost").get)
      // NOTE(review): charset is set after setMsg — confirm commons-email still applies
      // it when the MimeMessage is built at send().
      email.setCharset("utf-8")
      email.setSubject(subject)
      setAddress(fromAddress) { (address, name) => email.setFrom(address, name) }
      email.addTo(recipient, null)
      email.setHostName(smtpHost)
      email.setSmtpPort(smtpPort)
      email.setSSLOnConnect(smtpSsl)
      email.setAuthentication(smtpUser, smtpPassword)
      try {
        email.send
      } catch {
        case ex: Throwable => {
          // important: Print the bodies of emails in logs only if dealing with fake money
          if (Play.current.configuration.getBoolean("fakeexchange").get) {
            Logger.debug("Failed to send email to %s, subject: %s, email body:\\n%s".format(recipient, subject, strBody))
          }
          throw ex
        }
      }
    }
  }

  /**
   * Extracts an email address from the given string and passes to the enclosed method.
   * https://github.com/typesafehub/play-plugins/blob/master/mailer/src/main/scala/com/typesafe/plugin/MailerPlugin.scala
   *
   * @param emailAddress
   * @param setter
   */
  private def setAddress(emailAddress: String)(setter: (String, String) => Unit) = {
    if (emailAddress != null) {
      try {
        val iAddress = new InternetAddress(emailAddress)
        val address = iAddress.getAddress
        val name = iAddress.getPersonal
        setter(address, name)
      } catch {
        // Fall back to using the raw string as the address with no display name.
        case e: Exception =>
          setter(emailAddress, null)
      }
    }
  }

  // XXX: currently not used
  // Multipart variant with one attachment; deletes the attachment file after sending.
  def sendEmailWithFile(subject: String, recipient: String, body: String, attachment: EmailAttachment) {
    import scala.concurrent.duration._
    import play.api.libs.concurrent.Execution.Implicits._

    if (Logger.isDebugEnabled) {
      Logger.debug("[securesocial] sending email to %s".format(recipient))
      Logger.debug("[securesocial] mail = [%s]".format(body))
    }

    Akka.system.scheduler.scheduleOnce(1.seconds) {
      // we can't use the plugin easily with multipart emails
      val email = new MultiPartEmail
      email.setHostName(current.configuration.getString("smtp.host").get)
      //TODO: move this somewhere better
      System.setProperty("mail.smtp.localhost", current.configuration.getString("smtp.localhost").get)
      email.attach(attachment)
      email.setSubject(subject)
      email.addTo(recipient)
      email.setBoolHasAttachments(true)
      email.setSmtpPort(current.configuration.getInt("smtp.port").getOrElse(25))
      email.setSSLOnConnect(current.configuration.getBoolean("smtp.ssl").get)
      email.setAuthentication(current.configuration.getString("smtp.user").get, current.configuration.getString("smtp.password").get)
      setAddress(fromAddress) { (address, name) => email.setFrom(address, name) }
      email.setMsg(body)
      email.send()
      new File(attachment.getPath).delete()
    }
  }
}
| txbits/txbits | txbits/app/securesocial/core/providers/utils/Mailer.scala | Scala | agpl-3.0 | 8,617 |
package sangria.schema
import java.text.SimpleDateFormat
import java.util.Date
import sangria.ast
import sangria.util.Pos
import sangria.util.SimpleGraphQlSupport._
import sangria.validation.ValueCoercionViolation
import scala.util.{Failure, Success, Try}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
class CustomScalarSpec extends AnyWordSpec with Matchers {
  "Schema" should {
    "allow to define custom scalar types" in {
      val dateFormat = new SimpleDateFormat("yyyy-MM-dd")

      case object DateCoercionViolation extends ValueCoercionViolation("Date value expected")

      // Parses "yyyy-MM-dd"; any parse failure is surfaced as a coercion violation.
      def parseDate(s: String) = Try(dateFormat.parse(s)) match {
        case Success(d) => Right(d)
        case Failure(error) => Left(DateCoercionViolation)
      }

      // Custom Date scalar: serializes via dateFormat, accepts only string inputs.
      val DateType = ScalarType[Date](
        "Date",
        description = Some("An example of date scalar type"),
        coerceOutput = (d, _) => dateFormat.format(d),
        coerceUserInput = {
          case s: String => parseDate(s)
          case _ => Left(DateCoercionViolation)
        },
        coerceInput = {
          case ast.StringValue(s, _, _, _, _) => parseDate(s)
          case _ => Left(DateCoercionViolation)
        }
      )

      val DateArg = Argument("dateInput", DateType)

      val QueryType = ObjectType(
        "Query",
        fields[Unit, Unit](
          Field(
            "foo",
            DateType,
            arguments = DateArg :: Nil,
            // Adds 5 days (in milliseconds) to the supplied date.
            resolve = ctx => {
              val date: Date = ctx.arg(DateArg)
              new Date(date.getTime + 1000 * 60 * 60 * 24 * 5)
            })
        )
      )

      val schema = Schema(QueryType)

      // Valid date: 2015-05-11 + 5 days = 2015-05-16.
      check(
        schema,
        (),
        """
          {
            foo(dateInput: "2015-05-11")
          }
        """,
        Map("data" -> Map("foo" -> "2015-05-16")))

      // Invalid date: expect a coercion violation at the argument position.
      // NOTE(review): Pos(3, 28) depends on the exact whitespace inside the query
      // string below — keep the indentation unchanged.
      checkContainsErrors(
        schema,
        (),
        """
          {
            foo(dateInput: "2015-05-test")
          }
        """,
        null,
        List(
          """Expected type 'Date!', found '"2015-05-test"'. Date value expected""" -> List(
            Pos(3, 28)))
      )
    }
  }
}
| OlegIlyenko/sangria | modules/core/src/test/scala/sangria/schema/CustomScalarSpec.scala | Scala | apache-2.0 | 2,190 |
package com.benoj.janus.resources
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.{Directives, Route}
import akka.stream.Materializer
import scala.concurrent.ExecutionContext
object TaskResource extends Directives {

  /**
   * Routes under "tasks" for the given project and story.
   * Both the collection endpoint and the per-task endpoint are stubs
   * that currently answer 501 Not Implemented.
   */
  def route(projectId: String, storyId: String)(implicit mat: Materializer, ec: ExecutionContext): Route = {
    val notImplemented = complete(StatusCodes.NotImplemented)
    // GET/... on "tasks" itself.
    val collectionRoute: Route = pathEnd(notImplemented)
    // "tasks/{taskId}" — the id segment is matched but not yet used.
    val itemRoute: Route = pathPrefix(Segment) { _ =>
      pathEnd(notImplemented)
    }
    pathPrefix("tasks") {
      collectionRoute ~ itemRoute
    }
  }
}
| benoj/janus | rest-api/src/main/scala/com/benoj/janus/resources/TaskResource.scala | Scala | mit | 569 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import org.scalatest.matchers.should.Matchers._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.plans.logical.{Filter, LogicalPlan, Project}
import org.apache.spark.sql.catalyst.rules.RuleExecutor
import org.apache.spark.sql.execution.{FileSourceScanExec, SparkPlan}
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import org.apache.spark.sql.execution.datasources.v2.BatchScanExec
import org.apache.spark.sql.execution.joins.BroadcastHashJoinExec
import org.apache.spark.sql.functions.broadcast
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.StructType
class PruneFileSourcePartitionsSuite extends PrunePartitionSuiteBase with SharedSparkSession {

  override def format: String = "parquet"

  // Minimal optimizer running only the rule under test.
  object Optimize extends RuleExecutor[LogicalPlan] {
    val batches = Batch("PruneFileSourcePartitions", Once, PruneFileSourcePartitions) :: Nil
  }

  test("PruneFileSourcePartitions should not change the output of LogicalRelation") {
    withTable("test") {
      spark.range(10).selectExpr("id", "id % 3 as p").write.partitionBy("p").saveAsTable("test")
      val tableMeta = spark.sharedState.externalCatalog.getTable("default", "test")
      val catalogFileIndex = new CatalogFileIndex(spark, tableMeta, 0)

      // Data schema = table schema minus partition columns.
      val dataSchema = StructType(tableMeta.schema.filterNot { f =>
        tableMeta.partitionColumnNames.contains(f.name)
      })

      val relation = HadoopFsRelation(
        location = catalogFileIndex,
        partitionSchema = tableMeta.partitionSchema,
        dataSchema = dataSchema,
        bucketSpec = None,
        fileFormat = new ParquetFileFormat(),
        options = Map.empty)(sparkSession = spark)

      val logicalRelation = LogicalRelation(relation, tableMeta)
      val query = Project(Seq(Symbol("id"), Symbol("p")),
        Filter(Symbol("p") === 1, logicalRelation)).analyze

      // After pruning, the plan must still resolve all of its attributes.
      val optimized = Optimize.execute(query)
      assert(optimized.missingInput.isEmpty)
    }
  }

  test("SPARK-20986 Reset table's statistics after PruneFileSourcePartitions rule") {
    withTable("tbl") {
      spark.range(10).selectExpr("id", "id % 3 as p").write.partitionBy("p").saveAsTable("tbl")
      sql(s"ANALYZE TABLE tbl COMPUTE STATISTICS")
      val tableStats = spark.sessionState.catalog.getTableMetadata(TableIdentifier("tbl")).stats
      assert(tableStats.isDefined && tableStats.get.sizeInBytes > 0, "tableStats is lost")

      // Before optimization: the analyzed plan still reports full-table stats.
      val df = sql("SELECT * FROM tbl WHERE p = 1")
      val sizes1 = df.queryExecution.analyzed.collect {
        case relation: LogicalRelation => relation.catalogTable.get.stats.get.sizeInBytes
      }
      assert(sizes1.size === 1, s"Size wrong for:\n ${df.queryExecution}")
      assert(sizes1(0) == tableStats.get.sizeInBytes)

      // After optimization: stats must shrink to the selected partition only.
      val relations = df.queryExecution.optimizedPlan.collect {
        case relation: LogicalRelation => relation
      }
      assert(relations.size === 1, s"Size wrong for:\n ${df.queryExecution}")
      val size2 = relations(0).stats.sizeInBytes
      assert(size2 == relations(0).catalogTable.get.stats.get.sizeInBytes)
      assert(size2 < tableStats.get.sizeInBytes)
    }
  }

  test("SPARK-26576 Broadcast hint not applied to partitioned table") {
    withTable("tbl") {
      // Disable auto-broadcast so only the explicit hint can trigger a broadcast join.
      withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
        spark.range(10).selectExpr("id", "id % 3 as p").write.partitionBy("p").saveAsTable("tbl")
        val df = spark.table("tbl")
        val qe = df.join(broadcast(df), "p").queryExecution
        qe.sparkPlan.collect { case j: BroadcastHashJoinExec => j } should have size 1
      }
    }
  }

  test("SPARK-35985 push filters for empty read schema") {
    // Force datasource v2 for parquet
    withSQLConf((SQLConf.USE_V1_SOURCE_LIST.key, "")) {
      withTempPath { dir =>
        spark.range(10).coalesce(1).selectExpr("id", "id % 3 as p")
          .write.partitionBy("p").parquet(dir.getCanonicalPath)
        withTempView("tmp") {
          spark.read.parquet(dir.getCanonicalPath).createOrReplaceTempView("tmp");
          // Both queries read no data columns, yet partition filters must still prune.
          assertPrunedPartitions("SELECT COUNT(*) FROM tmp WHERE p = 0", 1, "(tmp.p = 0)")
          assertPrunedPartitions("SELECT input_file_name() FROM tmp WHERE p = 0", 1, "(tmp.p = 0)")
        }
      }
    }
  }

  // Extracts partition filters from a v1 file scan for the shared base-suite checks.
  protected def collectPartitionFiltersFn(): PartialFunction[SparkPlan, Seq[Expression]] = {
    case scan: FileSourceScanExec => scan.partitionFilters
  }

  // Number of partitions actually selected by the (v1 or v2) scan node.
  override def getScanExecPartitionSize(plan: SparkPlan): Long = {
    plan.collectFirst {
      case p: FileSourceScanExec => p.selectedPartitions.length
      case b: BatchScanExec => b.partitions.size
    }.get
  }
}
| shaneknapp/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/PruneFileSourcePartitionsSuite.scala | Scala | apache-2.0 | 5,745 |
package common
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext
import play.api._
object AppGlobal extends GlobalSettings {

  /**
   * Consolidate Spark into a configuration object.
   * NOTE(review): SparkContext/SQLContext are created eagerly the first time
   * SparkConfig is referenced (local mode, 2 worker threads).
   */
  object SparkConfig {
    // @transient: SparkConf is not meant to be serialized with any enclosing closure.
    @transient val sparkConf =new SparkConf()
      .setMaster("local[2]")
      .setAppName("gds")
    val sparkContext = new SparkContext(sparkConf)
    val sqlContext = new SQLContext(sparkContext)
  }

  /**
   * On start load the json data from conf/data.json into in-memory Spark,
   * register it as the "godzilla" temp table and cache it.
   */
  override def onStart(app: Application) {
    val dataFrame = SparkConfig.sqlContext.read.json("conf/data.json")
    dataFrame.registerTempTable("godzilla")
    dataFrame.cache()
  }
}
| atware/activator-spray-spark-react | app/common/AppGlobal.scala | Scala | apache-2.0 | 737 |
package org.scalameta.ast
import scala.language.experimental.macros
import scala.annotation.StaticAnnotation
import scala.annotation.meta.getter
import scala.{Seq => _}
import scala.collection.immutable.Seq
import scala.reflect.macros.blackbox.Context
import org.scalameta.adt.{Reflection => AdtReflection}
object internal {
  // Marker base for all @ast-generated trees, layered on the ADT infrastructure.
  trait Ast extends org.scalameta.adt.Internal.Adt

  // Annotations used by the @ast macro machinery to tag hierarchy roles and fields.
  class root extends StaticAnnotation
  class branch extends StaticAnnotation
  class astClass extends StaticAnnotation
  class astCompanion extends StaticAnnotation
  @getter class astField extends StaticAnnotation
  @getter class auxiliary extends StaticAnnotation
  class registry(paths: List[String]) extends StaticAnnotation

  // Macro entry points; implementations live in the Macros bundle below.
  def hierarchyCheck[T]: Unit = macro Macros.hierarchyCheck[T]
  def productPrefix[T]: String = macro Macros.productPrefix[T]
  def loadField[T](f: T): Unit = macro Macros.loadField
  def storeField[T](f: T, v: T): Unit = macro Macros.storeField
  def initField[T](f: T): T = macro Macros.initField
  def initParam[T](f: T): T = macro Macros.initField
  def children[T, U]: Seq[U] = macro Macros.children[T]
class Macros(val c: Context) extends AdtReflection {
lazy val u: c.universe.type = c.universe
lazy val mirror: u.Mirror = c.mirror
import c.universe._
import c.internal._
import decorators._
import definitions._
    // Compile-time validation of the @ast hierarchy for type T: exactly one root,
    // and every base class must be a known root/branch/leaf or an exempted type.
    def hierarchyCheck[T](implicit T: c.WeakTypeTag[T]): c.Tree = {
      val sym = T.tpe.typeSymbol.asClass
      val designation = if (sym.isRoot) "root" else if (sym.isBranch) "branch" else if (sym.isLeaf) "leaf" else "unknown"
      val roots = sym.baseClasses.filter(_.isRoot)
      if (roots.length == 0 && sym.isLeaf) c.abort(c.enclosingPosition, s"rootless leaf is disallowed")
      else if (roots.length > 1) c.abort(c.enclosingPosition, s"multiple roots for a $designation: " + (roots.map(_.fullName).init.mkString(", ")) + " and " + roots.last.fullName)
      val root = roots.headOption.getOrElse(NoSymbol)
      sym.baseClasses.map(_.asClass).foreach{bsym =>
        // Universal/base types and everything already in the root's ancestry are allowed.
        val exempt =
          bsym.isModuleClass ||
          bsym == ObjectClass ||
          bsym == AnyClass ||
          bsym == symbolOf[scala.Serializable] ||
          bsym == symbolOf[java.io.Serializable] ||
          bsym == symbolOf[scala.Product] ||
          bsym == symbolOf[scala.Equals] ||
          root.info.baseClasses.contains(bsym)
        if (!exempt && !bsym.isRoot && !bsym.isBranch && !bsym.isLeaf) c.abort(c.enclosingPosition, s"outsider parent of a $designation: ${bsym.fullName}")
        // NOTE: sealedness is turned off because we can't have @ast hierarchy sealed anymore
        // hopefully, in the future we'll find a way to restore sealedness
        // if (!exempt && !bsym.isSealed && !bsym.isFinal) c.abort(c.enclosingPosition, s"unsealed parent of a $designation: ${bsym.fullName}")
      }
      q"()"
    }
    // Generates a cast from an interface value to its API type A.
    def interfaceToApi[I, A](interface: c.Tree)(implicit I: c.WeakTypeTag[I], A: c.WeakTypeTag[A]): c.Tree = {
      q"$interface.asInstanceOf[$A]"
    }

    // Generates the productPrefix string literal for a leaf, as computed by AdtReflection.
    def productPrefix[T](implicit T: c.WeakTypeTag[T]): c.Tree = {
      q"${T.tpe.typeSymbol.asLeaf.prefix}"
    }
    // Generates lazy-loading code for a tree field `this._<name>`: if still null,
    // copy the corresponding subtree(s) from this.privatePrototype, reparenting
    // them to `this` and propagating the typechecked flag.
    def loadField(f: c.Tree): c.Tree = {
      val q"this.$finternalName" = f
      // Public field name: internal name minus the leading underscore.
      val fname = TermName(finternalName.toString.stripPrefix("_"))
      def lazyLoad(fn: c.Tree => c.Tree) = {
        val assertionMessage = s"internal error when initializing ${c.internal.enclosingOwner.owner.name}.$fname"
        q"""
          if ($f == null) {
            // there's not much sense in using org.scalameta.invariants.require here
            // because when the assertion trips, the tree is most likely in inconsistent state
            // which will either lead to useless printouts or maybe even worse errors
            _root_.scala.Predef.require(this.privatePrototype != null, $assertionMessage)
            $f = ${fn(q"this.privatePrototype.$fname")}
          }
        """
      }
      def copySubtree(subtree: c.Tree) = {
        val tempName = c.freshName(TermName("copy" + fname.toString.capitalize))
        q"""
          val $tempName = $subtree.privateCopy(prototype = $subtree, parent = this)
          if (this.privatePrototype.isTypechecked != this.isTypechecked) $tempName.withTypechecked(this.isTypechecked)
          else $tempName
        """
      }
      // Dispatch on the field's shape; plain/primitive fields need no loading.
      f.tpe.finalResultType match {
        case Any(tpe)          => q"()"
        case Primitive(tpe)    => q"()"
        case Tree(tpe)         => lazyLoad(pf => q"${copySubtree(pf)}")
        case OptionTree(tpe)   => lazyLoad(pf => q"$pf.map(el => ${copySubtree(q"el")})")
        case OptionSeqTree(tpe) => lazyLoad(pf => q"$pf.map(_.map(el => ${copySubtree(q"el")}))")
        case SeqTree(tpe)      => lazyLoad(pf => q"$pf.map(el => ${copySubtree(q"el")})")
        case SeqSeqTree(tpe)   => lazyLoad(pf => q"$pf.map(_.map(el => ${copySubtree(q"el")}))")
      }
    }
// Generates the assignment that stores value `v` into private field `f`,
// copy-on-write re-parenting any stored subtrees.
// NOTE(review): the generated code references `node` as the new parent — this
// assumes a `node` value is in scope at the splice site; confirm against callers.
def storeField(f: c.Tree, v: c.Tree): c.Tree = {
// Re-parents the stored subtree to `node` (not to `this`, unlike loadField).
def copySubtree(subtree: c.Tree) = {
q"$subtree.privateCopy(prototype = $subtree, parent = node)"
}
f.tpe.finalResultType match {
// Eagerly-held values (Any/primitive) need no store code.
case Any(tpe) => q"()"
case Primitive(tpe) => q"()"
case Tree(tpe) => q"$f = ${copySubtree(v)}"
case OptionTree(tpe) => q"$f = $v.map(el => ${copySubtree(q"el")})"
case OptionSeqTree(tpe) => q"$f = $v.map(_.map(el => ${copySubtree(q"el")}))"
case SeqTree(tpe) => q"$f = $v.map(el => ${copySubtree(q"el")})"
case SeqSeqTree(tpe) => q"$f = $v.map(_.map(el => ${copySubtree(q"el")}))"
// Unsupported field types abort macro expansion with a positioned error.
case tpe => c.abort(c.enclosingPosition, s"unsupported field type $tpe")
}
}
/** Generates the initial value for a private field of an @ast node: eagerly held
 *  values (Any/primitive) start as themselves, tree-shaped fields start as `null`
 *  and are populated lazily (see `loadField`). Unsupported field types abort
 *  macro expansion with a positioned error.
 */
def initField(f: c.Tree): c.Tree = {
  f.tpe.finalResultType match {
    case Any(_) | Primitive(_) =>
      q"$f"
    case Tree(_) | OptionTree(_) | OptionSeqTree(_) | SeqTree(_) | SeqSeqTree(_) =>
      q"null"
    case tpe =>
      c.abort(c.enclosingPosition, s"unsupported field type $tpe")
  }
}
// Extractor matching exactly the top type Any (type equality `=:=`, not a subtype check).
private object Any {
  def unapply(tpe: Type): Option[Type] =
    Some(tpe).filter(_ =:= AnyTpe)
}
// Extractor for field types stored eagerly without copying: String, scala.Symbol,
// the primitive value classes, Option of any of those, and Class[_] values.
private object Primitive {
def unapply(tpe: Type): Option[Type] = {
if (tpe =:= typeOf[String] ||
tpe =:= typeOf[scala.Symbol] ||
ScalaPrimitiveValueClasses.contains(tpe.typeSymbol)) Some(tpe)
// Option[primitive] also counts as primitive (one level of recursion via unapply).
else if (tpe.typeSymbol == OptionClass && Primitive.unapply(tpe.typeArgs.head).nonEmpty) Some(tpe)
else if (tpe.typeSymbol == ClassClass) Some(tpe)
else None
}
}
// Extractor matching any subtype of scala.meta.Tree.
private object Tree {
  def unapply(tpe: Type): Option[Type] = {
    val metaTree = c.mirror.staticClass("scala.meta.Tree").asType.toType
    if (tpe <:< metaTree) Some(tpe) else None
  }
}
// Extractor for immutable.Seq[T] where T is a scala.meta.Tree; yields the element type.
private object SeqTree {
  def unapply(tpe: Type): Option[Type] = {
    if (tpe.typeSymbol != c.mirror.staticClass("scala.collection.immutable.Seq")) None
    else tpe.typeArgs match {
      case Tree(elem) :: Nil => Some(elem)
      case _ => None
    }
  }
}
// Extractor for immutable.Seq[Seq[T]] where T is a scala.meta.Tree; yields the innermost element type.
private object SeqSeqTree {
  def unapply(tpe: Type): Option[Type] = {
    if (tpe.typeSymbol != c.mirror.staticClass("scala.collection.immutable.Seq")) None
    else tpe.typeArgs match {
      case SeqTree(elem) :: Nil => Some(elem)
      case _ => None
    }
  }
}
// Extractor for Option[T] where T is a scala.meta.Tree; yields the element type.
private object OptionTree {
  def unapply(tpe: Type): Option[Type] = {
    if (tpe.typeSymbol != c.mirror.staticClass("scala.Option")) None
    else tpe.typeArgs match {
      case Tree(elem) :: Nil => Some(elem)
      case _ => None
    }
  }
}
// Extractor for Option[Seq[T]] where T is a scala.meta.Tree; yields the innermost element type.
private object OptionSeqTree {
  def unapply(tpe: Type): Option[Type] = {
    if (tpe.typeSymbol != c.mirror.staticClass("scala.Option")) None
    else tpe.typeArgs match {
      case SeqTree(elem) :: Nil => Some(elem)
      case _ => None
    }
  }
}
// Generates an expression collecting all direct child trees of node type T, in
// field declaration order. Consecutive plain-Tree fields are buffered in `streak`
// and emitted as a single literal list, minimizing the number of `++` concatenations.
def children[T](implicit T: c.WeakTypeTag[T]): c.Tree = {
var streak = List[Tree]()
// Emits the buffered Tree fields as one list, appended to `acc` unless `acc` is
// still the empty starting quasiquote; resets the buffer.
def flushStreak(acc: Tree): Tree = {
val result = if (acc.isEmpty) q"$streak" else q"$acc ++ $streak"
streak = Nil
result
}
val acc = T.tpe.typeSymbol.asLeaf.fields.foldLeft(q"": Tree)((acc, f) => f.tpe match {
case Tree(_) =>
// Buffer plain tree fields; they are flushed together at the next collection field.
streak :+= q"this.${f.sym}"
acc
case SeqTree(_) =>
val acc1 = flushStreak(acc)
q"$acc1 ++ this.${f.sym}"
case SeqSeqTree(_) =>
val acc1 = flushStreak(acc)
q"$acc1 ++ this.${f.sym}.flatten"
case OptionTree(_) =>
val acc1 = flushStreak(acc)
q"$acc1 ++ this.${f.sym}.toList"
case _ =>
// Non-tree fields contribute no children.
acc
})
// Flush any trailing buffered Tree fields.
flushStreak(acc)
}
}
}
| beni55/scalameta | foundation/src/main/scala/org/scalameta/ast/internal.scala | Scala | bsd-3-clause | 8,875 |
package fi.onesto.sbt.mobilizer
import util._
import net.schmizz.sshj.SSHClient
/**
* Specifies a target environment for deployment.
*
* @param hosts List of hosts in the format `[USER@]HOSTNAME[:PORT]` where USER defaults to [[username]] and
* PORT defaults to [[port]] if not given.
* @param standbyHosts List of stand-by hosts: files are copied but [[restartCommand]] or [[checkCommand]] are not run.
* @param jumpServer A function which receives a hostname and returns a jump server address (in above host format)
* if such is required for the connection.
* @param port Default SSH port; deprecated in favour of `HOSTNAME:PORT` syntax.
* @param username Name of the user to log in as to the target hosts.
* @param rootDirectory Target directory to deploy to on the hosts.
* @param releasesDirectoryName Name of the directory containing releases under [[rootDirectory]].
* @param currentDirectoryName Name of the current symbolic link under [[rootDirectory]].
* @param libDirectoryName Name of the directory containing library files under [[rootDirectory]].
* @param javaBin Path to the Java executable on the target hosts.
* @param javaOpts Additional options to pass to the `java` command on startup.
* @param rsyncCommand Name or path to the `rsync` command on the target hosts.
* @param rsyncOpts Additional options to pass to `rsync` when copying files.
* @param restartCommand Shell command for restarting the application on the target hosts.
* @param checkCommand Shell command for checking whether application is up on the target hosts.
*/
final case class DeploymentEnvironment(
    hosts: Seq[String] = Seq("localhost"),
    standbyHosts: Seq[String] = Seq.empty,
    jumpServer: String => Option[String] = _ => None,
    @deprecated("use host:port syntax for hosts", "0.2.0")
    port: Int = SSHClient.DEFAULT_PORT,
    @deprecated("use user@host:port syntax for hosts", "0.3.0")
    username: String = currentUser,
    rootDirectory: String = "/tmp/deploy",
    releasesDirectoryName: String = "releases",
    currentDirectoryName: String = "current",
    libDirectoryName: String = "lib",
    javaBin: String = "java",
    javaOpts: Seq[String] = Seq.empty,
    rsyncCommand: String = "rsync",
    rsyncOpts: Seq[String] = Seq.empty,
    restartCommand: Option[String] = None,
    checkCommand: Option[String] = None) {

  // Derived locations under rootDirectory on the target hosts.
  val releasesRoot: String = rootDirectory + "/" + releasesDirectoryName
  val currentDirectory: String = rootDirectory + "/" + currentDirectoryName

  /** Directory into which the release identified by `releaseId` is deployed. */
  def releaseDirectory(releaseId: String): String =
    releasesRoot + "/" + releaseId

  /** Directory holding the library files of the release identified by `releaseId`. */
  def libDirectory(releaseId: String): String =
    releaseDirectory(releaseId) + "/" + libDirectoryName
}
| onesto/sbt-mobilizer | src/main/scala/fi/onesto/sbt/mobilizer/DeploymentEnvironment.scala | Scala | mit | 3,111 |
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO3
package com.thesamet.docs.json
@SerialVersionUID(0L)
final case class MyContainer(
myAny: _root_.scala.Option[com.google.protobuf.any.Any] = _root_.scala.None
) extends scalapb.GeneratedMessage with scalapb.Message[MyContainer] with scalapb.lenses.Updatable[MyContainer] {
@transient
private[this] var __serializedSizeCachedValue: _root_.scala.Int = 0
private[this] def __computeSerializedValue(): _root_.scala.Int = {
var __size = 0
if (myAny.isDefined) {
val __value = myAny.get
__size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
};
__size
}
final override def serializedSize: _root_.scala.Int = {
var read = __serializedSizeCachedValue
if (read == 0) {
read = __computeSerializedValue()
__serializedSizeCachedValue = read
}
read
}
def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = {
myAny.foreach { __v =>
val __m = __v
_output__.writeTag(1, 2)
_output__.writeUInt32NoTag(__m.serializedSize)
__m.writeTo(_output__)
};
}
def mergeFrom(`_input__`: _root_.com.google.protobuf.CodedInputStream): com.thesamet.docs.json.MyContainer = {
var __myAny = this.myAny
var _done__ = false
while (!_done__) {
val _tag__ = _input__.readTag()
_tag__ match {
case 0 => _done__ = true
case 10 =>
__myAny = Option(_root_.scalapb.LiteParser.readMessage(_input__, __myAny.getOrElse(com.google.protobuf.any.Any.defaultInstance)))
case tag => _input__.skipField(tag)
}
}
com.thesamet.docs.json.MyContainer(
myAny = __myAny
)
}
def getMyAny: com.google.protobuf.any.Any = myAny.getOrElse(com.google.protobuf.any.Any.defaultInstance)
def clearMyAny: MyContainer = copy(myAny = _root_.scala.None)
def withMyAny(__v: com.google.protobuf.any.Any): MyContainer = copy(myAny = Option(__v))
def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = {
(__fieldNumber: @_root_.scala.unchecked) match {
case 1 => myAny.orNull
}
}
def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = {
_root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor)
(__field.number: @_root_.scala.unchecked) match {
case 1 => myAny.map(_.toPMessage).getOrElse(_root_.scalapb.descriptors.PEmpty)
}
}
def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this)
def companion = com.thesamet.docs.json.MyContainer
}
object MyContainer extends scalapb.GeneratedMessageCompanion[com.thesamet.docs.json.MyContainer] {
implicit def messageCompanion: scalapb.GeneratedMessageCompanion[com.thesamet.docs.json.MyContainer] = this
def fromFieldsMap(__fieldsMap: scala.collection.immutable.Map[_root_.com.google.protobuf.Descriptors.FieldDescriptor, _root_.scala.Any]): com.thesamet.docs.json.MyContainer = {
_root_.scala.Predef.require(__fieldsMap.keys.forall(_.getContainingType() == javaDescriptor), "FieldDescriptor does not match message type.")
val __fields = javaDescriptor.getFields
com.thesamet.docs.json.MyContainer(
__fieldsMap.get(__fields.get(0)).asInstanceOf[_root_.scala.Option[com.google.protobuf.any.Any]]
)
}
implicit def messageReads: _root_.scalapb.descriptors.Reads[com.thesamet.docs.json.MyContainer] = _root_.scalapb.descriptors.Reads{
case _root_.scalapb.descriptors.PMessage(__fieldsMap) =>
_root_.scala.Predef.require(__fieldsMap.keys.forall(_.containingMessage == scalaDescriptor), "FieldDescriptor does not match message type.")
com.thesamet.docs.json.MyContainer(
__fieldsMap.get(scalaDescriptor.findFieldByNumber(1).get).flatMap(_.as[_root_.scala.Option[com.google.protobuf.any.Any]])
)
case _ => throw new RuntimeException("Expected PMessage")
}
def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor = JsonProto.javaDescriptor.getMessageTypes.get(1)
def scalaDescriptor: _root_.scalapb.descriptors.Descriptor = JsonProto.scalaDescriptor.messages(1)
def messageCompanionForFieldNumber(__number: _root_.scala.Int): _root_.scalapb.GeneratedMessageCompanion[_] = {
var __out: _root_.scalapb.GeneratedMessageCompanion[_] = null
(__number: @_root_.scala.unchecked) match {
case 1 => __out = com.google.protobuf.any.Any
}
__out
}
lazy val nestedMessagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] = Seq.empty
def enumCompanionForFieldNumber(__fieldNumber: _root_.scala.Int): _root_.scalapb.GeneratedEnumCompanion[_] = throw new MatchError(__fieldNumber)
lazy val defaultInstance = com.thesamet.docs.json.MyContainer(
)
implicit class MyContainerLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, com.thesamet.docs.json.MyContainer]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, com.thesamet.docs.json.MyContainer](_l) {
def myAny: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.any.Any] = field(_.getMyAny)((c_, f_) => c_.copy(myAny = Option(f_)))
def optionalMyAny: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[com.google.protobuf.any.Any]] = field(_.myAny)((c_, f_) => c_.copy(myAny = f_))
}
final val MY_ANY_FIELD_NUMBER = 1
def of(
myAny: _root_.scala.Option[com.google.protobuf.any.Any]
): _root_.com.thesamet.docs.json.MyContainer = _root_.com.thesamet.docs.json.MyContainer(
myAny
)
}
| dotty-staging/ScalaPB | docs/src/main/scala/com/thesamet/docs/json/MyContainer.scala | Scala | apache-2.0 | 5,815 |
/*
* Copyright 2014 Databricks
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark.csv
import java.io.{File, IOException}
private[csv] object TestUtils {

  /**
   * Recursively deletes a file or a directory with everything in it. This function is
   * copied from Spark with minor modifications made to it. See original source at:
   * github.com/apache/spark/blob/master/core/src/main/scala/org/apache/spark/util/Utils.scala
   *
   * A `null` argument is a no-op. If several children fail to delete, only the last
   * IOException is rethrown (Spark's original semantics). Throws IOException when a
   * directory cannot be listed or a still-existing entry cannot be deleted.
   */
  def deleteRecursively(file: File): Unit = {
    // Lists a directory, failing loudly when the OS returns null (I/O error or
    // permission problem) instead of silently treating it as empty.
    def listFilesSafely(file: File): Seq[File] = {
      if (file.exists()) {
        val files = file.listFiles()
        if (files == null) {
          throw new IOException("Failed to list files for dir: " + file)
        }
        files
      } else {
        List()
      }
    }
    if (file != null) {
      try {
        if (file.isDirectory) {
          var savedIOException: IOException = null
          for (child <- listFilesSafely(file)) {
            try {
              deleteRecursively(child)
            } catch {
              // In case of multiple exceptions, only last one will be thrown
              case ioe: IOException => savedIOException = ioe
            }
          }
          if (savedIOException != null) {
            throw savedIOException
          }
        }
      } finally {
        if (!file.delete()) {
          // Delete can also fail if the file simply did not exist
          if (file.exists()) {
            throw new IOException("Failed to delete: " + file.getAbsolutePath)
          }
        }
      }
    }
  }
}
| falaki/spark-csv | src/test/scala/com/databricks/spark/csv/TestUtils.scala | Scala | apache-2.0 | 2,086 |
package dk.bayes.factorgraph.ep.calibrate.fb
import org.junit._
import org.junit.Assert._
import dk.bayes.factorgraph.ep.util.TennisFactorGraph._
import org.junit.Assert._
import dk.bayes.factorgraph.factor.GaussianFactor
import dk.bayes.math.linear._
import breeze.linalg.DenseMatrix
import breeze.linalg.DenseVector
import dk.bayes.factorgraph.factor.BivariateGaussianFactor
import dk.bayes.factorgraph.ep.GenericEP
import dk.bayes.factorgraph.ep.calibrate.fb.ForwardBackwardEPCalibrate
/**
* This is a test for a skill update with TrueSkill rating system in a two-person game, like Tennis.
*
* Bayesian inference is performed with Expectation Propagation algorithm.
*
* @author Daniel Korzekwa
*/
class TrueSkillOnlineTennisEPTest {
private def progress(currIter: Int) = println("EP iteration: " + currIter)
/**
* Tests for variable marginals.
*/
// Requesting a marginal for a variable id that is not in the graph must fail fast.
@Test(expected = classOf[NoSuchElementException]) def variable_marginal_var_id_not_found:Unit = {
val tennisFactorGraph = createTennisFactorGraphAfterPlayer1Won()
val epCalibrate = ForwardBackwardEPCalibrate(tennisFactorGraph)
// Calibration itself succeeds (1 iteration, 44 message passes)...
assertEquals(EPSummary(1, 44), epCalibrate.calibrate(10, progress))
val ep = GenericEP(tennisFactorGraph)
// ...but 123 is not a variable id in the tennis factor graph, so this throws.
val outcomeMarginal = ep.marginal(123)
}
/**http://atom.research.microsoft.com/trueskill/rankcalculator.aspx*/
// Expected marginals below are cross-checked against Microsoft's TrueSkill rank
// calculator (see link above).
@Test def variable_marginal_no_result_set:Unit = {
val tennisFactorGraph = createTennisFactorGraphAfterPlayer1Won()
val epCalibrate = ForwardBackwardEPCalibrate(tennisFactorGraph)
assertEquals(EPSummary(1, 44), epCalibrate.calibrate(10, progress))
val ep = GenericEP(tennisFactorGraph)
val outcomeMarginal = ep.marginal(outcomeVarId)
assertEquals(0.24463, outcomeMarginal.getValue((outcomeVarId, 0)), 0.00001)
assertEquals(0.75537, outcomeMarginal.getValue((outcomeVarId, 1)), 0.00001)
val skill1Marginal = ep.marginal(skill1VarId).asInstanceOf[GaussianFactor]
assertEquals(27.1742, skill1Marginal.m, 0.0001)
assertEquals(37.5013, skill1Marginal.v, 0.0001)
val skill2Marginal = ep.marginal(skill2VarId).asInstanceOf[GaussianFactor]
assertEquals(33.846, skill2Marginal.m, 0.0001)
assertEquals(20.861, skill2Marginal.v, 0.0001)
// Calibrating an already-calibrated graph again must report the same summary and
// leave every marginal unchanged (calibration is idempotent).
assertEquals(EPSummary(1, 44), epCalibrate.calibrate(10, progress))
val outcomeMarginal2 = ep.marginal(outcomeVarId)
assertEquals(0.24463, outcomeMarginal2.getValue((outcomeVarId, 0)), 0.00001)
assertEquals(0.75537, outcomeMarginal2.getValue((outcomeVarId, 1)), 0.00001)
val skill1MarginalLater = ep.marginal(skill1VarId).asInstanceOf[GaussianFactor]
assertEquals(27.1742, skill1MarginalLater.m, 0.0001)
assertEquals(37.5013, skill1MarginalLater.v, 0.0001)
val skill2MarginalLater = ep.marginal(skill2VarId).asInstanceOf[GaussianFactor]
assertEquals(33.846, skill2MarginalLater.m, 0.0001)
assertEquals(20.861, skill2MarginalLater.v, 0.0001)
}
/**http://atom.research.microsoft.com/trueskill/rankcalculator.aspx*/
@Test def variable_marginal_player1_wins:Unit = {
val tennisFactorGraph = createTennisFactorGraph()
val ep = GenericEP(tennisFactorGraph)
ep.setEvidence(outcomeVarId, true)
val epCalibrate = ForwardBackwardEPCalibrate(tennisFactorGraph)
assertEquals(EPSummary(2, 88), epCalibrate.calibrate(70, progress))
val outcomeMarginal = ep.marginal(outcomeVarId)
assertEquals(1, outcomeMarginal.getValue((outcomeVarId, 0)), 0.0001)
assertEquals(0, outcomeMarginal.getValue((outcomeVarId, 1)), 0.0001)
val skill1Marginal = ep.marginal(skill1VarId).asInstanceOf[GaussianFactor]
assertEquals(27.1744, skill1Marginal.m, 0.0001)
assertEquals(37.4973, skill1Marginal.v, 0.0001)
val skill2Marginal = ep.marginal(skill2VarId).asInstanceOf[GaussianFactor]
assertEquals(33.8473, skill2Marginal.m, 0.0001)
assertEquals(20.8559, skill2Marginal.v, 0.0001)
}
/**http://atom.research.microsoft.com/trueskill/rankcalculator.aspx*/
@Test def variable_marginal_player1_looses:Unit = {
val tennisFactorGraph = createTennisFactorGraphAfterPlayer1Won()
val ep = GenericEP(tennisFactorGraph)
ep.setEvidence(outcomeVarId, false)
val epCalibrate = ForwardBackwardEPCalibrate(tennisFactorGraph)
assertEquals(EPSummary(2, 88), epCalibrate.calibrate(100, progress))
val outcomeMarginal = ep.marginal(outcomeVarId)
assertEquals(0, outcomeMarginal.getValue((outcomeVarId, 0)), 0.0001)
assertEquals(1, outcomeMarginal.getValue((outcomeVarId, 1)), 0.0001)
val skill1Marginal = ep.marginal(skill1VarId).asInstanceOf[GaussianFactor]
assertEquals(25.558, skill1Marginal.m, 0.0001)
assertEquals(30.5446, skill1Marginal.v, 0.0001)
val skill2Marginal = ep.marginal(skill2VarId).asInstanceOf[GaussianFactor]
assertEquals(34.745, skill2Marginal.m, 0.0001)
assertEquals(18.7083, skill2Marginal.v, 0.0001)
}
/**
* Tests for factor marginals.
*/
// Two-variable factor marginal lookup with an unknown variable id must fail fast.
@Test(expected = classOf[NoSuchElementException]) def factor_marginal_var_id_not_found:Unit = {
val tennisFactorGraph = createTennisFactorGraphAfterPlayer1Won()
val ep = GenericEP(tennisFactorGraph)
// 345 is not a variable id in the tennis factor graph, so this throws.
val perfMarginal = ep.marginal(skill1VarId, 345)
}
// Three-variable factor marginal lookup with an unknown variable id must fail fast.
@Test(expected = classOf[NoSuchElementException]) def factor_marginal_var_id_not_found2:Unit = {
val tennisFactorGraph = createTennisFactorGraphAfterPlayer1Won()
val ep = GenericEP(tennisFactorGraph)
// 45 is not a variable id in the tennis factor graph, so this throws.
val perfMarginal = ep.marginal(skill1VarId, perf1VarId, 45)
}
/**http://atom.research.microsoft.com/trueskill/rankcalculator.aspx*/
@Ignore @Test def factor_marginal_no_result_set = {
val tennisFactorGraph = createTennisFactorGraphAfterPlayer1Won()
val ep = GenericEP(tennisFactorGraph)
val perfMarginal = ep.marginal(skill1VarId, perf1VarId).asInstanceOf[BivariateGaussianFactor]
assertEquals(Vector(1, 3), perfMarginal.getVariableIds())
assertEquals(DenseVector(Double.NaN, Double.NaN).toString, perfMarginal.mean.toString)
assertEquals(new DenseMatrix(2, 2, Array(Double.NaN, Double.NaN, Double.NaN, Double.NaN)).toString, perfMarginal.variance.toString)
val epCalibrate = ForwardBackwardEPCalibrate(tennisFactorGraph)
assertEquals(EPSummary(1, 44), epCalibrate.calibrate(10, progress))
val perfMarginal2 = ep.marginal(skill1VarId, perf1VarId).asInstanceOf[BivariateGaussianFactor]
assertEquals(Vector(1, 3), perfMarginal2.getVariableIds())
assertEquals(DenseVector(27.174, 27.174).toString, perfMarginal2.mean.toString)
assertEquals(new DenseMatrix(2, 2, Array(37.501, 37.501, 37.501, 54.862)).toString, perfMarginal2.variance.toString)
}
/**http://atom.research.microsoft.com/trueskill/rankcalculator.aspx*/
@Test def factor_marginal_player1_wins = {
val tennisFactorGraph = createTennisFactorGraph()
val ep = GenericEP(tennisFactorGraph)
ep.setEvidence(outcomeVarId, true)
val epCalibrate = ForwardBackwardEPCalibrate(tennisFactorGraph)
assertEquals(EPSummary(2, 88), epCalibrate.calibrate(70, progress))
val perfFactorMarginal = ep.marginal(skill1VarId, perf1VarId).asInstanceOf[BivariateGaussianFactor]
assertEquals(Vector(1, 3), perfFactorMarginal.getVariableIds())
assertTrue(isIdentical(DenseVector(27.174, 32.142), perfFactorMarginal.mean, 0.001))
assertTrue(isIdentical(new DenseMatrix(2, 2, Array(37.497, 28.173, 28.173, 34.212)), perfFactorMarginal.variance, 0.001))
val player1PerfMarginal = ep.marginal(perf1VarId).asInstanceOf[GaussianFactor]
assertEquals(32.1415, player1PerfMarginal.m, 0.0001)
assertEquals(34.2117, player1PerfMarginal.v, 0.0001)
}
/**http://atom.research.microsoft.com/trueskill/rankcalculator.aspx*/
@Test def factor_marginal_player1_looses = {
val tennisFactorGraph = createTennisFactorGraphAfterPlayer1Won()
val ep = GenericEP(tennisFactorGraph)
ep.setEvidence(outcomeVarId, false)
val epCalibrate = ForwardBackwardEPCalibrate(tennisFactorGraph)
assertEquals(EPSummary(2, 88), epCalibrate.calibrate(100, progress))
val perfFactorMarginal = ep.marginal(skill1VarId, perf1VarId).asInstanceOf[BivariateGaussianFactor]
assertEquals(Vector(1, 3), perfFactorMarginal.getVariableIds)
assertTrue(isIdentical(DenseVector(25.558, 24.810), perfFactorMarginal.mean, 0.001))
assertTrue(isIdentical(new DenseMatrix(2, 2, Array(30.545, 27.324, 27.324, 39.974)), perfFactorMarginal.variance, 0.001))
val player1PerfMarginal = ep.marginal(perf1VarId).asInstanceOf[GaussianFactor]
assertEquals(24.8097, player1PerfMarginal.m, 0.0001)
assertEquals(39.9735, player1PerfMarginal.v, 0.0001)
}
/**
* Testing inference including GenericFactor
*/
@Test def genericfactor_test_factor_marginal_player1_wins:Unit = {
val tennisFactorGraph = createTennisFactorGraphWithGenFactor()
val ep = GenericEP(tennisFactorGraph)
ep.setEvidence(outcomeVarId, true)
val epCalibrate = ForwardBackwardEPCalibrate(tennisFactorGraph)
assertEquals(EPSummary(2, 88), epCalibrate.calibrate(70, progress))
val perfFactorMarginal = ep.marginal(skill1VarId, perf1VarId).asInstanceOf[BivariateGaussianFactor]
assertEquals(Vector(1, 3), perfFactorMarginal.getVariableIds())
assertTrue(isIdentical(DenseVector(27.174, 32.142), perfFactorMarginal.mean, 0.001))
assertTrue(isIdentical(new DenseMatrix(2, 2, Array(37.497, 28.173, 28.173, 34.212)), perfFactorMarginal.variance, 0.001))
val player1PerfMarginal = ep.marginal(perf1VarId).asInstanceOf[GaussianFactor]
assertEquals(32.1415, player1PerfMarginal.m, 0.0001)
assertEquals(34.2117, player1PerfMarginal.v, 0.0001)
}
} | danielkorzekwa/bayes-scala | src/test/scala/dk/bayes/factorgraph/ep/calibrate/fb/TrueSkillOnlineTennisEPTest.scala | Scala | bsd-2-clause | 9,613 |
package com.orendainx.trucking.storm.topologies
import com.hortonworks.registries.schemaregistry.client.SchemaRegistryClient
import com.orendainx.trucking.storm.bolts._
import com.orendainx.trucking.storm.nifi.ByteArrayToNiFiPacket
import com.typesafe.config.{ConfigFactory, Config => TypeConfig}
import com.typesafe.scalalogging.Logger
import org.apache.nifi.remote.client.SiteToSiteClient
import org.apache.nifi.storm.{NiFiBolt, NiFiSpout}
import org.apache.storm.generated.StormTopology
import org.apache.storm.topology.TopologyBuilder
import org.apache.storm.topology.base.BaseWindowedBolt
import org.apache.storm.{Config, StormSubmitter}
import scala.concurrent.duration._
/**
* Companion object to [[NiFiToNiFiWithSchema]] class.
* Provides an entry point for passing in a custom configuration.
*
* @author Edgar Orendain <edgar@orendainx.com>
*/
object NiFiToNiFiWithSchema {

def main(args: Array[String]): Unit = {
// Build and submit the Storm config and topology
val (stormConfig, topology) = buildDefaultStormConfigAndTopology()
StormSubmitter.submitTopologyWithProgressBar("NiFiToNiFiWithSchema", stormConfig, topology)
}

/**
* Build a Storm Config and Topology with the default configuration.
*
* @return A 2-tuple ([[Config]], [[StormTopology]])
*/
def buildDefaultStormConfigAndTopology(): (Config, StormTopology) = {
val config = ConfigFactory.load()

// Set up configuration for the Storm Topology.
// Debug flag, message timeout and worker count are read from the Typesafe config,
// keyed by the standard Storm property names.
val stormConfig = new Config()
stormConfig.setDebug(config.getBoolean(Config.TOPOLOGY_DEBUG))
stormConfig.setMessageTimeoutSecs(config.getInt(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS))
stormConfig.setNumWorkers(config.getInt(Config.TOPOLOGY_WORKERS))
// The Schema Registry URL goes into the Storm config map so the schema-aware
// (de)serialization bolts can reach the registry.
stormConfig.put(SchemaRegistryClient.Configuration.SCHEMA_REGISTRY_URL.name(), config.getString("schema-registry.url"))

(stormConfig, new NiFiToNiFiWithSchema(config).buildTopology())
}
}
/**
* Create a topology with the following components.
*
* Spouts:
* - NiFiSpout (for ingesting EnrichedTruckData from NiFi)
* - NiFiSpout (for ingesting TrafficData from NiFi)
* Bolt:
* - NiFiPacketWithSchemaToObject (for converting from NiFi packet with schema to JVM object)
* - TruckAndTrafficJoinBolt (for joining EnrichedTruckData and TrafficData streams into EnrichedTruckAndTrafficData)
* - DataWindowingBolt (for generating driver stats from trucking data)
* - ObjectToBytesWithSchema (for serializing JVM object into array of bytes with schema)
* - NiFiBolt (for sending data back out to NiFi)
*
* @author Edgar Orendain <edgar@orendainx.com>
*/
class NiFiToNiFiWithSchema(config: TypeConfig) {

  // Bug fix: this logger was created with classOf[NiFiToNiFi] (copy/paste from the
  // sibling topology), which mislabeled every log line emitted by this class.
  private lazy val logger = Logger(classOf[NiFiToNiFiWithSchema])

  private lazy val NiFiUrl: String = config.getString("nifi.url")

  /**
   * Wires up the spouts and bolts described in the class documentation above.
   *
   * @return a built StormTopology
   */
  def buildTopology(): StormTopology = {
    // Builder to perform the construction of the topology.
    implicit val builder = new TopologyBuilder()

    // Default number of tasks (instances) of components to spawn
    val defaultTaskCount = config.getInt(Config.TOPOLOGY_TASKS)

    /*
     * Build Nifi spouts to ingest truck and traffic data separately
     */
    val batchDuration = config.getLong(Config.TOPOLOGY_BOLTS_WINDOW_LENGTH_DURATION_MS)
    val truckNifiPort = config.getString("nifi.truck-data.port-name")
    val trafficNifiPort = config.getString("nifi.traffic-data.port-name")

    // This assumes that the data is text data, as it will map the byte array received from NiFi to a UTF-8 Encoded string.
    // Attempt to sync up with the join bolt, keeping back pressure in NiFi
    val truckSpoutConfig = new SiteToSiteClient.Builder().url(NiFiUrl).portName(truckNifiPort)
      .requestBatchDuration(batchDuration, MILLISECONDS).buildConfig()
    val trafficSpoutConfig = new SiteToSiteClient.Builder().url(NiFiUrl).portName(trafficNifiPort)
      .requestBatchDuration(batchDuration, MILLISECONDS).buildConfig()

    // Create a spout with the specified configuration, and place it in the, now empty, topology blueprint
    builder.setSpout("enrichedTruckData", new NiFiSpout(truckSpoutConfig), defaultTaskCount)
    builder.setSpout("trafficData", new NiFiSpout(trafficSpoutConfig), defaultTaskCount)

    //builder.setBolt("serializedData", new NiFiPacketToSerialized(), defaultTaskCount).shuffleGrouping("enrichedTruckData").shuffleGrouping("trafficData")
    builder.setBolt("byteData", new NiFiPacketToBytes(), defaultTaskCount).shuffleGrouping("enrichedTruckData").shuffleGrouping("trafficData")

    //builder.setBolt("unpackagedData", new SerializedWithSchemaToObject(), defaultTaskCount).shuffleGrouping("serializedData")
    builder.setBolt("unpackagedData", new BytesWithSchemaToObject(), defaultTaskCount).shuffleGrouping("byteData")

    /*
     * Build bolt to merge windowed data streams, and then generate sliding windowed driving stats
     */
    val windowDuration = config.getInt(Config.TOPOLOGY_BOLTS_WINDOW_LENGTH_DURATION_MS)

    // Create a bolt with a tumbling window and place the bolt in the topology blueprint, connected to the "enrichedTruckData"
    // and "trafficData" streams. globalGrouping suggests that data from both streams be sent to *each* instance of this bolt
    // (in case there are more than one in the cluster)
    val joinBolt = new TruckAndTrafficJoinBolt().withTumblingWindow(new BaseWindowedBolt.Duration(windowDuration, MILLISECONDS))
    builder.setBolt("joinedData", joinBolt, defaultTaskCount).globalGrouping("unpackagedData")

    /*
     * Build bolt to generate driver stats from data collected in a window.
     * Creates a tuple count based window bolt that slides with every incoming tuple.
     */
    val intervalCount = config.getInt(Config.TOPOLOGY_BOLTS_SLIDING_INTERVAL_COUNT)

    // Build bolt and then place in the topology blueprint connected to the "joinedData" stream. ShuffleGrouping suggests
    // that tuples from that stream are distributed across this bolt's tasks (instances), so as to keep load levels even.
    val statsBolt = new DataWindowingBolt().withWindow(new BaseWindowedBolt.Count(intervalCount))
    builder.setBolt("windowedDriverStats", statsBolt, defaultTaskCount).shuffleGrouping("joinedData")

    /*
     * Serialize data before pushing out to anywhere.
     */
    builder.setBolt("serializedJoinedData", new ObjectToBytesWithSchema()).shuffleGrouping("joinedData")
    builder.setBolt("serializedDriverStats", new ObjectToBytesWithSchema()).shuffleGrouping("windowedDriverStats")

    /*
     * Build bolts to push data back out to NiFi
     */
    val joinedNifiPort = config.getString("nifi.truck-and-traffic-data.port-name")
    val joinedNififrequency = config.getInt("nifi.truck-and-traffic-data.tick-frequency")
    val joinNifiBatchSize = config.getInt("nifi.truck-and-traffic-data.batch-size")

    // Construct a clientConfig and a NiFi bolt
    val joinedBoltConfig = new SiteToSiteClient.Builder().url(NiFiUrl).portName(joinedNifiPort).buildConfig()
    val joinedNifiBolt = new NiFiBolt(joinedBoltConfig, new ByteArrayToNiFiPacket(), joinedNififrequency).withBatchSize(joinNifiBatchSize)
    builder.setBolt("joinedDataToNiFi", joinedNifiBolt, defaultTaskCount).shuffleGrouping("serializedJoinedData")

    val statsNifiPort = config.getString("nifi.driver-stats.port-name")
    val statsNifiFrequency = config.getInt("nifi.driver-stats.tick-frequency")
    val statsNifiBatchSize = config.getInt("nifi.driver-stats.batch-size")

    // Construct a clientConfig and a NiFi bolt
    val statsBoltConfig = new SiteToSiteClient.Builder().url(NiFiUrl).portName(statsNifiPort).buildConfig()
    val statsNifiBolt = new NiFiBolt(statsBoltConfig, new ByteArrayToNiFiPacket(), statsNifiFrequency).withBatchSize(statsNifiBatchSize)
    builder.setBolt("driverStatsToNifi", statsNifiBolt, defaultTaskCount).shuffleGrouping("serializedDriverStats")

    logger.info("Storm topology finished building.")

    // Finally, create the topology
    builder.createTopology()
  }
}
| orendain/trucking-iot | storm-topology/src/main/scala/com/orendainx/trucking/storm/topologies/NiFiToNiFiWithSchema.scala | Scala | apache-2.0 | 8,086 |
package cpup.mc.personalTech
import cpup.mc.lib.CPupCommonProxy
import cpw.mods.fml.common.FMLCommonHandler
import net.minecraftforge.common.MinecraftForge
class CommonProxy extends CPupCommonProxy[PersonalTech.type] {
def mod = PersonalTech
// Registers one shared air.CommonEvents handler instance on both the FML bus and
// the Forge event bus, so it receives events from either source.
def registerEvents {
val airCommonEvents = new air.CommonEvents
FMLCommonHandler.instance().bus().register(airCommonEvents)
MinecraftForge.EVENT_BUS.register(airCommonEvents)
}
} | CoderPuppy/personaltech-mc | src/main/scala/cpup/mc/personalTech/CommonProxy.scala | Scala | mit | 432 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.twitter.zipkin.collector.filter
import com.twitter.finagle.{Service, Filter}
import com.twitter.util.Future
import com.twitter.zipkin.common.Span
/**
* Filter that determines whether to index a span.
* Spans with the Finagle default service name "client" should not be indexed
* since they are unhelpful. Instead, rely on indexed server-side span names.
*/
/**
 * Filter that determines whether to index a span before passing it downstream.
 * Client-side spans whose service names include the Finagle default "client"
 * are dropped (not indexed), since they are unhelpful; the corresponding
 * server-side span names are indexed instead.
 */
class ClientIndexFilter extends Filter[Span, Unit, Span, Unit] {

  def apply(req: Span, service: Service[Span, Unit]): Future[Unit] =
    if (!shouldIndex(req)) Future.Unit
    else service(req)

  // Index everything except client-side spans carrying the default "client" name.
  private[filter] def shouldIndex(span: Span): Boolean =
    !span.isClientSide() || !span.serviceNames.contains("client")
}
| pteichman/zipkin | zipkin-collector-core/src/main/scala/com/twitter/zipkin/collector/filter/ClientIndexFilter.scala | Scala | apache-2.0 | 1,337 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.algebird
import scala.annotation.tailrec
import scala.annotation.implicitNotFound
import java.lang.{ Integer => JInt, Short => JShort, Long => JLong, Float => JFloat, Double => JDouble, Boolean => JBool }
import java.util.{ List => JList, Map => JMap }
/**
* Field: Ring + division. It is a generalization of Ring and adds support for inversion and
* multiplicative identity.
*/
@implicitNotFound(msg = "Cannot find Field type class for ${T}")
trait Field[@specialized(Int, Long, Float, Double) T] extends Ring[T] {
  // default implementation uses div YOU MUST OVERRIDE ONE OF THESE:
  // `inverse` delegates to `div` and `div` delegates to `inverse`, so a
  // subclass that overrides neither would recurse forever.
  def inverse(v: T): T = {
    assertNotZero(v)
    div(one, v)
  }
  // default implementation uses inverse:
  def div(l: T, r: T): T = {
    assertNotZero(r)
    times(l, inverse(r))
  }
}
// For Java interop so they get the default methods
// (Java classes cannot mix in a Scala trait with concrete members directly,
// so Java code extends this abstract class instead).
abstract class AbstractField[T] extends Field[T]
/** Field instance for `Float`, using ordinary IEEE-754 arithmetic. */
object FloatField extends Field[Float] {
  override def one: Float = 1.0f
  override def zero: Float = 0.0f
  override def negate(v: Float): Float = -v
  override def plus(l: Float, r: Float): Float = l + r
  override def minus(l: Float, r: Float): Float = l - r
  override def times(l: Float, r: Float): Float = l * r
  // Division is overridden directly rather than going through `inverse`,
  // avoiding the extra multiply; a zero divisor is rejected up front.
  override def div(l: Float, r: Float): Float = {
    assertNotZero(r)
    l / r
  }
}
/** Field instance for `Double`, using ordinary IEEE-754 arithmetic. */
object DoubleField extends Field[Double] {
  override def one: Double = 1.0
  override def zero: Double = 0.0
  override def negate(v: Double): Double = -v
  override def plus(l: Double, r: Double): Double = l + r
  override def minus(l: Double, r: Double): Double = l - r
  override def times(l: Double, r: Double): Double = l * r
  // Division is overridden directly rather than going through `inverse`,
  // avoiding the extra multiply; a zero divisor is rejected up front.
  override def div(l: Double, r: Double): Double = {
    assertNotZero(r)
    l / r
  }
}
object BooleanField extends Field[Boolean] {
  // The two-element field GF(2): addition is XOR (hence minus == plus) and
  // multiplication is logical AND.
  override def one = true
  override def zero = false
  // In GF(2) every element is its own additive inverse, so negation is identity.
  override def negate(v: Boolean) = v
  override def plus(l: Boolean, r: Boolean) = l ^ r
  override def minus(l: Boolean, r: Boolean) = l ^ r
  override def times(l: Boolean, r: Boolean) = l && r
  // The only invertible element is `one` (true); inverting zero is rejected.
  override def inverse(l: Boolean) = {
    assertNotZero(l)
    true
  }
  // Dividing by `one` is the identity; dividing by zero is rejected.
  override def div(l: Boolean, r: Boolean) = {
    assertNotZero(r)
    l
  }
}
object Field {
  // This pattern is really useful for typeclasses
  def div[T](l: T, r: T)(implicit fld: Field[T]) = fld.div(l, r)

  // Implicit instances for the primitive fields and their boxed-Java
  // counterparts (the J*Field instances are defined elsewhere in this package).
  implicit val boolField: Field[Boolean] = BooleanField
  implicit val jboolField: Field[JBool] = JBoolField
  implicit val floatField: Field[Float] = FloatField
  implicit val jfloatField: Field[JFloat] = JFloatField
  implicit val doubleField: Field[Double] = DoubleField
  implicit val jdoubleField: Field[JDouble] = JDoubleField
}
| sid-kap/algebird | algebird-core/src/main/scala/com/twitter/algebird/Field.scala | Scala | apache-2.0 | 3,105 |
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala.gridfs
import java.nio.ByteBuffer
import com.mongodb.async.SingleResultCallback
import com.mongodb.async.client.gridfs.{GridFSDownloadStream => JGridFSDownloadStream}
import org.mongodb.scala.internal.ObservableHelper.{observe, observeCompleted, observeInt, observeLong}
import org.mongodb.scala.{Completed, Observable}
/**
* A GridFS InputStream for downloading data from GridFS
*
* Provides the `GridFSFile` for the file to being downloaded as well as the `read` methods of a `AsyncInputStream`
*
* @since 1.2
*/
// Thin Scala facade over the Java driver's async download stream: every method
// delegates to `wrapped`, adapting callback-style results to Observables.
case class GridFSDownloadStream(private val wrapped: JGridFSDownloadStream) extends AsyncInputStream {

  /**
   * Gets the corresponding GridFSFile for the file being downloaded
   *
   * @return a Observable with a single element containing the corresponding GridFSFile for the file being downloaded
   */
  def gridFSFile(): Observable[GridFSFile] = observe(wrapped.getGridFSFile(_: SingleResultCallback[GridFSFile]))

  /**
   * Sets the number of chunks to return per batch.
   *
   * Can be used to control the memory consumption of this InputStream. The smaller the batchSize the lower the memory consumption
   * and higher latency.
   *
   * @param batchSize the batch size
   * @return this
   * @see [[http://docs.mongodb.org/manual/reference/method/cursor.batchSize/#cursor.batchSize Batch Size]]
   */
  def batchSize(batchSize: Int): GridFSDownloadStream = {
    // Mutates the underlying driver stream, then returns this for chaining.
    wrapped.batchSize(batchSize)
    this
  }

  /**
   * Reads a sequence of bytes from this stream into the given buffer.
   *
   * @param dst the destination buffer
   * @return an Observable with a single element indicating total number of bytes read into the buffer, or
   *         `-1` if there is no more data because the end of the stream has been reached.
   */
  override def read(dst: ByteBuffer): Observable[Int] = observeInt(wrapped.read(dst, _: SingleResultCallback[java.lang.Integer]))

  /**
   * Skips over and discards n bytes of data from this input stream.
   *
   * @param bytesToSkip the number of bytes to skip
   * @return an Observable with a single element indicating the total number of bytes skipped
   * @since 2.6
   */
  override def skip(bytesToSkip: Long): Observable[Long] = observeLong(wrapped.skip(bytesToSkip, _: SingleResultCallback[java.lang.Long]))

  /**
   * Closes the input stream
   *
   * @return a Observable with a single element indicating when the operation has completed
   */
  override def close(): Observable[Completed] = observeCompleted(wrapped.close(_: SingleResultCallback[Void]))
}
| rozza/mongo-scala-driver | driver/src/main/scala/org/mongodb/scala/gridfs/GridFSDownloadStream.scala | Scala | apache-2.0 | 3,176 |
/*
Copyright 2010 the original author or authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package net.gumbix.hl7dsl.helper
import org.hl7.types.impl._
import org.hl7.types._
import org.hl7.types.enums.AddressPartType._
import scala.collection._
import java.util.ArrayList
import java.util.List
/**
* Class to build an address
* @author Ahmet Gül (guel.ahmet@hotmail.de)
* @author Markus Gumbel
*/
// Mutable view over an HL7 AD (address) datatype. Every setter writes the
// corresponding address part and then invokes the `change` callback so the
// owning object can persist/propagate the update.
class Address(addr: BAG[AD], change: Address => Unit) {

  // No-arg constructor: empty address bag and a no-op change callback.
  def this() = {
    this (new BAGjuCollectionAdapter[AD](new java.util.ArrayList()),
      {a: Address =>})
  }

  // Wraps the AD found in the bag; starts from an empty AD and, while
  // iterating, keeps only the LAST AD seen.
  // NOTE(review): this assumes the bag holds at most one address — confirm.
  private val wrapper: AddressWrapper = {
    var wrapper = new AddressWrapper(ADimpl.valueOf(new ArrayList[ADXP]()))
    if (addr != null) {
      val it = addr.iterator
      while (it.hasNext) {
        val ad: AD = it.next
        wrapper = new AddressWrapper(ad)
      }
    }
    wrapper
  }

  // TODO remove bag
  // Current RIM value re-wrapped in a single-element BAG.
  def toRSBag = {
    val rsList = new ArrayList[AD]()
    rsList.add(wrapper.currentRimValue)
    BAGjuListAdapter.valueOf(rsList)
  }

  // Current RIM representation of this address.
  def toRS = wrapper.currentRimValue

  // Part accessors below: each getter reads one AddressPartType from the
  // wrapper; each setter writes it and fires the change callback.
  def country = wrapper.get(Country)

  def country_=(v: String) {
    wrapper.set(Country, v)
    change(this)
  }

  def county = wrapper.get(CountyOrParish)

  def county_=(v: String) {
    wrapper.set(CountyOrParish, v)
    change(this)
  }

  def postalCode = wrapper.get(PostalCode)

  def postalCode_=(v: String) {
    wrapper.set(PostalCode, v)
    change(this)
  }

  def city = wrapper.get(Municipality)

  def city_=(v: String) {
    wrapper.set(Municipality, v)
    change(this)
  }

  def houseNumber = wrapper.get(BuildingNumber)

  def houseNumber_=(v: String) {
    wrapper.set(BuildingNumber, v)
    change(this)
  }

  def streetName = wrapper.get(StreetName)

  def streetName_=(v: String) {
    wrapper.set(StreetName, v)
    change(this)
  }

  def streetAddressLine = wrapper.get(StreetAddressLine)

  def streetAddressLine_=(v: String) {
    wrapper.set(StreetAddressLine, v)
    change(this)
  }

  def additionalLocator = wrapper.get(AdditionalLocator)

  def additionalLocator_=(v: String) {
    wrapper.set(AdditionalLocator, v)
    change(this)
  }

  def postBox = wrapper.get(PostBox)

  def postBox_=(v: String) {
    wrapper.set(PostBox, v)
    change(this)
  }

  // Renders "<postalCode> <city> " — note the trailing space and that all
  // other address parts are deliberately omitted.
  override def toString = {
    val buffer = new StringBuffer
    buffer.append(postalCode.getOrElse("") + " ")
    buffer.append(city.getOrElse("") + " ")
    buffer.toString
  }
} | markusgumbel/dshl7 | core/src/main/scala/net/gumbix/hl7dsl/helper/Address.scala | Scala | apache-2.0 | 2,960 |
/*
* _____ __ _ __
* / ___/_________ _/ /____ _ (_)__ __/ /________ __
* \\__ \\/ ___/ __ `/ // __ `/ / // / / / __/ ___/ / / /
* ___/ / /__/ /_/ / // /_/ / / // /_/ / /_(__ ) /_/ /
* /____/\\___/\\__,_/_/ \\__,_/__/ / \\__,_/\\__/____/\\__,_/
* /___/
*
* Copyright (c) 2010, Oscar Forero & Scalajutsu Contributors
* All rights reserved.
*
*/
package scalajutsu.experimental.metadata_it
import org.junit.runner.RunWith
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.WordSpec
import org.scalatest.junit.JUnitRunner
/**
* TODO: Write description here!!
*
* @author Oscar Forero
* @version 1.0
*
*/
@RunWith(classOf[JUnitRunner])
class MetadataSpec extends WordSpec with ShouldMatchers {

  "An object" when {
    "enriched with metadata" should {
      import uk.ac.liv.oforero.metadata.plain.Metadata._
      // Attach a metadata entry to a plain Int; the wrapper must stay
      // interchangeable with the underlying value everywhere below.
      val a = 5 addMeta ("serializable" → true)

      "be equal to other without metadata" in {
        // Equality must be transparent in both directions (a == 5 and 5 == a)
        // while the metadata remains queryable.
        assert( a == 5 )
        assert( a.hasMeta)
        assert( a hasMeta "serializable")
        assert( 5 == a)
        var c = 3
        assert( c + 2 == a)
      }

      "be assignable to another variable of the underlying type" in {
        // The enriched value must widen back to the plain underlying type.
        val b: Int = a
        assert( a == b )
        assert( b == a )
      }
    }
  }
}
| oforero/Metadata | compiler-plugin/src/it/test-plugin-project/src/test/scala/MetadataSpecIT.scala | Scala | bsd-3-clause | 1,350 |
/*
* MPEBeliefPropagation.scala
* An MPE algorithm using BP
*
* Created By: Brian Ruttenberg (bruttenberg@cra.com)
* Creation Date: Jan 15, 2014
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email figaro@cra.com for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.algorithm.factored.beliefpropagation
import com.cra.figaro.algorithm._
import com.cra.figaro.language._
import com.cra.figaro.algorithm._
import com.cra.figaro.algorithm.sampling._
import com.cra.figaro.algorithm.factored.factors._
import com.cra.figaro.util
import scala.collection.mutable.{ Set, Map }
/**
* BP algorithm to compute the most probable explanation.
*/
abstract class MPEBeliefPropagation(override val universe: Universe)(
  val dependentUniverses: List[(Universe, List[NamedEvidence[_]])],
  val dependentAlgorithm: (Universe, List[NamedEvidence[_]]) => () => Double)
  extends MPEAlgorithm with ProbabilisticBeliefPropagation {
  // Max-product semiring turns sum-product BP into MPE (Viterbi-style) inference.
  override val semiring = MaxProductSemiring()

  /*
   * Empty for MPE Algorithms
   */
  val targetElements = List[Element[_]]()

  override def initialize() = {
    // Build the factor graph over every element reachable in the universe
    // before delegating to the standard BP initialization.
    val (neededElements, _) = getNeededElements(universe.activeElements, Int.MaxValue)
    factorGraph = new BasicFactorGraph(getFactors(neededElements, targetElements), logSpaceSemiring()): FactorGraph[Double]
    super.initialize
  }

  /*
   * Convert factors to use MaxProduct
   */
  override def getFactors(allElements: List[Element[_]], targetElements: List[Element[_]], upper: Boolean = false): List[Factor[Double]] = {
    val factors = super.getFactors(allElements, targetElements, upper)
    // Not needed since BP now converts factors to log space of the defined semiring
    //factors.map (_.mapTo(x => x, logSpaceSemiring()))
    factors
  }

  // Picks the value whose belief (first tuple component) is maximal for the
  // given element once BP has converged.
  def mostLikelyValue[T](target: Element[T]): T = {
    val beliefs = getBeliefsForElement(target)
    beliefs.maxBy(_._1)._2
  }
}
// Factory overloads: {one-time, anytime} x {no dependent universes, dependent
// universes with default evidence sampler, dependent universes with a custom
// evidence algorithm}. The default evidence sampler draws a fixed 10000 samples.
object MPEBeliefPropagation {
  /**
   * Create a most probable explanation computer using One time BP
   * in the current default universe.
   */
  def apply(myIterations: Int)(implicit universe: Universe) =
    new MPEBeliefPropagation(universe)(
      List(),
      (u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u)) with OneTimeProbabilisticBeliefPropagation with OneTimeMPE { val iterations = myIterations }

  /**
   * Create a most probable explanation computer using Anytime BP
   * in the current default universe.
   */
  def apply()(implicit universe: Universe) =
    new MPEBeliefPropagation(universe)(
      List(),
      (u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u)) with AnytimeProbabilisticBeliefPropagation with AnytimeMPE

  /**
   * Create a most probable explanation computer using One time BP using the given
   * dependent universes in the current default universe.
   */
  def apply(dependentUniverses: List[(Universe, List[NamedEvidence[_]])], myIterations: Int)(implicit universe: Universe) =
    new MPEBeliefPropagation(universe)(
      dependentUniverses,
      (u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u)) with OneTimeProbabilisticBeliefPropagation with OneTimeMPE { val iterations = myIterations }

  /**
   * Create a most probable explanation computer using Anytime BP using the given
   * dependent universes in the current default universe.
   */
  def apply(dependentUniverses: List[(Universe, List[NamedEvidence[_]])])(implicit universe: Universe) =
    new MPEBeliefPropagation(universe)(
      dependentUniverses,
      (u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u)) with AnytimeProbabilisticBeliefPropagation with AnytimeMPE

  /**
   * Create a most probable explanation computer using One time BP
   * using the given dependent universes in the current default universe.
   * Use the given dependent algorithm function to
   * determine the algorithm to use to compute probability of evidence in each dependent universe.
   */
  def apply(
    dependentUniverses: List[(Universe, List[NamedEvidence[_]])],
    dependentAlgorithm: (Universe, List[NamedEvidence[_]]) => () => Double, myIterations: Int)(implicit universe: Universe) =
    new MPEBeliefPropagation(universe)(
      dependentUniverses,
      dependentAlgorithm) with OneTimeProbabilisticBeliefPropagation with OneTimeMPE { val iterations = myIterations }

  /**
   * Create a most probable explanation computer using Anytime BP
   * using the given dependent universes in the current default universe.
   * Use the given dependent algorithm function to
   * determine the algorithm to use to compute probability of evidence in each dependent universe.
   */
  def apply(
    dependentUniverses: List[(Universe, List[NamedEvidence[_]])],
    dependentAlgorithm: (Universe, List[NamedEvidence[_]]) => () => Double)(implicit universe: Universe) =
    new MPEBeliefPropagation(universe)(
      dependentUniverses,
      dependentAlgorithm) with AnytimeProbabilisticBeliefPropagation with AnytimeMPE
}
| scottcb/figaro | Figaro/src/main/scala/com/cra/figaro/algorithm/factored/beliefpropagation/MPEBeliefPropagation.scala | Scala | bsd-3-clause | 5,242 |
import common._
/** Utilities for the box-blur assignment: RGBA packing/unpacking, a simple
 *  mutable image type, and the per-pixel blur kernel.
 */
package object scalashop {

  /** The value of every pixel is represented as a 32 bit integer. */
  type RGBA = Int

  /** Returns the red component (bits 24-31). */
  def red(c: RGBA): Int = (0xff000000 & c) >>> 24

  /** Returns the green component (bits 16-23). */
  def green(c: RGBA): Int = (0x00ff0000 & c) >>> 16

  /** Returns the blue component (bits 8-15). */
  def blue(c: RGBA): Int = (0x0000ff00 & c) >>> 8

  /** Returns the alpha component (bits 0-7). */
  def alpha(c: RGBA): Int = (0x000000ff & c) >>> 0

  /** Used to create an RGBA value from separate components.
   *  Only the low 8 bits of each component are meaningful.
   */
  def rgba(r: Int, g: Int, b: Int, a: Int): RGBA = {
    (r << 24) | (g << 16) | (b << 8) | (a << 0)
  }

  /** Restricts the integer into the specified inclusive range [min, max]. */
  def clamp(v: Int, min: Int, max: Int): Int = {
    if (v < min) min
    else if (v > max) max
    else v
  }

  /** Image is a two-dimensional matrix of pixel values, stored row-major. */
  class Img(val width: Int, val height: Int, private val data: Array[RGBA]) {
    def this(w: Int, h: Int) = this(w, h, new Array(w * h))
    def apply(x: Int, y: Int): RGBA = data(y * width + x)
    def update(x: Int, y: Int, c: RGBA): Unit = data(y * width + x) = c
  }

  /** Computes the blurred RGBA value of a single pixel of the input image.
   *
   *  The result is the component-wise mean of all pixels in the square window
   *  of side `2 * radius + 1` centred on (x, y), clipped to the image bounds.
   *  With radius 0 (or a 1x1 image) the pixel is returned unchanged.
   */
  def boxBlurKernel(src: Img, x: Int, y: Int, radius: Int): RGBA = {
    // Hoist the window bounds out of the loops: the original recomputed
    // `clamp` in every loop condition and again for every pixel access.
    // Because clamp is monotone, xMin <= xMax and yMin <= yMax always hold,
    // so count is at least 1 and the division below is safe.
    val xMin = clamp(x - radius, 0, src.width - 1)
    val xMax = clamp(x + radius, 0, src.width - 1)
    val yMin = clamp(y - radius, 0, src.height - 1)
    val yMax = clamp(y + radius, 0, src.height - 1)
    var r, g, b, a = 0
    var count = 0
    var cx = xMin
    while (cx <= xMax) {
      var cy = yMin
      while (cy <= yMax) {
        val pixel = src(cx, cy)
        r += red(pixel)
        g += green(pixel)
        b += blue(pixel)
        a += alpha(pixel)
        count += 1
        cy += 1
      }
      cx += 1
    }
    rgba(r / count, g / count, b / count, a / count)
  }
}
| syhan/coursera | parprog1/scalashop/src/main/scala/scalashop/package.scala | Scala | gpl-3.0 | 1,957 |
package com.szadowsz.gospel.core.engine.state
import java.util.List
import com.szadowsz.gospel.core.data._
import com.szadowsz.gospel.core.engine.subgoal.tree.SubGoalTree
import com.szadowsz.gospel.core.engine.{Engine, EngineRunner}
/**
* @author Matteo Iuliani
*/
// Engine state entered when a goal has thrown: walks back up the execution
// context chain looking for a matching catch/3 (ISO Prolog) or java_catch/3
// construct, installs its handler as the next goal, or halts if none matches.
class ExceptionState(protected override val runner : EngineRunner) extends State {
  /**
   * the name of the engine state.
   */
  protected val stateName: String = "Exception"

  // Template terms used to recognise the two catch constructs on the stack.
  private[gospel] final val catchTerm: Term = Term.createTerm("catch(Goal, Catcher, Handler)")
  private[gospel] final val javaCatchTerm: Term = Term.createTerm("java_catch(Goal, List, Finally)")

  // Dispatch: goals named "throw" are ISO Prolog exceptions; anything else is
  // treated as a Java exception (java_throw).
  private[gospel] def doJob(e: Engine) {
    val errorType: String = e.context.currentGoal.getName
    if (errorType == "throw")
      prologException(e)
    else
      javaException(e)
  }

  // Handles throw/1: searches ancestor contexts for a catch/3 whose second
  // argument unifies with the thrown term. Mutates e.context/e.nextState.
  private def prologException(e: Engine) {
    val errorTerm: Term = e.context.currentGoal.getArg(0)
    e.context = e.context.fatherCtx
    // step to the halt state if the error cannot be handled
    if (e.context == null) {
      e.nextState = runner.END_HALT
      return
    }
    while (true) { // backward tree search for a resolution of Subgoal catch/3 whose second argument unifies with the Exception thrown
      // we have identified the ExecutionContext with the proper subgoal Catch/3
      if (e.context.currentGoal.matches(catchTerm) && e.context.currentGoal.getArg(1).matches(errorTerm)) {
        runner.cut // Cut all choice points generated by the Erroneous Goal
        // Unify the argument of throw / 1 with the second argument of Catch / 3
        val unifiedVars: List[Var] = e.context.trailingVars.getHead
        e.context.currentGoal.getArg(1).unify(unifiedVars, unifiedVars, errorTerm)
        // insert the manager of the error to the head of the Subgoal list
        // to perform the third argument of catch/3. The manager must
        // also be prepared for maintaining the replacements during the process of
        // unification between the argument of throw/1 and the second argument of catch/3
        var handlerTerm: Term = e.context.currentGoal.getArg(2)
        val curHandlerTerm: Term = handlerTerm.getTerm
        // step to the halt state if the error cannot be handled
        if (!(curHandlerTerm.isInstanceOf[Struct])) {
          e.nextState = runner.END_FALSE
          return
        }
        // Code inserted to allow evaluation of meta-clause
        // such as p(X) :- X. When evaluating directly terms,
        // they are converted to execution of a call/1 predicate.
        // This enables the dynamic linking of built-ins for
        // terms coming from outside the demonstration context.
        if (handlerTerm ne curHandlerTerm)
          handlerTerm = new Struct("call", curHandlerTerm)
        val handler: Struct = handlerTerm.asInstanceOf[Struct]
        runner.identify(handler)
        val sgt: SubGoalTree = new SubGoalTree
        sgt.addLeaf(handler)
        runner.pushSubGoal(sgt)
        e.context.currentGoal = handler
        e.nextState = runner.GOAL_SELECTION
        return
      } else {
        // step to the halt state if the error cannot be handled
        e.context = e.context.fatherCtx
        if (e.context == null) {
          e.nextState = runner.END_HALT
          return
        }
      }
    }
  }

  // Handles java_throw/1: like prologException but matches against the
  // (Exception, Handler) pairs held in java_catch/3's list argument, and may
  // additionally schedule a finally block after the handler.
  private def javaException(e: Engine) {
    val exceptionTerm: Term = e.context.currentGoal.getArg(0)
    e.context = e.context.fatherCtx
    // step to the halt state if the error cannot be handled
    if (e.context == null) {
      e.nextState = runner.END_HALT
      return
    }
    while (true) { // backward tree search for a resolution of Subgoal java_catch/3 whose argument unifies with the Exception thrown
      // we have identified the ExecutionContext with the proper subgoal java_Catch/3
      if (e.context.currentGoal.matches(javaCatchTerm) && javaMatch(e.context.currentGoal.getArg(1), exceptionTerm)) {
        runner.cut // cut all the choice points generated by JavaGoal
        // Unify the topic of java_throw/1 with the appropriate catch
        val unifiedVars: List[Var] = e.context.trailingVars.getHead
        var handlerTerm: Term = javaUnify(e.context.currentGoal.getArg(1), exceptionTerm, unifiedVars)
        if (handlerTerm == null) {
          e.nextState = runner.END_FALSE
          return
        }
        // Insert the catch and (if present) finally blocks at the head of
        // the subgoals to perform List. The two predicates must also
        // be prepared for implementing & maintaining the substitutions
        // Made during the process of unification between
        // The exception and the catch block
        val curHandlerTerm: Term = handlerTerm.getTerm
        if (!(curHandlerTerm.isInstanceOf[Struct])) {
          e.nextState = runner.END_FALSE
          return
        }
        var finallyTerm: Term = e.context.currentGoal.getArg(2)
        val curFinallyTerm: Term = finallyTerm.getTerm
        // check if we have a finally block: the integer 0 means "none",
        // any other integer is a syntax error, otherwise it must be a Struct.
        var isFinally: Boolean = true
        if (curFinallyTerm.isInstanceOf[numeric.Int]) {
          val finallyInt: numeric.Int = curFinallyTerm.asInstanceOf[numeric.Int]
          if (finallyInt.intValue == 0){
            isFinally = false
          } else { // syntax error
            e.nextState = runner.END_FALSE
            return
          }
        } else if (!(curFinallyTerm.isInstanceOf[Struct])) {
          e.nextState = runner.END_FALSE
          return
        }
        // Code inserted to allow evaluation of meta-clause
        // such as p(X) :- X. When evaluating directly terms,
        // they are converted to execution of a call/1 predicate.
        // This enables the dynamic linking of built-ins for
        // terms coming from outside the demonstration context.
        if (handlerTerm ne curHandlerTerm)
          handlerTerm = new Struct("call", curHandlerTerm)
        if (finallyTerm ne curFinallyTerm)
          finallyTerm = new Struct("call", curFinallyTerm)
        val handler: Struct = handlerTerm.asInstanceOf[Struct]
        runner.identify(handler)
        val sgt: SubGoalTree = new SubGoalTree
        sgt.addLeaf(handler)
        if (isFinally) {
          val finallyStruct: Struct = finallyTerm.asInstanceOf[Struct]
          runner.identify(finallyStruct)
          sgt.addLeaf(finallyStruct)
        }
        runner.pushSubGoal(sgt)
        e.context.currentGoal = handler
        e.nextState = runner.GOAL_SELECTION
        return
      }
      else {
        e.context = e.context.fatherCtx
        if (e.context == null) {
          e.nextState = runner.END_HALT
          return
        }
      }
    }
  }

  // checks whether the term is a catch mergeable with the argument of the exception thrown
  // (i.e. a non-empty list containing a ','/2 pair whose first element matches it)
  private def javaMatch(arg1: Term, exceptionTerm: Term): Boolean = {
    if (!arg1.isList) return false
    val list: Struct = arg1.asInstanceOf[Struct]
    if (list.isEmptyList) return false
    val it = list.iterator
    while (it.hasNext) {
      val nextTerm: Term = it.next
      if (nextTerm.isCompound){
        val element: Struct = nextTerm.asInstanceOf[Struct]
        if ((element.getName == ",")&& element.getArity == 2){
          if (element.getArg(0).matches(exceptionTerm)) {
            return true
          }
        }
      }
    }
    return false
  }

  // Unifies the predicate of java_throw/1 with the right catch statement and returns the corresponding handler
  // Returns null as a "no matching catch" sentinel; callers must check for it.
  private def javaUnify(arg1: Term, exceptionTerm: Term, unifiedVars: List[Var]): Term = {
    val list: Struct = arg1.asInstanceOf[Struct]
    val it: Iterator[_ <: Term] = list.iterator.filter(_.isCompound)
    for (nextTerm <- it) {
      val element: Struct = nextTerm.asInstanceOf[Struct]
      if ((element.getName == ",")&& element.getArity == 2){
        if (element.getArg(0).matches(exceptionTerm)) {
          // First matching pair wins: bind the exception, return its handler.
          element.getArg(0).unify(unifiedVars, unifiedVars, exceptionTerm)
          return element.getArg(1)
        }
      }
    }
    return null
  }
} | zakski/project-soisceal | scala-core/src/main/scala/com/szadowsz/gospel/core/engine/state/ExceptionState.scala | Scala | lgpl-3.0 | 8,064 |
package demo
package components
package reactselect
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.html_<^._
// Static documentation page for the react-select wrapper, rendered with the
// scalajs-react vdom DSL inside the shared InfoTemplate.
object ReactSelectInfo {

  val cssSettings = scalacss.devOrProdDefaults
  import cssSettings._

  object Style extends StyleSheet.Inline {
    import dsl._
    // Centered headline style (only style used by this page).
    val content = style(textAlign.center, fontSize(30.px), paddingTop(40.px))
  }

  // Stateless component: takes no props (Unit) and renders fixed content.
  val component = ScalaComponent
    .builder[Unit]("ReactSelectInfo")
    .render(P => {
      InfoTemplate(componentFilePath = "reactselect/Select.scala")(
        <.div(
          <.h3("React Select "),
          <.p("scalajs-react wrapper for ",
            RedLink("react select", "https://github.com/JedWatson/react-select")),
          <.div(
            <.h4("Supported Version :"),
            <.span("1.0.0-beta")
          ),
          <.div(
            <.h4("How To Use :"),
            <.p("Follow the installation guide from :",
              RedLink("here", "https://github.com/JedWatson/react-select#installation"))
          )
        )
      )
    })
    .build

  def apply() = component()
}
| rleibman/scalajs-react-components | demo/src/main/scala/demo/components/reactselect/ReactSelectInfo.scala | Scala | apache-2.0 | 1,090 |
/*
* Copyright 2010 Michael Fortin <mike@brzy.org>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*/
package org.brzy.calista.server
import org.brzy.calista.{Session, SessionManager, SessionImpl}
/**
* Used to connect to the embedded server in unit tests.
*
* @author Michael Fortin
*/
trait EmbeddedTest {
  // Shared embedded server object. NOTE(review): referencing it presumably
  // starts the embedded server on first use — confirm in EmbeddedServer.
  val server = EmbeddedServer
  // Session factory for the "Test" cluster on the embedded server's port 9161.
  val sessionManager = new SessionManager("Test", "localhost",9161)
def doWith(f: (Session) => Unit) {
val session = sessionManager.createSession
f(session)
session.close()
}
} | m410/calista | src/test/scala/org/brzy/calista/server/EmbeddedTest.scala | Scala | apache-2.0 | 1,045 |
// Testing that isEmpty and get are viewed with `memberType` from `Casey1`.
trait T[A, B >: Null] { def isEmpty: A = false.asInstanceOf[A]; def get: B = null}
class Casey1() extends T[Boolean, String]
object Casey1 { def unapply(a: Casey1) = a }
object Test {
def main(args: Array[String]) {
val c @ Casey1(x) = new Casey1()
assert(x == c.get)
}
}
| felixmulder/scala | test/files/run/t7850c.scala | Scala | bsd-3-clause | 361 |
package modules
import com.google.inject.{ AbstractModule, Provides }
import com.mohiva.play.silhouette.api.repositories.AuthInfoRepository
import com.mohiva.play.silhouette.api.services._
import com.mohiva.play.silhouette.api.util._
import com.mohiva.play.silhouette.api.{ Environment, EventBus }
import com.mohiva.play.silhouette.impl.authenticators._
import com.mohiva.play.silhouette.impl.daos.DelegableAuthInfoDAO
import com.mohiva.play.silhouette.impl.providers._
import com.mohiva.play.silhouette.impl.providers.oauth1._
import com.mohiva.play.silhouette.impl.providers.oauth1.secrets.{ CookieSecretProvider, CookieSecretSettings }
import com.mohiva.play.silhouette.impl.providers.oauth1.services.PlayOAuth1Service
import com.mohiva.play.silhouette.impl.providers.oauth2._
import com.mohiva.play.silhouette.impl.providers.oauth2.state.{ CookieStateProvider, CookieStateSettings, DummyStateProvider }
import com.mohiva.play.silhouette.impl.providers.openid.YahooProvider
import com.mohiva.play.silhouette.impl.providers.openid.services.PlayOpenIDService
import com.mohiva.play.silhouette.impl.repositories.DelegableAuthInfoRepository
import com.mohiva.play.silhouette.impl.services._
import com.mohiva.play.silhouette.impl.util._
import models.User
import models.daos._
import models.services.{ UserService, UserServiceImpl }
import net.codingwell.scalaguice.ScalaModule
import play.api.Play
import play.api.Play.current
import play.api.libs.concurrent.Execution.Implicits._
import play.api.libs.openid.OpenIdClient
/**
* The Guice module which wires all Silhouette dependencies.
*/
class SilhouetteModule extends AbstractModule with ScalaModule {
  /**
   * Configures the module.
   *
   * Wires the user service/DAO, the per-provider auth-info DAOs, and the
   * crypto/cache utility singletons into the Guice injector.
   */
  def configure() {
    bind[UserService].to[UserServiceImpl]
    bind[UserDAO].to[UserDAOImpl]
    // One delegable DAO per credential type handled by Silhouette.
    bind[DelegableAuthInfoDAO[PasswordInfo]].to[PasswordInfoDAO]
    bind[DelegableAuthInfoDAO[OAuth1Info]].to[OAuth1InfoDAO]
    bind[DelegableAuthInfoDAO[OAuth2Info]].to[OAuth2InfoDAO]
    bind[DelegableAuthInfoDAO[OpenIDInfo]].to[OpenIDInfoDAO]
    bind[CacheLayer].to[PlayCacheLayer]
    bind[HTTPLayer].toInstance(new PlayHTTPLayer)
    bind[IDGenerator].toInstance(new SecureRandomIDGenerator())
    bind[PasswordHasher].toInstance(new BCryptPasswordHasher)
    bind[FingerprintGenerator].toInstance(new DefaultFingerprintGenerator(false))
    bind[EventBus].toInstance(EventBus())
  }
  /**
   * Provides the Silhouette environment.
   *
   * @param userService The user service implementation.
   * @param authenticatorService The authentication service implementation.
   * @param eventBus The event bus instance.
   * @return The Silhouette environment.
   */
  @Provides
  def provideEnvironment(
    userService: UserService,
    authenticatorService: AuthenticatorService[SessionAuthenticator],
    eventBus: EventBus): Environment[User, SessionAuthenticator] = {

    Environment[User, SessionAuthenticator](
      userService,
      authenticatorService,
      // No additional request providers are registered.
      Seq(),
      eventBus
    )
  }
  /**
   * Provides the social provider registry.
   *
   * Only GitHub is currently enabled; the other providers are wired in the
   * signature/body as commented-out placeholders, ready to be re-enabled.
   *
   * @param githubProvider The GitHub provider implementation.
   * @return The Silhouette environment.
   */
  @Provides
  def provideSocialProviderRegistry(
    // facebookProvider: FacebookProvider,
    // googleProvider: GoogleProvider,
    // vkProvider: VKProvider,
    // clefProvider: ClefProvider,
    // twitterProvider: TwitterProvider,
    // xingProvider: XingProvider,
    // yahooProvider: YahooProvider,
    githubProvider: GitHubProvider): SocialProviderRegistry = {

    SocialProviderRegistry(Seq(
      // googleProvider,
      // facebookProvider,
      // twitterProvider,
      // vkProvider,
      // xingProvider,
      // yahooProvider,
      // clefProvider,
      githubProvider
    ))
  }
  /**
   * Provides the authenticator service.
   *
   * @param fingerprintGenerator The fingerprint generator implementation.
   * @return The authenticator service.
   */
  @Provides
  def provideAuthenticatorService(
    fingerprintGenerator: FingerprintGenerator): AuthenticatorService[SessionAuthenticator] = {

    // Each `.get` throws at startup if the configuration key is missing;
    // authenticatorIdleTimeout is genuinely optional (Option[Int]).
    new SessionAuthenticatorService(SessionAuthenticatorSettings(
      sessionKey = Play.configuration.getString("silhouette.authenticator.sessionKey").get,
      encryptAuthenticator = Play.configuration.getBoolean("silhouette.authenticator.encryptAuthenticator").get,
      useFingerprinting = Play.configuration.getBoolean("silhouette.authenticator.useFingerprinting").get,
      authenticatorIdleTimeout = Play.configuration.getInt("silhouette.authenticator.authenticatorIdleTimeout"),
      authenticatorExpiry = Play.configuration.getInt("silhouette.authenticator.authenticatorExpiry").get
    ), fingerprintGenerator, Clock())
  }
  /**
   * Provides the auth info repository.
   *
   * Aggregates the four credential-type DAOs behind one repository facade.
   *
   * @param passwordInfoDAO The implementation of the delegable password auth info DAO.
   * @param oauth1InfoDAO The implementation of the delegable OAuth1 auth info DAO.
   * @param oauth2InfoDAO The implementation of the delegable OAuth2 auth info DAO.
   * @param openIDInfoDAO The implementation of the delegable OpenID auth info DAO.
   * @return The auth info repository instance.
   */
  @Provides
  def provideAuthInfoRepository(
    passwordInfoDAO: DelegableAuthInfoDAO[PasswordInfo],
    oauth1InfoDAO: DelegableAuthInfoDAO[OAuth1Info],
    oauth2InfoDAO: DelegableAuthInfoDAO[OAuth2Info],
    openIDInfoDAO: DelegableAuthInfoDAO[OpenIDInfo]): AuthInfoRepository = {

    new DelegableAuthInfoRepository(passwordInfoDAO, oauth1InfoDAO, oauth2InfoDAO, openIDInfoDAO)
  }
/**
* Provides the avatar service.
*
* @param httpLayer The HTTP layer implementation.
* @return The avatar service implementation.
*/
@Provides
def provideAvatarService(httpLayer: HTTPLayer): AvatarService = new GravatarService(httpLayer)
/**
* Provides the OAuth1 token secret provider.
*
* @return The OAuth1 token secret provider implementation.
*/
@Provides
def provideOAuth1TokenSecretProvider: OAuth1TokenSecretProvider = {
new CookieSecretProvider(CookieSecretSettings(
cookieName = Play.configuration.getString("silhouette.oauth1TokenSecretProvider.cookieName").get,
cookiePath = Play.configuration.getString("silhouette.oauth1TokenSecretProvider.cookiePath").get,
cookieDomain = Play.configuration.getString("silhouette.oauth1TokenSecretProvider.cookieDomain"),
secureCookie = Play.configuration.getBoolean("silhouette.oauth1TokenSecretProvider.secureCookie").get,
httpOnlyCookie = Play.configuration.getBoolean("silhouette.oauth1TokenSecretProvider.httpOnlyCookie").get,
expirationTime = Play.configuration.getInt("silhouette.oauth1TokenSecretProvider.expirationTime").get
), Clock())
}
/**
* Provides the OAuth2 state provider.
*
* @param idGenerator The ID generator implementation.
* @return The OAuth2 state provider implementation.
*/
@Provides
def provideOAuth2StateProvider(idGenerator: IDGenerator): OAuth2StateProvider = {
new CookieStateProvider(CookieStateSettings(
cookieName = Play.configuration.getString("silhouette.oauth2StateProvider.cookieName").get,
cookiePath = Play.configuration.getString("silhouette.oauth2StateProvider.cookiePath").get,
cookieDomain = Play.configuration.getString("silhouette.oauth2StateProvider.cookieDomain"),
secureCookie = Play.configuration.getBoolean("silhouette.oauth2StateProvider.secureCookie").get,
httpOnlyCookie = Play.configuration.getBoolean("silhouette.oauth2StateProvider.httpOnlyCookie").get,
expirationTime = Play.configuration.getInt("silhouette.oauth2StateProvider.expirationTime").get
), idGenerator, Clock())
}
/**
* Provides the credentials provider.
*
* @param authInfoRepository The auth info repository implementation.
* @param passwordHasher The default password hasher implementation.
* @return The credentials provider.
*/
@Provides
def provideCredentialsProvider(
authInfoRepository: AuthInfoRepository,
passwordHasher: PasswordHasher): CredentialsProvider = {
new CredentialsProvider(authInfoRepository, passwordHasher, Seq(passwordHasher))
}
/**
* Provides the Facebook provider.
*
* @param httpLayer The HTTP layer implementation.
* @param stateProvider The OAuth2 state provider implementation.
* @return The Facebook provider.
*/
@Provides
def provideFacebookProvider(httpLayer: HTTPLayer, stateProvider: OAuth2StateProvider): FacebookProvider = {
new FacebookProvider(httpLayer, stateProvider, OAuth2Settings(
authorizationURL = Play.configuration.getString("silhouette.facebook.authorizationURL"),
accessTokenURL = Play.configuration.getString("silhouette.facebook.accessTokenURL").get,
redirectURL = Play.configuration.getString("silhouette.facebook.redirectURL").get,
clientID = Play.configuration.getString("silhouette.facebook.clientID").getOrElse(""),
clientSecret = Play.configuration.getString("silhouette.facebook.clientSecret").getOrElse(""),
scope = Play.configuration.getString("silhouette.facebook.scope")))
}
/**
* Provides the Google provider.
*
* @param httpLayer The HTTP layer implementation.
* @param stateProvider The OAuth2 state provider implementation.
* @return The Google provider.
*/
@Provides
def provideGoogleProvider(httpLayer: HTTPLayer, stateProvider: OAuth2StateProvider): GoogleProvider = {
new GoogleProvider(httpLayer, stateProvider, OAuth2Settings(
authorizationURL = Play.configuration.getString("silhouette.google.authorizationURL"),
accessTokenURL = Play.configuration.getString("silhouette.google.accessTokenURL").get,
redirectURL = Play.configuration.getString("silhouette.google.redirectURL").get,
clientID = Play.configuration.getString("silhouette.google.clientID").getOrElse(""),
clientSecret = Play.configuration.getString("silhouette.google.clientSecret").getOrElse(""),
scope = Play.configuration.getString("silhouette.google.scope")))
}
/**
* Provides the VK provider.
*
* @param httpLayer The HTTP layer implementation.
* @param stateProvider The OAuth2 state provider implementation.
* @return The VK provider.
*/
@Provides
def provideVKProvider(httpLayer: HTTPLayer, stateProvider: OAuth2StateProvider): VKProvider = {
new VKProvider(httpLayer, stateProvider, OAuth2Settings(
authorizationURL = Play.configuration.getString("silhouette.vk.authorizationURL"),
accessTokenURL = Play.configuration.getString("silhouette.vk.accessTokenURL").get,
redirectURL = Play.configuration.getString("silhouette.vk.redirectURL").get,
clientID = Play.configuration.getString("silhouette.vk.clientID").getOrElse(""),
clientSecret = Play.configuration.getString("silhouette.vk.clientSecret").getOrElse(""),
scope = Play.configuration.getString("silhouette.vk.scope")))
}
/**
* Provides the Clef provider.
*
* @param httpLayer The HTTP layer implementation.
* @return The Clef provider.
*/
@Provides
def provideClefProvider(httpLayer: HTTPLayer): ClefProvider = {
new ClefProvider(httpLayer, new DummyStateProvider, OAuth2Settings(
accessTokenURL = Play.configuration.getString("silhouette.clef.accessTokenURL").get,
redirectURL = Play.configuration.getString("silhouette.clef.redirectURL").get,
clientID = Play.configuration.getString("silhouette.clef.clientID").getOrElse(""),
clientSecret = Play.configuration.getString("silhouette.clef.clientSecret").getOrElse("")))
}
/**
* Provides the Twitter provider.
*
* @param httpLayer The HTTP layer implementation.
* @param tokenSecretProvider The token secret provider implementation.
* @return The Twitter provider.
*/
@Provides
def provideTwitterProvider(httpLayer: HTTPLayer, tokenSecretProvider: OAuth1TokenSecretProvider): TwitterProvider = {
val settings = OAuth1Settings(
requestTokenURL = Play.configuration.getString("silhouette.twitter.requestTokenURL").get,
accessTokenURL = Play.configuration.getString("silhouette.twitter.accessTokenURL").get,
authorizationURL = Play.configuration.getString("silhouette.twitter.authorizationURL").get,
callbackURL = Play.configuration.getString("silhouette.twitter.callbackURL").get,
consumerKey = Play.configuration.getString("silhouette.twitter.consumerKey").getOrElse(""),
consumerSecret = Play.configuration.getString("silhouette.twitter.consumerSecret").getOrElse(""))
new TwitterProvider(httpLayer, new PlayOAuth1Service(settings), tokenSecretProvider, settings)
}
/**
* Provides the Xing provider.
*
* @param httpLayer The HTTP layer implementation.
* @param tokenSecretProvider The token secret provider implementation.
* @return The Xing provider.
*/
@Provides
def provideXingProvider(httpLayer: HTTPLayer, tokenSecretProvider: OAuth1TokenSecretProvider): XingProvider = {
val settings = OAuth1Settings(
requestTokenURL = Play.configuration.getString("silhouette.xing.requestTokenURL").get,
accessTokenURL = Play.configuration.getString("silhouette.xing.accessTokenURL").get,
authorizationURL = Play.configuration.getString("silhouette.xing.authorizationURL").get,
callbackURL = Play.configuration.getString("silhouette.xing.callbackURL").get,
consumerKey = Play.configuration.getString("silhouette.xing.consumerKey").getOrElse(""),
consumerSecret = Play.configuration.getString("silhouette.xing.consumerSecret").getOrElse(""))
new XingProvider(httpLayer, new PlayOAuth1Service(settings), tokenSecretProvider, settings)
}
/**
* Provides the Yahoo provider.
*
* @param cacheLayer The cache layer implementation.
* @param httpLayer The HTTP layer implementation.
* @param client The OpenID client implementation.
* @return The Yahoo provider.
*/
@Provides
def provideYahooProvider(cacheLayer: CacheLayer, httpLayer: HTTPLayer, client: OpenIdClient): YahooProvider = {
import scala.collection.JavaConversions._
val settings = OpenIDSettings(
providerURL = Play.configuration.getString("silhouette.yahoo.providerURL").get,
callbackURL = Play.configuration.getString("silhouette.yahoo.callbackURL").get,
axRequired = Play.configuration.getObject("silhouette.yahoo.axRequired").map(_.mapValues(_.unwrapped().toString).toSeq).getOrElse(Seq()),
axOptional = Play.configuration.getObject("silhouette.yahoo.axOptional").map(_.mapValues(_.unwrapped().toString).toSeq).getOrElse(Seq()),
realm = Play.configuration.getString("silhouette.yahoo.realm"))
new YahooProvider(httpLayer, new PlayOpenIDService(client, settings), settings)
}
/**
* Provides the GitHub provider.
*
* @param httpLayer The HTTP layer implementation.
* @param stateProvider The OAuth2 state provider implementation.
* @return The GitHub provider.
*/
@Provides
def provideGitHubProvider(httpLayer: HTTPLayer, stateProvider: OAuth2StateProvider): GitHubProvider = {
new GitHubProvider(httpLayer, stateProvider, OAuth2Settings(
authorizationURL = Play.configuration.getString("silhouette.github.authorizationUrl"),
accessTokenURL = Play.configuration.getString("silhouette.github.accessTokenUrl").get,
redirectURL = Play.configuration.getString("silhouette.github.redirectURL").get,
clientID = Play.configuration.getString("silhouette.github.clientId").getOrElse(""),
clientSecret = Play.configuration.getString("silhouette.github.clientSecret").getOrElse(""),
scope = Play.configuration.getString("silhouette.github.scope")))
}
}
| joaoraf/dwws-test-01 | app/modules/SilhouetteModule.scala | Scala | apache-2.0 | 16,134 |
package jitd.rewrite
import jitd.spec._
import jitd.typecheck._
object AccessorToFunction
{
  /** Lowers an [[Accessor]] specification into a concrete [[FunctionDefinition]].
    *
    * The generated function takes the node reference plus the accessor's own
    * arguments, dispatches on the runtime node type, inlines the per-type
    * handler expression, and is registered with the typechecker before being
    * returned.
    *
    * @param definition the JITD definition providing node layouts and the typechecker
    * @param accessor   the accessor being compiled
    * @param prefix     prepended to the accessor name to form the function name
    */
  def apply(definition: Definition, accessor: Accessor, prefix:String): FunctionDefinition =
  {
    val renderName = prefix + accessor.name
    // Argument list: the node reference first, then the accessor's inputs
    // (primitives by value, everything else by const reference), then the
    // declared return fields as output references.
    val args = Seq(
      ("jitd_node_ref", TNodeRef(), FunctionArgType.ConstInputRef)
    ) ++ accessor.args.map { field => (field.name, field.t,
        if(field.t.isInstanceOf[PrimType]) { FunctionArgType.Input } else { FunctionArgType.ConstInputRef }
      )
    } ++ accessor.ret.map { field => (field.name, field.t, FunctionArgType.OutputRef) }
    val function =
      FunctionDefinition(
        renderName,
        Some(accessor.returnType),
        args,
        // Body: extract the concrete node behind the reference and branch on
        // its type; unhandled node types become a runtime error.
        ExtractNode(
          "jitd_node",
          WrapNodeRef(Var("jitd_node_ref")),
          accessor.lookups.toSeq.map { case (nodeType, handler) =>
            // Make each field of this node type addressable by name inside
            // the handler expression.
            val fieldMap = definition.node(nodeType).fields.map { field =>
              field.name -> NodeSubscript(Var("jitd_node"), field.name)
            }.toMap
            nodeType -> InlineFunctions(
              InlineVars(handler, fieldMap),
              // `delegate(child)` inside a handler becomes a recursive call to
              // this same generated function, forwarding all original args.
              Map[String,(Seq[String],Expression)](
                ("delegate", (
                  Seq("delegate_jitd_node"),
                  FunctionCall(renderName,
                    Seq(Var("delegate_jitd_node"))++
                    (accessor.args ++ accessor.ret).map { _.name }.map { Var(_) }
                  )
                ))
              )
            )
          },
          Error(s"Unhandled Node Type in ${accessor.name}")
        )
      )
    // Register the (possibly recursive) function's signature before
    // typechecking its own body.
    definition.typechecker.withFunctions(
      function.name -> function.signature
    ).check {
      function
    }
  }
} | UBOdin/jitd-synthesis | src/main/scala/jitd/rewrite/AccessorToFunction.scala | Scala | apache-2.0 | 1,749 |
package singleton.ops.impl
import singleton.twoface.impl.TwoFaceAny
import scala.reflect.macros.whitebox
private object MacroCache {
  import scala.collection.mutable
  /** Process-wide store that survives between individual macro expansions. */
  val cache = mutable.Map.empty[Any, Any]
  /** Returns the value memoized under `key`, if one exists. */
  def get(key : Any) : Option[Any] = cache.get(key)
  /** Memoizes `value` under `key` (overwriting any previous entry) and echoes it back. */
  def add[V <: Any](key : Any, value : V) : V = {
    cache.update(key, value)
    value
  }
}
trait GeneralMacros {
val c: whitebox.Context
import c.universe._
  // Type symbol the innermost enclosing implicit search is trying to satisfy
  // (None when this macro is not expanded from an implicit search). Used as
  // the anchor for custom `implicitNotFound` error annotations.
  val defaultAnnotatedSym : Option[TypeSymbol] =
    if (c.enclosingImplicits.isEmpty) None else c.enclosingImplicits.last.pt match {
      case TypeRef(_,sym,_) => Some(sym.asType)
      case x => Some(x.typeSymbol.asType)
    }
  private val func1Sym = symbolOf[Function1[_,_]] // cached symbol of scala.Function1
  // One cached type symbol per supported operation identifier (see `OpId`).
  // The macro dispatches on these symbols when evaluating an `OpMacro` type.
  object funcTypes {
    val Arg = symbolOf[OpId.Arg]
    val AcceptNonLiteral = symbolOf[OpId.AcceptNonLiteral]
    val GetArg = symbolOf[OpId.GetArg]
    val ImplicitFound = symbolOf[OpId.ImplicitFound]
    val EnumCount = symbolOf[OpId.EnumCount]
    val Id = symbolOf[OpId.Id]
    val ToNat = symbolOf[OpId.ToNat]
    val ToChar = symbolOf[OpId.ToChar]
    val ToInt = symbolOf[OpId.ToInt]
    val ToLong = symbolOf[OpId.ToLong]
    val ToFloat = symbolOf[OpId.ToFloat]
    val ToDouble = symbolOf[OpId.ToDouble]
    val ToString = symbolOf[OpId.ToString]
    val IsNat = symbolOf[OpId.IsNat]
    val IsChar = symbolOf[OpId.IsChar]
    val IsInt = symbolOf[OpId.IsInt]
    val IsLong = symbolOf[OpId.IsLong]
    val IsFloat = symbolOf[OpId.IsFloat]
    val IsDouble = symbolOf[OpId.IsDouble]
    val IsString = symbolOf[OpId.IsString]
    val IsBoolean = symbolOf[OpId.IsBoolean]
    val Negate = symbolOf[OpId.Negate]
    val Abs = symbolOf[OpId.Abs]
    val NumberOfLeadingZeros = symbolOf[OpId.NumberOfLeadingZeros]
    val Floor = symbolOf[OpId.Floor]
    val Ceil = symbolOf[OpId.Ceil]
    val Round = symbolOf[OpId.Round]
    val Sin = symbolOf[OpId.Sin]
    val Cos = symbolOf[OpId.Cos]
    val Tan = symbolOf[OpId.Tan]
    val Sqrt = symbolOf[OpId.Sqrt]
    val Log = symbolOf[OpId.Log]
    val Log10 = symbolOf[OpId.Log10]
    val Reverse = symbolOf[OpId.Reverse]
    val ! = symbolOf[OpId.!]
    val Require = symbolOf[OpId.Require]
    val ITE = symbolOf[OpId.ITE]
    val IsNonLiteral = symbolOf[OpId.IsNonLiteral]
    val GetType = symbolOf[OpId.GetType]
    val ==> = symbolOf[OpId.==>]
    val + = symbolOf[OpId.+]
    val - = symbolOf[OpId.-]
    val * = symbolOf[OpId.*]
    val / = symbolOf[OpId./]
    val % = symbolOf[OpId.%]
    val < = symbolOf[OpId.<]
    val > = symbolOf[OpId.>]
    val <= = symbolOf[OpId.<=]
    val >= = symbolOf[OpId.>=]
    val == = symbolOf[OpId.==]
    val != = symbolOf[OpId.!=]
    val && = symbolOf[OpId.&&]
    val || = symbolOf[OpId.||]
    val BitwiseAnd = symbolOf[OpId.BitwiseAnd]
    val BitwiseOr = symbolOf[OpId.BitwiseOr]
    val Pow = symbolOf[OpId.Pow]
    val Min = symbolOf[OpId.Min]
    val Max = symbolOf[OpId.Max]
    val Substring = symbolOf[OpId.Substring]
    val SubSequence = symbolOf[OpId.SubSequence]
    val StartsWith = symbolOf[OpId.StartsWith]
    val EndsWith = symbolOf[OpId.EndsWith]
    val Head = symbolOf[OpId.Head]
    val Tail = symbolOf[OpId.Tail]
    val CharAt = symbolOf[OpId.CharAt]
    val Length = symbolOf[OpId.Length]
    val Matches = symbolOf[OpId.Matches]
    val FirstMatch = symbolOf[OpId.FirstMatch]
    val PrefixMatch = symbolOf[OpId.PrefixMatch]
    val ReplaceFirstMatch = symbolOf[OpId.ReplaceFirstMatch]
    val ReplaceAllMatches = symbolOf[OpId.ReplaceAllMatches]
  }
////////////////////////////////////////////////////////////////////
// Code thanks to Shapeless
// https://github.com/milessabin/shapeless/blob/master/core/src/main/scala/shapeless/lazy.scala
////////////////////////////////////////////////////////////////////
  /** Attaches an `@implicitNotFound(msg)` annotation to `annotatedSym` so the
    * compiler reports `msg` when the implicit search for that symbol fails.
    *
    * A dummy annotation is typechecked first and its message literal is then
    * substituted, because the real `msg` is only known at expansion time.
    */
  def setAnnotation(msg: String, annotatedSym : TypeSymbol): Unit = {
    import c.internal._
    import decorators._
    // Typecheck a template annotation with a placeholder message.
    val tree0 =
      c.typecheck(
        q"""
          new _root_.scala.annotation.implicitNotFound("dummy")
        """,
        silent = false
      )
    // Replace the "dummy" literal inside the typed tree with the real message.
    class SubstMessage extends Transformer {
      val global = c.universe.asInstanceOf[scala.tools.nsc.Global]
      override def transform(tree: Tree): Tree = {
        super.transform {
          tree match {
            case Literal(Constant("dummy")) => Literal(Constant(msg))
            case t => t
          }
        }
      }
    }
    val tree = new SubstMessage().transform(tree0)
    annotatedSym.setAnnotations(Annotation(tree))
    ()
  }
////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////
// Calc
////////////////////////////////////////////////////////////////////
  // Root of the calculation hierarchy: every node carries its primitive
  // classification and the Scala type it stands for.
  sealed trait Calc extends Product with Serializable {
    val primitive : Primitive
    val tpe : Type
  }
  object Calc {
    // Lets a Calc be used wherever its Primitive classification is expected.
    implicit def getPrimitive(from : Calc) : Primitive = from.primitive
  }
sealed trait Primitive extends Product with Serializable {
val dummyConstant : Any
val name : String
val tpe : Type
override def equals(that: Any): Boolean = {
val thatPrim = that.asInstanceOf[Primitive]
thatPrim.dummyConstant == dummyConstant && thatPrim.name == name && thatPrim.tpe =:= tpe
}
}
  // Singleton instances for every supported primitive, plus a wildcard for
  // anything the calculator cannot classify.
  object Primitive {
    case object Char extends Primitive {val dummyConstant = '\\u0001';val name = "Char"; val tpe = typeOf[scala.Char]}
    case object Int extends Primitive {val dummyConstant = 1; val name = "Int"; val tpe = typeOf[scala.Int]}
    case object Long extends Primitive {val dummyConstant = 1L; val name = "Long"; val tpe = typeOf[scala.Long]}
    case object Float extends Primitive {val dummyConstant = 1.0f; val name = "Float"; val tpe = typeOf[scala.Float]}
    case object Double extends Primitive {val dummyConstant = 1.0; val name = "Double"; val tpe = typeOf[scala.Double]}
    case object String extends Primitive {val dummyConstant = "1"; val name = "String"; val tpe = typeOf[java.lang.String]}
    case object Boolean extends Primitive {val dummyConstant = true; val name = "Boolean"; val tpe = typeOf[scala.Boolean]}
    case class Unknown(tpe : Type, name : String) extends Primitive {val dummyConstant: Any = None}
    // Classifies a runtime literal; aborts macro expansion on anything else.
    def fromLiteral(lit : Any) : Primitive = lit match {
      case value : std.Char => Primitive.Char
      case value : std.Int => Primitive.Int
      case value : std.Long => Primitive.Long
      case value : std.Float => Primitive.Float
      case value : std.Double => Primitive.Double
      case value : std.String => Primitive.String
      case value : std.Boolean => Primitive.Boolean
      case _ => abort(s"Unsupported literal type: $lit")
    }
  }
  // A calculation with an associated tree; `literal` is Some only when the
  // value is known at compile time.
  sealed trait CalcVal extends Calc {
    val primitive : Primitive
    val literal : Option[Any]
    val tree : Tree
  }
object CalcVal {
sealed trait Kind
object Lit extends Kind
object NLit extends Kind
implicit val lift = Liftable[CalcVal] {p => p.tree}
def unapply(arg: CalcVal): Option[(Any, Tree)] = Some((arg.literal.getOrElse(arg.dummyConstant), arg.tree))
def apply(value : Any, tree : Tree)(implicit kind : Kind) = kind match {
case Lit => CalcLit(value)
case NLit => CalcNLit(Primitive.fromLiteral(value), tree)
}
//use this when a literal calculation may fail
def mayFail(primitive: Primitive, value : => Any, tree : Tree)(implicit kind : Kind) = kind match {
case Lit => try{CalcLit(value)} catch {case e : Throwable => abort(e.getMessage)}
case NLit => CalcNLit(primitive, tree)
}
}
  // A compile-time-known value: its type is the matching literal/constant
  // type and its tree is the plain literal.
  case class CalcLit(primitive : Primitive, value : Any) extends CalcVal {
    val literal = Some(value)
    val tpe : Type = constantTypeOf(value)
    val tree : Tree = Literal(Constant(value))
  }
  // Typed extractors over literal calculations, one per supported primitive,
  // plus an untyped constructor that classifies the literal automatically.
  object CalcLit {
    object Char {
      def unapply(arg: CalcLit): Option[std.Char] = arg match {
        case CalcLit(Primitive.Char, value : std.Char) => Some(value)
        case _ => None
      }
    }
    object Int {
      def unapply(arg: CalcLit): Option[std.Int] = arg match {
        case CalcLit(Primitive.Int, value : std.Int) => Some(value)
        case _ => None
      }
    }
    object Long {
      def unapply(arg: CalcLit): Option[std.Long] = arg match {
        case CalcLit(Primitive.Long, value : std.Long) => Some(value)
        case _ => None
      }
    }
    object Float {
      def unapply(arg: CalcLit): Option[std.Float] = arg match {
        case CalcLit(Primitive.Float, value : std.Float) => Some(value)
        case _ => None
      }
    }
    object Double {
      def unapply(arg: CalcLit): Option[std.Double] = arg match {
        case CalcLit(Primitive.Double, value : std.Double) => Some(value)
        case _ => None
      }
    }
    object String {
      def unapply(arg: CalcLit): Option[std.String] = arg match {
        case CalcLit(Primitive.String, value : std.String) => Some(value)
        case _ => None
      }
    }
    object Boolean {
      def unapply(arg: CalcLit): Option[std.Boolean] = arg match {
        case CalcLit(Primitive.Boolean, value : std.Boolean) => Some(value)
        case _ => None
      }
    }
    // Builds a CalcLit from any supported literal (aborts on others).
    def apply(t : Any) : CalcLit = CalcLit(Primitive.fromLiteral(t), t)
  }
  // A non-literal calculation: only its tree (and primitive kind) is known.
  case class CalcNLit(primitive : Primitive, tree : Tree, tpe : Type) extends CalcVal {
    val literal = None
  }
  object CalcNLit {
    // Defaults the calculation's type to the primitive's plain Scala type.
    def apply(primitive: Primitive, tree: Tree): CalcNLit = new CalcNLit(primitive, tree, primitive.tpe)
  }
  // A calculation that only carries type information (no value, no tree):
  // Mark = a plain non-literal primitive type, TF = a TwoFace-wrapped value,
  // UB = a primitive recovered from an upper type bound.
  sealed trait CalcType extends Calc
  object CalcType {
    case class Mark(primitive : Primitive) extends CalcType {
      val tpe = primitive.tpe
    }
    case class TF(primitive : Primitive) extends CalcType {
      val tpe = primitive.tpe
    }
    case class UB(primitive : Primitive) extends CalcType {
      val tpe = primitive.tpe
    }
    def unapply(arg: CalcType): Option[Primitive] = Some(arg.primitive)
  }
  // A type the calculator cannot interpret; may still carry a tree for
  // downstream use.
  case class CalcUnknown(tpe : Type, treeOption : Option[Tree]) extends Calc {
    override val primitive: Primitive = Primitive.Unknown(tpe, "Unknown")
  }
object NonLiteralCalc {
def unapply(tpe: Type): Option[CalcType.Mark] = tpe match {
case TypeRef(_, sym, _) => sym match {
case t if t == symbolOf[Char] => Some(CalcType.Mark(Primitive.Char))
case t if t == symbolOf[Int] => Some(CalcType.Mark(Primitive.Int))
case t if t == symbolOf[Long] => Some(CalcType.Mark(Primitive.Long))
case t if t == symbolOf[Float] => Some(CalcType.Mark(Primitive.Float))
case t if t == symbolOf[Double] => Some(CalcType.Mark(Primitive.Double))
case t if t == symbolOf[java.lang.String] => Some(CalcType.Mark(Primitive.String))
case t if t == symbolOf[Boolean] => Some(CalcType.Mark(Primitive.Boolean))
case _ => None
}
case _ => None
}
}
////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////
// Calc Caching
////////////////////////////////////////////////////////////////////
  // Caches calculation results across macro expansions. Keys combine the
  // `OpMacro` type with the current argument-tree context, because the same
  // type can evaluate differently under different argument bindings.
  object CalcCache {
    import collection.mutable
    import io.AnsiColor._
    // Trees cannot be shared between expansion sites, so cached trees are
    // deep-copied on every fetch (strictTreeCopier forces a full copy).
    def deepCopyTree(t: Tree): Tree = {
      val treeDuplicator = new Transformer {
        // by default Transformers don’t copy trees which haven’t been modified,
        // so we need to use use strictTreeCopier
        override val treeCopy =
          c.asInstanceOf[reflect.macros.runtime.Context].global.newStrictTreeCopier.asInstanceOf[TreeCopier]
      }
      treeDuplicator.transform(t)
    }
    // Custom equality: type equivalence (`=:=`) for the key type and
    // structural equality for the argument trees.
    final case class Key private (key : Type, argContext : List[Tree]) {
      override def equals(that: Any): Boolean = {
        val thatKey = that.asInstanceOf[Key]
        (thatKey.key =:= key) && (thatKey.argContext.length == argContext.length) &&
          ListZipper(thatKey.argContext, argContext).forall(_ equalsStructure _)
      }
    }
    object Key {
      implicit def fromType(key : Type) : Key = new Key(key, GetArgTree.argContext)
    }
    // Backed by the process-wide MacroCache so entries survive expansions.
    val cache = MacroCache.cache.asInstanceOf[mutable.Map[Key, Calc]]
    def get(key : Type) : Option[Calc] = {
      val k = Key.fromType(key)
      cache.get(k).map {v =>
        VerboseTraversal(s"${YELLOW}${BOLD}fetching${RESET} $k, $v")
        // Reconstruct/clone trees so the cached value is safe to reuse here.
        val cloned = v match {
          case lit : CalcLit => CalcLit(lit.value) //reconstruct internal literal tree
          case nlit : CalcNLit => CalcNLit(nlit.primitive, deepCopyTree(nlit.tree))
          case c => c
        }
        cloned
      }
    }
    def add[V <: Calc](key : Type, value : V) : V = {
      val k = Key.fromType(key)
      cache += (k -> value)
      VerboseTraversal(s"${GREEN}${BOLD}caching${RESET} $k -> $value")
      value
    }
  }
////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////
// Code thanks to Paul Phillips
// https://github.com/paulp/psply/blob/master/src/main/scala/PsplyMacros.scala
////////////////////////////////////////////////////////////////////
import scala.reflect.internal.SymbolTable
  // Debug-only tracing of the type traversal. All output is gated on the
  // compile-time `verboseTraversal` flag; indentation tracks recursion depth.
  object VerboseTraversal {
    private val verboseTraversal = false
    private val indentSize = 2
    private var indent : Int = 0
    private def indentStr : String = " " * (indentSize * indent)
    // Called on entering a nested unapply; prints a depth marker.
    def incIdent : Unit = if (verboseTraversal) {
      indent = indent + 1
      println("--" * indent + ">")
    }
    // Called on leaving a nested unapply; prints the matching marker.
    def decIdent : Unit = if (verboseTraversal) {
      println("<" + "--" * indent)
      indent = indent - 1
    }
    // Prints `s` with every line prefixed by the current indentation.
    def apply(s : String) : Unit = {
      if (verboseTraversal) println(indentStr + s.replaceAll("\\n",s"\\n$indentStr"))
    }
  }
/** Typecheck singleton types so as to obtain indirectly
* available known-at-compile-time values.
*/
object TypeCalc {
////////////////////////////////////////////////////////////////////////
// Calculates the integer value of Shapeless Nat
////////////////////////////////////////////////////////////////////////
    ////////////////////////////////////////////////////////////////////////
    // Calculates the integer value of Shapeless Nat
    ////////////////////////////////////////////////////////////////////////
    object NatCalc {
      // Peels `Succ` layers recursively, bottoming out at `_0`; dereferences
      // aliases/abstract members via `asSeenFrom` when there are no args.
      def unapply(tp: Type): Option[CalcLit] = {
        tp match {
          case TypeRef(_, sym, args) if sym == symbolOf[shapeless.Succ[_]] =>
            args.head match {
              case NatCalc(CalcLit.Int(value)) => Some(CalcLit(value + 1))
              case _ => abort(s"Given Nat type is defective: $tp, raw: ${showRaw(tp)}")
            }
          case TypeRef(_, sym, _) if sym == symbolOf[shapeless._0] =>
            Some(CalcLit(0))
          case TypeRef(pre, sym, Nil) =>
            unapply(sym.info asSeenFrom (pre, sym.owner))
          case _ =>
            None
        }
      }
    }
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// Calculates the TwoFace values
////////////////////////////////////////////////////////////////////////
    ////////////////////////////////////////////////////////////////////////
    // Calculates the TwoFace values
    ////////////////////////////////////////////////////////////////////////
    object TwoFaceCalc {
      // Prefers the literal calculation of the TwoFace's type argument; falls
      // back to the TwoFace's own primitive classification.
      def unapplyArg(calcTFType : Option[CalcType.TF], tfArgType : Type): Option[Calc] = {
        TypeCalc.unapply(tfArgType) match {
          case Some(t : CalcLit) => Some(t)
          case _ => calcTFType
        }
      }
      // Matches any subtype of TwoFaceAny and classifies it by which concrete
      // TwoFaceAny.<Primitive> base class it extends.
      def unapply(tp: Type) : Option[Calc] = {
        val tfAnySym = symbolOf[TwoFaceAny[_,_]]
        tp match {
          case TypeRef(_, sym, args) if args.nonEmpty && tp.baseClasses.contains(tfAnySym) =>
            VerboseTraversal(s"@@TwoFaceCalc@@\\nTP: $tp\\nRAW: ${showRaw(tp)}\\nBaseCls:${tp.baseClasses}")
            val calcTFType = sym match {
              case t if tp.baseClasses.contains(symbolOf[TwoFaceAny.Char[_]]) => Some(CalcType.TF(Primitive.Char))
              case t if tp.baseClasses.contains(symbolOf[TwoFaceAny.Int[_]]) => Some(CalcType.TF(Primitive.Int))
              case t if tp.baseClasses.contains(symbolOf[TwoFaceAny.Long[_]]) => Some(CalcType.TF(Primitive.Long))
              case t if tp.baseClasses.contains(symbolOf[TwoFaceAny.Float[_]]) => Some(CalcType.TF(Primitive.Float))
              case t if tp.baseClasses.contains(symbolOf[TwoFaceAny.Double[_]]) => Some(CalcType.TF(Primitive.Double))
              case t if tp.baseClasses.contains(symbolOf[TwoFaceAny.String[_]]) => Some(CalcType.TF(Primitive.String))
              case t if tp.baseClasses.contains(symbolOf[TwoFaceAny.Boolean[_]]) => Some(CalcType.TF(Primitive.Boolean))
              case _ => None
            }
            // The second type argument of TwoFaceAny is the wrapped type.
            if (calcTFType.isDefined)
              unapplyArg(calcTFType, tp.baseType(tfAnySym).typeArgs(1))
            else
              None
          case _ => None
        }
      }
    }
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// Calculates the different Op wrappers by unapplying their argument.
////////////////////////////////////////////////////////////////////////
object OpCastCalc {
def unapply(tp: Type): Option[Calc] = {
tp match {
case TypeRef(_, sym, args) =>
sym match {
case t if t == symbolOf[OpNat[_]] => Some(TypeCalc(args.head))
case t if t == symbolOf[OpChar[_]] => Some(TypeCalc(args.head))
case t if t == symbolOf[OpInt[_]] => Some(TypeCalc(args.head))
case t if t == symbolOf[OpLong[_]] => Some(TypeCalc(args.head))
case t if t == symbolOf[OpFloat[_]] => Some(TypeCalc(args.head))
case t if t == symbolOf[OpDouble[_]] => Some(TypeCalc(args.head))
case t if t == symbolOf[OpString[_]] => Some(TypeCalc(args.head))
case t if t == symbolOf[OpBoolean[_]] => Some(TypeCalc(args.head))
case _ => None
}
case _ =>
None
}
}
}
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// Calculates an Op
////////////////////////////////////////////////////////////////////////
    ////////////////////////////////////////////////////////////////////////
    // Calculates an Op
    ////////////////////////////////////////////////////////////////////////
    // Evaluates `OpMacro[Func, A, B, C]` types. Results are memoized in
    // CalcCache keyed by the full type, except when `uncachingReason` is set
    // (results that depend on implicit-search state must not be cached).
    // NOTE(review): `uncachingReason` is never reset back to 0 in this block,
    // so once set it appears to disable caching for all later ops in the same
    // compiler run — confirm whether that is intended.
    object OpCalc {
      private val opMacroSym = symbolOf[OpMacro[_,_,_,_]]
      private var uncachingReason : Int = 0
      def setUncachingReason(arg : Int) : Unit = {
        uncachingReason = arg
      }
      def unapply(tp: Type): Option[Calc] = {
        tp match {
          // GetType short-circuits: it returns its argument type unevaluated.
          case TypeRef(_, sym, ft :: tp :: _) if sym == opMacroSym && ft.typeSymbol == funcTypes.GetType =>
            Some(CalcUnknown(tp, None))
          case TypeRef(_, sym, args) if sym == opMacroSym =>
            VerboseTraversal(s"@@OpCalc@@\\nTP: $tp\\nRAW: ${showRaw(tp)}")
            val funcType = args.head.typeSymbol.asType
            CalcCache.get(tp) match {
              case None =>
                val args = tp.typeArgs
                // Arguments are evaluated lazily: some ops (e.g. ITE) must
                // not force all of them.
                lazy val aValue = TypeCalc(args(1))
                lazy val bValue = TypeCalc(args(2))
                lazy val cValue = TypeCalc(args(3))
                //If function is set/get variable we keep the original string,
                //otherwise we get the variable's value
                val retVal = (funcType, aValue) match {
                  // ImplicitFound probes the implicit scope; its outcome
                  // depends on search state, so its result is not cacheable.
                  case (funcTypes.ImplicitFound, _) =>
                    setUncachingReason(1)
                    aValue match {
                      case CalcUnknown(t, _) => try {
                        c.typecheck(q"implicitly[$t]")
                        Some(CalcLit(true))
                      } catch {
                        case e : Throwable =>
                          Some(CalcLit(false))
                      }
                      case _ => Some(CalcLit(false))
                    }
                  case (funcTypes.EnumCount, _) =>
                    aValue match {
                      case CalcUnknown(t, _) => Some(CalcLit(t.typeSymbol.asClass.knownDirectSubclasses.size))
                      case _ => Some(CalcLit(0))
                    }
                  // The Is* ops classify the argument's primitive kind.
                  case (funcTypes.IsNat, _) =>
                    aValue match {
                      case CalcLit.Int(t) if t >= 0 => Some(CalcLit(true))
                      case _ => Some(CalcLit(false))
                    }
                  case (funcTypes.IsChar, _) =>
                    aValue.primitive match {
                      case Primitive.Char => Some(CalcLit(true))
                      case _ => Some(CalcLit(false))
                    }
                  case (funcTypes.IsInt, _) =>
                    aValue.primitive match {
                      case Primitive.Int => Some(CalcLit(true))
                      case _ => Some(CalcLit(false))
                    }
                  case (funcTypes.IsLong, _) =>
                    aValue.primitive match {
                      case Primitive.Long => Some(CalcLit(true))
                      case _ => Some(CalcLit(false))
                    }
                  case (funcTypes.IsFloat, _) =>
                    aValue.primitive match {
                      case Primitive.Float => Some(CalcLit(true))
                      case _ => Some(CalcLit(false))
                    }
                  case (funcTypes.IsDouble, _) =>
                    aValue.primitive match {
                      case Primitive.Double => Some(CalcLit(true))
                      case _ => Some(CalcLit(false))
                    }
                  case (funcTypes.IsString, _) =>
                    aValue.primitive match {
                      case Primitive.String => Some(CalcLit(true))
                      case _ => Some(CalcLit(false))
                    }
                  case (funcTypes.IsBoolean, _) =>
                    aValue.primitive match {
                      case Primitive.Boolean => Some(CalcLit(true))
                      case _ => Some(CalcLit(false))
                    }
                  case (funcTypes.IsNonLiteral, _) => //Looking for non literals
                    aValue match {
                      case t : CalcLit => Some(CalcLit(false))
                      case _ => Some(CalcLit(true)) //non-literal type (e.g., Int, Long,...)
                    }
                  case (funcTypes.ITE, CalcLit.Boolean(cond)) => //Special control case: ITE (If-Then-Else)
                    if (cond) Some(bValue) //true (then) part of the IF
                    else Some(cValue) //false (else) part of the IF
                  // Arg references a function argument by position; literal
                  // arguments collapse, others become a named `argN` tree.
                  case (funcTypes.Arg, CalcLit.Int(argNum)) =>
                    bValue match { //Checking the argument type
                      case t : CalcLit => Some(t) //Literal argument is just a literal
                      case _ => //Got a type, so returning argument name
                        TypeCalc.unapply(args(3)) match {
                          case Some(t: CalcType) =>
                            val term = TermName(s"arg$argNum")
                            Some(CalcNLit(t, q"$term"))
                          case _ =>
                            None
                        }
                    }
                  case _ => //regular cases
                    opCalc(funcType, aValue, bValue, cValue) match {
                      case (res : CalcVal) => Some(res)
                      case u @ CalcUnknown(_,Some(_)) => Some(u) //Accept unknown values with a tree
                      case _ => None
                    }
                }
                if (uncachingReason > 0) VerboseTraversal(s"$uncachingReason:: Skipped caching of $tp")
                else retVal.foreach{rv => CalcCache.add(tp, rv)}
                retVal
              case cached => cached
            }
          case _ => None
        }
      }
    }
////////////////////////////////////////////////////////////////////////
    // Total entry point: like `unapply`, but anything unrecognized becomes a
    // CalcUnknown, and plain/TwoFace types are materialized via `valueOf`.
    def apply(tp: Type): Calc = {
      TypeCalc.unapply(tp) match {
        case Some(t : CalcVal) => t
        case Some(t @ CalcType.UB(_)) => t
        case Some(t @ CalcType.TF(_)) => CalcNLit(t, q"valueOf[$tp].getValue")
        case Some(t : CalcType) => CalcNLit(t, q"valueOf[$tp]")
        case Some(t : CalcUnknown) => t
        case _ =>
          VerboseTraversal(s"@@Unknown@@\\nTP: $tp\\nRAW: ${showRaw(tp)}")
          CalcUnknown(tp, None)
      }
    }
    // Core recursive type traversal: tries each value extractor first, then
    // walks through existentials, bounds, refinements, aliases and singleton
    // types until a calculable form is found.
    def unapply(tp: Type): Option[Calc] = {
      val g = c.universe.asInstanceOf[SymbolTable]
      implicit def fixSymbolOps(sym: Symbol): g.Symbol = sym.asInstanceOf[g.Symbol]
      VerboseTraversal(s"@@TypeCalc.unapply@@ ${c.enclosingPosition}\\nTP: $tp\\nRAW: ${showRaw(tp)}")
      VerboseTraversal.incIdent
      val tpCalc = tp match {
        ////////////////////////////////////////////////////////////////////////
        // Value cases
        ////////////////////////////////////////////////////////////////////////
        case ConstantType(ConstantCalc(t)) => Some(t) //Constant
        case OpCalc(t) => Some(t) // Operational Function
        case OpCastCalc(t) => Some(t) //Op Cast wrappers
        case TwoFaceCalc(t) => Some(t) //TwoFace values
        case NonLiteralCalc(t) => Some(t)// Non-literal values
        case NatCalc(t) => Some(t) //For Shapeless Nat
        ////////////////////////////////////////////////////////////////////////
        ////////////////////////////////////////////////////////////////////////
        // Tree traversal
        ////////////////////////////////////////////////////////////////////////
        case tp @ ExistentialType(_, _) => unapply(tp.underlying)
        case TypeBounds(lo, hi) => unapply(hi) match {
          case Some(t : CalcLit) => Some(t)
          //There can be cases, like in the following example, where we can extract a non-literal value.
          // def foo2[W](w : TwoFace.Int[W])(implicit tfs : TwoFace.Int.Shell1[Negate, W, Int]) = -w+1
          //We want to calculate `-w+1`, even though we have not provided a complete implicit.
          //While returning `TwoFace.Int[Int](-w+1)` is possible in this case, we would rather reserve
          //the ability to have a literal return type, so `TwoFace.Int[Negate[W]+1](-w+1)` is returned.
          //So even if we can have a `Some(CalcType)` returning, we force it as an upper-bound calc type.
          case Some(t) => Some(CalcType.UB(t))
          case _ => None
        }
        case RefinedType(parents, scope) =>
          parents.iterator map unapply collectFirst { case Some(x) => x }
        case NullaryMethodType(tpe) => unapply(tpe)
        case TypeRef(_, sym, _) if sym.isAliasType =>
          val tpDealias = tp.dealias
          if (tpDealias == tp)
            abort("Unable to dealias type: " + showRaw(tp))
          else
            unapply(tpDealias)
        case TypeRef(pre, sym, Nil) => unapply(sym.info asSeenFrom (pre, sym.owner))
        case SingleType(pre, sym) => unapply(sym.info asSeenFrom (pre, sym.owner))
        ////////////////////////////////////////////////////////////////////////
        case _ =>
          VerboseTraversal("Exhausted search at: " + showRaw(tp))
          None
      }
      VerboseTraversal.decIdent
      tpCalc
    }
}
////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// Calculates from a constant
////////////////////////////////////////////////////////////////////////
object ConstantCalc {
def unapply(constant: Constant): Option[CalcLit] = {
constant match {
case Constant(t : Char) => Some(CalcLit(t))
case Constant(t : Int) => Some(CalcLit(t))
case Constant(t : Long) => Some(CalcLit(t))
case Constant(t : Float) => Some(CalcLit(t))
case Constant(t : Double) => Some(CalcLit(t))
case Constant(t : String) => Some(CalcLit(t))
case Constant(t : Boolean) => Some(CalcLit(t))
case _ => None
}
}
}
////////////////////////////////////////////////////////////////////////
def abort(msg: String, annotatedSym : Option[TypeSymbol] = defaultAnnotatedSym): Nothing = {
VerboseTraversal(s"!!!!!!aborted with: $msg at $annotatedSym, $defaultAnnotatedSym")
if (annotatedSym.isDefined) setAnnotation(msg, annotatedSym.get)
c.abort(c.enclosingPosition, msg)
}
def buildWarningMsgLoc : String = s"${c.enclosingPosition.source.path}:${c.enclosingPosition.line}:${c.enclosingPosition.column}"
def buildWarningMsg(msg: String): String = s"Warning: $buildWarningMsgLoc $msg"
  //Tree variant: builds an expression that assembles the warning message at runtime.
  def buildWarningMsg(msg: Tree): Tree = q""" "Warning: " + $buildWarningMsgLoc + " " + $msg """
  //Lifts any literal value into a constant literal tree node.
  def constantTreeOf(t : Any) : Tree = Literal(Constant(t))
  //Lifts any literal value into its singleton (constant) type.
  def constantTypeOf(t: Any) : Type = c.internal.constantType(Constant(t))
def genOpTreeLit(opTpe : Type, t: Any) : Tree = {
val outTpe = constantTypeOf(t)
val outTree = constantTreeOf(t)
val outWideTpe = outTpe.widen
val outTypeName = TypeName("Out" + wideTypeName(outTpe))
val outWideLiteral = outTree
q"""
new $opTpe {
type OutWide = $outWideTpe
type Out = $outTpe
type $outTypeName = $outTpe
final val value: $outTpe = $outWideLiteral
final val isLiteral = true
final val valueWide: $outWideTpe = $outWideLiteral
}
"""
}
def genOpTreeNat(opTpe : Type, t: Int) : Tree = {
val outWideTpe = typeOf[Int]
val outWideLiteral = constantTreeOf(t)
val outTypeName = TypeName("OutNat")
val outTpe = mkNatTpe(t)
val outTree = q"new ${mkNatTpt(t)}"
q"""
new $opTpe {
type OutWide = $outWideTpe
type Out = $outTpe
type $outTypeName = $outTpe
final val value: $outTpe = $outTree
final val isLiteral = true
final val valueWide: $outWideTpe = $outWideLiteral
}
"""
}
def genOpTreeNLit(opTpe : Type, calc : CalcNLit) : Tree = {
val valueTree = calc.tree
val outTpe = calc.tpe
q"""
new $opTpe {
type OutWide = $outTpe
type Out = $outTpe
final val value: $outTpe = $valueTree
final val isLiteral = false
final val valueWide: $outTpe = $valueTree
}
"""
}
def genOpTreeUnknown(opTpe : Type, calc : CalcUnknown) : Tree = {
val outTpe = calc.tpe
calc.treeOption match {
case Some(valueTree) =>
q"""
new $opTpe {
type OutWide = $outTpe
type Out = $outTpe
final lazy val value: $outTpe = $valueTree
final val isLiteral = false
final lazy val valueWide: $outTpe = $valueTree
}
"""
case None =>
q"""
new $opTpe {
type OutWide = Option[$outTpe]
type Out = Option[$outTpe]
final val value: Option[$outTpe] = None
final val isLiteral = false
final val valueWide: Option[$outTpe] = None
}
"""
}
}
def extractionFailed(tpe: Type) = {
val msg = s"Cannot extract value from $tpe\\n" + "showRaw==> " + showRaw(tpe)
abort(msg)
}
def extractionFailed(tree: Tree) = {
val msg = s"Cannot extract value from $tree\\n" + "showRaw==> " + showRaw(tree)
abort(msg)
}
  /** Extracts the calculated value from a materialized op-instance tree
    * (the anonymous-class shape produced by the `genOpTree*` generators).
    * Literal constants become `CalcLit`s; recognized non-literal value types
    * are wrapped as `CalcNLit`; anything else aborts expansion.
    */
  def extractValueFromOpTree(opTree : c.Tree) : CalcVal = {
    //Matches the generated `final val value : T = tree` member.
    def outFindCond(elem : c.Tree) : Boolean = elem match {
      case q"final val value : $valueTpe = $valueTree" => true
      case _ => false
    }
    def getOut(opClsBlk : List[c.Tree]) : CalcVal = opClsBlk.find(outFindCond) match {
      case Some(q"final val value : $valueTpe = $valueTree") =>
        valueTree match {
          case Literal(ConstantCalc(t)) => t //literal constant => compile-time value
          case _ => valueTpe match {
            case NonLiteralCalc(t) => CalcNLit(t, q"$valueTree") //typed non-literal => keep the tree
            case _ => extractionFailed(opTree)
          }
        }
      case _ => extractionFailed(opTree)
    }
    //Expects the `{ class $anon extends Op {...}; new $anon() }` block shape.
    opTree match {
      case q"""{
            $mods class $tpname[..$tparams] $ctorMods(...$paramss) extends ..$parents { $self => ..$opClsBlk }
            $expr(...$exprss)
          }""" => getOut(opClsBlk)
      case _ => extractionFailed(opTree)
    }
  }
  /** Typechecks a numeric value tree and lifts it into a calculation.
    * Literal types yield `CalcLit`; upper-bound and non-literal types yield a
    * `CalcNLit` keeping the original (untypechecked) tree.
    */
  def extractValueFromNumTree(numValueTree : c.Tree) : CalcVal = {
    val typedTree = c.typecheck(numValueTree)
    TypeCalc(typedTree.tpe) match {
      case t : CalcLit => t
      case t : CalcType.UB => CalcNLit(t, numValueTree, typedTree.tpe)
      case t : CalcNLit => CalcNLit(t, numValueTree)
      case _ => extractionFailed(typedTree.tpe)
    }
  }
  /** Typechecks a TwoFace value tree and lifts it into a calculation.
    * Non-literal TwoFace values are unwrapped via `.getValue` at runtime.
    */
  def extractValueFromTwoFaceTree(tfTree : c.Tree) : CalcVal = {
    val typedTree = c.typecheck(tfTree)
    TypeCalc(typedTree.tpe) match {
      case t : CalcLit => t
      case t : CalcType => CalcNLit(t, q"$tfTree.getValue")
      case t : CalcNLit => CalcNLit(t, q"$tfTree.getValue")
      case t =>
        // println(t)
        extractionFailed(typedTree.tpe)
    }
  }
  //Name of the widened type's symbol, e.g. the singleton Int(5) yields "Int".
  def wideTypeName(tpe : Type) : String = tpe.widen.typeSymbol.name.toString
  /** Extracts the `value` member's right-hand-side tree out of an op-instance
    * expression of the shape `new Op {...}` (an `Apply` wrapping a `Block` whose
    * first statement is the anonymous `ClassDef`).
    */
  object HasOutValue {
    def unapply(tree : Tree) : Option[Tree] = tree match {
      case Apply(Apply(_,_), List(Block(ClassDef(_,_,_,Template(_,_,members)) :: _, _))) =>
        members.collectFirst {
          //NOTE: the trailing space in "value " is intentional -- the compiler stores a
          //val's backing field under its name suffixed with a space (the "local suffix").
          case ValDef(_,TermName("value "),_,t) => t
        }
      case _ => None
    }
  }
  /** Recovers the argument trees of the enclosing implicit-search call site, so an
    * op can reference the actual arguments ("GetArg") of the method/conversion that
    * triggered the macro. Argument lists are computed lazily and cached; the
    * `*Used` flags record which lists were demanded (see `argContext`).
    */
  object GetArgTree {
    //True when the implicit search was triggered from a macro method call.
    def isMethodMacroCall : Boolean = c.enclosingImplicits.last.sym.isMacro
    def getAllArgs(tree : Tree, lhs : Boolean) : List[Tree] = tree match {
      case ValDef(_,_,_,Apply(_, t)) => t
      case HasOutValue(valueTree) => List(valueTree)
      case Apply(TypeApply(_,_), List(HasOutValue(valueTree))) => List(valueTree)
      case Apply(Apply(_,_), _) => getAllArgsRecur(tree)
      case Apply(TypeApply(_,_), _) => getAllArgsRecur(tree)
      case Apply(_, args) => if (isMethodMacroCall || lhs) args else List(tree)
      case t : Select => List(t)
      case t : Literal => List(t)
      case t : Ident => List(t)
      case _ => getAllArgsRecur(tree)
    }
    //Flattens curried applications into a single argument list.
    def getAllArgsRecur(tree : Tree) : List[Tree] = tree match {
      case Apply(fun, args) => getAllArgsRecur(fun) ++ args
      case _ => List()
    }
    //Arguments of the left-hand-side receiver of the call (e.g. `t` in `t.op(...)`).
    def getAllLHSArgs(tree : Tree) : List[Tree] = tree match {
      case Apply(TypeApply(Select(t, _), _), _) => getAllArgs(t, true)
      case TypeApply(Select(t, _), _) => getAllArgs(t, true)
      case Select(t, _) => getAllArgs(t, true)
      case _ => abort("Left-hand-side tree not found")
    }
    private var argListUsed = false
    private lazy val argList = {
      argListUsed = true
      getAllArgs(c.enclosingImplicits.last.tree, false)
    }
    private var lhsArgListUsed = false
    private lazy val lhsArgList = {
      lhsArgListUsed = true
      getAllLHSArgs(c.enclosingImplicits.last.tree)
    }
    //Only the argument lists actually demanded so far (avoids forcing the lazy vals).
    def argContext : List[Tree] = (argListUsed, lhsArgListUsed) match {
      case (false, false) => List()
      case (true, false) => argList
      case (false, true) => lhsArgList
      case (true, true) => argList ++ lhsArgList
    }
    /** Returns the typechecked tree of argument `argIdx` and its expected type. */
    def apply(argIdx : Int, lhs : Boolean) : (Tree, Type) = {
      val tree = c.enclosingImplicits.last.tree
      // println(">>>>>>> enclosingImpl: ")// + c.enclosingImplicits.last)
      // println("pt: " + c.enclosingImplicits.last.pt)
      // println("tree: " + c.enclosingImplicits.last.tree)
      // println("rawTree: " + showRaw(c.enclosingImplicits.last.tree))
      val allArgs = if (lhs) lhsArgList else argList
      // println("args: " + allArgs)
      // println("<<<<<<< rawArgs" + showRaw(allArgs))
      val argTree : Tree = if (argIdx < allArgs.length) c.typecheck(allArgs(argIdx))
      else abort(s"Argument index($argIdx) is not smaller than the total number of arguments(${allArgs.length})")
      val tpe = c.enclosingImplicits.last.pt match {
        case TypeRef(_,sym,tp :: _) if (sym == func1Sym) => tp //conversion, so get the type from last.pt
        case _ => argTree.tpe //not a conversion, so get the type from the tree
      }
      (argTree, tpe)
    }
  }
  /** Lifts the call-site argument at `argIdx` (LHS receiver when `lhs`) into a
    * calculation. Unknown types keep the (untypechecked) argument tree for later
    * use; non-literal types are re-wrapped around the typechecked tree.
    */
  def extractFromArg(argIdx : Int, lhs : Boolean) : Calc = {
    val (typedTree, tpe) = GetArgTree(argIdx, lhs)
    VerboseTraversal(s"@@extractFromArg@@\\nTP: $tpe\\nRAW: ${showRaw(tpe)}\\nTree: $typedTree")
    TypeCalc(tpe) match {
      case _ : CalcUnknown => CalcUnknown(tpe, Some(c.untypecheck(typedTree)))
      case t : CalcNLit => CalcNLit(t, typedTree)
      case t => t
    }
  }
///////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////
// Three operands (Generic)
///////////////////////////////////////////////////////////////////////////////////////////
  //Entry point for materializing a generic op instance of type `F`.
  def materializeOpGen[F](implicit ev0: c.WeakTypeTag[F]): MaterializeOpAuxGen =
    new MaterializeOpAuxGen(weakTypeOf[F])
  /** Evaluates the operation identified by `funcType` over up to three lazily
    * evaluated arguments. Each supported op is a local def below; the trailing
    * `funcType match` dispatches to it. Literal arguments are folded at compile
    * time; otherwise a runtime computation tree (`CalcNLit`) is built.
    */
  def opCalc(funcType : TypeSymbol, aCalc : => Calc, bCalc : => Calc, cCalc : => Calc) : Calc = {
    lazy val a = aCalc
    lazy val b = bCalc
    lazy val cArg = cCalc
    //Aborts when both arguments are concrete values (a genuinely bad op);
    //otherwise the result is simply not yet known.
    def unsupported() : Calc = {
      (a, b) match {
        case (aArg : CalcVal, bArg : CalcVal) => abort(s"Unsupported $funcType[$a, $b, $cArg]")
        case _ => CalcUnknown(funcType.toType, None)
      }
    }
    //The output val is literal if all arguments are literal. Otherwise, it is non-literal.
    lazy implicit val cvKind : CalcVal.Kind = (a, b, cArg) match {
      case (_ : CalcLit, _ : CalcLit, _ : CalcLit) => CalcVal.Lit
      case _ => CalcVal.NLit
    }
    def AcceptNonLiteral : Calc = Id //AcceptNonLiteral has a special handling in MaterializeOpAuxGen
    //Fetches call-site argument `idx` (LHS receiver when `lhs` is true).
    def GetArg : Calc = (a, b) match {
      case (CalcLit.Int(idx), CalcLit.Boolean(lhs)) if (idx >= 0) => extractFromArg(idx, lhs)
      case _ => unsupported()
    }
    def Id : Calc = a match {
      case (av : Calc) => av
      case _ => unsupported()
    }
    //Primitive conversion ops: literal values are converted at compile time, and the
    //matching conversion call is emitted on the runtime tree. Identity conversions
    //(e.g. Int => Int) pass the tree through unchanged.
    def ToNat : Calc = ToInt //Same handling, but also has a special case to handle this in MaterializeOpAuxGen
    def ToChar : Calc = a match {
      case CalcVal(t : Char, tt) => CalcVal(t, q"$tt")
      case CalcVal(t : Int, tt) => CalcVal(t.toChar, q"$tt.toChar")
      case CalcVal(t : Long, tt) => CalcVal(t.toChar, q"$tt.toChar")
      case CalcVal(t : Float, tt) => CalcVal(t.toChar, q"$tt.toChar")
      case CalcVal(t : Double, tt) => CalcVal(t.toChar, q"$tt.toChar")
      case _ => unsupported()
    }
    def ToInt : Calc = a match {
      case CalcVal(t : Char, tt) => CalcVal(t.toInt, q"$tt.toInt")
      case CalcVal(t : Int, tt) => CalcVal(t, q"$tt")
      case CalcVal(t : Long, tt) => CalcVal(t.toInt, q"$tt.toInt")
      case CalcVal(t : Float, tt) => CalcVal(t.toInt, q"$tt.toInt")
      case CalcVal(t : Double, tt) => CalcVal(t.toInt, q"$tt.toInt")
      case CalcVal(t : String, tt) => CalcVal(t.toInt, q"$tt.toInt")
      case _ => unsupported()
    }
    def ToLong : Calc = a match {
      case CalcVal(t : Char, tt) => CalcVal(t.toLong, q"$tt.toLong")
      case CalcVal(t : Int, tt) => CalcVal(t.toLong, q"$tt.toLong")
      case CalcVal(t : Long, tt) => CalcVal(t, q"$tt")
      case CalcVal(t : Float, tt) => CalcVal(t.toLong, q"$tt.toLong")
      case CalcVal(t : Double, tt) => CalcVal(t.toLong, q"$tt.toLong")
      case CalcVal(t : String, tt) => CalcVal(t.toLong, q"$tt.toLong")
      case _ => unsupported()
    }
    def ToFloat : Calc = a match {
      case CalcVal(t : Char, tt) => CalcVal(t.toFloat, q"$tt.toFloat")
      case CalcVal(t : Int, tt) => CalcVal(t.toFloat, q"$tt.toFloat")
      case CalcVal(t : Long, tt) => CalcVal(t.toFloat, q"$tt.toFloat")
      case CalcVal(t : Float, tt) => CalcVal(t, q"$tt")
      case CalcVal(t : Double, tt) => CalcVal(t.toFloat, q"$tt.toFloat")
      case CalcVal(t : String, tt) => CalcVal(t.toFloat, q"$tt.toFloat")
      case _ => unsupported()
    }
    def ToDouble : Calc = a match {
      case CalcVal(t : Char, tt) => CalcVal(t.toDouble, q"$tt.toDouble")
      case CalcVal(t : Int, tt) => CalcVal(t.toDouble, q"$tt.toDouble")
      case CalcVal(t : Long, tt) => CalcVal(t.toDouble, q"$tt.toDouble")
      case CalcVal(t : Float, tt) => CalcVal(t.toDouble, q"$tt.toDouble")
      case CalcVal(t : Double, tt) => CalcVal(t, q"$tt")
      case CalcVal(t : String, tt) => CalcVal(t.toDouble, q"$tt.toDouble")
      case _ => unsupported()
    }
    def ToString : Calc = a match {
      case CalcVal(t : Char, tt) => CalcVal(t.toString, q"$tt.toString")
      case CalcVal(t : Int, tt) => CalcVal(t.toString, q"$tt.toString")
      case CalcVal(t : Long, tt) => CalcVal(t.toString, q"$tt.toString")
      case CalcVal(t : Float, tt) => CalcVal(t.toString, q"$tt.toString")
      case CalcVal(t : Double, tt) => CalcVal(t.toString, q"$tt.toString")
      case CalcVal(t : String, tt) => CalcVal(t, q"$tt")
      case CalcVal(t : Boolean, tt) => CalcVal(t.toString, q"$tt.toString")
      case _ => unsupported()
    }
    //Unary numeric/string/boolean ops. `CalcVal.mayFail` is used where evaluating the
    //op on a literal may be invalid for some inputs (e.g. tan/sqrt/log out of domain)
    //-- presumably guarding compile-time evaluation; verify against CalcVal's impl.
    def Negate : Calc = a match {
      case CalcVal(t : Char, tt) => CalcVal(-t, q"-$tt")
      case CalcVal(t : Int, tt) => CalcVal(-t, q"-$tt")
      case CalcVal(t : Long, tt) => CalcVal(-t, q"-$tt")
      case CalcVal(t : Float, tt) => CalcVal(-t, q"-$tt")
      case CalcVal(t : Double, tt) => CalcVal(-t, q"-$tt")
      case _ => unsupported()
    }
    def Abs : Calc = a match {
      case CalcVal(t : Int, tt) => CalcVal(math.abs(t), q"_root_.scala.math.abs($tt)")
      case CalcVal(t : Long, tt) => CalcVal(math.abs(t), q"_root_.scala.math.abs($tt)")
      case CalcVal(t : Float, tt) => CalcVal(math.abs(t), q"_root_.scala.math.abs($tt)")
      case CalcVal(t : Double, tt) => CalcVal(math.abs(t), q"_root_.scala.math.abs($tt)")
      case _ => unsupported()
    }
    def NumberOfLeadingZeros : Calc = a match {
      case CalcVal(t : Int, tt) => CalcVal(nlz(t), q"_root_.singleton.ops.impl.nlz($tt)")
      case CalcVal(t : Long, tt) => CalcVal(nlz(t), q"_root_.singleton.ops.impl.nlz($tt)")
      case _ => unsupported()
    }
    def Floor : Calc = a match {
      case CalcVal(t : Double, tt) => CalcVal(math.floor(t), q"_root_.scala.math.floor($tt)")
      case _ => unsupported()
    }
    def Ceil : Calc = a match {
      case CalcVal(t : Double, tt) => CalcVal(math.ceil(t), q"_root_.scala.math.ceil($tt)")
      case _ => unsupported()
    }
    def Round : Calc = a match {
      case CalcVal(t : Float, tt) => CalcVal(math.round(t), q"_root_.scala.math.round($tt)")
      case CalcVal(t : Double, tt) => CalcVal(math.round(t), q"_root_.scala.math.round($tt)")
      case _ => unsupported()
    }
    def Sin : Calc = a match {
      case CalcVal(t : Double, tt) => CalcVal(math.sin(t), q"_root_.scala.math.sin($tt)")
      case _ => unsupported()
    }
    def Cos : Calc = a match {
      case CalcVal(t : Double, tt) => CalcVal(math.cos(t), q"_root_.scala.math.cos($tt)")
      case _ => unsupported()
    }
    def Tan : Calc = a match {
      case CalcVal(t : Double, tt) => CalcVal.mayFail(Primitive.Double, math.tan(t), q"_root_.scala.math.tan($tt)")
      case _ => unsupported()
    }
    def Sqrt : Calc = a match {
      case CalcVal(t : Double, tt) => CalcVal.mayFail(Primitive.Double, math.sqrt(t), q"_root_.scala.math.sqrt($tt)")
      case _ => unsupported()
    }
    def Log : Calc = a match {
      case CalcVal(t : Double, tt) => CalcVal.mayFail(Primitive.Double, math.log(t), q"_root_.scala.math.log($tt)")
      case _ => unsupported()
    }
    def Log10 : Calc = a match {
      case CalcVal(t : Double, tt) => CalcVal.mayFail(Primitive.Double, math.log10(t), q"_root_.scala.math.log10($tt)")
      case _ => unsupported()
    }
    def Reverse : Calc = a match {
      case CalcVal(t : String, tt) => CalcVal(t.reverse, q"$tt.reverse")
      case _ => unsupported()
    }
    def Not : Calc = a match {
      case CalcVal(t : Boolean, tt) => CalcVal(!t, q"!$tt")
      case _ => unsupported()
    }
def Require : Calc = a match {
case CalcLit.Boolean(true) => CalcLit(true)
case CalcLit.Boolean(false) => b match {
case CalcLit.String(msg) =>
if (cArg.tpe.typeSymbol == symbolOf[Warn]) {
println(buildWarningMsg(msg))
CalcLit(false)
} else if (cArg.tpe.typeSymbol == symbolOf[NoSym]) {
abort(msg)
} else {
//redirection of implicit not found annotation is required to the given symbol
abort(msg, Some(cArg.tpe.typeSymbol.asType))
}
//directly using the java lib `require` resulted in compiler crash, so we use wrapped require instead
case CalcNLit(Primitive.String, msg, _) => cArg match {
case CalcUnknown(t, _) if t.typeSymbol == symbolOf[Warn] =>
CalcNLit(Primitive.Boolean, q"""{println(${buildWarningMsg(msg)}); false}""")
case _ =>
CalcNLit(Primitive.Boolean, q"{_root_.singleton.ops.impl._require(false, $msg); false}")
}
case _ => unsupported()
}
case CalcNLit(Primitive.Boolean, cond, _) => b match {
//directly using the java lib `require` resulted in compiler crash, so we use wrapped require instead
case CalcVal(msg : String, msgt) => cArg match {
case CalcUnknown(t, _) if t == symbolOf[Warn] =>
CalcNLit(Primitive.Boolean,
q"""{
if ($cond) true
else {
println(${buildWarningMsg(msgt)})
false
}
}""")
case _ =>
CalcNLit(Primitive.Boolean, q"{_root_.singleton.ops.impl._require($cond, $msgt); true}")
}
case _ => unsupported()
}
case _ => unsupported()
}
    //If-then-else over matching branch types; folded at compile time when all three
    //arguments are literal.
    def ITE : Calc = (a, b, cArg) match {
      //Also has special case handling inside unapply
      case (CalcVal(it : Boolean,itt), CalcVal(tt : Char,ttt), CalcVal(et : Char,ett)) =>
        CalcVal(if(it) tt else et, q"if ($itt) $ttt else $ett")
      case (CalcVal(it : Boolean,itt), CalcVal(tt : Int,ttt), CalcVal(et : Int,ett)) =>
        CalcVal(if(it) tt else et, q"if ($itt) $ttt else $ett")
      case (CalcVal(it : Boolean,itt), CalcVal(tt : Long,ttt), CalcVal(et : Long,ett)) =>
        CalcVal(if(it) tt else et, q"if ($itt) $ttt else $ett")
      case (CalcVal(it : Boolean,itt), CalcVal(tt : Float,ttt), CalcVal(et : Float,ett)) =>
        CalcVal(if(it) tt else et, q"if ($itt) $ttt else $ett")
      case (CalcVal(it : Boolean,itt), CalcVal(tt : Double,ttt), CalcVal(et : Double,ett)) =>
        CalcVal(if(it) tt else et, q"if ($itt) $ttt else $ett")
      case (CalcVal(it : Boolean,itt), CalcVal(tt : String,ttt), CalcVal(et : String,ett)) =>
        CalcVal(if(it) tt else et, q"if ($itt) $ttt else $ett")
      case (CalcVal(it : Boolean,itt), CalcVal(tt : Boolean,ttt), CalcVal(et : Boolean,ett)) =>
        CalcVal(if(it) tt else et, q"if ($itt) $ttt else $ett")
      case _ => unsupported()
    }
    //Sequencing op: discards `a`, yields `b`.
    def Next : Calc = b match {
      case (bv : CalcVal) => bv
      case _ => unsupported()
    }
    //Binary arithmetic ops over same-typed operands. Div/Mod use `mayFail` since
    //evaluation can throw (e.g. division by a zero literal).
    def Plus : Calc = (a, b) match {
      case (CalcVal(at : Char, att), CalcVal(bt : Char, btt)) => CalcVal(at + bt, q"$att + $btt")
      case (CalcVal(at : Int, att), CalcVal(bt : Int, btt)) => CalcVal(at + bt, q"$att + $btt")
      case (CalcVal(at : Long, att), CalcVal(bt : Long, btt)) => CalcVal(at + bt, q"$att + $btt")
      case (CalcVal(at : Float, att), CalcVal(bt : Float, btt)) => CalcVal(at + bt, q"$att + $btt")
      case (CalcVal(at : Double, att), CalcVal(bt : Double, btt)) => CalcVal(at + bt, q"$att + $btt")
      case (CalcVal(at : String, att), CalcVal(bt : String, btt)) => CalcVal(at + bt, q"$att + $btt")
      case _ => unsupported()
    }
    def Minus : Calc = (a, b) match {
      case (CalcVal(at : Char, att), CalcVal(bt : Char, btt)) => CalcVal(at - bt, q"$att - $btt")
      case (CalcVal(at : Int, att), CalcVal(bt : Int, btt)) => CalcVal(at - bt, q"$att - $btt")
      case (CalcVal(at : Long, att), CalcVal(bt : Long, btt)) => CalcVal(at - bt, q"$att - $btt")
      case (CalcVal(at : Float, att), CalcVal(bt : Float, btt)) => CalcVal(at - bt, q"$att - $btt")
      case (CalcVal(at : Double, att), CalcVal(bt : Double, btt)) => CalcVal(at - bt, q"$att - $btt")
      case _ => unsupported()
    }
    def Mul : Calc = (a, b) match {
      case (CalcVal(at : Char, att), CalcVal(bt : Char, btt)) => CalcVal(at * bt, q"$att * $btt")
      case (CalcVal(at : Int, att), CalcVal(bt : Int, btt)) => CalcVal(at * bt, q"$att * $btt")
      case (CalcVal(at : Long, att), CalcVal(bt : Long, btt)) => CalcVal(at * bt, q"$att * $btt")
      case (CalcVal(at : Float, att), CalcVal(bt : Float, btt)) => CalcVal(at * bt, q"$att * $btt")
      case (CalcVal(at : Double, att), CalcVal(bt : Double, btt)) => CalcVal(at * bt, q"$att * $btt")
      case _ => unsupported()
    }
    def Div : Calc = (a, b) match {
      case (CalcVal(at : Char, att), CalcVal(bt : Char, btt)) => CalcVal.mayFail(Primitive.Int, at / bt, q"$att / $btt")
      case (CalcVal(at : Int, att), CalcVal(bt : Int, btt)) => CalcVal.mayFail(Primitive.Int, at / bt, q"$att / $btt")
      case (CalcVal(at : Long, att), CalcVal(bt : Long, btt)) => CalcVal.mayFail(Primitive.Long, at / bt, q"$att / $btt")
      case (CalcVal(at : Float, att), CalcVal(bt : Float, btt)) => CalcVal.mayFail(Primitive.Float, at / bt, q"$att / $btt")
      case (CalcVal(at : Double, att), CalcVal(bt : Double, btt)) => CalcVal.mayFail(Primitive.Double, at / bt, q"$att / $btt")
      case _ => unsupported()
    }
    def Mod : Calc = (a, b) match {
      case (CalcVal(at : Char, att), CalcVal(bt : Char, btt)) => CalcVal.mayFail(Primitive.Int, at % bt, q"$att % $btt")
      case (CalcVal(at : Int, att), CalcVal(bt : Int, btt)) => CalcVal.mayFail(Primitive.Int, at % bt, q"$att % $btt")
      case (CalcVal(at : Long, att), CalcVal(bt : Long, btt)) => CalcVal.mayFail(Primitive.Long, at % bt, q"$att % $btt")
      case (CalcVal(at : Float, att), CalcVal(bt : Float, btt)) => CalcVal.mayFail(Primitive.Float, at % bt, q"$att % $btt")
      case (CalcVal(at : Double, att), CalcVal(bt : Double, btt)) => CalcVal.mayFail(Primitive.Double, at % bt, q"$att % $btt")
      case _ => unsupported()
    }
    //Binary comparison ops over same-typed operands; Eq/Neq additionally support
    //String and Boolean.
    def Sml : Calc = (a, b) match {
      case (CalcVal(at : Char, att), CalcVal(bt : Char, btt)) => CalcVal(at < bt, q"$att < $btt")
      case (CalcVal(at : Int, att), CalcVal(bt : Int, btt)) => CalcVal(at < bt, q"$att < $btt")
      case (CalcVal(at : Long, att), CalcVal(bt : Long, btt)) => CalcVal(at < bt, q"$att < $btt")
      case (CalcVal(at : Float, att), CalcVal(bt : Float, btt)) => CalcVal(at < bt, q"$att < $btt")
      case (CalcVal(at : Double, att), CalcVal(bt : Double, btt)) => CalcVal(at < bt, q"$att < $btt")
      case _ => unsupported()
    }
    def Big : Calc = (a, b) match {
      case (CalcVal(at : Char, att), CalcVal(bt : Char, btt)) => CalcVal(at > bt, q"$att > $btt")
      case (CalcVal(at : Int, att), CalcVal(bt : Int, btt)) => CalcVal(at > bt, q"$att > $btt")
      case (CalcVal(at : Long, att), CalcVal(bt : Long, btt)) => CalcVal(at > bt, q"$att > $btt")
      case (CalcVal(at : Float, att), CalcVal(bt : Float, btt)) => CalcVal(at > bt, q"$att > $btt")
      case (CalcVal(at : Double, att), CalcVal(bt : Double, btt)) => CalcVal(at > bt, q"$att > $btt")
      case _ => unsupported()
    }
    def SmlEq : Calc = (a, b) match {
      case (CalcVal(at : Char, att), CalcVal(bt : Char, btt)) => CalcVal(at <= bt, q"$att <= $btt")
      case (CalcVal(at : Int, att), CalcVal(bt : Int, btt)) => CalcVal(at <= bt, q"$att <= $btt")
      case (CalcVal(at : Long, att), CalcVal(bt : Long, btt)) => CalcVal(at <= bt, q"$att <= $btt")
      case (CalcVal(at : Float, att), CalcVal(bt : Float, btt)) => CalcVal(at <= bt, q"$att <= $btt")
      case (CalcVal(at : Double, att), CalcVal(bt : Double, btt)) => CalcVal(at <= bt, q"$att <= $btt")
      case _ => unsupported()
    }
    def BigEq : Calc = (a, b) match {
      case (CalcVal(at : Char, att), CalcVal(bt : Char, btt)) => CalcVal(at >= bt, q"$att >= $btt")
      case (CalcVal(at : Int, att), CalcVal(bt : Int, btt)) => CalcVal(at >= bt, q"$att >= $btt")
      case (CalcVal(at : Long, att), CalcVal(bt : Long, btt)) => CalcVal(at >= bt, q"$att >= $btt")
      case (CalcVal(at : Float, att), CalcVal(bt : Float, btt)) => CalcVal(at >= bt, q"$att >= $btt")
      case (CalcVal(at : Double, att), CalcVal(bt : Double, btt)) => CalcVal(at >= bt, q"$att >= $btt")
      case _ => unsupported()
    }
    def Eq : Calc = (a, b) match {
      case (CalcVal(at : Char, att), CalcVal(bt : Char, btt)) => CalcVal(at == bt, q"$att == $btt")
      case (CalcVal(at : Int, att), CalcVal(bt : Int, btt)) => CalcVal(at == bt, q"$att == $btt")
      case (CalcVal(at : Long, att), CalcVal(bt : Long, btt)) => CalcVal(at == bt, q"$att == $btt")
      case (CalcVal(at : Float, att), CalcVal(bt : Float, btt)) => CalcVal(at == bt, q"$att == $btt")
      case (CalcVal(at : Double, att), CalcVal(bt : Double, btt)) => CalcVal(at == bt, q"$att == $btt")
      case (CalcVal(at : String, att), CalcVal(bt : String, btt)) => CalcVal(at == bt, q"$att == $btt")
      case (CalcVal(at : Boolean, att), CalcVal(bt : Boolean, btt)) => CalcVal(at == bt, q"$att == $btt")
      case _ => unsupported()
    }
    def Neq : Calc = (a, b) match {
      case (CalcVal(at : Char, att), CalcVal(bt : Char, btt)) => CalcVal(at != bt, q"$att != $btt")
      case (CalcVal(at : Int, att), CalcVal(bt : Int, btt)) => CalcVal(at != bt, q"$att != $btt")
      case (CalcVal(at : Long, att), CalcVal(bt : Long, btt)) => CalcVal(at != bt, q"$att != $btt")
      case (CalcVal(at : Float, att), CalcVal(bt : Float, btt)) => CalcVal(at != bt, q"$att != $btt")
      case (CalcVal(at : Double, att), CalcVal(bt : Double, btt)) => CalcVal(at != bt, q"$att != $btt")
      case (CalcVal(at : String, att), CalcVal(bt : String, btt)) => CalcVal(at != bt, q"$att != $btt")
      case (CalcVal(at : Boolean, att), CalcVal(bt : Boolean, btt)) => CalcVal(at != bt, q"$att != $btt")
      case _ => unsupported()
    }
    //Logical And/Or short-circuit at compile time when the LHS is literal, so the
    //RHS need not be computable in that case. Also bitwise ops and Pow/Min/Max.
    def And : Calc = a match {
      case CalcLit.Boolean(ab) => //`And` expressions where the LHS is a literal can be inlined
        if (ab) b match {
          case CalcVal(_ : Boolean,_) => b //inlining the value of RHS when the LHS is true
          case _ => unsupported()
        } else CalcLit(false) //inlining as false when the LHS is false
      case _ => (a, b) match {
        case (CalcVal(at : Boolean, att), CalcVal(bt : Boolean, btt)) => CalcVal(at && bt, q"$att && $btt")
        case _ => unsupported()
      }
    }
    def BitwiseAnd : Calc = (a, b) match {
      case (CalcVal(at : Int, att), CalcVal(bt : Int, btt)) => CalcVal(at & bt, q"$att & $btt")
      case (CalcVal(at : Long, att), CalcVal(bt : Long, btt)) => CalcVal(at & bt, q"$att & $btt")
      case _ => unsupported()
    }
    def BitwiseOr : Calc = (a, b) match {
      case (CalcVal(at : Int, att), CalcVal(bt : Int, btt)) => CalcVal(at | bt, q"$att | $btt")
      case (CalcVal(at : Long, att), CalcVal(bt : Long, btt)) => CalcVal(at | bt, q"$att | $btt")
      case _ => unsupported()
    }
    def Or : Calc = a match {
      case CalcLit.Boolean(ab) => //`Or` expressions where the LHS is a literal can be inlined
        if (!ab) b match {
          case CalcVal(_ : Boolean,_) => b //inlining the value of RHS when the LHS is false
          case _ => unsupported()
        } else CalcLit(true) //inlining as true when the LHS is true
      case _ => (a, b) match {
        case (CalcVal(at : Boolean, att), CalcVal(bt : Boolean, btt)) => CalcVal(at || bt, q"$att || $btt")
        case _ => unsupported()
      }
    }
    def Pow : Calc = (a, b) match {
      case (CalcVal(at : Double, att), CalcVal(bt : Double, btt)) =>
        CalcVal(math.pow(at.toDouble, bt.toDouble), q"_root_.scala.math.pow($att.toDouble, $btt.toDouble)")
      case _ => unsupported()
    }
    def Min : Calc = (a, b) match {
      case (CalcVal(at : Int, att), CalcVal(bt : Int, btt)) =>
        CalcVal(math.min(at, bt), q"_root_.scala.math.min($att, $btt)")
      case (CalcVal(at : Long, att), CalcVal(bt : Long, btt)) =>
        CalcVal(math.min(at, bt), q"_root_.scala.math.min($att, $btt)")
      case (CalcVal(at : Float, att), CalcVal(bt : Float, btt)) =>
        CalcVal(math.min(at, bt), q"_root_.scala.math.min($att, $btt)")
      case (CalcVal(at : Double, att), CalcVal(bt : Double, btt)) =>
        CalcVal(math.min(at, bt), q"_root_.scala.math.min($att, $btt)")
      case _ => unsupported()
    }
    def Max : Calc = (a, b) match {
      case (CalcVal(at : Int, att), CalcVal(bt : Int, btt)) =>
        CalcVal(math.max(at, bt), q"_root_.scala.math.max($att, $btt)")
      case (CalcVal(at : Long, att), CalcVal(bt : Long, btt)) =>
        CalcVal(math.max(at, bt), q"_root_.scala.math.max($att, $btt)")
      case (CalcVal(at : Float, att), CalcVal(bt : Float, btt)) =>
        CalcVal(math.max(at, bt), q"_root_.scala.math.max($att, $btt)")
      case (CalcVal(at : Double, att), CalcVal(bt : Double, btt)) =>
        CalcVal(math.max(at, bt), q"_root_.scala.math.max($att, $btt)")
      case _ => unsupported()
    }
    //String ops. `mayFail` is used where the underlying call can throw on some
    //literal inputs (out-of-range indices, empty strings, failed regex matches).
    def Substring : Calc = (a, b) match {
      case (CalcVal(at : String, att), CalcVal(bt : Int, btt)) =>
        CalcVal.mayFail(Primitive.String, at.substring(bt), q"$att.substring($btt)")
      case _ => unsupported()
    }
    def SubSequence : Calc = (a, b, cArg) match {
      case (CalcVal(at : String, att), CalcVal(bt : Int, btt), CalcVal(ct : Int, ctt)) =>
        CalcVal.mayFail(Primitive.String, at.subSequence(bt, ct), q"$att.subSequence($btt, $ctt)")
      case _ => unsupported()
    }
    def StartsWith : Calc = (a, b) match {
      case (CalcVal(at : String, att), CalcVal(bt : String, btt)) =>
        CalcVal(at.startsWith(bt), q"$att.startsWith($btt)")
      case _ => unsupported()
    }
    def EndsWith : Calc = (a, b) match {
      case (CalcVal(at : String, att), CalcVal(bt : String, btt)) =>
        CalcVal(at.endsWith(bt), q"$att.endsWith($btt)")
      case _ => unsupported()
    }
    def Head : Calc = a match {
      case CalcVal(at : String, att) =>
        CalcVal.mayFail(Primitive.Char, at.head, q"$att.head")
      case _ => unsupported()
    }
    def Tail : Calc = a match {
      case CalcVal(at : String, att) => CalcVal(at.tail, q"$att.tail")
      case _ => unsupported()
    }
    def CharAt : Calc = (a, b) match {
      case (CalcVal(at : String, att), CalcVal(bt : Int, btt)) =>
        CalcVal.mayFail(Primitive.Char, at.charAt(bt), q"$att.charAt($btt)")
      case _ => unsupported()
    }
    def Length : Calc = a match {
      case CalcVal(at : String, att) => CalcVal(at.length, q"$att.length")
      case _ => unsupported()
    }
    def Matches : Calc = (a, b) match {
      case (CalcVal(at : String, att), CalcVal(bt : String, btt)) =>
        CalcVal.mayFail(Primitive.Boolean, at.matches(bt), q"$att.matches($btt)")
      case _ => unsupported()
    }
    //Regex ops: `a` is the subject string, `b` the pattern.
    def FirstMatch : Calc = (a, b) match {
      case (CalcVal(at : String, att), CalcVal(bt : String, btt)) =>
        CalcVal.mayFail(Primitive.String, bt.r.findFirstIn(at).get, q"$btt.r.findFirstIn($att).get")
      case _ => unsupported()
    }
    def PrefixMatch : Calc = (a, b) match {
      case (CalcVal(at : String, att), CalcVal(bt : String, btt)) =>
        CalcVal.mayFail(Primitive.String, bt.r.findPrefixOf(at).get, q"$btt.r.findPrefixOf($att).get")
      case _ => unsupported()
    }
    def ReplaceFirstMatch : Calc = (a, b, cArg) match {
      case (CalcVal(at : String, att), CalcVal(bt : String, btt), CalcVal(ct : String, ctt)) =>
        CalcVal.mayFail(Primitive.String, bt.r.replaceFirstIn(at, ct), q"$btt.r.replaceFirstIn($att, $ctt)")
      case _ => unsupported()
    }
    def ReplaceAllMatches : Calc = (a, b, cArg) match {
      case (CalcVal(at : String, att), CalcVal(bt : String, btt), CalcVal(ct : String, ctt)) =>
        CalcVal.mayFail(Primitive.String, bt.r.replaceAllIn(at, ct), q"$btt.r.replaceAllIn($att, $ctt)")
      case _ => unsupported()
    }
    //Dispatch table: maps the op's type symbol to the matching evaluator above.
    funcType match {
      case funcTypes.AcceptNonLiteral => AcceptNonLiteral
      case funcTypes.GetArg => GetArg
      case funcTypes.Id => Id
      case funcTypes.ToNat => ToNat
      case funcTypes.ToChar => ToChar
      case funcTypes.ToInt => ToInt
      case funcTypes.ToLong => ToLong
      case funcTypes.ToFloat => ToFloat
      case funcTypes.ToDouble => ToDouble
      case funcTypes.ToString => ToString
      case funcTypes.Negate => Negate
      case funcTypes.Abs => Abs
      case funcTypes.NumberOfLeadingZeros => NumberOfLeadingZeros
      case funcTypes.Floor => Floor
      case funcTypes.Ceil => Ceil
      case funcTypes.Round => Round
      case funcTypes.Sin => Sin
      case funcTypes.Cos => Cos
      case funcTypes.Tan => Tan
      case funcTypes.Sqrt => Sqrt
      case funcTypes.Log => Log
      case funcTypes.Log10 => Log10
      case funcTypes.Reverse => Reverse
      case funcTypes.! => Not
      case funcTypes.Require => Require
      case funcTypes.ITE => ITE
      case funcTypes.==> => Next
      case funcTypes.+ => Plus
      case funcTypes.- => Minus
      case funcTypes.* => Mul
      case funcTypes./ => Div
      case funcTypes.% => Mod
      case funcTypes.< => Sml
      case funcTypes.> => Big
      case funcTypes.<= => SmlEq
      case funcTypes.>= => BigEq
      case funcTypes.== => Eq
      case funcTypes.!= => Neq
      case funcTypes.&& => And
      case funcTypes.|| => Or
      case funcTypes.BitwiseAnd => BitwiseAnd
      case funcTypes.BitwiseOr => BitwiseOr
      case funcTypes.Pow => Pow
      case funcTypes.Min => Min
      case funcTypes.Max => Max
      case funcTypes.Substring => Substring
      case funcTypes.SubSequence => SubSequence
      case funcTypes.StartsWith => StartsWith
      case funcTypes.EndsWith => EndsWith
      case funcTypes.Head => Head
      case funcTypes.Tail => Tail
      case funcTypes.CharAt => CharAt
      case funcTypes.Length => Length
      case funcTypes.Matches => Matches
      case funcTypes.FirstMatch => FirstMatch
      case funcTypes.PrefixMatch => PrefixMatch
      case funcTypes.ReplaceFirstMatch => ReplaceFirstMatch
      case funcTypes.ReplaceAllMatches => ReplaceAllMatches
      case _ => abort(s"Unsupported $funcType[$a, $b, $cArg]")
    }
  }
  /** Materializes the final op-instance tree for `opTpe`. The first type argument
    * of `opTpe` identifies the op's function; special cases handle `ToNat`
    * (shapeless Nat output), `AcceptNonLiteral`/`GetArg` (non-literal output
    * permitted), and otherwise reject non-literal results.
    */
  final class MaterializeOpAuxGen(opTpe: Type) {
    def usingFuncName : Tree = {
      val funcType = opTpe.typeArgs.head.typeSymbol.asType
      val opResult = TypeCalc(opTpe)
      val genTree = (funcType, opResult) match {
        case (funcTypes.ToNat, CalcLit.Int(t)) =>
          //Nat indices are non-negative by construction.
          if (t < 0) abort(s"Nat cannot be a negative literal. Found: $t")
          else genOpTreeNat(opTpe, t)
        case (_, CalcLit(_, t)) => genOpTreeLit(opTpe, t)
        case (funcTypes.AcceptNonLiteral | funcTypes.GetArg, t : CalcNLit) => genOpTreeNLit(opTpe, t)
        case (funcTypes.GetArg, t : CalcUnknown) => genOpTreeUnknown(opTpe, t)
        case (_, t: CalcNLit) =>
          abort("Calculation has returned a non-literal type/value.\\nTo accept non-literal values, use `AcceptNonLiteral[T]`.")
        case _ => extractionFailed(opTpe)
      }
      // println(genTree)
      genTree
    }
  }
///////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////
// TwoFace Shell
///////////////////////////////////////////////////////////////////////////////////////////
/** Entry point for materializing a TwoFace shell; captures the shell's weak type. */
def TwoFaceShellMaterializer[Shell](implicit shell : c.WeakTypeTag[Shell])
: TwoFaceShellMaterializer[Shell] = new TwoFaceShellMaterializer[Shell](weakTypeOf[Shell])
/** Generates an anonymous implementation of a TwoFace shell type, with an `apply`
  * method whose by-name parameters mirror the shell's argument type parameters.
  */
final class TwoFaceShellMaterializer[Shell](shellTpe : Type) {
  def shell(shellAliasTpe : TypeSymbol) : c.Tree = {
    val owner = c.internal.enclosingOwner
    // Materializing inside a case class `equals` hits scala/bug#10536; fail fast with guidance.
    if (owner.asTerm.name.toString == "equals" && owner.owner.isClass && owner.owner.asClass.isCaseClass) {
      abort("A case class equals workaround is required. See https://github.com/scala/bug/issues/10536")
    }
    val funcApplyTpe = shellTpe.typeArgs(0)
    val funcArgsTpe = shellTpe.typeArgs(1)
    // The calculated argument value supplies both the generated tree and the
    // TwoFace member name (e.g. "Int", "String") to create it with.
    val (tfValueTree, tfName) = TypeCalc(funcArgsTpe) match {
      case (t: CalcVal) => (t.tree, t.name)
      case _ => extractionFailed(shellTpe)
    }
    val tfTerm = TermName(tfName)
    val tfType = TypeName(tfName)
    val outTpe = TypeCalc(funcApplyTpe).tpe
    // Every second type argument starting at index 3 becomes the type of one
    // generated by-name parameter `arg1`, `arg2`, ...
    val paramVec = for (i <- 4 to shellTpe.typeArgs.length by 2; typeTree = AppliedTypeTree(Ident(TypeName("<byname>")), List(tq"${shellTpe.typeArgs(i-1)}")))
      yield ValDef(Modifiers(Flag.PARAM | Flag.BYNAMEPARAM),TermName(s"arg${(i-4)/2+1}"),typeTree,EmptyTree)
    val paramTree = List(paramVec.toList)
    val genTree =
      q"""
        new $shellTpe {
          type Out = $outTpe
          def apply(...$paramTree) : _root_.singleton.twoface.TwoFace.$tfType[$outTpe] = {
            _root_.singleton.twoface.TwoFace.$tfTerm.create[$outTpe]($tfValueTree)
          }
        }
      """
    // println(showCode(genTree))
    genTree
  }
}
///////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////
// TwoFace
///////////////////////////////////////////////////////////////////////////////////////////
/** Entry point for the TwoFace tree-generation helpers. */
def TwoFaceMaterializer : TwoFaceMaterializer = new TwoFaceMaterializer
/** Helpers for generating `TwoFace.<Name>.create[...]` trees and for unwrapping
  * TwoFace values back to their underlying numeric values.
  */
final class TwoFaceMaterializer {
  /** Generates `TwoFace.<tfName>.create[outTpe](outTree)`. */
  def genTwoFace(outTpe : Type, outTree : Tree, tfName : String) : c.Tree = {
    val tfTerm = TermName(tfName)
    q"""
      _root_.singleton.twoface.TwoFace.$tfTerm.create[$outTpe]($outTree)
    """
  }
  def genTwoFace(calc : CalcVal) : c.Tree = {
    genTwoFace(calc.tpe, calc.tree, calc.name)
  }
  // `tfSym` is kept for interface compatibility; the generated tree does not need it.
  def fromNumValue(numValueTree : c.Tree, tfSym : TypeSymbol) : c.Tree = {
    // println(tfSym.name)
    val genTree = genTwoFace(extractValueFromNumTree(numValueTree))
    // println(genTree)
    genTree
  }
  /** Unwraps a TwoFace tree and casts the underlying value to `tTpe`.
    * (`tfSym` kept for interface compatibility; it is unused here.)
    */
  def toNumValue(tfTree : c.Tree, tfSym : TypeSymbol, tTpe : Type) : c.Tree = {
    val calc = extractValueFromTwoFaceTree(tfTree)
    // Removed dead local `outTpe = calc.tpe` — the generated tree only uses the value tree.
    val outTree = calc.tree
    val genTree =
      q"""
        $outTree.asInstanceOf[$tTpe]
      """
    // println(genTree)
    genTree
  }
}
///////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////
// Checked0Param TwoFace
///////////////////////////////////////////////////////////////////////////////////////////
/** Entry point for materializing a zero-parameter checked type; captures the weak type tags. */
def Checked0ParamMaterializer[Chk, Cond, Msg, T](implicit chk : c.WeakTypeTag[Chk], cond : c.WeakTypeTag[Cond], msg : c.WeakTypeTag[Msg], t : c.WeakTypeTag[T]) :
Checked0ParamMaterializer[Chk, Cond, Msg, T] = new Checked0ParamMaterializer[Chk, Cond, Msg, T](symbolOf[Chk], weakTypeOf[Cond], weakTypeOf[Msg], weakTypeOf[T])
/** Materializes instances of a checked type `Chk[Cond, Msg, T]`: the value's type
  * is run through the `Cond` type function and, when the condition is not a
  * constant `true`, through `Msg` to produce a failure message.
  */
final class Checked0ParamMaterializer[Chk, Cond, Msg, T](chkSym : TypeSymbol, condTpe : Type, msgTpe : Type, tTpe : Type) {
  /** Generates a tree instantiating `Chk` for the calculated value `calc`,
    * parameterized on `chkArgTpe` as the value-type argument.
    */
  def newChecked(calc : CalcVal, chkArgTpe : Type) : c.Tree = {
    val outTpe = calc.tpe
    val outTree = calc.tree
    // `Cond` and `Msg` are unary type functions of the value's type.
    val fixedCondTpe = appliedType(condTpe.typeConstructor, outTpe).dealias
    val fixedMsgTpe = appliedType(msgTpe.typeConstructor, outTpe).dealias
    val condCalc = TypeCalc(fixedCondTpe) match {
      case t : CalcVal => t
      case _ => extractionFailed(fixedCondTpe)
    }
    val msgCalc = condCalc match {
      case (CalcLit.Boolean(true)) => CalcLit("") //Not calculating message if condition is constant true
      case _ => TypeCalc(fixedMsgTpe) match {
        case t : CalcVal => t
        case _ => extractionFailed(fixedMsgTpe)
      }
    }
    // Evaluated for its side effect only (the `Require` calculation validates the
    // condition); the result was previously bound to an unused `reqCalc`.
    // NOTE(review): presumably opCalc aborts/embeds a check on failure — confirm.
    opCalc(funcTypes.Require, condCalc, msgCalc, CalcUnknown(typeOf[NoSym], None))
    q"""
      (new $chkSym[$condTpe, $msgTpe, $chkArgTpe]($outTree.asInstanceOf[$outTpe]))
    """
  }
  def newChecked(calc : CalcVal) : c.Tree = newChecked(calc, calc.tpe)
  /** Builds a checked instance from an op tree, widened to `T`. */
  def fromOpImpl(opTree : c.Tree) : c.Tree = {
    val numValueCalc = extractValueFromOpTree(opTree)
    val genTree = newChecked(numValueCalc, tTpe)
    // println(genTree)
    genTree
  }
  def fromNumValue(numValueTree : c.Tree) : c.Tree = {
    val numValueCalc = extractValueFromNumTree(numValueTree)
    val genTree = newChecked(numValueCalc)
    // println(genTree)
    genTree
  }
  def fromTF(tfTree : c.Tree) : c.Tree = {
    val tfValueCalc = extractValueFromTwoFaceTree(tfTree)
    val genTree = newChecked(tfValueCalc)
    // println(genTree)
    genTree
  }
  /** Re-checks a value while widening its type argument to `T`. */
  def widen(chkTree : c.Tree) : c.Tree = {
    val tfValueCalc = extractValueFromTwoFaceTree(chkTree)
    val genTree = newChecked(tfValueCalc, tTpe)
    // println(genTree)
    genTree
  }
}
///////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////
// Checked1Param TwoFace
///////////////////////////////////////////////////////////////////////////////////////////
/** Entry point for materializing a one-parameter checked type; captures the weak type tags. */
def Checked1ParamMaterializer[Chk, Cond, Msg, T, ParamFace, Param](implicit chk : c.WeakTypeTag[Chk], cond : c.WeakTypeTag[Cond], msg : c.WeakTypeTag[Msg], t : c.WeakTypeTag[T], paramFace : c.WeakTypeTag[ParamFace], p : c.WeakTypeTag[Param]) :
Checked1ParamMaterializer[Chk, Cond, Msg, T, ParamFace, Param] = new Checked1ParamMaterializer[Chk, Cond, Msg, T, ParamFace, Param](symbolOf[Chk], weakTypeOf[Cond], weakTypeOf[Msg], weakTypeOf[T], weakTypeOf[ParamFace], weakTypeOf[Param])
/** Materializes instances of a checked type `Chk[Cond, Msg, T, ParamFace, Param]`:
  * like the zero-parameter variant, but `Cond`/`Msg` are binary type functions of
  * the value type and an extra parameter type.
  */
final class Checked1ParamMaterializer[Chk, Cond, Msg, T, ParamFace, Param](chkSym : TypeSymbol, condTpe : Type, msgTpe : Type, tTpe : Type, paramFaceTpe : Type, paramTpe : Type) {
  /** Generates a tree instantiating `Chk` for the calculated value `tCalc`,
    * parameterized on `chkArgTpe` as the value-type argument.
    */
  def newChecked(tCalc : CalcVal, chkArgTpe : Type) : c.Tree = {
    val outTpe = tCalc.tpe
    val outTree = tCalc.tree
    val paramCalc = TypeCalc(paramTpe) match {
      case t : CalcVal => t
      case _ => extractionFailed(paramTpe)
    }
    // `Cond` and `Msg` are applied to (value type, param type) and dealiased.
    val fixedCondTpe = appliedType(condTpe.typeConstructor, tCalc.tpe, paramCalc.tpe).dealias
    val fixedMsgTpe = appliedType(msgTpe.typeConstructor, tCalc.tpe, paramCalc.tpe).dealias
    val condCalc = TypeCalc(fixedCondTpe) match {
      case t : CalcVal => t
      case _ => extractionFailed(fixedCondTpe)
    }
    val msgCalc = condCalc match {
      case (CalcLit.Boolean(true)) => CalcLit("") //Not calculating message if condition is constant true
      case _ => TypeCalc(fixedMsgTpe) match {
        case t : CalcVal => t
        case _ => extractionFailed(fixedMsgTpe)
      }
    }
    // Evaluated for its side effect (the `Require` calculation); the binding itself is unused.
    val reqCalc = opCalc(funcTypes.Require, condCalc, msgCalc, CalcUnknown(typeOf[NoSym], None))
    q"""
      (new $chkSym[$condTpe, $msgTpe, $chkArgTpe, $paramFaceTpe, $paramTpe]($outTree.asInstanceOf[$outTpe]))
    """
  }
  def newChecked(tCalc : CalcVal) : c.Tree =
    newChecked(tCalc, tCalc.tpe)
  /** Builds a checked instance from an op tree, widened to `T`. */
  def fromOpImpl(tOpTree : c.Tree) : c.Tree = {
    val tCalc = extractValueFromOpTree(tOpTree)
    val genTree = newChecked(tCalc, tTpe)
    // println(genTree)
    genTree
  }
  def fromNumValue(tNumTree : c.Tree) : c.Tree = {
    val tCalc = extractValueFromNumTree(tNumTree)
    val genTree = newChecked(tCalc)
    // println(genTree)
    genTree
  }
  def fromTF(tTFTree : c.Tree) : c.Tree = {
    val tCalc = extractValueFromTwoFaceTree(tTFTree)
    val genTree = newChecked(tCalc)
    // println(genTree)
    genTree
  }
  /** Re-checks a value while widening its type argument to `T`. */
  def widen(tTFTree : c.Tree) : c.Tree = {
    val tCalc = extractValueFromTwoFaceTree(tTFTree)
    val genTree = newChecked(tCalc, tTpe)
    // println(genTree)
    genTree
  }
}
///////////////////////////////////////////////////////////////////////////////////////////
//copied from Shapeless
import scala.annotation.tailrec
/** Builds the type tree of the shapeless `Nat` literal for `i`,
  * i.e. `Succ[Succ[...[_0]]]` nested `i` levels deep.
  */
def mkNatTpt(i: Int): Tree = {
  val succSym = typeOf[shapeless.Succ[_]].typeConstructor.typeSymbol
  val _0Sym = typeOf[shapeless._0].typeSymbol
  // Wrap the accumulator in one more `Succ[...]` layer per remaining unit.
  @tailrec
  def wrap(remaining: Int, inner: Tree): Tree =
    if (remaining == 0) inner
    else wrap(remaining - 1, AppliedTypeTree(Ident(succSym), List(inner)))
  wrap(i, Ident(_0Sym))
}
//copied from Shapeless
/** Builds the shapeless `Nat` type for `i`, i.e. `Succ[Succ[...[_0]]]`
  * applied `i` times.
  */
def mkNatTpe(i: Int): Type = {
  val succTpe = typeOf[shapeless.Succ[_]].typeConstructor
  val _0Tpe = typeOf[shapeless._0]
  // Apply one more `Succ` layer per remaining unit.
  @tailrec
  def wrap(remaining: Int, inner: Type): Type =
    if (remaining == 0) inner
    else wrap(remaining - 1, appliedType(succTpe, inner))
  wrap(i, _0Tpe)
}
//copied from Shapeless
/** Builds a value tree instantiating the shapeless `Nat` for `i` (e.g. `new Succ[Succ[_0]]`). */
def mkNatValue(i: Int): Tree =
  q""" new ${mkNatTpt(i)} """
///////////////////////////////////////////////////////////////////////////////////////////
}
| fthomas/singleton-ops | src/main/scala/singleton/ops/impl/GeneralMacros.scala | Scala | apache-2.0 | 72,662 |
package colang.ast.raw
import colang.Strategy.Result
import colang.Strategy.Result.{NoMatch, Success}
import colang.ast.raw.ParserImpl.{Present, SingleTokenStrategy, identifierStrategy}
import colang.issues.{Issues, Terms}
import colang.tokens.{Ampersand, Identifier, LogicalAnd}
import colang.{MappedStrategy, SourceCode, StrategyUnion, TokenStream}
/**
  * Represents a type reference as it appears in source code,
  * either simple ('int') or a reference type ('int&').
  */
sealed trait Type extends Node
object Type {
  // Reference types are tried first so that 'int&' is not parsed as just 'int'.
  val strategy = StrategyUnion(
    ReferenceType.strategy,
    SimpleType.strategy)
}
/**
  * Represents a simple (non-reference) type reference, e.g. 'int'.
  * @param name type name
  */
case class SimpleType(name: Identifier) extends Type {
  // The source span is just the identifier itself.
  def source = name.source
}
object SimpleType {
  // A simple type is a single identifier, mapped into a SimpleType node.
  val strategy = new MappedStrategy(identifierStrategy, SimpleType.apply)
}
/**
  * Represents a reference type, e.g. 'int&'.
  * @param referenced referenced simple type
  * @param ampersand ampersand
  */
case class ReferenceType(referenced: Type, ampersand: Ampersand) extends Type {
  // Spans from the start of the referenced type through the trailing '&'.
  def source = referenced.source + ampersand.source
}
object ReferenceType {

  /**
    * A strategy that matches valid reference types: a simple type followed by
    * exactly one ampersand (e.g. 'int&').
    */
  private val validStrategy = new ParserImpl.Strategy[ReferenceType] {
    def apply(stream: TokenStream): Result[TokenStream, ReferenceType] = {
      ParserImpl.parseGroup()
        .definingElement(SimpleType.strategy)
        .definingElement(SingleTokenStrategy(classOf[Ampersand]))
        .parse(stream)
        .as[SimpleType, Ampersand] match {
        case (Present(simpleType), Present(ampersand), issues, streamAfterType) =>
          Success(ReferenceType(simpleType, ampersand), issues, streamAfterType)
        case _ => NoMatch()
      }
    }
  }

  /**
    * A strategy that matches "overreferenced" types with multiple ampersands (e.g. 'int&&'):
    * it recovers by returning the simple type and reporting an issue.
    */
  private val invalidStrategy = new ParserImpl.Strategy[Type] {
    // '&&' is lexed as a single LogicalAnd token, so both token kinds must be accepted.
    private val anyAmpersandStrategy = StrategyUnion(
      SingleTokenStrategy(classOf[Ampersand]),
      SingleTokenStrategy(classOf[LogicalAnd]))

    // A run of consecutive ampersand-like tokens; `count` is the total number of
    // '&' characters it contains.
    private case class AmpersandSequence(source: SourceCode, count: Int) extends Node
    private object AmpersandSequence {
      val strategy = new ParserImpl.Strategy[AmpersandSequence] {
        def apply(stream: TokenStream): Result[TokenStream, AmpersandSequence] = {
          ParserImpl.parseSequence(
            stream = stream,
            elementStrategy = anyAmpersandStrategy,
            elementDescription = Terms.Ampersand
          ) match {
            case (tokens, issues, streamAfterTokens) =>
              if (tokens.nonEmpty) {
                // A LogicalAnd token ('&&') contributes two ampersands.
                // (Unused pattern binders replaced with typed wildcards.)
                val count = (tokens map {
                  case _: Ampersand => 1
                  case _: LogicalAnd => 2
                }).sum
                Success(AmpersandSequence(tokens.head.source + tokens.last.source, count), issues, streamAfterTokens)
              } else NoMatch()
          }
        }
      }
    }

    def apply(stream: TokenStream): Result[TokenStream, Type] = {
      ParserImpl.parseGroup()
        .definingElement(SimpleType.strategy)
        .definingElement(AmpersandSequence.strategy)
        .parse(stream)
        .as[SimpleType, AmpersandSequence] match {
        // Only two or more ampersands are invalid; a single one is the valid strategy's job.
        case (Present(simpleType), Present(ampersandSequence), issues, streamAfterType)
          if ampersandSequence.count >= 2 =>
          val referencedTypeName = simpleType.name.value + ("&" * (ampersandSequence.count - 1))
          val issue = Issues.OverreferencedType(simpleType.source + ampersandSequence.source, referencedTypeName)
          Success(simpleType, issues :+ issue, streamAfterType)
        case _ => NoMatch()
      }
    }
  }

  // Try the invalid (multi-ampersand) form first so 'int&&' produces a diagnostic
  // instead of a partial match of 'int&'.
  val strategy = StrategyUnion(
    invalidStrategy,
    validStrategy)
}
| merkispavel/colang | src/main/scala/colang/ast/raw/Type.scala | Scala | mit | 3,724 |
package me.yingrui.segment.util
object StringUtil {

  // Fullwidth forms (U+FF01..U+FF5E) sit exactly 65248 code points above their
  // ASCII counterparts, e.g. '\uFF21' ('A' fullwidth) - 65248 == 'A'.

  /** Converts a fullwidth letter/digit to its halfwidth (ASCII) form;
    * any other character is returned unchanged. */
  def halfShape(original: Char): Char =
    if (isFullShapedChar(original)) (original - 65248).toChar
    else original

  /** True for fullwidth Latin letters and digits only.
    * Fixed range endpoints: fullwidth A-Z is U+FF21..U+FF3A and a-z is U+FF41..U+FF5A;
    * the previous bounds (U+FF21..U+FF41 and U+FF3A..U+FF5A) also matched the
    * fullwidth punctuation U+FF3B..U+FF40 by mistake. */
  def isFullShapedChar(original: Char): Boolean =
    (original >= '\uFF21' && original <= '\uFF3A') ||
      (original >= '\uFF41' && original <= '\uFF5A') ||
      (original >= '\uFF10' && original <= '\uFF19')

  /** Converts every fullwidth letter/digit in `original` to its halfwidth form. */
  def halfShape(original: String): String =
    original.map { ch =>
      if (isCharAlphabeticalOrDigital(ch)) halfShape(ch) else ch
    }

  /** Upper-cases every ASCII lowercase letter in `original`; other characters
    * (including fullwidth ones) are kept as-is. */
  def toUpperCase(original: String): String =
    original.map { ch =>
      if (isCharAlphabeticalOrDigital(ch)) toUpperCase(ch) else ch
    }

  /** Upper-cases a single ASCII lowercase letter; anything else is returned unchanged. */
  def toUpperCase(original: Char): Char =
    if (isLowerCaseChar(original)) (original - 32).toChar
    else original

  /** True only for ASCII lowercase letters 'a'..'z'. */
  def isLowerCaseChar(original: Char): Boolean =
    original >= 'a' && original <= 'z'

  /** Normalizes fullwidth characters to halfwidth and upper-cases, in one pass. */
  def doUpperCaseAndHalfShape(original: String): String =
    original.map { ch =>
      if (isCharAlphabeticalOrDigital(ch)) toUpperCase(halfShape(ch)) else ch
    }

  /** True for ASCII or fullwidth Latin letters and digits (same range fix as above). */
  def isCharAlphabeticalOrDigital(ch: Char): Boolean =
    (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') || (ch >= '0' && ch <= '9') ||
      (ch >= '\uFF21' && ch <= '\uFF3A') || (ch >= '\uFF41' && ch <= '\uFF5A') ||
      (ch >= '\uFF10' && ch <= '\uFF19')

  /** True for ASCII or fullwidth Latin letters (digits excluded). */
  def isCharAlphabetical(ch: Char): Boolean =
    (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') ||
      (ch >= '\uFF21' && ch <= '\uFF3A') || (ch >= '\uFF41' && ch <= '\uFF5A')
}
| yingrui/mahjong | lib-segment/src/main/scala/me/yingrui/segment/util/StringUtil.scala | Scala | gpl-3.0 | 2,241 |
package org.jetbrains.plugins.scala
package lang
package parameterInfo
import java.awt.Color
import com.intellij.codeInsight.CodeInsightBundle
import com.intellij.codeInsight.completion.JavaCompletionUtil
import com.intellij.codeInsight.hint.ShowParameterInfoHandler
import com.intellij.codeInsight.lookup.{LookupElement, LookupItem}
import com.intellij.lang.parameterInfo._
import com.intellij.psi._
import com.intellij.psi.tree.IElementType
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.util.ArrayUtil
import com.intellij.util.containers.hash.HashSet
import org.jetbrains.plugins.scala.editor.documentationProvider.ScalaDocumentationProvider
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parameterInfo.ScalaFunctionParameterInfoHandler.AnnotationParameters
import org.jetbrains.plugins.scala.lang.psi.api.base.types.{ScParameterizedTypeElement, ScTypeElement, ScTypeElementExt}
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScConstructor, ScPrimaryConstructor}
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{PsiTypeParameterExt, ScParameter, ScParameterClause}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScClass, ScTypeDefinition}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.{ScTypeParametersOwner, ScTypedDefinition}
import org.jetbrains.plugins.scala.lang.psi.fake.FakePsiMethod
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.Parameter
import org.jetbrains.plugins.scala.lang.psi.types.result.TypingContext
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaNamesUtil
import org.jetbrains.plugins.scala.lang.resolve.processor.CompletionProcessor
import org.jetbrains.plugins.scala.lang.resolve.{ResolveUtils, ScalaResolveResult, StdKinds}
import scala.annotation.tailrec
import scala.collection.Seq
import scala.collection.mutable.ArrayBuffer
/**
* User: Alexander Podkhalyuzin
* Date: 18.01.2009
*/
class ScalaFunctionParameterInfoHandler extends ParameterInfoHandlerWithTabActionSupport[PsiElement, Any, ScExpression] {
/** Search for the enclosing argument list stops at a wrapping [[PsiMethod]]. */
def getArgListStopSearchClasses: java.util.Set[_ <: Class[_]] = {
  java.util.Collections.singleton(classOf[PsiMethod])
}
/** Characters that close/delimit a parameter while typing in an argument list. */
def getParameterCloseChars: String = "{},);\\n"
/** Parameter info may also be shown for completion-lookup items. */
def couldShowInLookup: Boolean = true
/** Arguments are separated by commas. */
def getActualParameterDelimiterType: IElementType = ScalaTokenTypes.tCOMMA
/** Extracts the actual argument expressions from an argument-holding element.
  * Handles regular argument lists as well as the infix-call argument shapes
  * (unit, parenthesised expression, tuple, single expression).
  */
def getActualParameters(elem: PsiElement): Array[ScExpression] = {
  elem match {
    case list: ScArgumentExprList => list.exprs.toArray
    case _: ScUnitExpr => Array.empty
    case parens: ScParenthesisedExpr => parens.expr.toArray
    case tuple: ScTuple => tuple.exprs.toArray
    case expr: ScExpression => Array(expr)
    case _ => Array.empty
  }
}
/** Any PSI element may act as the argument list (see getArgumentListAllowedParentClasses). */
def getArgumentListClass: Class[PsiElement] = classOf[PsiElement]
/** The closing token of a brace-argument list. */
def getActualParametersRBraceType: IElementType = ScalaTokenTypes.tRBRACE
/** Parent node types that may directly own an argument list:
  * method calls, constructor invocations, self invocations and infix expressions.
  */
def getArgumentListAllowedParentClasses: java.util.Set[Class[_]] = {
  val allowed = new HashSet[Class[_]]()
  Seq(classOf[ScMethodCall], classOf[ScConstructor], classOf[ScSelfInvocation], classOf[ScInfixExpr])
    .foreach(allowed.add)
  allowed
}
/** Locates the argument list at the caret when the popup is first shown. */
def findElementForParameterInfo(context: CreateParameterInfoContext): PsiElement = {
  findCall(context)
}
/** Locates the argument list at the caret when an existing popup is updated. */
def findElementForUpdatingParameterInfo(context: UpdateParameterInfoContext): PsiElement = {
  findCall(context)
}
/** For documentation purposes a Scala function is represented by its parameters;
  * anything else yields no documentation objects.
  */
def getParametersForDocumentation(p: Any, context: ParameterInfoContext): Array[Object] = {
  p match {
    case x: ScFunction =>
      x.parameters.toArray
    case _ => ArrayUtil.EMPTY_OBJECT_ARRAY
  }
}
/** Anchors the hint at the start of the located argument-list element. */
def showParameterInfo(element: PsiElement, context: CreateParameterInfoContext) {
  context.showHint(element, element.getTextRange.getStartOffset, this)
}
/** Extracts candidate PsiMethods from a completion lookup item.
  * Returns null (the platform convention for "nothing to show") when the item
  * carries no PSI elements or the first one is not a [[PsiMethod]].
  */
def getParametersForLookup(item: LookupElement, context: ParameterInfoContext): Array[Object] = {
  if (!item.isInstanceOf[LookupItem[_]]) return null
  val allElements = JavaCompletionUtil.getAllPsiElements(item.asInstanceOf[LookupItem[_]])
  if (allElements != null &&
    allElements.size > 0 &&
    allElements.get(0).isInstanceOf[PsiMethod]) {
    return allElements.toArray(new Array[Object](allElements.size))
  }
  null
}
/** Recomputes the current parameter index as the number of comma tokens that
  * occur before the caret offset among `o`'s direct children; removes the hint
  * when the popup no longer belongs to `o`.
  */
def updateParameterInfo(o: PsiElement, context: UpdateParameterInfoContext) {
  if (context.getParameterOwner != o) context.removeHint()
  val offset = context.getOffset
  var child = o.getNode.getFirstChildNode
  var i = 0
  while (child != null && child.getStartOffset < offset) {
    if (child.getElementType == ScalaTokenTypes.tCOMMA) i = i + 1
    child = child.getTreeNext
  }
  context.setCurrentParameter(i)
}
/** Renders the parameter hint text for the call site under the caret.
  *
  * Builds the parameter-list string into `buffer` with `<b>`/`</b>` markers around
  * the current parameter (converted to highlight offsets at the end), and flips
  * `isGrey` when the actual arguments cannot fit the candidate signature.
  */
def updateUI(p: Any, context: ParameterInfoUIContext) {
  if (context == null || context.getParameterOwner == null || !context.getParameterOwner.isValid) return
  context.getParameterOwner match {
    case args: PsiElement =>
      implicit val project = args.projectContext
      val color: Color = context.getDefaultParameterColor
      val index = context.getCurrentParameterIndex
      val buffer: StringBuilder = new StringBuilder("")
      var isGrey = false
      //todo: var isGreen = true
      // Once an out-of-order named argument is seen, parameters are rendered in [brackets].
      var namedMode = false
      // Renders one parameter as "name: Type" with the substitutor applied.
      def paramText(param: ScParameter, subst: ScSubstitutor) = {
        ScalaDocumentationProvider.parseParameter(param, escape = false)(subst.subst(_).presentableText)
      }
      // Appends one parameter clause to `buffer`, matching actual arguments against
      // formal parameters (positional and named), checking type conformance, and
      // handling a trailing repeated parameter.
      def applyToParameters(parameters: Seq[(Parameter, String)], subst: ScSubstitutor, canBeNaming: Boolean,
                            isImplicit: Boolean = false) {
        if (parameters.nonEmpty) {
          var k = 0
          val exprs: Seq[ScExpression] = getActualParameters(args)
          if (isImplicit) buffer.append("implicit ")
          // used(i) == true once parameter i has been consumed (positionally or by name).
          val used = new Array[Boolean](parameters.length)
          while (k < parameters.length) {
            val namedPrefix = "["
            val namedPostfix = "]"
            // Appends the first not-yet-used parameter's text.
            def appendFirst(useGrey: Boolean = false) {
              val getIt = used.indexOf(false)
              used(getIt) = true
              if (namedMode) buffer.append(namedPrefix)
              val param: (Parameter, String) = parameters(getIt)
              buffer.append(param._2)
              if (namedMode) buffer.append(namedPostfix)
            }
            // Handles a positional argument: consumes the next unused parameter and
            // greys the hint if the argument type does not conform.
            def doNoNamed(expr: ScExpression) {
              if (namedMode) {
                isGrey = true
                appendFirst()
              } else {
                val exprType = expr.getType(TypingContext.empty).getOrNothing
                val getIt = used.indexOf(false)
                used(getIt) = true
                val param: (Parameter, String) = parameters(getIt)
                val paramType = param._1.paramType
                if (!exprType.conforms(paramType)) isGrey = true
                buffer.append(param._2)
              }
            }
            // Open the bold marker on the current parameter (or the trailing
            // repeated parameter when the caret is past the last one).
            if (k == index || (k == parameters.length - 1 && index >= parameters.length &&
              parameters.last._1.isRepeated)) {
              buffer.append("<b>")
            }
            if (k < index && !isGrey) {
              //slow checking
              if (k >= exprs.length) { //shouldn't be
                appendFirst(useGrey = true)
                isGrey = true
              } else {
                exprs(k) match {
                  case assign@NamedAssignStmt(name) =>
                    val ind = parameters.indexWhere(param => ScalaNamesUtil.equivalent(param._1.name, name))
                    if (ind == -1 || used(ind)) {
                      doNoNamed(assign)
                    } else {
                      if (k != ind) namedMode = true
                      used(ind) = true
                      val param: (Parameter, String) = parameters(ind)
                      if (namedMode) buffer.append(namedPrefix)
                      buffer.append(param._2)
                      if (namedMode) buffer.append(namedPostfix)
                      assign.getRExpression match {
                        case Some(expr: ScExpression) =>
                          for (exprType <- expr.getType(TypingContext.empty)) {
                            val paramType = param._1.paramType
                            if (!exprType.conforms(paramType)) isGrey = true
                          }
                        case _ => isGrey = true
                      }
                    }
                  case expr: ScExpression =>
                    doNoNamed(expr)
                }
              }
            } else {
              //fast checking
              if (k >= exprs.length) {
                appendFirst()
              } else {
                exprs(k) match {
                  case NamedAssignStmt(name) =>
                    val ind = parameters.indexWhere(param => ScalaNamesUtil.equivalent(param._1.name, name))
                    if (ind == -1 || used(ind)) {
                      appendFirst()
                    } else {
                      if (k != ind) namedMode = true
                      used(ind) = true
                      if (namedMode) buffer.append(namedPrefix)
                      buffer.append(parameters(ind)._2)
                      if (namedMode) buffer.append(namedPostfix)
                    }
                  case _ => appendFirst()
                }
              }
            }
            if (k == index || (k == parameters.length - 1 && index >= parameters.length &&
              parameters.last._1.isRepeated)) {
              buffer.append("</b>")
            }
            k = k + 1
            if (k != parameters.length) buffer.append(", ")
          }
          // Extra arguments are acceptable only for a trailing repeated parameter.
          if (!isGrey && exprs.length > parameters.length && index >= parameters.length) {
            if (!namedMode && parameters.last._1.isRepeated) {
              val paramType = parameters.last._1.paramType
              while (!isGrey && k < exprs.length.min(index)) {
                if (k < index) {
                  for (exprType <- exprs(k).getType(TypingContext.empty)) {
                    if (!exprType.conforms(paramType)) isGrey = true
                  }
                }
                k = k + 1
              }
            } else isGrey = true
          }
        } else buffer.append(CodeInsightBundle.message("parameter.info.no.parameters"))
      }
      // Dispatch on the candidate descriptor produced by elementsForParameterInfo.
      p match {
        case x: String if x == "" =>
          buffer.append(CodeInsightBundle.message("parameter.info.no.parameters"))
        case (a: AnnotationParameters, _: Int) =>
          val seq = a.seq
          if (seq.isEmpty) buffer.append(CodeInsightBundle.message("parameter.info.no.parameters"))
          else {
            val paramsSeq: Seq[(Parameter, String)] = seq.zipWithIndex.map {
              case ((name, tp, value), paramIndex) =>
                val valueText = Option(value).map(_.getText)
                  .map(" = " + _)
                  .getOrElse("")
                (new Parameter(name, None, tp, tp, value != null, false, false, paramIndex),
                  s"$name: ${tp.presentableText}$valueText")
            }
            applyToParameters(paramsSeq, ScSubstitutor.empty, canBeNaming = true, isImplicit = false)
          }
        case (sign: PhysicalSignature, i: Int) => //i can be -1 (it's update method)
          val subst = sign.substitutor
          sign.method match {
            case method: ScFunction =>
              val clauses = method.effectiveParameterClauses
              if (clauses.length <= i || (i == -1 && clauses.isEmpty)) buffer.append(CodeInsightBundle.message("parameter.info.no.parameters"))
              else {
                // i == -1 means an `update` call: the last parameter is the assigned value.
                val clause: ScParameterClause = if (i >= 0) clauses(i) else clauses.head
                val length = clause.effectiveParameters.length
                val parameters: Seq[ScParameter] = if (i != -1) clause.effectiveParameters else clause.effectiveParameters.take(length - 1)
                applyToParameters(parameters.map(param =>
                  (Parameter(param), paramText(param, subst))), subst, canBeNaming = true, isImplicit = clause.isImplicit)
              }
            case method: FakePsiMethod =>
              if (method.params.length == 0) buffer.append(CodeInsightBundle.message("parameter.info.no.parameters"))
              else {
                buffer.append(method.params.
                  map((param: Parameter) => {
                    // Local builder shadows the outer one on purpose: each parameter
                    // is rendered independently and then joined with ", ".
                    val buffer: StringBuilder = new StringBuilder("")
                    val paramType = param.paramType
                    val name = param.name
                    if (name != "") {
                      buffer.append(name)
                      buffer.append(": ")
                    }
                    buffer.append(paramType.presentableText)
                    if (param.isRepeated) buffer.append("*")
                    if (param.isDefault) buffer.append(" = _")
                    val isBold = if (method.params.indexOf(param) == index || (param.isRepeated && method.params.indexOf(param) <= index)) true
                    else {
                      //todo: check type
                      false
                    }
                    val paramText = buffer.toString()
                    if (isBold) "<b>" + paramText + "</b>" else paramText
                  }).mkString(", "))
              }
            case method: PsiMethod =>
              val p = method.getParameterList
              if (p.getParameters.isEmpty) buffer.append(CodeInsightBundle.message("parameter.info.no.parameters"))
              else {
                buffer.append(p.getParameters.
                  map((param: PsiParameter) => {
                    val buffer: StringBuilder = new StringBuilder("")
                    val list = param.getModifierList
                    // NOTE(review): nonlocal return from inside a lambda — this aborts
                    // updateUI entirely when a parameter has no modifier list.
                    if (list == null) return
                    val lastSize = buffer.length
                    for (a <- list.getAnnotations) {
                      if (lastSize != buffer.length) buffer.append(" ")
                      val element = a.getNameReferenceElement
                      if (element != null) buffer.append("@").append(element.getText)
                    }
                    if (lastSize != buffer.length) buffer.append(" ")
                    val name = param.name
                    if (name != null) {
                      buffer.append(name)
                    }
                    buffer.append(": ")
                    buffer.append(subst.subst(param.paramType()).presentableText)
                    if (param.isVarArgs) buffer.append("*")
                    val isBold = if (p.getParameters.indexOf(param) == index || (param.isVarArgs && p.getParameters.indexOf(param) <= index)) true
                    else {
                      //todo: check type
                      false
                    }
                    val paramText = buffer.toString()
                    if (isBold) "<b>" + paramText + "</b>" else paramText
                  }).mkString(", "))
              }
          }
        case (constructor: ScPrimaryConstructor, subst: ScSubstitutor, i: Int) if constructor.isValid =>
          val clauses = constructor.effectiveParameterClauses
          if (clauses.length <= i) buffer.append(CodeInsightBundle.message("parameter.info.no.parameters"))
          else {
            val clause: ScParameterClause = clauses(i)
            applyToParameters(clause.effectiveParameters.map(param =>
              (Parameter(param), paramText(param, subst))), subst, canBeNaming = true, isImplicit = clause.isImplicit)
          }
        case _ =>
      }
      // Convert the first <b>...</b> marker pair into the highlight offsets the UI expects.
      val startOffset = buffer.indexOf("<b>")
      if (startOffset != -1) buffer.replace(startOffset, startOffset + 3, "")
      val endOffset = buffer.indexOf("</b>")
      if (endOffset != -1) buffer.replace(endOffset, endOffset + 4, "")
      if (buffer.toString != "")
        context.setupUIComponentPresentation(buffer.toString(), startOffset, endOffset, isGrey, false, false, color)
      else
        context.setUIComponentEnabled(false)
    case _ =>
  }
}
/** The platform should keep updating the current-parameter index while typing. */
def tracksParameterIndex: Boolean = true
/** Abstraction over the different PSI shapes that can carry call arguments. */
trait Invocation {
  /** The PSI element holding the arguments. */
  def element: PsiElement
  def parent: PsiElement = element.getParent
  // For infix calls this is fixed to 1 (see InfixInvocation below); presumably
  // the count of applied argument clauses for regular calls.
  def invocationCount: Int
  def callGeneric: Option[ScGenericCall] = None
  /** Reference to the invoked function, when available from the call site. */
  def callReference: Option[ScReferenceExpression]
  def arguments: Seq[ScExpression]
}
/** Factory that wraps the PSI shapes an argument position can take:
  * a regular argument list, or the right-hand side of an infix expression
  * (single expression, tuple or unit).
  */
object Invocation {
  // Ordinary argument list of a method call: foo(a, b).
  private class CallInvocation(args: ScArgumentExprList) extends Invocation {
    override def element: PsiElement = args
    override def callGeneric: Option[ScGenericCall] = args.callGeneric
    override def invocationCount: Int = args.invocationCount
    override def callReference: Option[ScReferenceExpression] = args.callReference
    override def arguments: Seq[ScExpression] = args.exprs
  }
  private trait InfixInvocation extends Invocation {
    override def invocationCount: Int = 1
    override def callReference: Option[ScReferenceExpression] = {
      // NOTE(review): non-exhaustive match — throws MatchError if the parent is not
      // an infix expression; instances are only built via `create` below, which checks this.
      element.getParent match {
        case i: ScInfixExpr => Some(i.operation)
      }
    }
  }
  // Single argument of an infix call: x op arg.
  private class InfixExpressionInvocation(expr: ScExpression) extends InfixInvocation {
    override def element: PsiElement = expr
    override def arguments: Seq[ScExpression] = Seq(expr)
  }
  // Tuple argument of an infix call: x op (a, b).
  private class InfixTupleInvocation(tuple: ScTuple) extends InfixInvocation {
    override def element: PsiElement = tuple
    override def arguments: Seq[ScExpression] = tuple.exprs
  }
  // Unit argument of an infix call: x op ().
  private class InfixUnitInvocation(u: ScUnitExpr) extends InfixInvocation {
    override def element: PsiElement = u
    override def arguments: Seq[ScExpression] = Seq(u)
  }
  /** Wraps `elem` in the matching [[Invocation]], or None when it is not in argument position. */
  def getInvocation(elem: PsiElement): Option[Invocation] = {
    // Builds an infix invocation only when `elem` really is its parent's argument.
    def create[T <: PsiElement](elem: T)(f: T => Invocation): Option[Invocation] = {
      elem.getParent match {
        case i: ScInfixExpr if i.getArgExpr == elem => Some(f(elem))
        case _ => None
      }
    }
    elem match {
      case args: ScArgumentExprList => Some(new CallInvocation(args))
      case t: ScTuple => create(t)(new InfixTupleInvocation(_))
      case u: ScUnitExpr => create(u)(new InfixUnitInvocation(_))
      case e: ScExpression => create(e)(new InfixExpressionInvocation(_))
      case _ => None
    }
  }
}
def elementsForParameterInfo(args: Invocation): Seq[Object] = {
implicit val project = args.element.projectContext
args.parent match {
case call: MethodInvocation =>
val res: ArrayBuffer[Object] = new ArrayBuffer[Object]
def collectResult() {
val canBeUpdate = call.getParent match {
case assignStmt: ScAssignStmt if call == assignStmt.getLExpression => true
case notExpr if !notExpr.isInstanceOf[ScExpression] || notExpr.isInstanceOf[ScBlockExpr] => true
case _ => false
}
val count = args.invocationCount
val gen = args.callGeneric.getOrElse(null: ScGenericCall)
def collectSubstitutor(element: PsiElement): ScSubstitutor = {
if (gen == null) return ScSubstitutor.empty
val tp: Array[(String, Long)] = element match {
case tpo: ScTypeParametersOwner => tpo.typeParameters.map(_.nameAndId).toArray
case ptpo: PsiTypeParameterListOwner => ptpo.getTypeParameters.map(_.nameAndId)
case _ => return ScSubstitutor.empty
}
val typeArgs: Seq[ScTypeElement] = gen.arguments
val map = new collection.mutable.HashMap[(String, Long), ScType]
for (i <- 0 until Math.min(tp.length, typeArgs.length)) {
map += ((tp(i), typeArgs(i).calcType))
}
ScSubstitutor(map.toMap)
}
def collectForType(typez: ScType): Unit = {
def process(functionName: String): Unit = {
val i = if (functionName == "update") -1 else 0
val processor = new CompletionProcessor(StdKinds.refExprQualRef, call, true, Some(functionName))
processor.processType(typez, call)
val variants: Array[ScalaResolveResult] = processor.candidates
for {
variant <- variants
if !variant.getElement.isInstanceOf[PsiMember] ||
ResolveUtils.isAccessible(variant.getElement.asInstanceOf[PsiMember], call)
} {
variant match {
case ScalaResolveResult(method: ScFunction, subst: ScSubstitutor) =>
val signature: PhysicalSignature = new PhysicalSignature(method, subst.followed(collectSubstitutor(method)))
res += ((signature, i))
res ++= ScalaParameterInfoEnhancer.enhance(signature, args.arguments).map { (_, i) }
case _ =>
}
}
}
process("apply")
if (canBeUpdate) process("update")
}
args.callReference match {
case Some(ref: ScReferenceExpression) =>
if (count > 1) {
//todo: missed case with last implicit call
ref.bind() match {
case Some(ScalaResolveResult(function: ScFunction, subst: ScSubstitutor)) if function.
effectiveParameterClauses.length >= count =>
res += ((new PhysicalSignature(function, subst.followed(collectSubstitutor(function))), count - 1))
case _ =>
for (typez <- call.getEffectiveInvokedExpr.getType(TypingContext.empty)) //todo: implicit conversions
{collectForType(typez)}
}
} else {
val variants: Array[ResolveResult] = {
val sameName = ref.getSameNameVariants
if (sameName.isEmpty) ref.multiResolve(false)
else sameName
}
for {
variant <- variants
if !variant.getElement.isInstanceOf[PsiMember] ||
ResolveUtils.isAccessible(variant.getElement.asInstanceOf[PsiMember], ref)
} {
variant match {
//todo: Synthetic function
case ScalaResolveResult(method: PsiMethod, subst: ScSubstitutor) =>
val signature: PhysicalSignature = new PhysicalSignature(method, subst.followed(collectSubstitutor(method)))
res += ((signature, 0))
res ++= ScalaParameterInfoEnhancer.enhance(signature, args.arguments).map { (_, 0) }
case ScalaResolveResult(typed: ScTypedDefinition, subst: ScSubstitutor) =>
val typez = subst.subst(typed.getType(TypingContext.empty).getOrNothing) //todo: implicit conversions
collectForType(typez)
case _ =>
}
}
}
case None =>
call match {
case call: ScMethodCall =>
for (typez <- call.getEffectiveInvokedExpr.getType(TypingContext.empty)) { //todo: implicit conversions
collectForType(typez)
}
}
}
}
collectResult()
res
case constr: ScConstructor =>
val res: ArrayBuffer[Object] = new ArrayBuffer[Object]
val typeElement = constr.typeElement
val i = constr.arguments.indexOf(args.element)
typeElement.calcType.extractClassType match {
case Some((clazz: PsiClass, subst: ScSubstitutor)) =>
clazz match {
case clazz: ScClass =>
clazz.constructor match {
case Some(constr: ScPrimaryConstructor) if i < constr.effectiveParameterClauses.length =>
typeElement match {
case gen: ScParameterizedTypeElement =>
val tp = clazz.typeParameters.map(_.nameAndId)
val typeArgs: Seq[ScTypeElement] = gen.typeArgList.typeArgs
val map = new collection.mutable.HashMap[(String, Long), ScType]
for (i <- 0 until Math.min(tp.length, typeArgs.length)) {
map += ((tp(i), typeArgs(i).calcType))
}
val substitutor = ScSubstitutor(map.toMap)
res += ((constr, substitutor.followed(subst), i))
case _ => res += ((constr, subst, i))
}
case Some(_) if i == 0 => res += ""
case None => res += ""
case _ =>
}
for (constr <- clazz.functions if !constr.isInstanceOf[ScPrimaryConstructor] &&
constr.isConstructor && ((constr.clauses match {
case Some(x) => x.clauses.length
case None => 1
}) > i))
res += ((new PhysicalSignature(constr, subst), i))
case clazz: PsiClass if clazz.isAnnotationType =>
val resulting: (AnnotationParameters, Int) =
(AnnotationParameters(clazz.getMethods.toSeq.filter(_.isInstanceOf[PsiAnnotationMethod]).map(meth => (meth.name,
meth.getReturnType.toScType(),
meth.asInstanceOf[PsiAnnotationMethod].getDefaultValue))), i)
res += resulting
case clazz: PsiClass if !clazz.isInstanceOf[ScTypeDefinition] =>
for (constructor <- clazz.getConstructors) {
typeElement match {
case gen: ScParameterizedTypeElement =>
val tp = clazz.getTypeParameters.map(_.nameAndId)
val typeArgs: Seq[ScTypeElement] = gen.typeArgList.typeArgs
val map = new collection.mutable.HashMap[(String, Long), ScType]
for (i <- 0 until Math.min(tp.length, typeArgs.length)) {
map += ((tp(i), typeArgs(i).calcType))
}
val substitutor = ScSubstitutor(map.toMap)
res += ((new PhysicalSignature(constructor, substitutor.followed(subst)), i))
case _ => res += ((new PhysicalSignature(constructor, subst), i))
}
}
case _ =>
}
case _ =>
}
res
case self: ScSelfInvocation =>
val res: ArrayBuffer[Object] = new ArrayBuffer[Object]
val i = self.arguments.indexOf(args.element)
val clazz = PsiTreeUtil.getParentOfType(self, classOf[ScClass], true)
clazz match {
case clazz: ScClass =>
clazz.constructor match {
case Some(constr: ScPrimaryConstructor) if i < constr.effectiveParameterClauses.length =>
res += ((constr, ScSubstitutor.empty, i))
case Some(_) if i == 0 => res += ""
case None => res += ""
case _ =>
}
for {
constr <- clazz.functions
if !constr.isInstanceOf[ScPrimaryConstructor] &&
constr.isConstructor &&
constr.clauses.map(_.clauses.length).getOrElse(1) > i
} {
if (!PsiTreeUtil.isAncestor(constr, self, true) &&
constr.getTextRange.getStartOffset < self.getTextRange.getStartOffset) {
res += ((new PhysicalSignature(constr, ScSubstitutor.empty), i))
}
}
case _ =>
}
res
}
}
/**
 * Returns the context's argument PSI element and fills the context items
 * with appropriate PsiElements (in which we can resolve).
 *
 * @param context current context
 * @return the context's argument expression
 */
private def findCall(context: ParameterInfoContext): PsiElement = {
  val file = context.getFile
  val offset = context.getEditor.getCaretModel.getOffset
  // findElementAt may return null (e.g. the caret sits at the end of the file).
  // The original guarded this null check behind `element.isInstanceOf[PsiWhiteSpace]`,
  // which is always false when `element` is null in Scala, so the check could
  // never fire; test for null directly instead.
  val element = file.findElementAt(offset)
  if (element == null) return null

  // Walk up the PSI tree until an enclosing invocation (call / argument list) is found.
  @tailrec
  def findArgs(elem: PsiElement): Option[Invocation] = {
    if (elem == null) return None
    val res = Invocation.getInvocation(elem)
    if (res.isDefined) return res
    findArgs(elem.getParent)
  }

  val argsOption: Option[Invocation] = findArgs(element)
  if (argsOption.isEmpty) return null
  val args = argsOption.get
  implicit val project = file.projectContext
  context match {
    case context: CreateParameterInfoContext =>
      // First popup invocation: compute and cache the items to show.
      context.setItemsToShow(elementsForParameterInfo(args).toArray)
    case context: UpdateParameterInfoContext =>
      // Subsequent invocations: highlight the argument the caret is inside.
      var el = element
      while (el.getParent != args.element) el = el.getParent
      var index = 1
      // NOTE(review): this increments for every actual parameter that is not `el`,
      // not only the ones preceding it — confirm this matches the intended
      // current-parameter index semantics.
      for (expr <- getActualParameters(args.element) if expr != el) index += 1
      context.setCurrentParameter(index)
      context.setHighlightedParameter(el)
      // If the cached items are stale, drop the hint and re-invoke the handler.
      if (!equivalent(context.getObjectsToView, elementsForParameterInfo(args))) {
        context.removeHint()
        ShowParameterInfoHandler.invoke(project, context.getEditor, context.getFile, context.getOffset, null, false)
      }
    case _ =>
  }
  args.element
}
/** True when both sequences have the same length and every aligned pair is equivalent. */
private def equivalent(seq1: Seq[AnyRef], seq2: Seq[AnyRef]): Boolean = {
  if (seq1.size != seq2.size) false
  else seq1.zip(seq2).forall(pair => equivObjectsToView(pair))
}
/**
 * Structural equivalence for the heterogeneous "objects to view" entries:
 * plain strings, annotation-parameter tuples, physical signatures (compared by
 * method identity, ignoring the substitutor) and primary-constructor tuples.
 * Entries of unrelated shapes are never equivalent.
 */
private def equivObjectsToView(tuple: (AnyRef, AnyRef)): Boolean = tuple match {
  case (s1: String, s2: String) =>
    s1 == s2
  case ((a1: AnnotationParameters, i1: Int), (a2: AnnotationParameters, i2: Int)) =>
    i1 == i2 && a1 == a2
  case ((sign1: PhysicalSignature, i1: Int), (sign2: PhysicalSignature, i2: Int)) =>
    // Substitutors are intentionally ignored; only the underlying method matters.
    i1 == i2 && sign1.method == sign2.method
  case ((pc1: ScPrimaryConstructor, _: ScSubstitutor, i1: Int), (pc2: ScPrimaryConstructor, _: ScSubstitutor, i2: Int)) =>
    i1 == i2 && pc1 == pc2
  case _ => false
}
}
object ScalaFunctionParameterInfoHandler {
case class AnnotationParameters(seq: Seq[(String, ScType, PsiAnnotationMemberValue)])
} | ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/parameterInfo/ScalaFunctionParameterInfoHandler.scala | Scala | apache-2.0 | 30,790 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.util.concurrent.{ExecutorService, TimeUnit}
import scala.collection.Map
import scala.collection.mutable
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps
import org.scalatest.{BeforeAndAfterEach, PrivateMethodTester}
import org.mockito.Mockito.{mock, spy, verify, when}
import org.mockito.Matchers
import org.mockito.Matchers._
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.rpc.{RpcCallContext, RpcEndpoint, RpcEnv, RpcEndpointRef}
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages._
import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend
import org.apache.spark.storage.BlockManagerId
import org.apache.spark.util.ManualClock
/**
* A test suite for the heartbeating behavior between the driver and the executors.
*/
class HeartbeatReceiverSuite
  extends SparkFunSuite
  with BeforeAndAfterEach
  with PrivateMethodTester
  with LocalSparkContext {

  private val executorId1 = "executor-1"
  private val executorId2 = "executor-2"

  // Shared state that must be reset before and after each test
  private var scheduler: TaskSchedulerImpl = null
  private var heartbeatReceiver: HeartbeatReceiver = null
  private var heartbeatReceiverRef: RpcEndpointRef = null
  private var heartbeatReceiverClock: ManualClock = null

  // Helper private method accessors for HeartbeatReceiver
  private val _executorLastSeen = PrivateMethod[collection.Map[String, Long]]('executorLastSeen)
  private val _executorTimeoutMs = PrivateMethod[Long]('executorTimeoutMs)
  private val _killExecutorThread = PrivateMethod[ExecutorService]('killExecutorThread)

  /**
   * Before each test, set up the SparkContext and a custom [[HeartbeatReceiver]]
   * that uses a manual clock.
   */
  override def beforeEach(): Unit = {
    // Run the stacked traits' setup first. afterEach() already invokes
    // super.afterEach(), so the override pair is now symmetric — omitting this
    // call would silently skip any setup contributed by mixed-in traits.
    super.beforeEach()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("test")
      .set("spark.dynamicAllocation.testing", "true")
    // Spy on the context so taskScheduler/schedulerBackend can be stubbed.
    sc = spy(new SparkContext(conf))
    scheduler = mock(classOf[TaskSchedulerImpl])
    when(sc.taskScheduler).thenReturn(scheduler)
    when(scheduler.sc).thenReturn(sc)
    heartbeatReceiverClock = new ManualClock
    heartbeatReceiver = new HeartbeatReceiver(sc, heartbeatReceiverClock)
    heartbeatReceiverRef = sc.env.rpcEnv.setupEndpoint("heartbeat", heartbeatReceiver)
    when(scheduler.executorHeartbeatReceived(any(), any(), any())).thenReturn(true)
  }

  /**
   * After each test, clean up all state and stop the [[SparkContext]].
   */
  override def afterEach(): Unit = {
    super.afterEach()
    scheduler = null
    heartbeatReceiver = null
    heartbeatReceiverRef = null
    heartbeatReceiverClock = null
  }

  test("task scheduler is set correctly") {
    assert(heartbeatReceiver.scheduler === null)
    heartbeatReceiverRef.askWithRetry[Boolean](TaskSchedulerIsSet)
    assert(heartbeatReceiver.scheduler !== null)
  }

  test("normal heartbeat") {
    heartbeatReceiverRef.askWithRetry[Boolean](TaskSchedulerIsSet)
    addExecutorAndVerify(executorId1)
    addExecutorAndVerify(executorId2)
    triggerHeartbeat(executorId1, executorShouldReregister = false)
    triggerHeartbeat(executorId2, executorShouldReregister = false)
    val trackedExecutors = getTrackedExecutors
    assert(trackedExecutors.size === 2)
    assert(trackedExecutors.contains(executorId1))
    assert(trackedExecutors.contains(executorId2))
  }

  test("reregister if scheduler is not ready yet") {
    addExecutorAndVerify(executorId1)
    // Task scheduler is not set yet in HeartbeatReceiver, so executors should reregister
    triggerHeartbeat(executorId1, executorShouldReregister = true)
  }

  test("reregister if heartbeat from unregistered executor") {
    heartbeatReceiverRef.askWithRetry[Boolean](TaskSchedulerIsSet)
    // Received heartbeat from unknown executor, so we ask it to re-register
    triggerHeartbeat(executorId1, executorShouldReregister = true)
    assert(getTrackedExecutors.isEmpty)
  }

  test("reregister if heartbeat from removed executor") {
    heartbeatReceiverRef.askWithRetry[Boolean](TaskSchedulerIsSet)
    addExecutorAndVerify(executorId1)
    addExecutorAndVerify(executorId2)
    // Remove the second executor but not the first
    removeExecutorAndVerify(executorId2)
    // Now trigger the heartbeats
    // A heartbeat from the second executor should require reregistering
    triggerHeartbeat(executorId1, executorShouldReregister = false)
    triggerHeartbeat(executorId2, executorShouldReregister = true)
    val trackedExecutors = getTrackedExecutors
    assert(trackedExecutors.size === 1)
    assert(trackedExecutors.contains(executorId1))
    assert(!trackedExecutors.contains(executorId2))
  }

  test("expire dead hosts") {
    val executorTimeout = heartbeatReceiver.invokePrivate(_executorTimeoutMs())
    heartbeatReceiverRef.askWithRetry[Boolean](TaskSchedulerIsSet)
    addExecutorAndVerify(executorId1)
    addExecutorAndVerify(executorId2)
    triggerHeartbeat(executorId1, executorShouldReregister = false)
    triggerHeartbeat(executorId2, executorShouldReregister = false)
    // Advance the clock and only trigger a heartbeat for the first executor
    heartbeatReceiverClock.advance(executorTimeout / 2)
    triggerHeartbeat(executorId1, executorShouldReregister = false)
    heartbeatReceiverClock.advance(executorTimeout)
    heartbeatReceiverRef.askWithRetry[Boolean](ExpireDeadHosts)
    // Only the second executor should be expired as a dead host
    verify(scheduler).executorLost(Matchers.eq(executorId2), any())
    val trackedExecutors = getTrackedExecutors
    assert(trackedExecutors.size === 1)
    assert(trackedExecutors.contains(executorId1))
    assert(!trackedExecutors.contains(executorId2))
  }

  test("expire dead hosts should kill executors with replacement (SPARK-8119)") {
    // Set up a fake backend and cluster manager to simulate killing executors
    val rpcEnv = sc.env.rpcEnv
    val fakeClusterManager = new FakeClusterManager(rpcEnv)
    val fakeClusterManagerRef = rpcEnv.setupEndpoint("fake-cm", fakeClusterManager)
    val fakeSchedulerBackend = new FakeSchedulerBackend(scheduler, rpcEnv, fakeClusterManagerRef)
    when(sc.schedulerBackend).thenReturn(fakeSchedulerBackend)
    // Register fake executors with our fake scheduler backend
    // This is necessary because the backend refuses to kill executors it does not know about
    fakeSchedulerBackend.start()
    val dummyExecutorEndpoint1 = new FakeExecutorEndpoint(rpcEnv)
    val dummyExecutorEndpoint2 = new FakeExecutorEndpoint(rpcEnv)
    val dummyExecutorEndpointRef1 = rpcEnv.setupEndpoint("fake-executor-1", dummyExecutorEndpoint1)
    val dummyExecutorEndpointRef2 = rpcEnv.setupEndpoint("fake-executor-2", dummyExecutorEndpoint2)
    fakeSchedulerBackend.driverEndpoint.askWithRetry[RegisterExecutorResponse](
      RegisterExecutor(executorId1, dummyExecutorEndpointRef1, "dummy:4040", 0, Map.empty))
    fakeSchedulerBackend.driverEndpoint.askWithRetry[RegisterExecutorResponse](
      RegisterExecutor(executorId2, dummyExecutorEndpointRef2, "dummy:4040", 0, Map.empty))
    heartbeatReceiverRef.askWithRetry[Boolean](TaskSchedulerIsSet)
    addExecutorAndVerify(executorId1)
    addExecutorAndVerify(executorId2)
    triggerHeartbeat(executorId1, executorShouldReregister = false)
    triggerHeartbeat(executorId2, executorShouldReregister = false)
    // Adjust the target number of executors on the cluster manager side
    assert(fakeClusterManager.getTargetNumExecutors === 0)
    sc.requestTotalExecutors(2, 0, Map.empty)
    assert(fakeClusterManager.getTargetNumExecutors === 2)
    assert(fakeClusterManager.getExecutorIdsToKill.isEmpty)
    // Expire the executors. This should trigger our fake backend to kill the executors.
    // Since the kill request is sent to the cluster manager asynchronously, we need to block
    // on the kill thread to ensure that the cluster manager actually received our requests.
    // Here we use a timeout of O(seconds), but in practice this whole test takes O(10ms).
    val executorTimeout = heartbeatReceiver.invokePrivate(_executorTimeoutMs())
    heartbeatReceiverClock.advance(executorTimeout * 2)
    heartbeatReceiverRef.askWithRetry[Boolean](ExpireDeadHosts)
    val killThread = heartbeatReceiver.invokePrivate(_killExecutorThread())
    killThread.shutdown() // needed for awaitTermination
    killThread.awaitTermination(10L, TimeUnit.SECONDS)
    // The target number of executors should not change! Otherwise, having an expired
    // executor means we permanently adjust the target number downwards until we
    // explicitly request new executors. For more detail, see SPARK-8119.
    assert(fakeClusterManager.getTargetNumExecutors === 2)
    assert(fakeClusterManager.getExecutorIdsToKill === Set(executorId1, executorId2))
  }

  /** Manually send a heartbeat and return the response. */
  private def triggerHeartbeat(
      executorId: String,
      executorShouldReregister: Boolean): Unit = {
    val metrics = new TaskMetrics
    val blockManagerId = BlockManagerId(executorId, "localhost", 12345)
    val response = heartbeatReceiverRef.askWithRetry[HeartbeatResponse](
      Heartbeat(executorId, Array(1L -> metrics), blockManagerId))
    if (executorShouldReregister) {
      assert(response.reregisterBlockManager)
    } else {
      assert(!response.reregisterBlockManager)
      // Additionally verify that the scheduler callback is called with the correct parameters
      verify(scheduler).executorHeartbeatReceived(
        Matchers.eq(executorId), Matchers.eq(Array(1L -> metrics)), Matchers.eq(blockManagerId))
    }
  }

  /** Registers the executor with the receiver and waits for the ack. */
  private def addExecutorAndVerify(executorId: String): Unit = {
    assert(
      heartbeatReceiver.addExecutor(executorId).map { f =>
        Await.result(f, 10.seconds)
      } === Some(true))
  }

  /** Unregisters the executor from the receiver and waits for the ack. */
  private def removeExecutorAndVerify(executorId: String): Unit = {
    assert(
      heartbeatReceiver.removeExecutor(executorId).map { f =>
        Await.result(f, 10.seconds)
      } === Some(true))
  }

  private def getTrackedExecutors: Map[String, Long] = {
    // We may receive undesired SparkListenerExecutorAdded from LocalBackend, so exclude it from
    // the map. See SPARK-10800.
    heartbeatReceiver.invokePrivate(_executorLastSeen()).
      filterKeys(_ != SparkContext.DRIVER_IDENTIFIER)
  }
}
// TODO: use these classes to add end-to-end tests for dynamic allocation!
/**
 * Dummy RPC endpoint to simulate executors.
 * It overrides no message handlers; instances only exist so that the fake
 * scheduler backend has an endpoint reference to register for each executor.
 */
private class FakeExecutorEndpoint(override val rpcEnv: RpcEnv) extends RpcEndpoint
/**
 * Dummy scheduler backend to simulate executor allocation requests to the cluster manager.
 * Both hooks forward the request to `clusterManagerEndpoint` and return the
 * manager's Boolean reply.
 */
private class FakeSchedulerBackend(
    scheduler: TaskSchedulerImpl,
    rpcEnv: RpcEnv,
    clusterManagerEndpoint: RpcEndpointRef)
  extends CoarseGrainedSchedulerBackend(scheduler, rpcEnv) {

  // Invoked when the desired total number of executors changes.
  protected override def doRequestTotalExecutors(requestedTotal: Int): Boolean = {
    clusterManagerEndpoint.askWithRetry[Boolean](
      RequestExecutors(requestedTotal, localityAwareTasks, hostToLocalTaskCount))
  }

  // Invoked when specific executors should be killed.
  protected override def doKillExecutors(executorIds: Seq[String]): Boolean = {
    clusterManagerEndpoint.askWithRetry[Boolean](KillExecutors(executorIds))
  }
}
/**
 * Dummy cluster manager to simulate responses to executor allocation requests.
 * Records the most recent requested executor total and the accumulated set of
 * kill targets, acknowledging every message with `true`.
 */
private class FakeClusterManager(override val rpcEnv: RpcEnv) extends RpcEndpoint {
  private var requestedTotal = 0
  private val killedIds = mutable.HashSet.empty[String]

  def getTargetNumExecutors: Int = requestedTotal
  def getExecutorIdsToKill: Set[String] = killedIds.toSet

  override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
    case RequestExecutors(total, _, _) =>
      requestedTotal = total
      context.reply(true)
    case KillExecutors(ids) =>
      killedIds ++= ids
      context.reply(true)
  }
}
| chenc10/Spark-PAF | core/src/test/scala/org/apache/spark/HeartbeatReceiverSuite.scala | Scala | apache-2.0 | 12,840 |
package org.jetbrains.plugins.scala.lang.transformation
package calls
import org.jetbrains.plugins.scala.extensions.Resolved
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScInfixExpr, ScMethodCall}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaCode._
/**
 * Makes auto-tupling explicit: where the resolve result reports that the call
 * used tupling (`result.tuplingUsed`), the argument(s) are wrapped in an
 * explicit tuple, e.g. `f(a, b)` becomes `f((a, b))`.
 *
 * @author Pavel Fatin
 */
object ExpandAutoTupling extends AbstractTransformer {
  def transformation = {
    // Ordinary method call: rebuild the call with all arguments inside one tuple.
    case e @ ScMethodCall(t @ Resolved(result), es) if result.tuplingUsed =>
      e.replace(code"$t((${@@(es)}))")
    // Infix call: parenthesize the right-hand operand into a tuple.
    case ScInfixExpr(_, Resolved(result), r) if result.tuplingUsed =>
      r.replace(code"($r)")
  }
}
| whorbowicz/intellij-scala | src/org/jetbrains/plugins/scala/lang/transformation/calls/ExpandAutoTupling.scala | Scala | apache-2.0 | 603 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes.relatedPartyTransactions
import org.scalatest.mock.MockitoSugar
import org.scalatest.{BeforeAndAfter, Matchers, WordSpec}
import uk.gov.hmrc.ct.accounts.{AccountsFreeTextValidationFixture, MockFrs102AccountsRetriever}
import uk.gov.hmrc.ct.accounts.frs102.retriever.Frs102AccountsBoxRetriever
import uk.gov.hmrc.ct.box.CtValidation
import uk.gov.hmrc.ct.box.ValidatableBox._
// Validation tests for box AC301A (a related-party-transactions free-text field).
class AC301ASpec extends WordSpec with MockitoSugar with Matchers with BeforeAndAfter
  with MockFrs102AccountsRetriever with AccountsFreeTextValidationFixture[Frs102AccountsBoxRetriever] {

  // Shared fixture checks: upper length limit, mandatory-ness, illegal characters.
  testTextFieldValidation("AC301A", AC301A, testUpperLimit = Some(StandardCohoTextFieldLimit), testMandatory = Some(true))
  testTextFieldIllegalCharacterValidationReturnsIllegalCharacters("AC301A", AC301A)

  "AC301A" should {
    "be mandatory" in {
      // An absent value must produce exactly the "required" validation error for this box.
      AC301A(None).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC301A"), "error.AC301A.required", None))
    }
  }
}
| pncampbell/ct-calculations | src/test/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/relatedPartyTransactions/AC301ASpec.scala | Scala | apache-2.0 | 1,598 |
// Regression-test fixture: manual serialization of @transient fields via
// init/serializeTo, to be called from a subclass's readObject/writeObject.
trait Foo {
  // @transient: skipped by default serialization; moved across the stream by hand below.
  @transient protected var load = 1
  @transient protected var a = 12

  // Restores state written by serializeTo; the read order (load, then a) must
  // mirror the write order exactly. NOTE(review): the int read for `a` is
  // discarded (`sizea` is never used) and `a` is reset to its default of 12 —
  // this appears intentional for this regression test; confirm before changing.
  // The type parameter B is unused.
  protected def init[B](in: java.io.ObjectInputStream): Unit = {
    in.defaultReadObject
    load = in.readInt
    val sizea = in.readInt
    a = 12
  }

  // Writes default (non-transient) state plus the two transient ints. Must be
  // called from within a writeObject hook, or defaultWriteObject will fail.
  protected def serializeTo(out: java.io.ObjectOutputStream): Unit = {
    out.defaultWriteObject
    out.writeInt(load)
    out.writeInt(a)
  }
}
// Serializable subclass wiring Java's custom-serialization hooks to Foo's helpers.
class Bar extends Foo with Serializable {
  // Transient fields dropped during serialization; readObject re-initializes `first`.
  @transient protected var first: Any = null
  def size = a
  @transient var second: Any = null

  // Reads `first` after deserialization, exercising access to a restored transient field.
  def checkMember: Unit = { if (first == null) print("") }

  private def writeObject(out: java.io.ObjectOutputStream): Unit = {
    serializeTo(out)
  }
  private def readObject(in: java.io.ObjectInputStream): Unit = {
    first = null
    init(in)
  }
}
object Test {
  // Deserializes a value of type A from the given byte array.
  private def toObject[A](bytes: Array[Byte]): A = {
    val in = new java.io.ObjectInputStream(new java.io.ByteArrayInputStream(bytes))
    in.readObject.asInstanceOf[A]
  }

  // Serializes an object to a byte array via standard Java serialization.
  private def toBytes(o: AnyRef): Array[Byte] = {
    val bos = new java.io.ByteArrayOutputStream
    val out = new java.io.ObjectOutputStream(bos)
    out.writeObject(o)
    out.close
    bos.toByteArray
  }

  // Round-trips a Bar through serialization, then touches members that depend
  // on the transient / custom-serialized state.
  def main(args: Array[String]): Unit = {
    val a1 = new Bar()
    val serialized:Array[Byte] = toBytes(a1)
    val deserialized: Bar = toObject(serialized)
    deserialized.size
    deserialized.checkMember
  }
}
| folone/dotty | tests/run/t3038d.scala | Scala | bsd-3-clause | 1,400 |
package uk.gov.gds.ier.transaction.crown.confirmation
import uk.gov.gds.ier.model._
import uk.gov.gds.ier.test._
import uk.gov.gds.ier.transaction.crown.InprogressCrown
/**
 * Form-level validation tests for the Crown confirmation step: every
 * prerequisite step must be complete, and the ways-to-vote / last-address
 * answers must be internally consistent before confirmation succeeds.
 */
class ConfirmationFormTests
  extends FormTestSuite
  with ConfirmationForms
  with WithMockAddressService {

  it should "error out on empty json" in {
    val js = JsNull
    confirmationForm.bind(js).fold(
      hasErrors => {
        // Every incomplete step reports the same generic message keyed by step name.
        val errorMessage = Seq("Please complete this step")
        hasErrors.errorMessages("statement") should be(errorMessage)
        hasErrors.errorMessages("address") should be(errorMessage)
        hasErrors.errorMessages("nationality") should be(errorMessage)
        hasErrors.errorMessages("dob") should be(errorMessage)
        hasErrors.errorMessages("name") should be(errorMessage)
        hasErrors.errorMessages("previousName") should be(errorMessage)
        hasErrors.errorMessages("NINO") should be(errorMessage)
        hasErrors.errorMessages("job") should be(errorMessage)
        hasErrors.errorMessages("contactAddress") should be(errorMessage)
        hasErrors.errorMessages("openRegister") should be(errorMessage)
        hasErrors.errorMessages("waysToVote") should be(errorMessage)
        hasErrors.errorMessages("contact") should be(errorMessage)
        // 12 step errors, each counted once globally and once per key.
        hasErrors.globalErrorMessages.count(_ == "Please complete this step") should be(12)
        hasErrors.errors.size should be(24)
      },
      success => fail("Should have errored out.")
    )
  }

  it should "error out on empty application" in {
    val application = InprogressCrown()
    confirmationForm.fillAndValidate(application).fold(
      hasErrors => {
        val errorMessage = Seq("Please complete this step")
        hasErrors.errorMessages("statement") should be(errorMessage)
        hasErrors.errorMessages("address") should be(errorMessage)
        hasErrors.errorMessages("nationality") should be(errorMessage)
        hasErrors.errorMessages("dob") should be(errorMessage)
        hasErrors.errorMessages("name") should be(errorMessage)
        hasErrors.errorMessages("previousName") should be(errorMessage)
        hasErrors.errorMessages("NINO") should be(errorMessage)
        hasErrors.errorMessages("job") should be(errorMessage)
        hasErrors.errorMessages("contactAddress") should be(errorMessage)
        hasErrors.errorMessages("openRegister") should be(errorMessage)
        hasErrors.errorMessages("waysToVote") should be(errorMessage)
        hasErrors.errorMessages("contact") should be(errorMessage)
        hasErrors.globalErrorMessages.count(_ == "Please complete this step") should be(12)
        hasErrors.errors.size should be(24)
      },
      success => fail("Should have errored out.")
    )
  }

  // In-person voting needs no postal/proxy details.
  it should "succeed on waysToVote if postalOrProxy filled (InPerson)" in {
    val application = completeCrownApplication.copy(
      waysToVote = Some(WaysToVote(WaysToVoteType.InPerson)),
      postalOrProxyVote = None
    )
    confirmationForm.fillAndValidate(application).hasErrors should be(false)
  }

  it should "succeed on waysToVote if postalOrProxy filled (ByPost)" in {
    val application = completeCrownApplication.copy(
      waysToVote = Some(WaysToVote(WaysToVoteType.ByPost)),
      postalOrProxyVote = Some(PostalOrProxyVote(
        typeVote = WaysToVoteType.ByPost,
        postalVoteOption = Some(false),
        deliveryMethod = None
      ))
    )
    confirmationForm.fillAndValidate(application).hasErrors should be(false)
  }

  it should "succeed on waysToVote if postalOrProxy filled (ByProxy)" in {
    val application = completeCrownApplication.copy(
      waysToVote = Some(WaysToVote(WaysToVoteType.ByProxy)),
      postalOrProxyVote = Some(PostalOrProxyVote(
        typeVote = WaysToVoteType.ByPost,
        postalVoteOption = Some(false),
        deliveryMethod = None
      ))
    )
    confirmationForm.fillAndValidate(application).hasErrors should be(false)
  }

  // Postal/proxy choices without the matching vote details must fail the step.
  it should "error out on waysToVote if postalOrProxy not filled (ByPost)" in {
    val application = completeCrownApplication.copy(
      waysToVote = Some(WaysToVote(WaysToVoteType.ByPost)),
      postalOrProxyVote = None
    )
    confirmationForm.fillAndValidate(application).fold(
      hasErrors => {
        val errorMessage = Seq("Please complete this step")
        hasErrors.errorMessages("waysToVote") should be(errorMessage)
      },
      success => fail("Should have errored out.")
    )
  }

  it should "error out on waysToVote if postalOrProxy not filled (ByProxy)" in {
    val application = completeCrownApplication.copy(
      waysToVote = Some(WaysToVote(WaysToVoteType.ByProxy)),
      postalOrProxyVote = None
    )
    confirmationForm.fillAndValidate(application).fold(
      hasErrors => {
        val errorMessage = Seq("Please complete this step")
        hasErrors.errorMessages("waysToVote") should be(errorMessage)
      },
      success => fail("Should have errored out.")
    )
  }

  // A current address implies the previous-address step must be answered.
  it should "succeed on hasAddress = true and existing previousAddress" in {
    val application = completeCrownApplication.copy(
      address = Some(LastAddress(
        Some(HasAddressOption.YesAndLivingThere),
        Some(PartialAddress(
          Some("123 Fake Street, Fakerton"), Some("123456789"), "WR26NJ", None))
      )),
      previousAddress = Some(PartialPreviousAddress(Some(MovedHouseOption.NotMoved), None))
    )
    confirmationForm.fillAndValidate(application).hasErrors should be(false)
  }

  it should "succeed on hasAddress = false and missing previousAddress" in {
    val application = completeCrownApplication.copy(
      address = Some(LastAddress(
        Some(HasAddressOption.No),
        Some(PartialAddress(
          Some("123 Fake Street, Fakerton"), Some("123456789"), "WR26NJ", None))
      )),
      previousAddress = None
    )
    confirmationForm.fillAndValidate(application).hasErrors should be(false)
  }

  it should "error out on hasAddress = true and missing previousAddress" in {
    val application = completeCrownApplication.copy(
      address = Some(LastAddress(
        Some(HasAddressOption.YesAndLivingThere),
        Some(PartialAddress(
          Some("123 Fake Street, Fakerton"), Some("123456789"), "WR26NJ", None))
      )),
      previousAddress = None
    )
    confirmationForm.fillAndValidate(application).fold(
      hasErrors => {
        val errorMessage = Seq("Please complete this step")
        hasErrors.errorMessages("previousAddress") should be(errorMessage)
      },
      success => fail("Should have errored out.")
    )
  }

  // Northern Ireland postcodes (BT...) for the previous address are accepted.
  it should "bind successfully if the previous address postcode was Northern Ireland" in {
    confirmationForm.fillAndValidate(completeCrownApplication.copy(
      previousAddress = Some(PartialPreviousAddress(
        movedRecently = Some(MovedHouseOption.Yes),
        previousAddress = Some(PartialAddress(
          addressLine = None,
          uprn = None,
          postcode = "bt7 1aa",
          manualAddress = None
        ))
      ))
    )).fold (
      hasErrors => {
        fail("the form should be valid")
      },
      success => {
        success.previousAddress.isDefined
      }
    )
  }
}
| michaeldfallen/ier-frontend | test/uk/gov/gds/ier/transaction/crown/confirmation/ConfirmationFormTests.scala | Scala | mit | 7,167 |
package services.exports.flights.templates
import actors.PartitionedPortStateActor.{FlightsRequest, GetFlightsForTerminals}
import drt.shared.Terminals._
import drt.shared._
// BHX-specific export behaviour: queries and filters flights for T1 and T2
// combined, regardless of which single terminal the export was requested for.
trait BhxFlightsWithSplitsExportWithCombinedTerminals {
  val terminal: Terminal
  val start: SDateLike
  val end: SDateLike

  // Both BHX terminals are always covered together.
  val terminalsToQuery: Seq[Terminal] = Seq(T1, T2)

  // Keep a flight when its own terminal is one of the combined terminals; the
  // terminal argument supplied by the caller is deliberately ignored.
  val flightsFilter: (ApiFlightWithSplits, Terminal) => Boolean = (fws, _) => terminalsToQuery.contains(fws.apiFlight.Terminal)

  // Fetch flights across the whole export window for every combined terminal.
  val requestForDiversions: FlightsRequest = GetFlightsForTerminals(start.millisSinceEpoch, end.millisSinceEpoch, terminalsToQuery)
}

// Concrete export variants, distinguished only by the mixed-in export trait
// (without vs. with actual API data).
case class BhxFlightsWithSplitsWithoutActualApiExportWithCombinedTerminals(start: SDateLike, end: SDateLike, terminal: Terminal) extends FlightsWithSplitsWithoutActualApiExport with BhxFlightsWithSplitsExportWithCombinedTerminals

case class BhxFlightsWithSplitsWithActualApiExportWithCombinedTerminals(start: SDateLike, end: SDateLike, terminal: Terminal) extends FlightsWithSplitsWithActualApiExport with BhxFlightsWithSplitsExportWithCombinedTerminals
| UKHomeOffice/drt-scalajs-spa-exploration | server/src/main/scala/services/exports/flights/templates/BhxFlightsWithSplitsExportWithCombinedTerminals.scala | Scala | apache-2.0 | 1,074 |
package us.stivers.blue.route
import scalax.util.{Try,Success,Failure}
import us.stivers.blue.http.{Request,Response,Method,Status}
/**
 * A Route is able to route an HTTP Request to a Response.
 * A Route is itself a function from Request to Try[Response], paired with its
 * matching criteria: an optional HTTP method (presumably None means "any
 * method" — confirm with the router) and a path pattern of segments.
 */
trait Route extends (Request=>Try[Response]) {
  def method: Option[Method]
  def path: List[Segment]
}
object Route {
// Factory overloads: method with empty path, method + path, and path with no method constraint.
def apply[A: Responder](method: Method, handler: A): Route = DefaultRoute(Some(method), List.empty, handler)
def apply[A: Responder](method: Method, path: List[Segment], handler: A): Route = DefaultRoute(Some(method), path, handler)
def apply[A: Responder](path: List[Segment], handler: A): Route = DefaultRoute(None, path, handler)
// Default Route implementation: delegates response construction to the
// implicit Responder instance for the handler's type.
case class DefaultRoute[A: Responder](method: Option[Method], path: List[Segment], handler: A) extends Route {
  def apply(req: Request): Try[Response] = implicitly[Responder[A]].apply(req, this, handler)
}
} | cstivers78/blue | blue-core/src/main/scala/us/stivers/blue/route/Route.scala | Scala | apache-2.0 | 880 |
/*******************************************************************************
Copyright (c) 2013, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.models.Tizen
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
import kr.ac.kaist.jsaf.analysis.cfg.{CFG, CFGExpr, InternalError}
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T, _}
import kr.ac.kaist.jsaf.analysis.typing.models._
import kr.ac.kaist.jsaf.analysis.typing._
import kr.ac.kaist.jsaf.analysis.typing.domain.UIntSingle
import kr.ac.kaist.jsaf.analysis.typing.domain.Context
import kr.ac.kaist.jsaf.analysis.typing.models.AbsInternalFunc
import kr.ac.kaist.jsaf.analysis.typing.models.AbsConstValue
import kr.ac.kaist.jsaf.analysis.typing.domain.Heap
object TIZENContactAddress extends Tizen {
private val name = "ContactAddress"
/* predefined locations */
val loc_cons = newSystemRecentLoc(name + "Cons")
val loc_proto = newSystemRecentLoc(name + "Proto")
/* Constructor object: models `tizen.ContactAddress` as a callable function object
   whose [[construct]] is handled by the internal "tizen.ContactAddress.constructor". */
private val prop_cons: List[(String, AbsProperty)] = List(
  ("@class", AbsConstValue(PropValue(AbsString.alpha("Function")))),
  ("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
  ("@extensible", AbsConstValue(PropValue(T))),
  ("@scope", AbsConstValue(PropValue(Value(NullTop)))),
  ("@construct", AbsInternalFunc("tizen.ContactAddress.constructor")),
  ("@hasinstance", AbsConstValue(PropValue(Value(NullTop)))),
  ("prototype", AbsConstValue(PropValue(ObjectValue(Value(loc_proto), F, F, F))))
)

/* Prototype object: created instances point at this via their @proto. */
private val prop_proto: List[(String, AbsProperty)] = List(
  ("@class", AbsConstValue(PropValue(AbsString.alpha("CallbackObject")))),
  ("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
  ("@extensible", AbsConstValue(PropValue(T)))
)

// Registers the constructor and prototype objects at their pre-allocated locations.
override def getInitList(): List[(Loc, List[(String, AbsProperty)])] = List(
  (loc_cons, prop_cons), (loc_proto, prop_proto)
)
override def getSemanticMap(): Map[String, SemanticFun] = {
Map(
("tizen.ContactAddress.constructor" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
val lset_this = h(SinglePureLocalLoc)("@this")._1._2._2
val lset_env = h(SinglePureLocalLoc)("@env")._1._2._2
val set_addr = lset_env.foldLeft[Set[Address]](Set())((a, l) => a + locToAddr(l))
if (set_addr.size > 1) throw new InternalError("API heap allocation: Size of env address is " + set_addr.size)
val addr_env = set_addr.head
val addr1 = cfg.getAPIAddress(addr_env, 0)
val l_r1 = addrToLoc(addr1, Recent)
val (h_2, ctx_2) = Helper.Oldify(h, ctx, addr1)
val n_arglen = Operator.ToUInt32(getArgValue(h_2, ctx_2, args, "length"))
val o_new = ObjEmpty.
update("@class", PropValue(AbsString.alpha("Object"))).
update("@proto", PropValue(ObjectValue(Value(TIZENContactAddress.loc_proto), F, F, F))).
update("@extensible", PropValue(T))
val (h_3, es_1) = n_arglen match {
case UIntSingle(n) if n == 0 =>
val o_arr = Helper.NewArrayObject(AbsNumber.alpha(1))
val o_arr1 = o_arr.update("0", PropValue(ObjectValue(Value(AbsString.alpha("HOME")), T, T, T)))
val h_3 = h_2.update(l_r1, o_arr1)
val o_new2 = o_new.
update("country", PropValue(ObjectValue(Value(NullTop), T, T, T))).
update("region", PropValue(ObjectValue(Value(NullTop), T, T, T))).
update("city", PropValue(ObjectValue(Value(NullTop), T, T, T))).
update("streetAddress", PropValue(ObjectValue(Value(NullTop), T, T, T))).
update("additionalInformation", PropValue(ObjectValue(Value(NullTop), T, T, T))).
update("postalCode", PropValue(ObjectValue(Value(NullTop), T, T, T))).
update("isDefault", PropValue(ObjectValue(Value(F), T, T, T))).
update("types", PropValue(ObjectValue(Value(l_r1), T, T, T)))
val h_4 = lset_this.foldLeft(h_3)((_h, l) => _h.update(l, o_new2))
(h_4, TizenHelper.TizenExceptionBot)
case UIntSingle(n) if n == 1 =>
val v = getArgValue(h_2, ctx_2, args, "0")
val es =
if (v._1 <= PValueTop)
Set[WebAPIException](TypeMismatchError)
else TizenHelper.TizenExceptionBot
val o_arr = Helper.NewArrayObject(AbsNumber.alpha(1))
val o_arr1 = o_arr.update("0", PropValue(ObjectValue(Value(AbsString.alpha("HOME")), T, T, T)))
val h_3 = h_2.update(l_r1, o_arr1)
val (obj, ess) = v._2.foldLeft((o_new, TizenHelper.TizenExceptionBot))((_o, l) => {
val v_1 = Helper.Proto(h_3, l, AbsString.alpha("country"))
val v_2 = Helper.Proto(h_3, l, AbsString.alpha("region"))
val v_3 = Helper.Proto(h_3, l, AbsString.alpha("city"))
val v_4 = Helper.Proto(h_3, l, AbsString.alpha("streetAddress"))
val v_5 = Helper.Proto(h_3, l, AbsString.alpha("additionalInformation"))
val v_6 = Helper.Proto(h_3, l, AbsString.alpha("postalCode"))
val v_7 = Helper.Proto(h_3, l, AbsString.alpha("isDefault"))
val v_8 = Helper.Proto(h_3, l, AbsString.alpha("types"))
val es_1 =
if (v_1._1._1 </ UndefTop && v_1._1._5 </ StrTop)
Set[WebAPIException](TypeMismatchError)
else TizenHelper.TizenExceptionBot
val o_1 =
if (v_1._1._5 </ StrBot)
o_new.update("country", PropValue(ObjectValue(Value(v_1._1._5), T, T, T)))
else o_new.update("country", PropValue(ObjectValue(Value(NullTop), T, T, T)))
val es_2 =
if (v_2._1._1 </ UndefTop && v_2._1._5 </ StrTop)
Set[WebAPIException](TypeMismatchError)
else TizenHelper.TizenExceptionBot
val o_2 =
if (v_2._1._5 </ StrBot){
o_new.update("region", PropValue(ObjectValue(Value(v_2._1._5), T, T, T)))
}
else
o_new.update("region", PropValue(ObjectValue(Value(NullTop), T, T, T)))
val es_3 =
if (v_3._1._1 </ UndefTop && v_3._1._5 </ StrTop)
Set[WebAPIException](TypeMismatchError)
else TizenHelper.TizenExceptionBot
val o_3 =
if (v_3._1._5 </ StrBot)
o_new.update("city", PropValue(ObjectValue(Value(v_3._1._5), T, T, T)))
else o_new.update("city", PropValue(ObjectValue(Value(NullTop), T, T, T)))
val es_4 =
if (v_4._1._1 </ UndefTop && v_4._1._5 </ StrTop)
Set[WebAPIException](TypeMismatchError)
else TizenHelper.TizenExceptionBot
val o_4 =
if (v_4._1._5 </ StrBot)
o_new.update("streetAddress", PropValue(ObjectValue(Value(v_4._1._5), T, T, T)))
else o_new.update("streetAddress", PropValue(ObjectValue(Value(NullTop), T, T, T)))
val es_5 =
if (v_5._1._1 </ UndefTop && v_5._1._5 </ StrTop)
Set[WebAPIException](TypeMismatchError)
else TizenHelper.TizenExceptionBot
val o_5 =
if (v_5._1._5 </ StrBot)
o_new.update("additionalInformation", PropValue(ObjectValue(Value(v_5._1._5), T, T, T)))
else o_new.update("additionalInformation", PropValue(ObjectValue(Value(NullTop), T, T, T)))
val es_6 =
if (v_6._1._1 </ UndefTop && v_6._1._5 </ StrTop)
Set[WebAPIException](TypeMismatchError)
else TizenHelper.TizenExceptionBot
val o_6 =
if (v_6._1._5 </ StrBot)
o_new.update("postalCode", PropValue(ObjectValue(Value(v_6._1._5), T, T, T)))
else o_new.update("postalCode", PropValue(ObjectValue(Value(NullTop), T, T, T)))
val es_7 =
if (v_7._1._1 </ UndefTop && v_7._1._3 </ BoolTop)
Set[WebAPIException](TypeMismatchError)
else TizenHelper.TizenExceptionBot
val o_7 =
if (v_7._1._3 </ BoolBot)
o_new.update("isDefault", PropValue(ObjectValue(Value(v_7._1._3), T, T, T)))
else o_new.update("isDefault", PropValue(ObjectValue(Value(F), T, T, T)))
val (o_8, es_8) =
if (v_8 </ ValueBot) {
val es_ = v_8._2.foldLeft(TizenHelper.TizenExceptionBot)((_es, ll) => {
val n_length = Operator.ToUInt32(Helper.Proto(h_3, ll, AbsString.alpha("length")))
val ess = n_length match {
case NumBot =>
TizenHelper.TizenExceptionBot
case UIntSingle(n) => {
val es__ = (0 until n.toInt).foldLeft(TizenHelper.TizenExceptionBot)((_e, i) => {
val vi = Helper.Proto(h_3, ll, AbsString.alpha(i.toString))
val esi =
if (vi._1._5 </ StrTop) Set[WebAPIException](TypeMismatchError)
else TizenHelper.TizenExceptionBot
_e ++ esi
})
es__
}
case _ => {
val vi = Helper.Proto(h_3, ll, AbsString.alpha("@default_number"))
val esi =
if (vi._1._5 </ StrTop) Set[WebAPIException](TypeMismatchError)
else TizenHelper.TizenExceptionBot
esi
}
}
_es ++ ess
})
(o_new.update("types", PropValue(ObjectValue(Value(v_8._2), T, T, T))), es_)
}
else (o_new.update("types", PropValue(ObjectValue(Value(l_r1), T, T, T))), TizenHelper.TizenExceptionBot)
(_o._1 + o_1 + o_2 + o_3 + o_4 + o_5 + o_6 + o_7 + o_8,
_o._2 ++ es_1 ++ es_2 ++ es_3 ++ es_4 ++ es_5 ++ es_6 ++ es_7 ++ es_8)
})
val h_4 = lset_this.foldLeft(h_3)((_h, l) => _h.update(l, obj))
(h_4, es ++ ess)
case _ => {
(h_2, TizenHelper.TizenExceptionBot)
}
}
val (h_e, ctx_e) = TizenHelper.TizenRaiseException(h, ctx, es_1)
((Helper.ReturnStore(h_3, Value(lset_this)), ctx_2), (he + h_e, ctxe + ctx_e))
}
))
)
}
override def getPreSemanticMap(): Map[String, SemanticFun] = {
Map()
}
override def getDefMap(): Map[String, AccessFun] = {
Map()
}
override def getUseMap(): Map[String, AccessFun] = {
Map()
}
} | daejunpark/jsaf | src/kr/ac/kaist/jsaf/analysis/typing/models/Tizen/TIZENContactAddress.scala | Scala | bsd-3-clause | 11,590 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.hadoop.sources
import com.twitter.zipkin.gen.{BinaryAnnotation, Span, Annotation}
import com.twitter.scalding._
import com.twitter.zipkin.gen
import scala.collection.JavaConverters._
/**
* Preprocesses the data by merging different pieces of the same span
*/
class Preprocessed(args : Args) extends Job(args) with DefaultDateRangeJob {
// Read raw spans and project the fields needed for merging. Spans can arrive
// in several fragments; group by (trace id, span id, parent id) and
// concatenate the annotation lists to rebuild each logical span.
val preprocessed = SpanSource()
.read
.mapTo(0 ->('trace_id, 'id, 'parent_id, 'annotations, 'binary_annotations)) {
s: Span => (s.trace_id, s.id, s.parent_id, s.annotations.toList, s.binary_annotations.toList)
}
.groupBy('trace_id, 'id, 'parent_id) {
_.reduce('annotations, 'binary_annotations) {
(left: (List[Annotation], List[BinaryAnnotation]), right: (List[Annotation], List[BinaryAnnotation])) =>
(left._1 ++ right._1, left._2 ++ right._2)
}
}
// Re-assemble thrift gen.Span instances from the merged tuples (the service
// name is left empty here) and write them out. A parent id of 0 is treated
// as "no parent" and is not set on the span.
val onlyMerge = preprocessed
.mapTo(('trace_id, 'id, 'parent_id, 'annotations, 'binary_annotations) -> 'span) {
a : (Long, Long, Long, List[Annotation], List[BinaryAnnotation]) =>
a match {
case (tid, id, pid, annotations, binary_annotations) =>
val span = new gen.Span(tid, "", id, annotations.asJava, binary_annotations.asJava)
if (pid != 0) {
span.setParent_id(pid)
}
span
}
}.write(PrepNoNamesSpanSource())
}
| davidbernick/zipkin | zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/sources/Preprocessed.scala | Scala | apache-2.0 | 1,971 |
package com.hungrylearner.pso.swarm
import com.hungrylearner.pso.particle.Particle
import akka.actor.ActorContext
// Base configuration shared by every level of the swarm hierarchy; concrete
// subclasses report how many particles and leaf swarms exist beneath the node.
abstract class SwarmConfig[F,P]( val childCount: Int, val context: SimulationContext) {
// Total number of particles contained in all descendant swarms.
val descendantParticleCount: Int
// Number of leaf (local) swarms reachable from this node.
val descendantSwarmCount: Int
}
// Configuration for an intermediate (regional) swarm that supervises
// `childCount` child swarms, each configured by `childrenConfig` and created
// through `childSwarmActorFactory` (given an ActorContext and a child index).
class RegionalSwarmConfig[F,P]( childCount: Int,
val childrenConfig: SwarmConfig[F,P],
val childName: String,
val childSwarmActorFactory: (ActorContext,Int)=>SwarmActor[F,P],
override val context: SimulationContext) extends SwarmConfig[F,P]( childCount, context) {
// Descendant totals aggregate multiplicatively over all children.
override val descendantParticleCount = childCount * childrenConfig.descendantParticleCount
override val descendantSwarmCount = childCount * childrenConfig.descendantSwarmCount
}
/**
*
* @param particleCount The number of particles in this swarm.
* @param particleFactory (swarmIndex, particleIndex, particleCount) => Particle[F,P]
* The swarmIndex is useful when each swarm needs to search different regions
* of the overall particle space.
* @param context SimulationContext
* @tparam F Fitness
* @tparam P Particle backing store
*/
class LocalSwarmConfig[F,P]( val particleCount: Int,
val particleFactory: (Int, Int, Int) => Particle[F,P],
override val context: SimulationContext) extends SwarmConfig[F,P]( particleCount, context) {
// A local swarm's descendant particles are exactly its own particles...
override val descendantParticleCount = particleCount
// ...and it counts as a single leaf swarm.
override val descendantSwarmCount = 1
}
| flintobrien/akka-multiswarm | src/main/scala/com/hungrylearner/pso/swarm/SwarmConfig.scala | Scala | apache-2.0 | 1,601 |
package aecor.runtime.akkageneric
import java.util.concurrent.TimeUnit
import akka.actor.ActorSystem
import akka.cluster.sharding.ClusterShardingSettings
import scala.concurrent.duration._
/**
 * Tuning knobs for the generic Akka runtime, normally read from the
 * `aecor.generic-akka-runtime` configuration section (see reference.conf).
 *
 * @param numberOfShards number of cluster-sharding shards ("number-of-shards")
 * @param idleTimeout value of "idle-timeout" — presumably how long an entity may
 *                    stay idle before being stopped; confirm against runtime usage
 * @param askTimeout value of "ask-timeout" — presumably the ask-pattern timeout;
 *                   confirm against runtime usage
 * @param clusterShardingSettings underlying Akka Cluster Sharding settings
 */
final case class GenericAkkaRuntimeSettings(numberOfShards: Int,
idleTimeout: FiniteDuration,
askTimeout: FiniteDuration,
clusterShardingSettings: ClusterShardingSettings)
object GenericAkkaRuntimeSettings {
/**
* Builds the default settings by reading the `aecor.generic-akka-runtime`
* section of the actor system's configuration; see reference.conf for the
* available keys.
*
* @param system actor system whose configuration is read
* @return settings populated from configuration
*/
def default(system: ActorSystem): GenericAkkaRuntimeSettings = {
val section = system.settings.config.getConfig("aecor.generic-akka-runtime")
// Reads a duration key as whole milliseconds and wraps it as a FiniteDuration.
def millis(path: String): FiniteDuration =
Duration(section.getDuration(path, TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS)
val shardCount = section.getInt("number-of-shards")
val idle = millis("idle-timeout")
val ask = millis("ask-timeout")
val sharding = ClusterShardingSettings(system)
GenericAkkaRuntimeSettings(shardCount, idle, ask, sharding)
}
}
| notxcain/aecor | modules/akka-cluster-runtime/src/main/scala/aecor/runtime/akkageneric/GenericAkkaRuntimeSettings.scala | Scala | mit | 1,205 |
/*
* Copyright 2014 Dennis Vis
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.talares.api.datatypes
import java.util.regex.Pattern
import org.joda.time.DateTime
import play.api.data.validation.ValidationError
import play.api.libs.json.{JsPath, JsError, JsSuccess, JsString, JsValue, Reads}
/**
* @author Dennis Vis
* @since 0.1.0
*/
package object items {
// JSON Reads for the OData/"Edm.DateTime" wire format "/Date(<ticks>+<offset>)/",
// as emitted by WCF/.NET services.
implicit object EdmDateTimeReads extends Reads[DateTime] {
// Captures "<ticks>+<offset>" between "/Date(" and ")/".
val pattern = Pattern.compile("/Date\\\\(([0-9]+\\\\+[0-9]*)\\\\)/")
def reads(json: JsValue) = json match {
case JsString(s) =>
val matcher = pattern.matcher(s)
if (matcher.matches) {
// Millisecond ticks before the '+', numeric offset digits after it.
val split = matcher.group(1).split("\\\\+")
val ticks = split(0).toLong
val offset = split(1).toInt
// NOTE(review): the offset digits are applied verbatim as minutes; if
// the feed uses an "hhmm" timezone suffix (e.g. "+0530"), this would
// over-add — confirm the expected wire format.
JsSuccess(new DateTime(ticks).plusMinutes(offset))
}
else JsError(Seq(JsPath() -> Seq(ValidationError("error.expected.edm.datetime", pattern.toString))))
// Non-string JSON values can never be an Edm.DateTime.
case _ => JsError(Seq(JsPath() -> Seq(ValidationError("error.expected.jsstring"))))
}
}
} | talares/talares | src/talares/src/main/scala/org/talares/api/datatypes/items/package.scala | Scala | apache-2.0 | 1,579 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.codegen.over
import org.apache.flink.table.api.TableConfig
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.codegen.CodeGenUtils.{BASE_ROW, newName}
import org.apache.flink.table.planner.codegen.Indenter.toISC
import org.apache.flink.table.planner.codegen.{CodeGenUtils, CodeGeneratorContext, ExprCodeGenerator, GenerateUtils}
import org.apache.flink.table.runtime.generated.{GeneratedRecordComparator, RecordComparator}
import org.apache.flink.table.types.logical.{BigIntType, IntType, LogicalType, LogicalTypeRoot, RowType}
import org.apache.calcite.avatica.util.DateTimeUtils
import org.apache.calcite.rex.{RexInputRef, RexWindowBound}
import org.apache.calcite.sql.fun.SqlStdOperatorTable.{GREATER_THAN, GREATER_THAN_OR_EQUAL, MINUS}
import org.apache.calcite.tools.RelBuilder
import java.math.BigDecimal
/**
* A code generator for generating [[RecordComparator]] on the [[RexWindowBound]] based range
* over window.
*
* @param inType type of the input
* @param bound the bound value for the window, its type may be Long or BigDecimal.
* @param key key position describe which fields are keys in what order
* @param keyType type for the key field.
* @param keyOrder sort order for the key field.
* @param isLowerBound the RexWindowBound is lower or not.
*/
class RangeBoundComparatorCodeGenerator(
relBuilder: RelBuilder,
config: TableConfig,
inType: RowType,
bound: Any,
key: Int = -1,
keyType: LogicalType = null,
keyOrder: Boolean = true,
isLowerBound: Boolean = true) {
// Generates Java source for a RecordComparator that decides whether a row's
// key falls inside the range-over-window bound relative to the current row.
def generateBoundComparator(name: String): GeneratedRecordComparator = {
val className = newName(name)
val input = CodeGenUtils.DEFAULT_INPUT1_TERM
val current = CodeGenUtils.DEFAULT_INPUT2_TERM
val ctx = CodeGeneratorContext(config)
// Field-access expressions for the order-by key in both rows.
val inputExpr = GenerateUtils.generateFieldAccess(ctx, inType, inputTerm = input, key)
val currentExpr = GenerateUtils.generateFieldAccess(ctx, inType, inputTerm = current, key)
// See RangeSlidingOverFrame:
// return -1 with lower bound will be eliminate
// return 1 with higher bound will be eliminate
// Except the null value from the window frame unless the last value is not null.
val oneIsNull = if (isLowerBound) "return -1;" else "return 1;"
// Sign of the bound constant; the bound may be a Long or a BigDecimal.
def boundCompareZero: Int = {
bound match {
case bg: BigDecimal => bg.compareTo(BigDecimal.ZERO)
case _ => bound.asInstanceOf[Long].compareTo(0)
}
}
val allIsNull = if (isLowerBound) {
//put the null value into the window frame if the last value is null and the lower bound
// not more than 0.
if (boundCompareZero <= 0) "return 1;" else "return -1;"
} else {
//put the null value into the window frame if the last value is null and the upper bound
//not less than 0.
if (boundCompareZero >= 0) "return -1;" else "return 1;"
}
// Comparator body: handle null keys first, then fall through to the
// generated arithmetic comparison.
val comparatorCode =
j"""
${ctx.reuseLocalVariableCode()}
${inputExpr.code}
${currentExpr.code}
if (${inputExpr.nullTerm} && ${currentExpr.nullTerm}) {
$allIsNull
} else if (${inputExpr.nullTerm} || ${currentExpr.nullTerm}) {
$oneIsNull
} else {
${getComparatorCode(inputExpr.resultTerm, {currentExpr.resultTerm})}
}
""".stripMargin
// Full generated class implementing RecordComparator.
val code =
j"""
public class $className implements ${classOf[RecordComparator].getCanonicalName} {
private final Object[] references;
${ctx.reuseMemberCode()}
public $className(Object[] references) {
this.references = references;
${ctx.reuseInitCode()}
${ctx.reuseOpenCode()}
}
@Override
public int compare($BASE_ROW $input, $BASE_ROW $current) {
${comparatorCode.mkString}
}
}
""".stripMargin
new GeneratedRecordComparator(className, code, ctx.references.toArray)
}
// Generates the non-null comparison: evaluates (input - current) — or the
// reverse for descending order — against the bound literal via a Rex
// expression, returning 1 when the row is inside the bound and -1 otherwise.
private def getComparatorCode(inputValue: String, currentValue: String): String = {
// Normalize the bound and key type so both sides use the same unit/width.
val (realBoundValue, realKeyType) = keyType.getTypeRoot match {
case LogicalTypeRoot.DATE =>
//The constant about time is expressed based millisecond unit in calcite, but
//the field about date is expressed based day unit. So here should keep the same unit for
// comparator.
(bound.asInstanceOf[Long] / DateTimeUtils.MILLIS_PER_DAY, new IntType())
case LogicalTypeRoot.TIME_WITHOUT_TIME_ZONE => (bound, new IntType())
case LogicalTypeRoot.TIMESTAMP_WITHOUT_TIME_ZONE => (bound, new BigIntType())
case _ => (bound, keyType)
}
val typeFactory = relBuilder.getTypeFactory.asInstanceOf[FlinkTypeFactory]
val relKeyType = typeFactory.createFieldTypeFromLogicalType(realKeyType)
//minus between inputValue and currentValue
val ctx = CodeGeneratorContext(config)
val exprCodeGenerator = new ExprCodeGenerator(ctx, false)
// For descending key order the operands are swapped so the same bound test applies.
val minusCall = if (keyOrder) {
relBuilder.call(
MINUS, new RexInputRef(0, relKeyType), new RexInputRef(1, relKeyType))
} else {
relBuilder.call(
MINUS, new RexInputRef(1, relKeyType), new RexInputRef(0, relKeyType))
}
exprCodeGenerator.bindInput(realKeyType, inputValue).bindSecondInput(realKeyType, currentValue)
val literal = relBuilder.literal(realBoundValue)
// In order to avoid the loss of precision in long cast to int.
val comCall = if (isLowerBound) {
relBuilder.call(GREATER_THAN_OR_EQUAL, minusCall, literal)
} else {
relBuilder.call(GREATER_THAN, minusCall, literal)
}
val comExpr = exprCodeGenerator.generateExpression(comCall)
j"""
${ctx.reuseMemberCode()}
${ctx.reuseLocalVariableCode()}
${ctx.reuseInputUnboxingCode()}
${comExpr.code}
if (${comExpr.resultTerm}) {
return 1;
} else {
return -1;
}
""".stripMargin
}
}
| fhueske/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/codegen/over/RangeBoundComparatorCodeGenerator.scala | Scala | apache-2.0 | 6,835 |
package com.ubeeko.htalk.tests
import org.scalatest._
import com.ubeeko.htalk.criteria._
import scala.concurrent.duration._
import java.util.Date
@Ignore
// Spec for the htalk time-filter DSL. The whole suite is currently disabled
// via @Ignore; only the first case asserts anything — the rest merely build
// the query expressions and would pass as long as construction succeeds.
class TestTimeFilterSpec extends FlatSpec with Matchers {
"TimeFilter" should "filter with exact time" in {
val now = new Date().getTime()
val precise = "user" get "one" at (now)
val hGet = precise hBaseGet
// The exact timestamp must become the lower end of the HBase time range.
assertResult(now)(hGet.getTimeRange().getMin())
}
it should "filter after a certain time" in {
val precise = "user" get "one" after (new Date().getTime())
}
it should "filter before a certain time" in {
val precise = "user" get "one" before (new Date().getTime())
}
it should "filter between to dates" in {
val precise = "user" get "one" between (new Date().getTime(), new Date().getTime())
}
it should "filter the lase two days" in {
val precise = "user" get "one" last (2.days)
}
it should "filter today only" in {
val precise = "user" get "one" today
}
} | eric-leblouch/htalk | src/test/scala/com/ubeeko/htalk/tests/TestTimeFilter.scala | Scala | apache-2.0 | 989 |
object Solution {

/**
* Reads a test-case count from stdin, then for each case reads an integer n
* and prints the maximum chocolate-piece product floor(n/2) * ceil(n/2),
* computed in Long arithmetic to avoid overflow.
*/
def main(args: Array[String]): Unit = {
val cases = scala.io.StdIn.readLine().toInt
(1 to cases).foreach { _ =>
val n = scala.io.StdIn.readLine().toInt
val half = 1L * n / 2
println(half * (n - half))
}
}
}
| advancedxy/hackerrank | algorithms/warmup/HalloweenParty.scala | Scala | mit | 185 |
package com.larry.da.jobs.idmap
/**
* Created by larry on 19/10/15.
*/
/************************************************
* id type is :
* case "aguid" => "0"
* case "agsid" => "1"
* case "adx" => "2"
* case "baidu" => "3"
* case "tanx" => "4"
* case "agfid" => "5"
* *********************************************/
import java.text.SimpleDateFormat
import java.util.Calendar
import com.google.common.hash.Hashing.md5
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.spark.graphx._
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import scala.collection.mutable.ArrayBuffer
/*
import com.larry.da.jobs.idmap.Person
import com.larry.da.jobs.idmap.Config
import com.larry.da.jobs.idmap.Utils
*/
/**
 * Spark job that merges hourly id-mapping records into a history index,
 * resolves groups of connected aguids with GraphX connected components, and
 * writes HBase add/delete files plus per-hour idmap outputs.
 */
object IdMapHistory_test {
// Shared mutable job state, assigned in main()/compute() before use.
var sc:SparkContext = _
var parallelism = 60 // sc.defaultParallelism
var graphParallelism = 20 // sc.defaultParallelism
val partitionCount = Config.partitionCount
// HDFS layout used by the job.
val historyPath = "/user/dauser/aguid/history"
val hbasePath = "aguid/hbase"
val idmapPath = "aguid/idmap"
val hourlyPath = "aguid/hourly"
val hbaseOutPath = "/user/dauser/aguid/hbase_output"
// Maximum number of records kept per (uid, idType) group.
val verticesLimit = 50
val logtimeFormat=new SimpleDateFormat("yyyy-MM-dd-HH");
// Outputs of compute(), consumed by save().
var mergeData:RDD[Person] = _
var uidChange : RDD[(Long,(Long,Int))] = _
var hourRdds : Array[RDD[Person]] = _
// Buckets records per (uid, idType) into 10-minute windows, keeps the newest
// verticesLimit records per window, then keeps the newest verticesLimit
// records overall per (uid, idType).
def reduceHourData(data: RDD[Person]) = {
data.map(p=>{
((p.uid,p.idType,p.time/10),p)
}).aggregateByKey( new ArrayBuffer[Person])(
(arr,v) => arr += v,
(arr1,arr2) => arr1 ++= arr2
).map(x=>{
val ((uid, idType,m10), list) =x
((uid,idType),(m10,list.sortWith((a,b)=>if(a.time != b.time) a.time > b.time else a.cid > b.cid).take(verticesLimit)))
}).aggregateByKey(new ArrayBuffer[(Int,ArrayBuffer[Person])])(
(arr,v) => arr += v,
(arr1,arr2) => arr1 ++= arr2
).flatMap(x=>{
val ((uid,idType),list) =x;
val res = list.sortWith((a,b)=>a._1 > b._1).map(_._2)
res.tail.foldLeft(res.head)((a,b)=>if(a.length < verticesLimit) a ++= b else a).take(verticesLimit)
})
}
// Loads the previous index keyed by cidL: after a day boundary (last history
// hour ends with "23") it reads the HBase export for the day, otherwise the
// regular history file for `lastDay`.
def getIndexHis(day:String,lastDay:String)={
val hours = day.split(",")
if(lastDay.split("-").last == "23"){
val day = hours.head.take(10)
val path = s"$hbaseOutPath/$day" //***
// val path = "/user/dauser/aguid/hbase_output/2016-01-09/part-m-0000[0-5]"
sc.textFile(path).map(_.split("\t")).map(x=>{
val Array(uid,cid,idType,time,num) =x;
new Person(md5.hashString(cid,Config.chaset_utf8).asLong(),Utils.unCompressAguid(uid),cid,idType.toInt,time.toInt,num.toInt)
}).map(p=>(p.cidL,p))
} else {
val path = s"$historyPath/$lastDay"
// sc.textFile(path).map(_.split("\t")).map(x => { val Array(guidL,uidL,guid,idType,time,num) =x; (guidL.toLong, (uidL.toLong,guid,idType,time.toInt,num.toInt)) })
sc.textFile(path).map(Person(_)).map(p=>(p.cidL,p))
}
}
// Formats records into HBase rows: one line per uid with one tab-separated
// column per idType (1..5), each column holding "cid|time|num" entries sorted
// newest first, followed by the total vertex count.
def hbaseAddFormat(rdd:RDD[Person])={
rdd.map(p => {
((p.uid, p.idType), p)
}).aggregateByKey(new ArrayBuffer[Person], parallelism)(
(arr, v) => arr += v,
(arr1, arr2) => arr1 ++= arr2
).map(x => {
val ((uid, idType), list) = x;
(uid, (idType, list.sortWith((a, b) => a.time > b.time).map(p => Array(p.cid, p.time, p.num).mkString("|")).mkString(","), list.length))
}).aggregateByKey(new ArrayBuffer[(Int, String, Int)])(
(arr, v) => arr += v,
(arr1, arr2) => arr1 ++= arr2
).map(x => {
val (uid, list) = x;
val idsText = ArrayBuffer("", "", "", "", "")
var vertexCount = 0;
list.foreach(p => {
val (ix, ids, vc) = p;
idsText(ix.toInt - 1) = ids;
vertexCount += vc
})
val uidS = Utils.compressAguid(uid)
Array(uidS, idsText.mkString("\t"), vertexCount).mkString("\t")
})
}
// Within each connected-component group, picks the "oldest" uid (smallest
// time, ties broken by smallest uid) and redirects every member to it.
def setOldestUid(rdd:RDD[(Long,(Long,Int))])= {
rdd.map(x => {
val (u1, (u2, time)) = x
(u2, (u1, time))
}).aggregateByKey(new ArrayBuffer[(Long, Int)])(
(arr, v) => arr += v,
(arr1, arr2) => arr1 ++= arr2
).flatMap(x => {
val (uid, list) = x;
var oldest = list.head
list.foreach(p => if (p._2 < oldest._2) oldest = p else if (p._2 == oldest._2 && p._1 < oldest._1) oldest = p)
val v2 = oldest._1
list.map(p => {
val (v1, time) = p;
(v1, (v2, time))
})
})
}
/** **********************************************
* compute hour's data
***********************************************/
// Merges the given hours with the history index, builds a uid graph from
// records sharing an id, and computes the uid redirection map (uidChange).
def compute(hourList:String, historyHour:String): Unit = {
val hours = hourList.split(",")
val (hourSave,limitDown) = (hours.last, ( logtimeFormat.parse( hours.head ).getTime /60000 ).toInt )
hourRdds = hours.map(hour => sc.textFile(s"${hourlyPath}/$hour").map(Person(_)))
val hourAllIndex = sc.union(hourRdds).repartition(parallelism * 3)
//reduce hour count to verticesLimit
val indexHourReduce = reduceHourData(hourAllIndex).map(p=>(p.cidL,p))
//from history index file
val indexHis = getIndexHis(hourList, historyHour)
mergeData = indexHis.union(indexHourReduce).reduceByKey((a, b) => a.merge(b, true), Config.partitionCount).map(_._2) //***
val uidList = mergeData.filter(p => p.uidSet != null && p.uidSet.size > 1).map(_.uidSet.toArray).repartition(parallelism)
val vertices = uidList.flatMap(x => x).reduceByKey((a, b) => if (a < b) a else b)
val edges = uidList.flatMap(li => { val v1 = li.head._1; li.tail.map(v2 => new Edge(v1, v2._1, null)); })
val uidChangePre = Graph.fromEdges(edges, null).connectedComponents().vertices
//find and set oldest uid every group
uidChange = setOldestUid(uidChangePre.join(vertices)).cache()
}
// Writes out the results of compute(): HBase delete/add files, diagnostic
// dumps, per-hour idmap outputs, and (on non-day-boundary hours) the history
// index. Must only run after compute() has populated the job state.
def save(hourList:String): Unit = {
val hours = hourList.split(",")
val (hourSave,limitDown) = (hours.last, ( logtimeFormat.parse( hours.head ).getTime /60000 ).toInt )
val limitUp = limitDown + 480;
val uidChangeDic = sc.broadcast(uidChange.map(x => { val (v1, (v2, time)) = x; (v1, v2) }).filter(x => x._1 != x._2).collectAsMap())
val indexNew = mergeData.map(p => { p.uid = uidChangeDic.value.getOrElse(p.uid, p.uid); p.uidSet = null; p })
//----------uid to delete in hbase---------
val oldUidChange = uidChange.filter(x => x._2._2 < limitDown).map(x => { val (v1, (v2, time)) = x; (v1, v2); }).filter(x => x._1 != x._2);
oldUidChange.map(x => Array(Utils.compressAguid(x._1), Utils.compressAguid(x._2)).mkString("\t")).saveAsTextFile(s"${hbasePath}/$hourSave/verticesDel")
//----------hbase add ---------
val hbaseAdd = indexNew.filter(p => limitDown <= p.time && p.time < limitUp).cache() //.map(p=>{if(p.time > limitUp) p.time = limitDown; p})
hbaseAddFormat(hbaseAdd).coalesce(parallelism).saveAsTextFile(s"${hbasePath}/$hourSave/verticesAdd")
//----------test---------
uidChange.map(x => { val (v1, (v2, time)) = x; Array(Utils.compressAguid(v1),Utils.compressAguid(v2),time).mkString("\t") }).saveAsTextFile(s"${hbasePath}/$hourSave/uidChange")
val add = hbaseAdd.map(p=>(p.uid,p));
uidChange.filter(x=>x._1 != x._2._1).join(add).saveAsTextFile(s"${hbasePath}/$hourSave/uidChangeJoinAdd")
//----------save idmap hourly ---------
hours zip hourRdds map (pair => {
val (hour, rdd) = pair;
val res = rdd.coalesce(parallelism, true).map(p => { p.uid = uidChangeDic.value.getOrElse(p.uid,p.uid); p }).cache()
res.filter(p => p.idType == 1).map(p => Array(p.cid, Utils.compressAguid(p.uid), p.time).mkString("\t")).coalesce(parallelism, true).saveAsTextFile(s"${idmapPath}/$hour/agsid", classOf[GzipCodec]);
res.filter(p => p.idType != 1 && p.idType != 5).map(p => Array(p.cid, Utils.compressAguid(p.uid), p.idType, p.time).mkString("\t")).coalesce(parallelism, true).saveAsTextFile(s"${idmapPath}/$hour/channelid", classOf[GzipCodec])
})
//----------save index---------
if (!hourSave.endsWith("23")) {
// indexNew.map(_.toString).saveAsTextFile(s"${historyPath}/$hourSave", classOf[GzipCodec])
}
}
/** **********************************************
* main
***********************************************/
// Args: start hour, last-history hour (or "-"), hours per batch, batch count,
// parallelism, and a flag string selecting which phases to run.
def main(args: Array[String]): Unit = {
// sc.stop
val conf = new SparkConf().setAppName("Idmap-history")
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
.set("spark.kryoserializer.buffer.mb","128")
// conf.set("spark.kryo.registrationRequired", "true")
conf.registerKryoClasses(Array(
classOf[com.larry.da.jobs.idmap.Person],
classOf[com.larry.da.jobs.userdigest.UserMapping],//userdigest
classOf[scala.collection.mutable.WrappedArray.ofRef[_]]
))
sc = new SparkContext(conf)
// val args = "2016-01-18-00 - 8 1 150 11".split(" ")
1 to args.length zip args foreach(println)
val Array(current,lastHistoryArg,hourSpanStr,runCountStr,parallelismArg,methods) = args
parallelism = parallelismArg.toInt
graphParallelism = parallelism/3
val hourSpan = hourSpanStr.toInt
val runCount = runCountStr.toInt
var lastHistory = if( lastHistoryArg.length < 2) "" else lastHistoryArg
val sdf=new SimpleDateFormat("yyyy-MM-dd-HH");
val runHour= Calendar.getInstance()
println("-----------------------")
println(parallelism)
println(graphParallelism)
println(partitionCount)
println(historyPath)
println(hbasePath)
println(idmapPath)
println(hourlyPath)
println(hbaseOutPath)
println(methods)
println("-----------------------")
0 until runCount foreach(span => {
val hourList = new ArrayBuffer[String]
runHour.setTime(sdf.parse(current))
runHour.add(Calendar.HOUR,span*hourSpan)
0 until hourSpan foreach(n=>{
hourList.append(sdf.format(runHour.getTime))
runHour.add(Calendar.HOUR,1)
})
val lastHourString = if(lastHistory != "") {val tmp = lastHistory;lastHistory= ""; tmp} else {runHour.add(Calendar.HOUR,-1 - hourSpan); sdf.format( runHour.getTime)}
val hourString = hourList.mkString(",")
if(methods(0) == '1') com.larry.da.jobs.idmap.IdMapHourly_test.computeHourList(sc,parallelism,hourList)
// BUGFIX: the original `if (cond) compute(...);save(...)` ran save()
// unconditionally (the semicolon ends the if-statement), but save()
// depends on mergeData/uidChange/hourRdds which are only initialized by
// compute(). Both calls must be guarded by the same flag.
if(methods(1) == '1') { compute(hourString,lastHourString); save(hourString) }
val endHourOfList = hourList.last
println("---------------------------------")
println(hourString + ";" + lastHourString)
println("endHourOfList is : " + endHourOfList)
println("---------------------------------")
if(endHourOfList.endsWith("23")){
if(methods(1) == '1') com.larry.da.jobs.userdigest.ChannelIdMerge.mergeIdMap(sc,endHourOfList.take(10))
}
})
}
}
| larry88/spark_da | src/main/scala/com/larry/da/jobs/idmap/IdMapHistory_test.scala | Scala | gpl-2.0 | 10,764 |
package com.arcusys.learn.models.request
import com.arcusys.learn.liferay.permission.PermissionCredentials
import org.scalatra.ScalatraServlet
import com.arcusys.learn.service.util.{ AntiSamyHelper, Parameter }
import scala.util.Try
object CategoryRequest extends BaseRequest {
// Request-parameter names used by the category endpoints.
val NewCourseId = "newCourseID"
val CategoryId = "categoryId"
val CategoryIds = "categoryIDs"
val Id = "id"
val ParentId = "parentId"
val Categories = "categories"
val Questions = "questions"
val Title = "title"
val Description = "description"
val Index = "index"
val DndMode = "dndMode"
val TargetId = "targetId"
val ItemType = "itemType"
// Builds a typed parameter accessor bound to the given servlet's current request.
def apply(controller: ScalatraServlet) = new Model(controller)
// Lazy, typed accessors over the request parameters; "Required" accessors are
// expected to fail when the parameter is absent (see Parameter), "Option"
// accessors return None instead.
class Model(controller: ScalatraServlet) {
implicit val _controller = controller
// NOTE(review): the two branches return a CategoryActionType value and the
// literal None respectively, so the inferred result type widens to a common
// supertype — callers must handle both shapes; confirm this is intentional.
def action = {
Parameter(Action).option match {
case Some(value) => CategoryActionType.withName(value.toUpperCase)
case None => None
}
}
def permissionCredentials = PermissionCredentials(Parameter(CourseId).longRequired, Parameter(PortletId).required, Parameter(PrimaryKey).required)
def itemType = Parameter(ItemType).required
def categoryId = Parameter(CategoryId).intOption
// NOTE(review): Try(...).get rethrows for non-numeric values, so a malformed
// id in the list surfaces as an exception here.
def categoryIds = Parameter(CategoryIds).multiWithEmpty.map(x => Try(x.toInt).get)
def courseId = Parameter(CourseId).intOption
def newCourseId = Parameter(NewCourseId).intOption
def parentId = Parameter(ParentId).intOption
def id = Parameter(Id).intRequired
def categoryIdSet = Parameter(Categories).multiRequired.map(x => x.toInt)
def questionsIdSet = Parameter(Questions).multiRequired.map(x => x.toInt)
// User-supplied text is sanitized (AntiSamy) before use.
def title = AntiSamyHelper.sanitize(Parameter(Title).required)
// Description is optional; absence maps to the empty string.
def description = Parameter(Description).option match {
case Some(value) => AntiSamyHelper.sanitize(value)
case None => ""
}
def index = Parameter(Index).intRequired
def targetId = Parameter(TargetId).intRequired
def dndMode = DndModeType.withName(Parameter(DndMode).required.toUpperCase)
}
}
| ViLPy/Valamis | learn-portlet/src/main/scala/com/arcusys/learn/models/request/CategoryRequest.scala | Scala | lgpl-3.0 | 2,088 |
//
// Rwait.scala -- Scala objects Rwait and Rclock, and class Rtime
// Project OrcScala
//
// $Id: Rclock.scala 2933 2011-12-15 16:26:02Z jthywissen $
//
// Created by dkitchin on Jan 13, 2011.
//
// Copyright (c) 2011 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.lib.time
import scala.math.BigInt.int2bigInt
import orc.Handle
import orc.error.runtime.ArgumentTypeMismatchException
import orc.run.extensions.RwaitEvent
import orc.types.{ SimpleFunctionType, SignalType, RecordType, IntegerType }
import orc.values.sites.{ TypedSite, TotalSite0, Site1 }
import orc.values.OrcRecord
/**
*/
/** Factory site: each call produces a record holding a `time` site (elapsed
  * milliseconds since this call) and the shared `wait` site.
  */
object Rclock extends TotalSite0 with TypedSite {

  def eval() = {
    val epoch = System.currentTimeMillis()
    new OrcRecord(
      "time" -> new Rtime(epoch),
      "wait" -> Rwait)
  }

  // () -> { time :: () -> Integer, wait :: Integer -> Signal }
  def orcType = {
    val timeType = SimpleFunctionType(IntegerType)
    val waitType = SimpleFunctionType(IntegerType, SignalType)
    SimpleFunctionType(new RecordType("time" -> timeType, "wait" -> waitType))
  }
}
/**
*/
/** Site reporting the number of milliseconds elapsed since `startTime`. */
class Rtime(startTime: Long) extends TotalSite0 {
  // Box explicitly instead of the previous `asInstanceOf[AnyRef]` cast, which
  // relied on the compiler boxing the primitive Long behind the cast.
  def eval() = Long.box(System.currentTimeMillis() - startTime)
}
/**
*/
/** Site that schedules a publication after the given number of milliseconds. */
object Rwait extends Site1 {
  def call(a: AnyRef, h: Handle) {
    a match {
      case delay: BigInt =>
        // Positive delays are handed to the runtime's timer; a zero delay
        // publishes immediately; a negative delay never publishes.
        if (delay > 0) h.notifyOrc(RwaitEvent(delay, h))
        else if (delay == 0) h.publish()
        else h.halt
      case other =>
        val actual = if (other != null) other.getClass().toString() else "null"
        throw new ArgumentTypeMismatchException(0, "Integer", actual)
    }
  }
}
| laurenyew/cOrcS | src/orc/lib/time/Rclock.scala | Scala | bsd-3-clause | 1,752 |
package io.dylemma.frp.test
import org.scalatest._
import io.dylemma.frp._
/** Behavioural tests for `EventStream.flatMap` and its `for`-comprehension
  * sugar: pairing semantics, multi-level nesting, and stop propagation.
  *
  * The expectations below show that `flatMap` tracks the *latest* value of the
  * outer stream: only combinations involving the most recent outer event are
  * emitted (e.g. after `s fire 1; s fire 2`, a `t fire 1` yields `2 -> 1`).
  */
class EventStream_flatMap extends FunSuite with TestHelpers with Observer {

  test("EventStream.flatMap basic functionality") {
    val s = EventSource[Int]()
    val t = EventSource[Int]()
    val x = for {
      i <- s
      j <- t
    } yield i -> j
    val results = accumulateEvents(x)
    s fire 1
    s fire 2
    t fire 1 //yield 2->1
    t fire 2 //yield 2->2
    s fire 3
    t fire 4 //yield 3->4
    assert(results.toList == List(2 -> 1, 2 -> 2, 3 -> 4))
  }

  test("EventStream.flatMap over multiple layers") {
    val s = EventSource[Int]()
    val t = EventSource[Int]()
    val u = EventSource[Int]()
    val x = for {
      i <- s
      j <- t
      k <- u
    } yield (i, j, k)
    val results = accumulateEvents(x)
    // Inner events are ignored until every outer layer has fired in order
    // (s, then t, then u); each new outer event restarts the chain below it.
    t fire 1 //ignore
    u fire 1 //ignore
    s fire 1 //(now waiting for t)
    u fire 2 //ignore
    t fire 2 //(now waiting for u)
    u fire 3 // fire (1, 2, 3)
    u fire 4 // fire (1, 2, 4)
    s fire 2 //(now waiting for t)
    u fire 5 //ignore
    t fire 3 //(now waiting for u)
    u fire 6 // fire (2, 3, 6)
    assert(results.toList == (1, 2, 3) :: (1, 2, 4) :: (2, 3, 6) :: Nil)
  }

  test("EventStream.flatMap encounters nothing when the second stream fires nothing") {
    val s = EventSource[Int]()
    val t = EventSource[Int]()
    val x = for {
      i <- s
      j <- t
    } yield i -> j
    val results = accumulateEvents(x)
    // Only the outer stream fires, so no pairs are produced.
    s fire 1
    s fire 2
    s fire 3
    s fire 4
    assert(results.toList == Nil)
  }

  test("EventStream.flatMap does not end if the mapped stream ends") {
    val s = EventSource[Int]()
    val t = EventSource[Int]()
    val x = for {
      i <- s
      j <- t
    } yield i -> j
    val stopped = awaitStop(x)
    val results = accumulateEvents(x)
    s fire 1
    t fire 2
    // Stopping the inner stream must not stop the combined stream.
    t.stop()
    assert(!stopped())
    assert(results.toList == List(1 -> 2))
  }

  test("EventStream.flatMap ends when the base stream ends") {
    val s = EventSource[Int]()
    val t = EventSource[Int]()
    val x = for {
      i <- s
      j <- t
    } yield i -> j
    val stopped = awaitStop(x)
    val results = accumulateEvents(x)
    s fire 1
    t fire 2
    // Stopping the outer (base) stream propagates the stop downstream.
    s.stop()
    assert(stopped())
    assert(results.toList == List(1 -> 2))
  }

  test("EventStream.flatMap on a stopped stream results in a stopped stream") {
    val s = EventSource[Int]()
    val t = EventSource[Int]()
    s.stop()
    val x = for {
      i <- s
      j <- t
    } yield i -> j
    assert(x.stopped)
  }
}
package topicvizz.core
import org.apache.pdfbox.pdmodel.PDDocument
import org.apache.pdfbox.util.PDFTextStripper
import java.net.URL
import java.io._
import java.util.Scanner
import java.util.regex.Pattern
import java.text.SimpleDateFormat
import topicvizz.common.PDFFilenameFilter
import topicvizz.common.tagging.Tag
import topicvizz.common.tagging.Tag
/**
*
*/
/**
 * Parses presentation PDFs, annotates their extracted text against a DBpedia
 * Spotlight service, and aggregates the discovered topics and authors into a
 * JSON graph for the visualization front end.
 */
class TopicVizz_Parser {

  // Accumulated state, filled incrementally by parseFile()/parseDirectory().
  private var oFileList: List[TopicVizz_File] = List()
  private var oTopicMap: Map[String, TopicVizz_Topic] = Map()
  private var oAuthorMap: Map[String, TopicVizz_Author] = Map()

  /**
   * Parses a single PDF: derives type/date/author from the file name, extracts
   * the text, annotates it via DBpedia Spotlight and records the resulting
   * topics in the topic/author maps. Any failure is printed and swallowed.
   *
   * @param sPath path of the PDF file
   */
  def parseFile(sPath: String) {
    try {
      // Load document
      val pdfFile = new File(sPath)
      // Parse meta-information from the file name via regexes.
      val tempType = matchString("(vortrag|beitrag)_.*.pdf", pdfFile.getName, "Undefined", 1)
      val tempDate = matchString("\\\\d\\\\d\\\\d\\\\d-\\\\d\\\\d-\\\\d\\\\d", pdfFile.getName, "YYYY-MM-DD", 0)
      val tempAuthorName = matchString("vortrag_vd-ak.*\\\\d\\\\d\\\\d\\\\d-\\\\d\\\\d-\\\\d\\\\d_(.*).pdf", pdfFile.getName, "Unknown", 1)
      var tempAuthor: TopicVizz_Author = null
      // NOTE(review): the containment check uses the raw name but insertion
      // keys the map by the upper-cased name — the lookup can only hit when
      // the name is already upper case, so authors may be duplicated and the
      // `.get` below could throw. Looks like a bug; TODO confirm.
      if (!oAuthorMap.contains(tempAuthorName)) {
        tempAuthor = new TopicVizz_Author(tempAuthorName)
        oAuthorMap += tempAuthorName.toUpperCase -> tempAuthor
      } else {
        tempAuthor = oAuthorMap.get(tempAuthorName).get
      }
      // Load pdf document
      val pdDocument = PDDocument.load(pdfFile)
      val bEncrypted = pdDocument.isEncrypted
      // Encrypted documents are silently skipped.
      if (!bEncrypted) {
        // Parse text
        val tempStripper = new PDFTextStripper()
        val tempText = tempStripper.getText(pdDocument)
        val tempFile = new TopicVizz_File(pdfFile.getName(), tempType, tempText, tempAuthor, tempDate)
        tempAuthor.addFile(tempFile)
        oFileList = oFileList.+:(tempFile)
        // Annotate text against the Spotlight service (network call).
        println("Annotating...")
        val tempAText = annotate(tempText)
        // Only proceed when the response contains a <Resources> element.
        if (tempAText.indexOf("<Resources>") > -1) {
          val tempACText = tempAText.substring(tempAText.indexOf("<Resources>"), tempAText.indexOf("</Resources>") + "</Resources>".length())
          // Tag text
          val tempTText = tag(tempACText)
          // Mapping Topic -> Files; topics are keyed by upper-cased name.
          for (topic ← tempTText) {
            if (!oTopicMap.contains(topic.name.toUpperCase)) {
              // New topic: fetch its abstract from DBpedia and link everything.
              val tempAbstract = getAbstract(topic.name, topic.value.toString)
              val tempTopic = new TopicVizz_Topic(topic.name, tempAbstract, topic.value)
              tempAuthor.addTopic(tempTopic)
              tempTopic.addAuthor(tempAuthor)
              tempTopic.addFile(tempFile)
              tempTopic.addYearCount(tempDate.substring(0, 4))
              oTopicMap += topic.name.toUpperCase -> tempTopic
            } else {
              // Existing topic: add the missing cross-links only.
              val tempTopic = oTopicMap.get(topic.name.toUpperCase).get
              if (!tempTopic.containsFile(tempFile)) {
                tempTopic.addFile(tempFile)
              }
              if (!tempTopic.containsAuthor(tempAuthor)) {
                tempTopic.addAuthor(tempAuthor)
              }
              if (!tempAuthor.containsTopic(tempTopic)) {
                tempAuthor.addTopic(tempTopic)
              }
              tempTopic.addYearCount(tempDate.substring(0, 4))
            }
          }
        }
      }
    } catch {
      case e: Exception ⇒ e.printStackTrace()
    }
  }

  /**
   * Links each known topic to related topics by re-annotating its abstract
   * and adding every recognized topic (other than itself) as a weighted
   * neighbour. One Spotlight round trip per topic.
   */
  def linkTopics() {
    println("Linking...")
    var count = 0
    for (topic ← oTopicMap) {
      count = count + 1
      println(count + "/" + oTopicMap.size)
      val tempAText = annotate(topic._2.getSAbstract)
      if (tempAText.indexOf("<Resources>") > -1) {
        val tempACText = tempAText.substring(tempAText.indexOf("<Resources>"), tempAText.indexOf("</Resources>") + "</Resources>".length())
        val tempTText = tag(tempACText)
        for (topic2 ← tempTText) {
          // Only link to topics we already know, and never to the topic itself.
          if ((oTopicMap.contains(topic2.name.toUpperCase())) && (topic._2.getSTopic.toUpperCase() != topic2.name.toUpperCase())) {
            val neighbour: TopicVizz_Topic = oTopicMap.apply(topic2.name.toUpperCase())
            if (!topic._2.containsNeighbour(neighbour)) {
              topic._2.addNeighbour(neighbour, topic2.simScore)
            }
          }
        }
      }
    }
  }

  /**
   * Serializes all accumulated topics and authors to a UTF-8 JSON file at the
   * given path, streaming one record at a time.
   *
   * @param sPath destination file path
   */
  def createJSONFile(sPath: String) {
    // NOTE(review): `file` is only used for logging its path below.
    val file = new File(sPath)
    val writer = new BufferedWriter(new OutputStreamWriter(
      new FileOutputStream(sPath), "UTF-8"));
    try {
      println("Start writing JSON... [" + file.getPath + "]")
      writer.write("{" + "\\n" +
        "\\"topics\\" :" + "\\n" +
        "[" + "\\n")
      writer.flush()
      for (topic ← oTopicMap) {
        writer.write(createJSONTopicString(topic._2))
        writer.flush()
      }
      writer.write("\\n" +
        "],\\n" +
        "\\"authors\\" :" + "\\n" +
        "[" + "\\n")
      writer.flush()
      for (author ← oAuthorMap) {
        writer.write(createJSONAuthorString(author._2))
        writer.flush()
      }
      writer.write("\\n]" +
        "\\n}")
      writer.flush()
      println("Finished writing.")
    } finally {
      writer.close()
    }
  }

  /**
   * Renders one topic as a JSON object (id, topic, abstract, files, authors,
   * weighted neighbour edges, per-year frequency). A trailing comma is added
   * unless this is the last topic in the map.
   *
   * NOTE(review): escaping is manual (only quotes/tabs stripped); abstracts
   * containing backslashes or control characters could still break the JSON.
   */
  def createJSONTopicString(oTopic: TopicVizz_Topic): String =
    {
      var json: String = ""
      json +=
        "{" + "\\n" +
        "\\"id\\" :\\"" + oTopic.id + "\\",\\n" +
        "\\"topic\\" :\\"" + oTopic.getSTopic.replace("\\"", "") + "\\",\\n" +
        "\\"abstract\\" :\\"" + oTopic.getSAbstract.replace("\\"", "").replace("\\t", "") + "\\",\\n" +
        "\\"files\\" : ["
      for (file ← oTopic.files) {
        json +=
          "\\"" + file.getSFileName + "\\""
        if (oTopic.files.last != file) {
          json += ","
        }
      }
      json +=
        "]," + "\\n" +
        "\\"mentioned_by\\" : ["
      for (author ← oTopic.authors) {
        json +=
          "\\"" + author.id + "\\""
        if (oTopic.authors.last != author) {
          json += ","
        }
      }
      json +=
        "]," + "\\n" +
        "\\"edges\\" : ["
      for (neighbour ← oTopic.neighbours) {
        json +=
          "{\\"neighbour\\":" + "\\"" + neighbour._1.id + "\\"," +
          "\\"weight\\":" + neighbour._2 + "}"
        if (oTopic.neighbours.last != neighbour) {
          json += ","
        }
      }
      json +=
        "]," + "\\n" +
        "\\"frequency_per_year\\" : {"
      for (year ← oTopic.yearsCount) {
        json +=
          "\\"" + year._1 + "\\":" + year._2
        if (oTopic.yearsCount.last != year) {
          json += ","
        }
      }
      json +=
        "}" + "\\n" +
        "}"
      if (oTopicMap.last._2 != oTopic) {
        json += ",\\n"
      }
      json
    }

  /**
   * Renders one author as a JSON object (id, name, files, mentioned topics),
   * with a trailing comma unless this is the last author in the map.
   */
  def createJSONAuthorString(oAuthor: TopicVizz_Author): String =
    {
      var json: String = ""
      json +=
        "{" + "\\n" +
        "\\"id\\" :\\"" + oAuthor.id + "\\",\\n" +
        "\\"name\\" :\\"" + oAuthor.getSName + "\\",\\n" +
        "\\"files\\" : ["
      for (file ← oAuthor.files) {
        json +=
          "\\"" + file.getSFileName + "\\""
        if (oAuthor.files.last != file) {
          json += ","
        }
      }
      json +=
        "]," + "\\n" +
        "\\"mentioned\\" : ["
      for (topic ← oAuthor.topics) {
        json +=
          "\\"" + topic.id + "\\""
        if (oAuthor.topics.last != topic) {
          json += ","
        }
      }
      json +=
        "]\\n" +
        "}"
      if (oAuthorMap.last._2 != oAuthor) {
        json += ","
      }
      json
    }

  /**
   * Parses every PDF in a directory (filtered by PDFFilenameFilter),
   * printing progress as it goes. Failures are printed and swallowed.
   *
   * @param sPath directory path
   */
  def parseDirectory(sPath: String) {
    try {
      val pdfDir = new File(sPath)
      val filter = new PDFFilenameFilter()
      var count = 0
      for (inputFile ← pdfDir.listFiles(filter)) {
        count = count + 1
        parseFile(inputFile.getPath)
        println(count + "\\\\" + pdfDir.listFiles(filter).length + " [" + inputFile.getName + "]")
      }
    } catch {
      case e: Exception ⇒ println(e.getMessage)
    }
  }

  /**
   * POSTs the given text to the DBpedia Spotlight /annotate endpoint and
   * returns the raw XML response.
   *
   * NOTE(review): if opening the connection fails, `out`/`Decoder`/`in` are
   * still null when the finally block runs, so close() throws an NPE which is
   * caught below and turned into "Annotation failed." — fragile but the NPE
   * is what the surrounding code relies on today.
   *
   * @param plainText text to annotate
   * @return the service's XML response, or "Annotation failed." on error
   */
  private def annotate(plainText: String): String = {
    var out: OutputStreamWriter = null
    var in: InputStream = null
    var Decoder: Scanner = null
    try {
      try {
        val urlEncoded = java.net.URLEncoder.encode(plainText, "UTF-8")
        val url = new java.net.URL("http://de.dbpedia.org/spotlight/rest/annotate")
        //val url = new java.net.URL("http://spotlight.dbpedia.org/rest/annotate")
        val data = "text=" + urlEncoded + "&support=250&confidence=0.2"
        val conn = url.openConnection()
        conn.setRequestProperty("Accept", "text/xml")
        conn.setDoOutput(true)
        out = new java.io.OutputStreamWriter(conn.getOutputStream)
        out.write(data)
        out.flush()
        in = conn.getInputStream
        Decoder = new Scanner(in, "UTF-8")
        var decodedText = ""
        // "\\A" delimiter slurps the whole stream in one token.
        if (Decoder.hasNext) {
          decodedText = Decoder.useDelimiter("\\\\A").next()
        }
        decodedText
      } finally {
        out.close()
        Decoder.close()
        in.close()
      }
    } catch {
      case e: NullPointerException ⇒ "Annotation failed."
    }
  }

  /**
   * Fetches the German abstract of a DBpedia resource via a SPARQL query and
   * scrapes it out of the HTML result table.
   *
   * NOTE(review): unlike annotate(), the finally block here can throw an NPE
   * when the connection failed before `out` was assigned, and that NPE would
   * escape the method. On other errors the exception *message* is returned in
   * place of the abstract. TODO confirm both behaviours are intended.
   *
   * @param sTopic topic name (unused in the query itself)
   * @param sURL   DBpedia resource URI
   */
  def getAbstract(sTopic: String, sURL: String): String = {
    var out: OutputStreamWriter = null
    var in: InputStream = null
    var Decoder: Scanner = null
    val query = "SELECT ?abstract FROM NAMED <http://dbpedia.org> WHERE " +
      "{{ <" + sURL + "> " +
      "<http://dbpedia.org/ontology/abstract> ?abstract. FILTER (LANG(?abstract)='de')}}"
    try {
      val urlEncoded = java.net.URLEncoder.encode(query, "UTF-8")
      val url = new java.net.URL("http://de.dbpedia.org/sparql?default-graph-uri=http%3A%2F%2Fde.dbpedia.org")
      val data = "query=" + urlEncoded
      val conn = url.openConnection()
      conn.setDoOutput(true)
      out = new java.io.OutputStreamWriter(conn.getOutputStream)
      out.write(data)
      out.flush()
      in = conn.getInputStream
      Decoder = new Scanner(in, "UTF-8")
      val decodedText = Decoder.useDelimiter("\\\\A").next()
      // Scrape the first <td> cell from the HTML result table.
      decodedText.substring(decodedText.indexOf("<td>") + "</td>".length(), decodedText.indexOf("</td>") - 4)
    } catch {
      case e: Exception ⇒ e.getMessage()
    } finally {
      out.close()
      Decoder.close()
      in.close()
    }
  }

  /**
   * Extracts all <Resource> elements from a Spotlight XML fragment as Tags
   * (surface form, resource URI, similarity score).
   */
  def tag(annotatedText: String): Set[Tag] = {
    import scala.xml.XML._
    val x = load(new StringReader(annotatedText))
    (for (resource ← x \\ "Resource") yield Tag(resource \\ "@surfaceForm" text, new URL(resource \\ "@URI" text), (resource \\ "@similarityScore" text).toDouble))toSet
  }

  /**
   * Applies a regex to a string and returns the requested capture group, or
   * the supplied default when the pattern does not match or compilation fails.
   *
   * @param sPattern       regex pattern
   * @param sString        input string
   * @param sDefaultResult value returned on no match / error
   * @param iGroup         capture group index to return (0 = whole match)
   */
  def matchString(sPattern: String, sString: String, sDefaultResult: String, iGroup: Integer): String = {
    try {
      val oPattern = Pattern.compile(sPattern)
      val oMatcher = oPattern.matcher(sString)
      if (oMatcher.find()) {
        oMatcher.group(iGroup)
      } else {
        sDefaultResult
      }
    } catch {
      case e: Exception ⇒
        println(e.getMessage)
        sDefaultResult
    }
  }
}
| fh-koeln/topicvizz | src/main/scala/topicvizz/core/TopicVizz_Parser.scala | Scala | apache-2.0 | 10,942 |
import spark.SparkContext
import spark.SparkContext._
import java.util.regex._
/**
 * Minimal Spark job: extracts `#tag#` hashtags from a weibo timeline dump and
 * writes per-tag occurrence counts to the `weibo-tags` output directory.
 */
object SimpleJob {

  // Compiled once and reused — Pattern instances are immutable and
  // thread-safe. The previous version recompiled the regex for every line
  // processed by the closure returned from tagIn.
  private val tagPattern = Pattern.compile(".*#([^#]+)#.+")

  /** Function extracting the first `#tag#` of a line, or "" when absent. */
  def tagIn = { line: String =>
    val m = tagPattern.matcher(line)
    if (m.find()) m.group(1) else ""
  }

  /** Entry point: tag extraction, filtering and per-tag counting. */
  def main(args: Array[String]) = {
    val logFile = "src/main/resources/public-timeline.txt"
    //val logFile = "src/main/resources/sample-timeline.txt"
    val sc = new SparkContext("local[8]", "Simple Job", "/Users/twer/sdks/spark-0.7.3",
      List("target/scala-2.9.2/spark_sample_2.9.2-0.1.jar"))
    val lines = sc.textFile(logFile).cache
    // Drop lines without a tag, then count occurrences per tag.
    val tags = lines.map(tagIn(_)).filter(!_.isEmpty).map(tag => (tag, 1)).reduceByKey(_ + _)
    tags.saveAsTextFile("weibo-tags")
    System.exit(0)
  }
}
| nicholasren/weibo-data-analyst | src/main/scala/sample.scala | Scala | mit | 772 |
package assigner.search
import assigner.model._
import assigner.search.moves._
import org.coinor.opents._
/** Generator of all possible moves from a given assignment. */
/** Generator of all possible moves from a given assignment. */
class Manager(course: Course) extends MoveManager {

  // Static course data, keyed by id.
  private val groups = course.groupMap
  private val students = course.studentMap

  /**
   * Enumerates every legal neighbourhood move for the given solution:
   * student swaps, single-student switches, refills of empty groups from the
   * waiting queue, and (when enabled on the course) group drops.
   *
   * NOTE(review): the match only covers Assignment — any other Solution
   * implementation would raise a MatchError. TODO confirm that is intended.
   */
  def getAllMoves(solution: Solution) = solution match {
    case assignment: Assignment =>
      // Swap two students between two distinct groups; a mandatory student
      // must never end up in the waiting queue.
      val swaps = for {
        Seq(g1, g2) <- assignment.groupMap.keys.toSeq combinations 2
        s1 <- assignment studentsIn g1
        s2 <- assignment studentsIn g2
        if !(g1.isQueue && students(s2).mandatory)
        if !(g2.isQueue && students(s1).mandatory)
      } yield Swap(s1, s2)

      // Move a single student from g1 to a non-queue g2, provided g1 stays at
      // or above its minimum (or is the queue) and g2 stays within its
      // min/max bounds after the move.
      val switches = for {
        (g1, ss1) <- assignment.groupMap.iterator
        (g2, ss2) <- assignment.groupMap.iterator
        if g1 != g2 && !g2.isQueue
        if g1.isQueue || ss1.size > groups(g1).minSize
        if ss2.size >= groups(g2).minSize
        if ss2.size < groups(g2).maxSize
        s <- ss1
      } yield Switch(s, g2)

      // Refill an empty non-queue group with every possible subset of queued
      // students of exactly the group's minimum size.
      val refills = for {
        (g, ss) <- assignment.groupMap.iterator
        if !g.isQueue && ss.isEmpty
        size = groups(g).minSize
        if size > 0
        queue = assignment.queue
        if size <= queue.size
        selection <- queue subsets size
      } yield FillGroup(g, selection)

      // Drop a non-mandatory group whose members are all non-mandatory —
      // only when the course allows dropping groups at all.
      val drops = if (course.dropGroups) for {
        g <- assignment.trueGroups.keys.iterator
        if !groups(g).mandatory
        if assignment studentsIn g forall { !students(_).mandatory }
      } yield DropGroup(g)
      else Iterator.empty

      (swaps ++ switches ++ refills ++ drops).toArray
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import java.io.File
import com.google.common.io.Files
import org.apache.hadoop.fs.{FileContext, FsConstants, Path}
import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SaveMode}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable, CatalogTableType}
import org.apache.spark.sql.execution.command.LoadDataCommand
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.sql.types.StructType
/**
 * Integration tests for Hive DDL/DML commands executed through the
 * Hive-enabled session: SHOW TABLES / TBLPROPERTIES / PARTITIONS,
 * LOAD DATA [LOCAL], TRUNCATE TABLE, and partition-name case sensitivity.
 *
 * Fixture tables parquet_tab1..5 and the view parquet_view1 are created once
 * in beforeAll and dropped in afterAll.
 */
class HiveCommandSuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
  import testImplicits._

  protected override def beforeAll(): Unit = {
    super.beforeAll()
    // Use catalog to create table instead of SQL string here, because we don't support specifying
    // table properties for data source table with SQL API now.
    hiveContext.sessionState.catalog.createTable(
      CatalogTable(
        identifier = TableIdentifier("parquet_tab1"),
        tableType = CatalogTableType.MANAGED,
        storage = CatalogStorageFormat.empty,
        schema = new StructType().add("c1", "int").add("c2", "string"),
        provider = Some("parquet"),
        properties = Map("my_key1" -> "v1")
      ),
      ignoreIfExists = false
    )

    // Hive-format table with explicit (and one backtick-quoted) properties.
    sql(
      """
        |CREATE TABLE parquet_tab2 (c1 INT, c2 STRING)
        |STORED AS PARQUET
        |TBLPROPERTIES('prop1Key'="prop1Val", '`prop2Key`'="prop2Val")
      """.stripMargin)
    // Unpartitioned table with a column name containing a space.
    sql("CREATE TABLE parquet_tab3(col1 int, `col 2` int) USING hive")
    // Partitioned tables used by the SHOW PARTITIONS tests below.
    sql("CREATE TABLE parquet_tab4 (price int, qty int) partitioned by (year int, month int)")
    sql("INSERT INTO parquet_tab4 PARTITION(year = 2015, month = 1) SELECT 1, 1")
    sql("INSERT INTO parquet_tab4 PARTITION(year = 2015, month = 2) SELECT 2, 2")
    sql("INSERT INTO parquet_tab4 PARTITION(year = 2016, month = 2) SELECT 3, 3")
    sql("INSERT INTO parquet_tab4 PARTITION(year = 2016, month = 3) SELECT 3, 3")
    sql(
      """
        |CREATE TABLE parquet_tab5 (price int, qty int)
        |PARTITIONED BY (year int, month int, hour int, minute int, sec int, extra int)
      """.stripMargin)
    sql(
      """
        |INSERT INTO parquet_tab5
        |PARTITION(year = 2016, month = 3, hour = 10, minute = 10, sec = 10, extra = 1) SELECT 3, 3
      """.stripMargin)
    sql(
      """
        |INSERT INTO parquet_tab5
        |PARTITION(year = 2016, month = 4, hour = 10, minute = 10, sec = 10, extra = 1) SELECT 3, 3
      """.stripMargin)
    sql("CREATE VIEW parquet_view1 as select * from parquet_tab4")
  }

  override protected def afterAll(): Unit = {
    // Drop all fixtures before handing control back to the singleton session.
    try {
      sql("DROP TABLE IF EXISTS parquet_tab1")
      sql("DROP TABLE IF EXISTS parquet_tab2")
      sql("DROP TABLE IF EXISTS parquet_tab3")
      sql("DROP VIEW IF EXISTS parquet_view1")
      sql("DROP TABLE IF EXISTS parquet_tab4")
      sql("DROP TABLE IF EXISTS parquet_tab5")
    } finally {
      super.afterAll()
    }
  }

  test("show tables") {
    withTable("show1a", "show2b") {
      sql("CREATE TABLE show1a(c1 int)")
      sql("CREATE TABLE show2b(c2 int)")
      // Pattern matching with '*' wildcard and '|' alternation.
      checkAnswer(
        sql("SHOW TABLES IN default 'show1*'"),
        Row("default", "show1a", false) :: Nil)
      checkAnswer(
        sql("SHOW TABLES IN default 'show1*|show2*'"),
        Row("default", "show1a", false) ::
          Row("default", "show2b", false) :: Nil)
      checkAnswer(
        sql("SHOW TABLES 'show1*|show2*'"),
        Row("default", "show1a", false) ::
          Row("default", "show2b", false) :: Nil)
      // With no pattern, at least the two freshly created tables show up.
      assert(
        sql("SHOW TABLES").count() >= 2)
      assert(
        sql("SHOW TABLES IN default").count() >= 2)
    }
  }

  test("show tblproperties of data source tables - basic") {
    checkAnswer(
      sql("SHOW TBLPROPERTIES parquet_tab1").filter(s"key = 'my_key1'"),
      Row("my_key1", "v1") :: Nil
    )
    checkAnswer(
      sql(s"SHOW TBLPROPERTIES parquet_tab1('my_key1')"),
      Row("v1") :: Nil
    )
  }

  test("show tblproperties for datasource table - errors") {
    val message = intercept[AnalysisException] {
      sql("SHOW TBLPROPERTIES badtable")
    }.getMessage
    assert(message.contains("Table not found: badtable"))

    // When key is not found, a row containing the error is returned.
    checkAnswer(
      sql("SHOW TBLPROPERTIES parquet_tab1('invalid.prop.key')"),
      Row("Table default.parquet_tab1 does not have property: invalid.prop.key") :: Nil
    )
  }

  test("show tblproperties for hive table") {
    checkAnswer(sql("SHOW TBLPROPERTIES parquet_tab2('prop1Key')"), Row("prop1Val"))
    checkAnswer(sql("SHOW TBLPROPERTIES parquet_tab2('`prop2Key`')"), Row("prop2Val"))
  }

  test("show tblproperties for spark temporary table - AnalysisException is thrown") {
    withTempView("parquet_temp") {
      sql(
        """
          |CREATE TEMPORARY VIEW parquet_temp (c1 INT, c2 STRING)
          |USING org.apache.spark.sql.parquet.DefaultSource
        """.stripMargin)

      val message = intercept[AnalysisException] {
        sql("SHOW TBLPROPERTIES parquet_temp")
      }.getMessage
      assert(message.contains("parquet_temp is a temp view not table"))
    }
  }

  // Run the LOAD DATA scenario twice: once with LOCAL (copy semantics) and
  // once without (move semantics).
  Seq(true, false).foreach { local =>
    val loadQuery = if (local) "LOAD DATA LOCAL" else "LOAD DATA"
    test(loadQuery) {
      testLoadData(loadQuery, local)
    }
  }

  private def testLoadData(loadQuery: String, local: Boolean): Unit = {
    // employee.dat has two columns separated by '|', the first is an int, the second is a string.
    // Its content looks like:
    // 16|john
    // 17|robert
    val testData = hiveContext.getHiveFile("data/files/employee.dat").getCanonicalFile()

    /**
     * Run a function with a copy of the input data file when running with non-local input. The
     * semantics in this mode are that the input file is moved to the destination, so we have
     * to make a copy so that subsequent tests have access to the original file.
     */
    def withInputFile(fn: File => Unit): Unit = {
      if (local) {
        fn(testData)
      } else {
        val tmp = File.createTempFile(testData.getName(), ".tmp")
        Files.copy(testData, tmp)
        try {
          fn(tmp)
        } finally {
          tmp.delete()
        }
      }
    }

    withTable("non_part_table", "part_table") {
      sql(
        """
          |CREATE TABLE non_part_table (employeeID INT, employeeName STRING)
          |ROW FORMAT DELIMITED
          |FIELDS TERMINATED BY '|'
          |LINES TERMINATED BY '\\n'
        """.stripMargin)

      // LOAD DATA INTO non-partitioned table can't specify partition
      intercept[AnalysisException] {
        sql(
          s"""$loadQuery INPATH "${testData.toURI}" INTO TABLE non_part_table PARTITION(ds="1")""")
      }

      withInputFile { path =>
        sql(s"""$loadQuery INPATH "${path.toURI}" INTO TABLE non_part_table""")

        // Non-local mode is expected to move the file, while local mode is expected to copy it.
        // Check once here that the behavior is the expected.
        assert(local === path.exists())
      }

      checkAnswer(
        sql("SELECT * FROM non_part_table WHERE employeeID = 16"),
        Row(16, "john") :: Nil)

      // Incorrect URI.
      // file://path/to/data/files/employee.dat
      //
      // TODO: need a similar test for non-local mode.
      if (local) {
        val incorrectUri = "file://path/to/data/files/employee.dat"
        intercept[AnalysisException] {
          sql(s"""LOAD DATA LOCAL INPATH "$incorrectUri" INTO TABLE non_part_table""")
        }
      }

      // Use URI as inpath:
      // file:/path/to/data/files/employee.dat
      withInputFile { path =>
        sql(s"""$loadQuery INPATH "${path.toURI}" INTO TABLE non_part_table""")
      }

      // A second load appends to the existing data.
      checkAnswer(
        sql("SELECT * FROM non_part_table WHERE employeeID = 16"),
        Row(16, "john") :: Row(16, "john") :: Nil)

      // Overwrite existing data.
      withInputFile { path =>
        sql(s"""$loadQuery INPATH "${path.toURI}" OVERWRITE INTO TABLE non_part_table""")
      }
      checkAnswer(
        sql("SELECT * FROM non_part_table WHERE employeeID = 16"),
        Row(16, "john") :: Nil)

      sql(
        """
          |CREATE TABLE part_table (employeeID INT, employeeName STRING)
          |PARTITIONED BY (c STRING, d STRING)
          |ROW FORMAT DELIMITED
          |FIELDS TERMINATED BY '|'
          |LINES TERMINATED BY '\\n'
        """.stripMargin)

      // LOAD DATA INTO partitioned table must specify partition
      withInputFile { f =>
        val path = f.toURI
        intercept[AnalysisException] {
          sql(s"""$loadQuery INPATH "$path" INTO TABLE part_table""")
        }

        // Incomplete or unknown partition columns must also be rejected.
        intercept[AnalysisException] {
          sql(s"""$loadQuery INPATH "$path" INTO TABLE part_table PARTITION(c="1")""")
        }
        intercept[AnalysisException] {
          sql(s"""$loadQuery INPATH "$path" INTO TABLE part_table PARTITION(d="1")""")
        }
        intercept[AnalysisException] {
          sql(s"""$loadQuery INPATH "$path" INTO TABLE part_table PARTITION(c="1", k="2")""")
        }
      }

      withInputFile { f =>
        sql(s"""$loadQuery INPATH "${f.toURI}" INTO TABLE part_table PARTITION(c="1", d="2")""")
      }
      checkAnswer(
        sql("SELECT employeeID, employeeName FROM part_table WHERE c = '1' AND d = '2'"),
        sql("SELECT * FROM non_part_table").collect())

      // Different order of partition columns.
      withInputFile { f =>
        sql(s"""$loadQuery INPATH "${f.toURI}" INTO TABLE part_table PARTITION(d="1", c="2")""")
      }
      checkAnswer(
        sql("SELECT employeeID, employeeName FROM part_table WHERE c = '2' AND d = '1'"),
        sql("SELECT * FROM non_part_table"))
    }
  }

  test("SPARK-28084 case insensitive names of static partitioning in INSERT commands") {
    withTable("part_table") {
      withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
        sql("CREATE TABLE part_table (price int, qty int) partitioned by (year int, month int)")
        // Mixed-case partition column name ("YEar") must resolve.
        sql("INSERT INTO part_table PARTITION(YEar = 2015, month = 1) SELECT 1, 1")
        checkAnswer(sql("SELECT * FROM part_table"), Row(1, 1, 2015, 1))
      }
    }
  }

  test("SPARK-28084 case insensitive names of dynamic partitioning in INSERT commands") {
    withTable("part_table") {
      withSQLConf(
        SQLConf.CASE_SENSITIVE.key -> "false",
        "hive.exec.dynamic.partition.mode" -> "nonstrict") {
        sql("CREATE TABLE part_table (price int) partitioned by (year int)")
        // Dynamic partition value comes from the SELECT output (2019).
        sql("INSERT INTO part_table PARTITION(YEar) SELECT 1, 2019")
        checkAnswer(sql("SELECT * FROM part_table"), Row(1, 2019))
      }
    }
  }

  test("Truncate Table") {
    withTable("non_part_table", "part_table") {
      sql(
        """
          |CREATE TABLE non_part_table (employeeID INT, employeeName STRING)
          |ROW FORMAT DELIMITED
          |FIELDS TERMINATED BY '|'
          |LINES TERMINATED BY '\\n'
        """.stripMargin)

      val testData = hiveContext.getHiveFile("data/files/employee.dat").toURI

      sql(s"""LOAD DATA LOCAL INPATH "$testData" INTO TABLE non_part_table""")
      checkAnswer(
        sql("SELECT * FROM non_part_table WHERE employeeID = 16"),
        Row(16, "john") :: Nil)

      val testResults = sql("SELECT * FROM non_part_table").collect()

      // Truncating an unpartitioned table removes all rows.
      sql("TRUNCATE TABLE non_part_table")
      checkAnswer(sql("SELECT * FROM non_part_table"), Seq.empty[Row])

      sql(
        """
          |CREATE TABLE part_table (employeeID INT, employeeName STRING)
          |PARTITIONED BY (c STRING, d STRING)
          |ROW FORMAT DELIMITED
          |FIELDS TERMINATED BY '|'
          |LINES TERMINATED BY '\\n'
        """.stripMargin)

      sql(s"""LOAD DATA LOCAL INPATH "$testData" INTO TABLE part_table PARTITION(c="1", d="1")""")
      checkAnswer(
        sql("SELECT employeeID, employeeName FROM part_table WHERE c = '1' AND d = '1'"),
        testResults)

      sql(s"""LOAD DATA LOCAL INPATH "$testData" INTO TABLE part_table PARTITION(c="1", d="2")""")
      checkAnswer(
        sql("SELECT employeeID, employeeName FROM part_table WHERE c = '1' AND d = '2'"),
        testResults)

      sql(s"""LOAD DATA LOCAL INPATH "$testData" INTO TABLE part_table PARTITION(c="2", d="2")""")
      checkAnswer(
        sql("SELECT employeeID, employeeName FROM part_table WHERE c = '2' AND d = '2'"),
        testResults)

      // Truncating one fully-specified partition leaves the others intact.
      sql("TRUNCATE TABLE part_table PARTITION(c='1', d='1')")
      checkAnswer(
        sql("SELECT employeeID, employeeName FROM part_table WHERE c = '1' AND d = '1'"),
        Seq.empty[Row])
      checkAnswer(
        sql("SELECT employeeID, employeeName FROM part_table WHERE c = '1' AND d = '2'"),
        testResults)

      // A partial partition spec truncates every matching partition.
      sql("TRUNCATE TABLE part_table PARTITION(c='1')")
      checkAnswer(
        sql("SELECT employeeID, employeeName FROM part_table WHERE c = '1'"),
        Seq.empty[Row])

      sql("TRUNCATE TABLE part_table")
      checkAnswer(
        sql("SELECT employeeID, employeeName FROM part_table"),
        Seq.empty[Row])
    }
  }

  test("show partitions - show everything") {
    checkAnswer(
      sql("show partitions parquet_tab4"),
      Row("year=2015/month=1") ::
        Row("year=2015/month=2") ::
        Row("year=2016/month=2") ::
        Row("year=2016/month=3") :: Nil)

    checkAnswer(
      sql("show partitions default.parquet_tab4"),
      Row("year=2015/month=1") ::
        Row("year=2015/month=2") ::
        Row("year=2016/month=2") ::
        Row("year=2016/month=3") :: Nil)
  }

  test("show partitions - show everything more than 5 part keys") {
    checkAnswer(
      sql("show partitions parquet_tab5"),
      Row("year=2016/month=3/hour=10/minute=10/sec=10/extra=1") ::
        Row("year=2016/month=4/hour=10/minute=10/sec=10/extra=1") :: Nil)
  }

  test("show partitions - filter") {
    checkAnswer(
      sql("show partitions default.parquet_tab4 PARTITION(year=2015)"),
      Row("year=2015/month=1") ::
        Row("year=2015/month=2") :: Nil)

    checkAnswer(
      sql("show partitions default.parquet_tab4 PARTITION(year=2015, month=1)"),
      Row("year=2015/month=1") :: Nil)

    checkAnswer(
      sql("show partitions default.parquet_tab4 PARTITION(month=2)"),
      Row("year=2015/month=2") ::
        Row("year=2016/month=2") :: Nil)
  }

  test("show partitions - empty row") {
    withTempView("parquet_temp") {
      sql(
        """
          |CREATE TEMPORARY VIEW parquet_temp (c1 INT, c2 STRING)
          |USING org.apache.spark.sql.parquet.DefaultSource
        """.stripMargin)
      // An empty sequence of row is returned for session temporary table.
      intercept[NoSuchTableException] {
        sql("SHOW PARTITIONS parquet_temp")
      }

      val message1 = intercept[AnalysisException] {
        sql("SHOW PARTITIONS parquet_tab3")
      }.getMessage
      assert(message1.contains("not allowed on a table that is not partitioned"))

      val message2 = intercept[AnalysisException] {
        sql("SHOW PARTITIONS parquet_tab4 PARTITION(abcd=2015, xyz=1)")
      }.getMessage
      assert(message2.contains("Non-partitioning column(s) [abcd, xyz] are specified"))

      val message3 = intercept[AnalysisException] {
        sql("SHOW PARTITIONS parquet_view1")
      }.getMessage
      assert(message3.contains("is not allowed on a view"))
    }
  }

  test("show partitions - datasource") {
    withTable("part_datasrc") {
      val df = (1 to 3).map(i => (i, s"val_$i", i * 2)).toDF("a", "b", "c")
      df.write
        .partitionBy("a")
        .format("parquet")
        .mode(SaveMode.Overwrite)
        .saveAsTable("part_datasrc")

      assert(sql("SHOW PARTITIONS part_datasrc").count() == 3)
    }
  }

  test("SPARK-25918: LOAD DATA LOCAL INPATH should handle a relative path") {
    val localFS = FileContext.getLocalFSFileContext()
    val workingDir = localFS.getWorkingDirectory
    // A relative path must be qualified against the local FS working dir.
    val r = LoadDataCommand.makeQualified(
      FsConstants.LOCAL_FS_URI, workingDir, new Path("kv1.txt"))
    assert(r === new Path(s"$workingDir/kv1.txt"))
  }
}
| ptkool/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala | Scala | apache-2.0 | 17,080 |
package com.jejking.rprng.png
import akka.http.scaladsl.coding.DeflateCompressor
import akka.util.ByteString
import java.nio.charset.Charset
/**
* Functions for doing some basic work encoding random streams
* into PNGs.
*/
object Png {

  // Intended target size for IDAT payloads (32 KiB).
  // NOTE(review): not referenced inside this object — presumably consulted by
  // the streaming stage that sizes the input handed to idat(). TODO confirm.
  private val TARGET_IDAT_CHUNK_SIZE = 1024 * 32

  private val US_ASCII = Charset.forName("US-ASCII")

  // Fixed 8-byte PNG file signature.
  val PNG_SIGNATURE = ByteString(137, 80, 78, 71, 13, 10, 26, 10)

  // Four-byte ASCII chunk-type codes.
  val IHDR_CHUNK_TYPE = ByteString("IHDR", US_ASCII)
  val IDAT_CHUNK_TYPE = ByteString("IDAT", US_ASCII)
  val IEND_CHUNK_TYPE = ByteString("IEND", US_ASCII)

  /**
   * Builds a complete IHDR chunk (length + type + data + CRC) describing an
   * 8-bit-per-channel truecolour-with-alpha, deflate-compressed,
   * non-interlaced image.
   *
   * @param width  image width in pixels, must be positive
   * @param height image height in pixels, must be positive
   */
  def ihdr(width: Int, height:Int): ByteString = {
    require(width > 0)
    require(height > 0)

    // IHDR chunk data is always 13 bytes: width(4) + height(4) + 5 flag bytes.
    val IHDR_LENGTH = toUnsignedFourByteInt(13)

    // constant for our case, not in general
    val BIT_DEPTH = 8
    val COLOUR_TYPE = 6 // Truecolour with Alpha
    val COMPRESSION_METHOD = 0 // deflate
    val FILTER_METHOD = 0 // adaptive filtering as per PNG spec
    val INTERLACE_METHOD = 0 // none

    val constantPart = ByteString(BIT_DEPTH.toByte,
                                  COLOUR_TYPE.toByte,
                                  COMPRESSION_METHOD.toByte,
                                  FILTER_METHOD.toByte,
                                  INTERLACE_METHOD.toByte)

    val chunkData = toUnsignedFourByteInt(width) ++ toUnsignedFourByteInt(height) ++ constantPart
    // The CRC covers the chunk type and the chunk data, not the length field.
    val partToCrc = IHDR_CHUNK_TYPE ++ chunkData
    val crc = crc32(partToCrc)
    IHDR_LENGTH ++ partToCrc ++ crc
  }

  /**
   * Returns a function that turns one row of raw pixel bytes into a PNG
   * scanline by prepending the filter-type byte 0 ("None" filter). The input
   * must be exactly expectedWidth * bytesPerPixel bytes long.
   */
  def scanline(bytesPerPixel: Int, expectedWidth: Int): ByteString => ByteString = bs => {
    require(bs.length == expectedWidth * bytesPerPixel)
    ByteString(0.toByte) ++ bs
  }

  /**
   * Compresses the given bytes and wraps them in a complete IDAT chunk
   * (length + type + compressed data + CRC). When shouldFinish is true the
   * deflate stream is finished, otherwise it is flushed so further IDAT
   * chunks can follow.
   *
   * NOTE(review): the default argument creates a fresh DeflateHelper per
   * call; callers emitting a sequence of IDAT chunks presumably must pass
   * one shared helper to keep a single deflate stream. TODO confirm.
   */
  def idat(deflateHelper: DeflateHelper = new DeflateHelper())(bytes: ByteString, shouldFinish: Boolean) = {

    def doCompression(): ByteString = {
      if (shouldFinish) {
        deflateHelper.compressAndFinish(bytes)
      } else {
        deflateHelper.compressAndFlush(bytes)
      }
    }

    val compressedBytes = doCompression()
    val toCheckSum = IDAT_CHUNK_TYPE ++ compressedBytes
    val crc = crc32(toCheckSum)
    // The declared length is that of the compressed payload only.
    val length = toUnsignedFourByteInt(compressedBytes.length)
    length ++ toCheckSum ++ crc
  }

  /** Builds the fixed, zero-length IEND chunk that terminates a PNG stream. */
  def iend() = {
    toUnsignedFourByteInt(0) ++ IEND_CHUNK_TYPE ++ crc32(IEND_CHUNK_TYPE)
  }

  /** Encodes an Int as four bytes in big-endian (network) order. */
  def toUnsignedFourByteInt(value: Int): ByteString = {
    ByteString( (value >>> 24).toByte,
                (value >>> 16).toByte,
                (value >>> 8).toByte,
                (value >>> 0).toByte)
  }

  /**
   * CRC-32 as used for PNG chunks: init 0xFFFFFFFF, reflected polynomial
   * 0xEDB88320 (the table step is computed inline, 8 shifts per byte), and a
   * final bitwise complement. Result is returned big-endian as 4 bytes.
   */
  def crc32(byteString: ByteString): ByteString = {
    val initialCrc: Int = 0xFFFFFFFF
    val reversePolynomial = 0xEDB88320

    val crc = ~byteString.foldLeft(initialCrc)((crcIn: Int, b: Byte) => {
      var crcTmp = (crcIn ^ b) & 0xff
      (0 to 7).foreach(_ => {
        if ((crcTmp & 1) == 1) {
          crcTmp = (crcTmp >>> 1) ^ reversePolynomial
        } else {
          crcTmp = crcTmp >>> 1
        }
      })
      (crcIn >>> 8) ^ crcTmp
    })

    toUnsignedFourByteInt(crc)
  }
}
| jejking/rprng | src/main/scala/com/jejking/rprng/png/Png.scala | Scala | apache-2.0 | 3,017 |
import java.nio.file.Path
import io.gatling.core.util.PathHelper._
// Resolves the project's Maven directory layout for Gatling runs launched from
// an IDE. Relies on io.gatling.core.util.PathHelper's implicit URI-to-Path
// conversion and its `/` path-joining operator.
object IDEPathHelper {
// NOTE(review): getResource returns null if gatling.conf is missing from the
// classpath, which would make this throw an NPE — confirm the resource is bundled.
val gatlingConfUrl: Path = getClass.getClassLoader.getResource("gatling.conf").toURI
// Project root is three levels above the compiled gatling.conf resource.
val projectRootDir = gatlingConfUrl.ancestor(3)
val mavenSourcesDirectory = projectRootDir / "src" / "test" / "scala"
val mavenResourcesDirectory = projectRootDir / "src" / "test" / "resources"
val mavenTargetDirectory = projectRootDir / "target"
val mavenBinariesDirectory = mavenTargetDirectory / "test-classes"
val dataDirectory = mavenResourcesDirectory / "data"
val bodiesDirectory = mavenResourcesDirectory / "bodies"
// The recorder writes generated simulations directly into the test sources.
val recorderOutputDirectory = mavenSourcesDirectory
val resultsDirectory = mavenTargetDirectory / "gatling"
val recorderConfigFile = mavenResourcesDirectory / "recorder.conf"
}
//source: http://svn.code.sf.net/p/freemind2wiki/code/trunk/freemind2wiki/src/main/java/com/tngtech/freemind2wiki/FM2ConfluenceConverter.java
package org.raisercostin.freemind2wiki
import java.io.BufferedWriter
import java.io.Reader
import java.io.Writer
import java.util.ArrayList
import java.util.HashMap
import java.util.List
import java.util.Map
import org.dom4j.Document
import org.dom4j.Element
import org.dom4j.io.SAXReader
import FM2ConfluenceConverter._
import scala.reflect.{ BeanProperty, BooleanBeanProperty }
import scala.collection.JavaConversions._
import scala.reflect.{ BeanProperty, BooleanBeanProperty }
import java.io.Reader
import java.io.Writer
import java.io.FileReader
import java.io.FileWriter
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import java.io.BufferedReader
import java.io.Reader
import java.io.Writer
import java.util.ArrayList
import java.util.HashMap
import java.util.LinkedList
import java.util.List
import java.util.Map
import java.util.regex.Pattern
import org.dom4j.Document
import org.dom4j.DocumentHelper
import org.dom4j.Element
import org.dom4j.io.OutputFormat
import org.dom4j.io.XMLWriter
import Confluence2FMConverter._
import scala.reflect.{ BeanProperty, BooleanBeanProperty }
import scala.collection.JavaConversions._
import org.slf4j.LoggerFactory
import java.io.FileInputStream
import java.io.FileOutputStream
import java.io.InputStreamReader
import java.io.OutputStreamWriter
import java.io.Reader
import java.io.Writer
import java.nio.charset.Charset
import java.util.Arrays
import java.util.Properties
import joptsimple.OptionParser
import org.scalatest.junit.JUnitRunner
/**
 * Enumeration of FreeMind's built-in node icons. Each value carries a
 * human-readable description, the FreeMind BUILTIN attribute name, and the
 * Confluence markup for it when a natural equivalent exists (null otherwise;
 * converters then fall back to a "::Description::" marker).
 */
object NodeIcon extends Enumeration {
val ATTACH = new NodeIcon("Attach", "attach", null)
val BACK = new NodeIcon("Back", "back", null)
val BELL = new NodeIcon("Bell", "bell", null)
val BOOKMARK = new NodeIcon("Bookmark", "bookmark", "(*)")
val BROKENLINE = new NodeIcon("Broken Line", "broken-line", null)
val NO = new NodeIcon("No", "button_cancel", "(x)")
val YES = new NodeIcon("Yes", "button_ok", "(/)")
val CALENDAR = new NodeIcon("Calendar", "calendar", null)
val BOMB = new NodeIcon("Bomb", "clanbomber", null)
val CLOCK = new NodeIcon("Clock", "clock", null)
val CLOSED = new NodeIcon("Closed", "closed", null)
val DECRYPTED = new NodeIcon("Decrypted", "decrypted", null)
val DESKTOP = new NodeIcon("Desktop", "desktop_new", null)
val DOWN = new NodeIcon("Down", "down", null)
val EDIT = new NodeIcon("Edit", "edit", null)
val ENCRYPTED = new NodeIcon("Encrypted", "encrypted", null)
val FAMILY = new NodeIcon("Family", "family", null)
val FEMA = new NodeIcon("FemaleMale", "fema", null)
val FEMALE1 = new NodeIcon("Female1", "female1", null)
val FEMALE2 = new NodeIcon("Female2", "female2", null)
val FLAG = new NodeIcon("Flag", "flag", null)
val FLAGBLACK = new NodeIcon("Flag-black", "flag-black", null)
val FLAGBLUE = new NodeIcon("Flag-blue", "flag-blue", null)
val FLAGGREEN = new NodeIcon("Flag-green", "flag-green", null)
val FLAGORANGE = new NodeIcon("Flag-orange", "flag-orange", null)
val FLAGPINK = new NodeIcon("Flag-pink", "flag-pink", null)
val FLAGYELLOW = new NodeIcon("Flag-yellow", "flag-yellow", null)
val FOLDER = new NodeIcon("Folder", "folder", null)
val FORWARD = new NodeIcon("Forward", "forward", null)
val BUTTERFLY = new NodeIcon("Butterfly", "freemind_butterfly", null)
val PRIORITY_0 = new NodeIcon("Prio 1", "full-0", null)
val PRIORITY_1 = new NodeIcon("Prio 1", "full-1", null)
val PRIORITY_2 = new NodeIcon("Prio 2", "full-2", null)
val PRIORITY_3 = new NodeIcon("Prio 3", "full-3", null)
val PRIORITY_4 = new NodeIcon("Prio 4", "full-4", null)
val PRIORITY_5 = new NodeIcon("Prio 5", "full-5", null)
val PRIORITY_6 = new NodeIcon("Prio 6", "full-6", null)
val PRIORITY_7 = new NodeIcon("Prio 7", "full-7", null)
val PRIORITY_8 = new NodeIcon("Prio 8", "full-8", null)
val PRIORITY_9 = new NodeIcon("Prio 9", "full-9", null)
val GO = new NodeIcon("Go", "go", null)
val HOME = new NodeIcon("Home", "gohome", null)
val GROUP = new NodeIcon("Group", "group", null)
val QUESTION = new NodeIcon("Question", "help", "(?)")
val HOURGLASS = new NodeIcon("Wait", "hourglass", null)
val IDEA = new NodeIcon("Idea", "idea", "(on)")
val INFO = new NodeIcon("Info", "info", "(i)")
val TELEPHONE = new NodeIcon("Telephone", "kaddressbook", null)
val MAILPROGRAM = new NodeIcon("Mailprogram", "kmail", null)
val SOUND = new NodeIcon("Sound", "knotify", null)
val MAILBOX = new NodeIcon("Mailbox", "korn", null)
val GOOD = new NodeIcon("Good", "ksmiletris", ":)")
val LAUNCH = new NodeIcon("Launch", "launch", null)
val ICQ = new NodeIcon("ICQ", "licq", null)
val LIST = new NodeIcon("List", "list", null)
val MAIL = new NodeIcon("Mail", "Mail", null)
val MALE1 = new NodeIcon("Male1", "male1", null)
val MALE2 = new NodeIcon("Male2", "male2", null)
val WARNING = new NodeIcon("Warning", "messagebox_warning", "(!)")
val PASSWORD = new NodeIcon("Password", "password", null)
val PENCIL = new NodeIcon("Pencil", "pencil", null)
val PENGUIN = new NodeIcon("Penguin", "penguin", null)
val PREPARE = new NodeIcon("Prepare", "prepare", null)
val REDO = new NodeIcon("Redo", "redo", null)
val SMILEYANGRY = new NodeIcon("SmileyAngry", "smiley_angry", null)
val SMILEYNEUTRAL = new NodeIcon("SmileyNeutral", "smiley_neutral", null)
val SMILEYOH = new NodeIcon("SmileyOh", "smiley_oh", null)
val SMILEYBAD = new NodeIcon("Bad", "smily_bad", ":(")
val STOP = new NodeIcon("Stop", "stop", null)
val STOPSIGN = new NodeIcon("StopSign", "stop-sign", null)
val UP = new NodeIcon("Up", "up", null)
val WIZARD = new NodeIcon("Wizard", "wizard", null)
val XMAG = new NodeIcon("Xmag", "xmag", null)
val EXCLAMATION = new NodeIcon("Exclamation", "yes", null)
// Rich Enumeration value carrying (description, FreeMind builtin name, Confluence markup or null).
class NodeIcon(@BeanProperty var desc: String, @BeanProperty var fmBuiltin: String, @BeanProperty var confluenceMarkup: String) extends Val
// Lets Value-typed results (e.g. from NodeIcon.values) be used directly as NodeIcon.
implicit def convertValue(v: Value): NodeIcon = v.asInstanceOf[NodeIcon]
}
/**
 * Companion of [[FM2ConfluenceConverter]]: holds the shared logger and the
 * FreeMind-builtin-icon -> Confluence-markup lookup table used during conversion.
 */
object FM2ConfluenceConverter {
// val, not var: the map reference is never reassigned, only populated below
// (java.util.HashMap, so mutation via put is still possible).
private val iconMap: Map[String, String] = new HashMap[String, String]()
val log = LoggerFactory.getLogger(classOf[FM2ConfluenceConverter])
// Icons without a native Confluence markup fall back to a "::Description::" marker.
for (i <- NodeIcon.values) {
val markup = i.getConfluenceMarkup
iconMap.put(i.getFmBuiltin, if (markup == null) "::" + i.getDesc + "::" else markup)
}
}
/** A one-shot character-stream converter between mind-map and wiki formats. */
trait Converter {
// Reads all of `source` and writes the converted result to `target`.
// NOTE(review): implementations in this file close `target`; handling of
// `source` is inconsistent — callers should close it themselves.
def convert(source: Reader, target: Writer): Unit
}
/**
 * Converts a FreeMind mind map (XML read from `source`) into Confluence wiki
 * markup written to `target`. Nodes shallower than `headerNestingDepth` become
 * headers (h1..hN); deeper nodes become (optionally ordered) list items.
 */
class FM2ConfluenceConverter extends Converter {
import FM2ConfluenceConverter._
// Emit "#" (ordered) instead of "*" (unordered) list-item markers when true.
@BooleanBeanProperty
var createOrderedLists: Boolean = false
// Tree depth down to which nodes are rendered as hN. headers; deeper nodes become list items.
@BeanProperty
var headerNestingDepth: Int = 4
// Feature toggles: when true, the corresponding FreeMind attribute is dropped from the output.
@BooleanBeanProperty
var ignoreFontStyle: Boolean = false
@BooleanBeanProperty
var ignoreColors: Boolean = false
@BooleanBeanProperty
var ignoreIcons: Boolean = false
@BooleanBeanProperty
var ignoreLinks: Boolean = false
/**
 * Parses the mind-map XML and writes the Confluence markup, wrapped in the
 * {toc} and {attachments} macros. Closes `target` (via the BufferedWriter)
 * in all cases; `source` is left open for the caller.
 */
override def convert(source: Reader, target: Writer) {
log.info("convert() - Using FM2ConfluenceConverter")
var doc: Document = null
val reader = new SAXReader()
doc = reader.read(source)
log.info("convert() - Loaded document " + doc)
val root = doc.getRootElement
// Top-level mind-map nodes; children are recursed into by process().
val nodes = root.selectNodes("node")
var writer: BufferedWriter = null
try {
writer = new BufferedWriter(target)
writer.append("{toc}\\n\\n")
process(nodes, 0, writer)
writer.append("\\n\\n{attachments}")
writer.append("\\n")
} finally {
if (writer != null) {
writer.close()
}
}
log.info("convert() - Finished conversion.")
}
// Reads an XML attribute trying the given spelling, then lower case, then upper
// case; returns "" (never null) when absent so callers can test with == "".
private def getAttributeValueIgnoreCase(e: Element, attribute: String): String = {
var v = e.attributeValue(attribute)
if (v == null || "" == v) {
v = e.attributeValue(attribute.toLowerCase())
if (v == null || "" == v) {
v = e.attributeValue(attribute.toUpperCase())
}
}
if (v == null) "" else v
}
// Recursively renders `nodes` at tree depth `depth` (0-based) into Confluence
// markup: header or list-item prefix, then colour/link/bold/italic/icon
// decorations around the node text, then the node's children one level deeper.
private def process(nodes: List[_], depth: Int, target: Writer) {
for (n <- nodes) {
if (n.isInstanceOf[Element]) {
val e = n.asInstanceOf[Element]
// Blank line before each header (not before list items).
if (depth < getHeaderNestingDepth) {
target.append("\\n")
}
if (depth < getHeaderNestingDepth) {
// Header: h<depth+1>.
target.append("h").append("" + (depth + 1)).append(". ")
} else {
// List item: one marker character per level below the header range.
var i = 0
while (i <= depth - getHeaderNestingDepth) {
target.append(if (createOrderedLists) "#" else "*")
i += 1
}
target.append(' ')
}
val color = getAttributeValueIgnoreCase(e, "COLOR")
val link = getAttributeValueIgnoreCase(e, "LINK")
// Opening decorations; the matching closers are appended after the text below.
if (!(ignoreColors || color == null || "" == color)) {
target.append("{color:").append(color).append("}")
}
if (!(ignoreLinks || link == null || "" == link)) {
target.append("[")
}
var bold = false
var italic = false
if (!ignoreFontStyle) {
// A node may carry <font BOLD="true" ITALIC="true"/> children.
for (m <- e.selectNodes("font")) {
if (m.isInstanceOf[Element]) {
val fontNode = m.asInstanceOf[Element]
val b = getAttributeValueIgnoreCase(fontNode, "BOLD")
if (b != null) {
bold = "true".equalsIgnoreCase(b)
}
val i = getAttributeValueIgnoreCase(fontNode, "ITALIC")
if (i != null) {
italic = "true".equalsIgnoreCase(i)
}
} else {
log.warn("Expected node to be a font element.")
}
}
}
// Collect <icon BUILTIN="..."/> children; rendered before the node text.
val icons = new ArrayList[String]()
if (!ignoreIcons) {
for (m <- e.selectNodes("icon")) {
if (m.isInstanceOf[Element]) {
val iconNode = m.asInstanceOf[Element]
val builtinIcon = getAttributeValueIgnoreCase(iconNode, "BUILTIN")
if (builtinIcon != null) {
icons.add(builtinIcon)
} else {
log.warn("process() - Icon has no BUILTIN attribute")
}
} else {
log.warn("Expected node to be an icon element.")
}
}
}
if (bold) {
target.append("*")
}
if (italic) {
target.append("_")
}
// Known icons map to Confluence markup; unknown ones keep a ::name:: marker.
for (icon <- icons) {
if (iconMap.containsKey(icon)) {
target.append(iconMap.get(icon)).append(" ")
} else {
target.append("::").append(icon).append(":: ")
}
}
val text = getAttributeValueIgnoreCase(e, "TEXT")
if (text != null) {
target.append(text.trim())
} else {
log.warn("process() - A node has no text attribute.")
}
// Close decorations in reverse order of opening.
if (italic) {
target.append("_")
}
if (bold) {
target.append("*")
}
if (!(ignoreLinks || link == null || "" == link)) {
target.append("|").append(link).append("]")
}
if (!(ignoreColors || color == null || "" == color)) {
target.append("{color}")
}
target.append("\\n")
// Recurse into this node's children.
process(e.selectNodes("node"), depth + 1, target)
} else {
log.warn("process() - Expected node to be an element.")
}
}
}
}
/**
 * Properties extractable from one line of Confluence markup while building a
 * FreeMind node: styling flags, colour, link, icon list, and the remaining text.
 */
object Property extends Enumeration {
val bold = new Property()
val italic = new Property()
val color = new Property()
val link = new Property()
val icon = new Property()
val text = new Property()
class Property extends Val
// Lets Value-typed results be used directly as Property.
implicit def convertValue(v: Value): Property = v.asInstanceOf[Property]
}
/**
 * Companion of [[Confluence2FMConverter]]: logger, the markup-recognition
 * patterns, and the reverse lookup table from Confluence icon markup / marker
 * text back to FreeMind BUILTIN icon names.
 */
object Confluence2FMConverter {
private val log = LoggerFactory.getLogger(classOf[Confluence2FMConverter])
// Recognises leading list-item markers ("*..." / "#...") at the start of a line.
private val LIST_ITEM_INDICATOR = Pattern.compile("(\\\\** )|(#* )")
// Recognises Confluence macro invocations, which are stripped from the input.
private val MACRO_PATTERN = Pattern.compile("\\\\{\\\\{.*\\\\}\\\\}")
// Recognises header prefixes h0. .. h6. at the start of a line.
private val HEADER_INDICATOR = Pattern.compile("h[0123456]\\\\.")
// val, not var: the map reference is never reassigned, only populated below
// (java.util.HashMap, so mutation via put is still possible).
private val iconMap: Map[String, String] = new HashMap[String, String]()
// Each icon is reachable via its Confluence markup (when any), its
// ::Description:: marker, and its ::builtin:: marker.
for (i <- NodeIcon.values) {
val markup = i.getConfluenceMarkup
if (markup != null) {
iconMap.put(markup, i.getFmBuiltin)
}
iconMap.put("::" + i.getDesc + "::", i.getFmBuiltin)
iconMap.put("::" + i.getFmBuiltin + "::", i.getFmBuiltin)
}
}
/**
 * Converts Confluence wiki markup (read line by line from `source`) into a
 * FreeMind mind-map XML document written to `target`. Headers h1..h6 and list
 * items become nested nodes; `determineProperties` recovers styling, links and
 * icons from the per-line markup.
 *
 * NOTE(review): this class appears to be machine-translated from Java; several
 * `//continue` / `//break` comments below mark spots where the original Java
 * control flow was NOT carried over, so execution falls through where the Java
 * code would have skipped — see the inline notes.
 */
class Confluence2FMConverter extends Converter {
import Confluence2FMConverter._
import Property._
// When true, the output XML is pretty-printed instead of compact.
@BooleanBeanProperty
var prettyPrint: Boolean = false
/**
 * Reads all lines of `source`, builds the <map>/<node> tree on a stack of
 * currently-open ancestors, then serialises the document to `target`.
 * Closes the BufferedReader wrapping `source`; `target` is closed by the XMLWriter.
 */
override def convert(source: Reader, target: Writer) {
log.info("convert() - Using Confluence2FMConverter")
val document = DocumentHelper.createDocument()
val map = document.addElement("map")
// stack.peek() is the parent for the next node; size == current tree depth.
val stack = new LinkedList[Element]()
var headerNestingDepth = 0
var nodeInserted = false
var br: BufferedReader = null
try {
br = new BufferedReader(source)
var line: String = null
var lineCount = 0
lineCount = 0
while ({ line = br.readLine(); line != null }) {
log.debug("Converting: '" + line + "'")
line = line.trim()
line = removeIgnoredConfluenceMarkup(line)
if ("" != line) {
if (stack.isEmpty) {
// Nothing emitted until the first h1 line, which becomes the root node.
if (line.startsWith("h1.")) {
val root = map.addElement("node").addAttribute("TEXT", line.substring(3).trim())
stack.push(root)
nodeInserted = true
} else {
//continue
}
} else {
// Depth (stack size) the current line's node should live at; -1 = skip.
var depthToReach = -1
if (line.startsWith("h1.")) {
log.warn("convert() - Multiple root-level headers 'h1.' detected. All h1-headers except the first one are ignored. ")
}
val oldHeaderNestingDepth = headerNestingDepth
if (line.startsWith("h2.")) {
depthToReach = 1
headerNestingDepth = depthToReach
} else if (line.startsWith("h3.")) {
depthToReach = 2
headerNestingDepth = depthToReach
} else if (line.startsWith("h4.")) {
depthToReach = 3
headerNestingDepth = depthToReach
} else if (line.startsWith("h5.")) {
depthToReach = 4
headerNestingDepth = depthToReach
} else if (line.startsWith("h6.")) {
depthToReach = 5
headerNestingDepth = depthToReach
} else if (lookingAtListItem(line)) {
// List-item depth = number of leading marker chars + current header depth.
for (i <- 1 until line.length) {
val c = line.charAt(i)
if (c != '*' && c != '#') {
depthToReach = i + headerNestingDepth
// NOTE(review): untranslated Java `break` — the loop keeps running
// and depthToReach is recomputed at every later non-marker char,
// ending up based on the LAST one instead of the first. Likely bug.
//break
}
}
}
// Clamp header jumps of more than one level (e.g. h2 directly to h5).
if (headerNestingDepth - oldHeaderNestingDepth > 2) {
depthToReach = oldHeaderNestingDepth + 1
headerNestingDepth = depthToReach
}
if (depthToReach < 0) {
log.debug("Line skipped")
// NOTE(review): untranslated Java `continue` — execution falls through
// and the "skipped" line is still popped against the stack and inserted
// as a node below. Likely bug.
//continue
}
// Pop ancestors until the stack is at the parent depth for this node.
var i = stack.size
while (i > depthToReach) {
stack.pop()
i -= 1
}
line = removeConfluenceNestingMarkup(line)
line = line.trim()
val props = new HashMap[Property, Any]()
determineProperties(line, props)
val node = stack.peek().addElement("node").addAttribute("TEXT", props.get(Property.text).asInstanceOf[String])
node.addAttribute("POSITION", "right")
if (props.containsKey(Property.link)) {
node.addAttribute("LINK", props.get(Property.link).asInstanceOf[String])
}
if (props.containsKey(Property.color)) {
node.addAttribute("COLOR", props.get(Property.color).asInstanceOf[String])
}
if (props.containsKey(Property.icon)) {
for (icon <- props.get(Property.icon).asInstanceOf[List[_]]) {
node.addElement("icon").addAttribute("BUILTIN", icon.asInstanceOf[String])
}
}
// `true == props.get(...)` tolerates a null (absent) map value.
if (true == props.get(Property.bold) || true == props.get(Property.italic)) {
val fontNode = node.addElement("font").addAttribute("SIZE", "12")
if (true == props.get(Property.bold)) {
fontNode.addAttribute("BOLD", String.valueOf(props.get(Property.bold)))
}
if (true == props.get(Property.italic)) {
fontNode.addAttribute("ITALIC", String.valueOf(props.get(Property.italic)))
}
}
stack.push(node)
}
}
lineCount += 1
}
} finally {
if (br != null) {
br.close()
}
}
if (!nodeInserted) {
log.warn("convert() - The confluence markup is either empty or not well-formed and the resulting mind map will be empty.")
}
val format = if (prettyPrint) OutputFormat.createPrettyPrint() else OutputFormat.createCompactFormat()
val writer = new XMLWriter(target, format)
writer.write(document)
writer.println()
writer.close()
log.info("convert() - Finished conversion.")
}
// True if the line begins with a Confluence list-item marker.
private def lookingAtListItem(line: String): Boolean = {
LIST_ITEM_INDICATOR.matcher(line).lookingAt()
}
// Strips markup with no mind-map equivalent: horizontal rules, dashes, macros.
private def removeIgnoredConfluenceMarkup(lineP: String): String = {
var line = lineP
line = line.replaceAll("----", "")
line = line.replaceAll("---", "")
line = line.replaceAll("--", "")
line = MACRO_PATTERN.matcher(line).replaceAll("")
line
}
// Strips the leading list-item or header marker that encoded nesting depth.
private def removeConfluenceNestingMarkup(lineP: String): String = {
var line = lineP
line = LIST_ITEM_INDICATOR.matcher(line).replaceFirst("")
line = HEADER_INDICATOR.matcher(line).replaceFirst("")
line
}
// Recursively peels styling / colour / link / icon markup off `line`, recording
// each finding in `props`; the stripped remainder ends up under Property.text.
private def determineProperties(lineP: String, props: Map[Property, Any]) {
var line = lineP
line = line.trim()
if (line.startsWith("_") && line.endsWith("_") && line.length > 1) {
props.put(Property.italic, true)
line = line.substring(1, line.length - 1)
determineProperties(line, props)
} else if (line.startsWith("{_}") && line.endsWith("{_}") && line.length > 1) {
props.put(Property.italic, true)
line = line.substring(3, line.length - 3)
determineProperties(line, props)
} else if (line.startsWith("*") && line.endsWith("*") && line.length > 1) {
props.put(Property.bold, true)
line = line.substring(1, line.length - 1)
determineProperties(line, props)
} else if (line.startsWith("{*}") && line.endsWith("{*}") && line.length > 1) {
props.put(Property.bold, true)
line = line.substring(3, line.length - 3)
determineProperties(line, props)
} else if (line.startsWith("{color:") && line.endsWith("{color}")) {
val colonOffset = line.indexOf(":")
if (colonOffset == -1) {
log.warn("convert() - Strange color tag in line '" + line + "'")
line = line.substring(7, line.length - 7)
determineProperties(line, props)
} else {
val closeBracketOffset = line.indexOf('}')
props.put(Property.color, line.substring(7, closeBracketOffset))
line = line.substring(closeBracketOffset + 1, line.length - 7)
determineProperties(line, props)
}
} else if (line.startsWith("[") && line.endsWith("]")) {
// [text|link] or [link] — link alone doubles as the text.
val pipeOffset = line.indexOf('|')
val link = (if (pipeOffset == -1) line.substring(1, line.length - 1) else line.substring(pipeOffset + 1, line.length - 1))
val text = (if (pipeOffset == -1) link else line.substring(1, pipeOffset))
props.put(Property.link, link)
props.put(Property.text, text.trim())
determineProperties(text, props)
} else if (line.startsWith("::")) {
// ::icon:: marker; maps to a FreeMind builtin when known.
val endDoubleColonOffset = line.indexOf("::", 2)
if (endDoubleColonOffset != -1) {
if (!props.containsKey(Property.icon)) {
props.put(Property.icon, new ArrayList())
}
val iconDeclaration = line.substring(2, endDoubleColonOffset)
val builtin = iconMap.get("::" + iconDeclaration + "::")
if (builtin == null) {
props.get(Property.icon).asInstanceOf[List[String]].add(iconDeclaration)
} else {
props.get(Property.icon).asInstanceOf[List[String]].add(builtin)
}
line = line.substring(endDoubleColonOffset + 2)
determineProperties(line, props)
} else {
props.put(Property.text, line.trim())
}
} else {
// Check for a leading Confluence emoticon; otherwise the line is plain text.
for ((key, value) <- iconMap) {
val confluenceIconMarkup = key
if (line.startsWith(key)) {
val builtin = value
if (!props.containsKey(Property.icon)) {
props.put(Property.icon, new ArrayList())
}
props.get(Property.icon).asInstanceOf[List[String]].add(builtin)
line = line.substring(key.length)
determineProperties(line, props)
// NOTE(review): untranslated Java `break` — the loop continues over the
// remaining map entries, and the unconditional put(text, ...) below then
// overwrites the text recorded by the recursive call. Likely bug.
//break
}
}
props.put(Property.text, line.trim())
}
}
}
/**
 * Command-line entry point: converts between FreeMind (.mm) XML and Confluence
 * wiki markup in either direction. Source/target default to stdin/stdout;
 * charsets default to the platform charset, overridable via options or the
 * optional freemind2wiki.conf properties file.
 */
object FreeMindWikiConverter {
private val log = LoggerFactory.getLogger(FreeMindWikiConverter.getClass)
def main(args: Array[String]) {
val defaultCharset = Charset.defaultCharset().name()
try {
// Declare the supported command-line options.
val parser = new OptionParser()
parser.acceptsAll(Arrays.asList("c", "convert"), "Specifies the type of conversion to perform. Valid arguments " + "are 'freemind2confluence' and 'confluence2freemind'.").withRequiredArg().describedAs("conversion")
parser.acceptsAll(Arrays.asList("s", "source"), "The source file. If not specified the converter will read " + "from standard input.").withRequiredArg().describedAs("file")
parser.acceptsAll(Arrays.asList("t", "target"), "The destination file. If not specified the converter will " + "write to standard output.").withRequiredArg().describedAs("file")
parser.accepts("source-charset", "The encoding of the converter input: " + "e.g. 'UTF-8', 'US-ASCII', 'ISO-8859-15'. " + "Your system default is: " + defaultCharset).withRequiredArg().describedAs("charset")
parser.accepts("target-charset", "The encoding of the converter output: " + "e.g. 'UTF-8', 'US-ASCII', 'ISO-8859-15'. " + "Your system default is: " + defaultCharset).withRequiredArg().describedAs("charset")
val optionSet = parser.parse(args)
if (!optionSet.hasArgument("c")) {
System.err.println("No conversion mode specified.")
System.err.println()
parser.printHelpOn(System.err)
System.exit(-1)
}
var converter: Converter = null
var sourceCharset = defaultCharset
var targetCharset = defaultCharset
val configuration = new Properties()
// The config file only matters when a charset was not given on the command line.
if (!optionSet.has("source-charset") || !optionSet.has("target-charset")) {
tryToLoadConfiguration(configuration)
}
// Pick the converter and its direction-specific charset defaults.
val conversion = optionSet.argumentOf("c")
if ("freemind2confluence" == conversion) {
converter = new FM2ConfluenceConverter()
sourceCharset = configuration.getProperty("freemind.sourceCharset", sourceCharset)
targetCharset = configuration.getProperty("confluence.targetCharset", targetCharset)
} else if ("confluence2freemind" == conversion) {
converter = new Confluence2FMConverter()
sourceCharset = configuration.getProperty("confluence.sourceCharset", sourceCharset)
targetCharset = configuration.getProperty("freemind.targetCharset", targetCharset)
} else {
System.err.println("Unsupported conversion: " + conversion)
System.err.println()
parser.printHelpOn(System.err)
System.exit(-1)
return
}
// Explicit command-line charsets override both defaults and config file.
if (optionSet.has("source-charset")) {
sourceCharset = optionSet.argumentOf("source-charset")
}
if (optionSet.has("target-charset")) {
targetCharset = optionSet.argumentOf("target-charset")
}
var r: Reader = null
var w: Writer = null
log.info("Using source charset: " + sourceCharset)
log.info("Using target charset: " + targetCharset)
r = if (optionSet.hasArgument("s")) new InputStreamReader(new FileInputStream(optionSet.valueOf("s").asInstanceOf[String]), sourceCharset) else new InputStreamReader(System.in, sourceCharset)
w = if (optionSet.hasArgument("t")) new OutputStreamWriter(new FileOutputStream(optionSet.valueOf("t").asInstanceOf[String]), targetCharset) else new OutputStreamWriter(System.out, targetCharset)
converter.convert(r, w)
} catch {
case e: Throwable => {
e.printStackTrace()
System.exit(-1)
}
}
}
// Best-effort load of the optional classpath configuration file; failures and
// absence are only logged, never fatal.
private def tryToLoadConfiguration(properties: Properties) {
// val confPath = "/freemind2wiki.conf"
// val confIn = classOf[FreeMindWikiConverter].getResourceAsStream(confPath)
// if (confIn != null) {
// try {
// properties.load(confIn)
// } catch {
// case e: Exception => log.error(String.format("Could not load configuration file '%s'.", confPath))
// }
// } else {
// log.warn(String.format("Configuration file '%s' not in classpath.", confPath))
// Fixed: the previous message contained a dangling '%s' placeholder that was
// never substituted once the String.format call above was commented out.
log.warn("Configuration file 'freemind2wiki.conf' not in classpath.")
// }
}
}
@RunWith(classOf[JUnitRunner])
// Manual/integration test exercising the FreeMind -> Confluence conversion
// against a fixed local file.
// NOTE(review): the hard-coded Windows path makes this machine-specific.
class ProductsScraperTest extends FunSuite {
//import io.Locations
test("convert1") {
val folder = """d:\\personal\\work\\raisercostin-utils\\test\\"""
val c = new FM2ConfluenceConverter
// Fixed a resource leak: the FileReader was never closed, and the FileWriter
// leaked if convert() threw before wrapping it. Close both deterministically.
val reader = new FileReader(folder + """darzar.mm""")
try {
val writer = new FileWriter(folder + """darzar.md""")
try {
c.convert(reader, writer)
} finally {
writer.close()
}
} finally {
reader.close()
}
}
}
package test.scala.FizzBuzz
import main.scala.FizzBuzz.FizzBuzz
import test.scala.UnitSuite
// Unit tests for the FizzBuzz kata implementation.
// NOTE(review): "element" appears to be FizzBuzz's internal representation of a
// value that is either a plain number or a replacement word (presumably
// Either[String, Int]) — confirm against main.scala.FizzBuzz.
class FizzBuzzSuite extends UnitSuite {
// Round trip: numbers -> elements -> strings preserves the numbers.
test("We know how to convert a List of Int as Either[String, Int] into a List[String]") {
val elements = FizzBuzz.toElement((1 to 5).toList)
assertResult(List("1", "2", "3", "4", "5")) {
FizzBuzz.toString(elements)
}
}
// Round trip: words -> elements -> strings preserves the words.
test("We know how to convert a List of String as Either[String, Int] int a List[String]") {
val fizzbuzz = List("Fizz", "Buzz", "FizzBuzz")
val lefts = FizzBuzz.toElement(fizzbuzz)
assertResult(fizzbuzz) {
FizzBuzz.toString(lefts)
}
}
test("We can convert multiples of 3 to Fizz") {
val elements = FizzBuzz.toElement((1 to 5).toList)
assertResult(List("1", "2", "Fizz", "4", "5")) {
FizzBuzz.toString(FizzBuzz.toFizz(elements))
}
}
test("We can convert multiples of 5 to Buzz") {
val elements = FizzBuzz.toElement((1 to 5).toList)
assertResult(List("1", "2", "3", "4", "Buzz")) {
FizzBuzz.toString(FizzBuzz.toBuzz(elements))
}
}
// toFizzBuzz alone only rewrites multiples of 15 (3 and 9 stay untouched here).
test("We can convert multiples of 3 and 5 to FizzBuzz") {
val elements = FizzBuzz.toElement(List(1, 3, 5, 7, 9, 11, 13, 15))
assertResult(List("1", "3", "5", "7", "9", "11", "13", "FizzBuzz")) {
FizzBuzz.toString(FizzBuzz.toFizzBuzz(elements))
}
}
// End-to-end: the full pipeline applies Fizz, Buzz and FizzBuzz together.
test("We can do the FizzBuzz") {
val elements = List(1, 3, 5, 11, 15)
assertResult(List("1", "Fizz", "Buzz", "11", "FizzBuzz")) {
FizzBuzz(elements)
}
}
}
| ollielo/ScalaKata | src/test/scala/FizzBuzz/FizzBuzzSuite.scala | Scala | mit | 1,526 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.json
import java.io._
import java.nio.charset.{Charset, StandardCharsets, UnsupportedCharsetException}
import java.nio.file.Files
import java.sql.{Date, Timestamp}
import java.time.{LocalDate, ZoneId}
import java.util.Locale
import com.fasterxml.jackson.core.JsonFactory
import org.apache.hadoop.fs.{Path, PathFilter}
import org.apache.hadoop.io.SequenceFile.CompressionType
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.spark.{SparkConf, SparkException, TestUtils}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{functions => F, _}
import org.apache.spark.sql.catalyst.json._
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.ExternalRDD
import org.apache.spark.sql.execution.datasources.{CommonFileDataSourceSuite, DataSource, InMemoryFileIndex, NoopCache}
import org.apache.spark.sql.execution.datasources.v2.json.JsonScanBuilder
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.sql.types.StructType.fromDDL
import org.apache.spark.sql.types.TestUDT.{MyDenseVector, MyDenseVectorUDT}
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.Utils
class TestFileFilter extends PathFilter {
override def accept(path: Path): Boolean = path.getParent.getName != "p=2"
}
abstract class JsonSuite
extends QueryTest
with SharedSparkSession
with TestJsonData
with CommonFileDataSourceSuite {
import testImplicits._
override protected def dataSourceFormat = "json"
// Verifies JacksonParser.makeConverter: a JSON scalar written with Jackson and
// parsed back must be promoted to the exact requested Catalyst type/value.
test("Type promotion") {
// Asserts both the runtime class and the value of the promoted result.
def checkTypePromotion(expected: Any, actual: Any): Unit = {
assert(expected.getClass == actual.getClass,
s"Failed to promote ${actual.getClass} to ${expected.getClass}.")
assert(expected == actual,
s"Promoted value ${actual}(${actual.getClass}) does not equal the expected value " +
s"${expected}(${expected.getClass}).")
}
val factory = new JsonFactory()
// Serialises `value` to JSON text, then parses it back requesting `dataType`,
// returning whatever the JacksonParser converter produces.
def enforceCorrectType(
value: Any,
dataType: DataType,
options: Map[String, String] = Map.empty): Any = {
val writer = new StringWriter()
Utils.tryWithResource(factory.createGenerator(writer)) { generator =>
generator.writeObject(value)
generator.flush()
}
val dummyOption = new JSONOptions(options, SQLConf.get.sessionLocalTimeZone)
val dummySchema = StructType(Seq.empty)
val parser = new JacksonParser(dummySchema, dummyOption, allowArrayAsStructs = true)
Utils.tryWithResource(factory.createParser(writer.toString)) { jsonParser =>
jsonParser.nextToken()
val converter = parser.makeConverter(dataType)
converter.apply(jsonParser)
}
}
// Int widens to Long, Double, and Decimal.
val intNumber: Int = 2147483647
checkTypePromotion(intNumber, enforceCorrectType(intNumber, IntegerType))
checkTypePromotion(intNumber.toLong, enforceCorrectType(intNumber, LongType))
checkTypePromotion(intNumber.toDouble, enforceCorrectType(intNumber, DoubleType))
checkTypePromotion(
Decimal(intNumber), enforceCorrectType(intNumber, DecimalType.SYSTEM_DEFAULT))
// Long widens to Double and Decimal.
val longNumber: Long = 9223372036854775807L
checkTypePromotion(longNumber, enforceCorrectType(longNumber, LongType))
checkTypePromotion(longNumber.toDouble, enforceCorrectType(longNumber, DoubleType))
checkTypePromotion(
Decimal(longNumber), enforceCorrectType(longNumber, DecimalType.SYSTEM_DEFAULT))
val doubleNumber: Double = 1.7976931348623157d
checkTypePromotion(doubleNumber.toDouble, enforceCorrectType(doubleNumber, DoubleType))
// Numeric JSON values requested as TimestampType are treated as epoch seconds.
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(intNumber * 1000L)),
enforceCorrectType(intNumber, TimestampType))
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(intNumber.toLong * 1000L)),
enforceCorrectType(intNumber.toLong, TimestampType))
// String timestamps honour the configured timestampFormat option.
val strTime = "2014-09-30 12:34:56"
checkTypePromotion(
expected = DateTimeUtils.fromJavaTimestamp(Timestamp.valueOf(strTime)),
enforceCorrectType(strTime, TimestampType,
Map("timestampFormat" -> "yyyy-MM-dd HH:mm:ss")))
val strDate = "2014-10-15"
checkTypePromotion(
DateTimeUtils.fromJavaDate(Date.valueOf(strDate)), enforceCorrectType(strDate, DateType))
// ISO-8601 timestamps with explicit zone offsets.
val ISO8601Time1 = "1970-01-01T01:00:01.0Z"
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(3601000)),
enforceCorrectType(
ISO8601Time1,
TimestampType,
Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ss.SX")))
val ISO8601Time2 = "1970-01-01T02:00:01-01:00"
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(10801000)),
enforceCorrectType(
ISO8601Time2,
TimestampType,
Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ssXXX")))
// Dates parse relative to the system default time zone.
val ISO8601Date = "1970-01-01"
checkTypePromotion(DateTimeUtils.microsToDays(32400000000L, ZoneId.systemDefault),
enforceCorrectType(ISO8601Date, DateType))
}
test("Get compatible type") {
def checkDataType(t1: DataType, t2: DataType, expected: DataType): Unit = {
var actual = JsonInferSchema.compatibleType(t1, t2)
assert(actual == expected,
s"Expected $expected as the most general data type for $t1 and $t2, found $actual")
actual = JsonInferSchema.compatibleType(t2, t1)
assert(actual == expected,
s"Expected $expected as the most general data type for $t1 and $t2, found $actual")
}
// NullType
checkDataType(NullType, BooleanType, BooleanType)
checkDataType(NullType, IntegerType, IntegerType)
checkDataType(NullType, LongType, LongType)
checkDataType(NullType, DoubleType, DoubleType)
checkDataType(NullType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT)
checkDataType(NullType, StringType, StringType)
checkDataType(NullType, ArrayType(IntegerType), ArrayType(IntegerType))
checkDataType(NullType, StructType(Nil), StructType(Nil))
checkDataType(NullType, NullType, NullType)
// BooleanType
checkDataType(BooleanType, BooleanType, BooleanType)
checkDataType(BooleanType, IntegerType, StringType)
checkDataType(BooleanType, LongType, StringType)
checkDataType(BooleanType, DoubleType, StringType)
checkDataType(BooleanType, DecimalType.SYSTEM_DEFAULT, StringType)
checkDataType(BooleanType, StringType, StringType)
checkDataType(BooleanType, ArrayType(IntegerType), StringType)
checkDataType(BooleanType, StructType(Nil), StringType)
// IntegerType
checkDataType(IntegerType, IntegerType, IntegerType)
checkDataType(IntegerType, LongType, LongType)
checkDataType(IntegerType, DoubleType, DoubleType)
checkDataType(IntegerType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT)
checkDataType(IntegerType, StringType, StringType)
checkDataType(IntegerType, ArrayType(IntegerType), StringType)
checkDataType(IntegerType, StructType(Nil), StringType)
// LongType
checkDataType(LongType, LongType, LongType)
checkDataType(LongType, DoubleType, DoubleType)
checkDataType(LongType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT)
checkDataType(LongType, StringType, StringType)
checkDataType(LongType, ArrayType(IntegerType), StringType)
checkDataType(LongType, StructType(Nil), StringType)
// DoubleType
checkDataType(DoubleType, DoubleType, DoubleType)
checkDataType(DoubleType, DecimalType.SYSTEM_DEFAULT, DoubleType)
checkDataType(DoubleType, StringType, StringType)
checkDataType(DoubleType, ArrayType(IntegerType), StringType)
checkDataType(DoubleType, StructType(Nil), StringType)
// DecimalType
checkDataType(DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT,
DecimalType.SYSTEM_DEFAULT)
checkDataType(DecimalType.SYSTEM_DEFAULT, StringType, StringType)
checkDataType(DecimalType.SYSTEM_DEFAULT, ArrayType(IntegerType), StringType)
checkDataType(DecimalType.SYSTEM_DEFAULT, StructType(Nil), StringType)
// StringType
checkDataType(StringType, StringType, StringType)
checkDataType(StringType, ArrayType(IntegerType), StringType)
checkDataType(StringType, StructType(Nil), StringType)
// ArrayType
checkDataType(ArrayType(IntegerType), ArrayType(IntegerType), ArrayType(IntegerType))
checkDataType(ArrayType(IntegerType), ArrayType(LongType), ArrayType(LongType))
checkDataType(ArrayType(IntegerType), ArrayType(StringType), ArrayType(StringType))
checkDataType(ArrayType(IntegerType), StructType(Nil), StringType)
checkDataType(
ArrayType(IntegerType, true), ArrayType(IntegerType), ArrayType(IntegerType, true))
checkDataType(
ArrayType(IntegerType, true), ArrayType(IntegerType, false), ArrayType(IntegerType, true))
checkDataType(
ArrayType(IntegerType, true), ArrayType(IntegerType, true), ArrayType(IntegerType, true))
checkDataType(
ArrayType(IntegerType, false), ArrayType(IntegerType), ArrayType(IntegerType, true))
checkDataType(
ArrayType(IntegerType, false), ArrayType(IntegerType, false), ArrayType(IntegerType, false))
checkDataType(
ArrayType(IntegerType, false), ArrayType(IntegerType, true), ArrayType(IntegerType, true))
// StructType
checkDataType(StructType(Nil), StructType(Nil), StructType(Nil))
checkDataType(
StructType(StructField("f1", IntegerType, true) :: Nil),
StructType(StructField("f1", IntegerType, true) :: Nil),
StructType(StructField("f1", IntegerType, true) :: Nil))
checkDataType(
StructType(StructField("f1", IntegerType, true) :: Nil),
StructType(Nil),
StructType(StructField("f1", IntegerType, true) :: Nil))
checkDataType(
StructType(
StructField("f1", IntegerType, true) ::
StructField("f2", IntegerType, true) :: Nil),
StructType(StructField("f1", LongType, true) :: Nil),
StructType(
StructField("f1", LongType, true) ::
StructField("f2", IntegerType, true) :: Nil))
checkDataType(
StructType(
StructField("f1", IntegerType, true) :: Nil),
StructType(
StructField("f2", IntegerType, true) :: Nil),
StructType(
StructField("f1", IntegerType, true) ::
StructField("f2", IntegerType, true) :: Nil))
checkDataType(
StructType(
StructField("f1", IntegerType, true) :: Nil),
DecimalType.SYSTEM_DEFAULT,
StringType)
}
test("Complex field and type inferring with null in sampling") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(jsonNullStruct)
val expectedSchema = StructType(
StructField("headers", StructType(
StructField("Charset", StringType, true) ::
StructField("Host", StringType, true) :: Nil)
, true) ::
StructField("ip", StringType, true) ::
StructField("nullstr", StringType, true):: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select nullstr, headers.Host from jsonTable"),
Seq(Row("", "1.abc.com"), Row("", null), Row("", null), Row(null, null))
)
}
}
test("Primitive field and type inferring") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(primitiveFieldAndType)
val expectedSchema = StructType(
StructField("bigInteger", DecimalType(20, 0), true) ::
StructField("boolean", BooleanType, true) ::
StructField("double", DoubleType, true) ::
StructField("integer", LongType, true) ::
StructField("long", LongType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157,
10,
21474836470L,
null,
"this is a simple string.")
)
}
}
test("Complex field and type inferring") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(complexFieldAndType1)
val expectedSchema = StructType(
StructField("arrayOfArray1", ArrayType(ArrayType(StringType, true), true), true) ::
StructField("arrayOfArray2", ArrayType(ArrayType(DoubleType, true), true), true) ::
StructField("arrayOfBigInteger", ArrayType(DecimalType(21, 0), true), true) ::
StructField("arrayOfBoolean", ArrayType(BooleanType, true), true) ::
StructField("arrayOfDouble", ArrayType(DoubleType, true), true) ::
StructField("arrayOfInteger", ArrayType(LongType, true), true) ::
StructField("arrayOfLong", ArrayType(LongType, true), true) ::
StructField("arrayOfNull", ArrayType(StringType, true), true) ::
StructField("arrayOfString", ArrayType(StringType, true), true) ::
StructField("arrayOfStruct", ArrayType(
StructType(
StructField("field1", BooleanType, true) ::
StructField("field2", StringType, true) ::
StructField("field3", StringType, true) :: Nil), true), true) ::
StructField("struct", StructType(
StructField("field1", BooleanType, true) ::
StructField("field2", DecimalType(20, 0), true) :: Nil), true) ::
StructField("structWithArrayFields", StructType(
StructField("field1", ArrayType(LongType, true), true) ::
StructField("field2", ArrayType(StringType, true), true) :: Nil), true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
// Access elements of a primitive array.
checkAnswer(
sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from jsonTable"),
Row("str1", "str2", null)
)
// Access an array of null values.
checkAnswer(
sql("select arrayOfNull from jsonTable"),
Row(Seq(null, null, null, null))
)
// Access elements of a BigInteger array (we use DecimalType internally).
checkAnswer(
sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] from " +
"jsonTable"),
Row(new java.math.BigDecimal("922337203685477580700"),
new java.math.BigDecimal("-922337203685477580800"), null)
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray1[0], arrayOfArray1[1] from jsonTable"),
Row(Seq("1", "2", "3"), Seq("str1", "str2"))
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray2[0], arrayOfArray2[1] from jsonTable"),
Row(Seq(1.0, 2.0, 3.0), Seq(1.1, 2.1, 3.1))
)
// Access elements of an array inside a filed with the type of ArrayType(ArrayType).
checkAnswer(
sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from jsonTable"),
Row("str2", 2.1)
)
// Access elements of an array of structs.
checkAnswer(
sql("select arrayOfStruct[0], arrayOfStruct[1], arrayOfStruct[2], arrayOfStruct[3] " +
"from jsonTable"),
Row(
Row(true, "str1", null),
Row(false, null, null),
Row(null, null, null),
null)
)
// Access a struct and fields inside of it.
checkAnswer(
sql("select struct, struct.field1, struct.field2 from jsonTable"),
Row(
Row(true, new java.math.BigDecimal("92233720368547758070")),
true,
new java.math.BigDecimal("92233720368547758070")) :: Nil
)
// Access an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1, structWithArrayFields.field2 from jsonTable"),
Row(Seq(4, 5, 6), Seq("str1", "str2"))
)
// Access elements of an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] from " +
"jsonTable"),
Row(5, null)
)
}
}
test("GetField operation on complex data type") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(complexFieldAndType1)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select arrayOfStruct[0].field1, arrayOfStruct[0].field2 from jsonTable"),
Row(true, "str1")
)
// Getting all values of a specific field from an array of structs.
checkAnswer(
sql("select arrayOfStruct.field1, arrayOfStruct.field2 from jsonTable"),
Row(Seq(true, false, null), Seq("str1", null, null))
)
}
}
test("Type conflict in primitive field values") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(primitiveFieldValueTypeConflict)
val expectedSchema = StructType(
StructField("num_bool", StringType, true) ::
StructField("num_num_1", LongType, true) ::
StructField("num_num_2", DoubleType, true) ::
StructField("num_num_3", DoubleType, true) ::
StructField("num_str", StringType, true) ::
StructField("str_bool", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row("true", 11L, null, 1.1, "13.1", "str1") ::
Row("12", null, 21474836470.9, null, null, "true") ::
Row("false", 21474836470L, 92233720368547758070d, 100, "str1", "false") ::
Row(null, 21474836570L, 1.1, 21474836470L, "92233720368547758070", null) :: Nil
)
// Number and Boolean conflict: resolve the type as number in this query.
checkAnswer(
sql("select num_bool - 10 from jsonTable where num_bool > 11"),
Row(2)
)
// Widening to LongType
checkAnswer(
sql("select num_num_1 - 100 from jsonTable where num_num_1 > 11"),
Row(21474836370L) :: Row(21474836470L) :: Nil
)
checkAnswer(
sql("select num_num_1 - 100 from jsonTable where num_num_1 > 10"),
Row(-89) :: Row(21474836370L) :: Row(21474836470L) :: Nil
)
// Widening to DecimalType
checkAnswer(
sql("select num_num_2 + 1.3 from jsonTable where num_num_2 > 1.1"),
Row(21474836472.2) ::
Row(92233720368547758071.3) :: Nil
)
// Widening to Double
checkAnswer(
sql("select num_num_3 + 1.2 from jsonTable where num_num_3 > 1.1"),
Row(101.2) :: Row(21474836471.2) :: Nil
)
// Number and String conflict: resolve the type as number in this query.
checkAnswer(
sql("select num_str + 1.2 from jsonTable where num_str > 14d"),
Row(92233720368547758071.2)
)
// Number and String conflict: resolve the type as number in this query.
checkAnswer(
sql("select num_str + 1.2 from jsonTable where num_str >= 92233720368547758060"),
Row(new java.math.BigDecimal("92233720368547758071.2").doubleValue)
)
// String and Boolean conflict: resolve the type as string.
checkAnswer(
sql("select * from jsonTable where str_bool = 'str1'"),
Row("true", 11L, null, 1.1, "13.1", "str1")
)
}
}
test("Type conflict in complex field values") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(complexFieldValueTypeConflict)
val expectedSchema = StructType(
StructField("array", ArrayType(LongType, true), true) ::
StructField("num_struct", StringType, true) ::
StructField("str_array", StringType, true) ::
StructField("struct", StructType(
StructField("field", StringType, true) :: Nil), true) ::
StructField("struct_array", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(Seq(), "11", "[1,2,3]", Row(null), "[]") ::
Row(null, """{"field":false}""", null, null, "{}") ::
Row(Seq(4, 5, 6), null, "str", Row(null), "[7,8,9]") ::
Row(Seq(7), "{}", """["str1","str2",33]""", Row("str"), """{"field":true}""") :: Nil
)
}
}
test("Type conflict in array elements") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(arrayElementTypeConflict)
val expectedSchema = StructType(
StructField("array1", ArrayType(StringType, true), true) ::
StructField("array2", ArrayType(StructType(
StructField("field", LongType, true) :: Nil), true), true) ::
StructField("array3", ArrayType(StringType, true), true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(Seq("1", "1.1", "true", null, "[]", "{}", "[2,3,4]",
"""{"field":"str"}"""), Seq(Row(214748364700L), Row(1)), null) ::
Row(null, null, Seq("""{"field":"str"}""", """{"field":1}""")) ::
Row(null, null, Seq("1", "2", "3")) :: Nil
)
// Treat an element as a number.
checkAnswer(
sql("select array1[0] + 1 from jsonTable where array1 is not null"),
Row(2)
)
}
}
test("Handling missing fields") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(missingFields)
val expectedSchema = StructType(
StructField("a", BooleanType, true) ::
StructField("b", LongType, true) ::
StructField("c", ArrayType(LongType, true), true) ::
StructField("d", StructType(
StructField("field", BooleanType, true) :: Nil), true) ::
StructField("e", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
}
}
test("Loading a JSON dataset from a text file") {
withTempView("jsonTable") {
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val jsonDF = spark.read.json(path)
val expectedSchema = StructType(
StructField("bigInteger", DecimalType(20, 0), true) ::
StructField("boolean", BooleanType, true) ::
StructField("double", DoubleType, true) ::
StructField("integer", LongType, true) ::
StructField("long", LongType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157,
10,
21474836470L,
null,
"this is a simple string.")
)
}
}
test("Loading a JSON dataset primitivesAsString returns schema with primitive types as strings") {
withTempView("jsonTable") {
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val jsonDF = spark.read.option("primitivesAsString", "true").json(path)
val expectedSchema = StructType(
StructField("bigInteger", StringType, true) ::
StructField("boolean", StringType, true) ::
StructField("double", StringType, true) ::
StructField("integer", StringType, true) ::
StructField("long", StringType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row("92233720368547758070",
"true",
"1.7976931348623157",
"10",
"21474836470",
null,
"this is a simple string.")
)
}
}
test("Loading a JSON dataset primitivesAsString returns complex fields as strings") {
withTempView("jsonTable") {
val jsonDF = spark.read.option("primitivesAsString", "true").json(complexFieldAndType1)
val expectedSchema = StructType(
StructField("arrayOfArray1", ArrayType(ArrayType(StringType, true), true), true) ::
StructField("arrayOfArray2", ArrayType(ArrayType(StringType, true), true), true) ::
StructField("arrayOfBigInteger", ArrayType(StringType, true), true) ::
StructField("arrayOfBoolean", ArrayType(StringType, true), true) ::
StructField("arrayOfDouble", ArrayType(StringType, true), true) ::
StructField("arrayOfInteger", ArrayType(StringType, true), true) ::
StructField("arrayOfLong", ArrayType(StringType, true), true) ::
StructField("arrayOfNull", ArrayType(StringType, true), true) ::
StructField("arrayOfString", ArrayType(StringType, true), true) ::
StructField("arrayOfStruct", ArrayType(
StructType(
StructField("field1", StringType, true) ::
StructField("field2", StringType, true) ::
StructField("field3", StringType, true) :: Nil), true), true) ::
StructField("struct", StructType(
StructField("field1", StringType, true) ::
StructField("field2", StringType, true) :: Nil), true) ::
StructField("structWithArrayFields", StructType(
StructField("field1", ArrayType(StringType, true), true) ::
StructField("field2", ArrayType(StringType, true), true) :: Nil), true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
// Access elements of a primitive array.
checkAnswer(
sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from jsonTable"),
Row("str1", "str2", null)
)
// Access an array of null values.
checkAnswer(
sql("select arrayOfNull from jsonTable"),
Row(Seq(null, null, null, null))
)
// Access elements of a BigInteger array (we use DecimalType internally).
checkAnswer(
sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] from " +
"jsonTable"),
Row("922337203685477580700", "-922337203685477580800", null)
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray1[0], arrayOfArray1[1] from jsonTable"),
Row(Seq("1", "2", "3"), Seq("str1", "str2"))
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray2[0], arrayOfArray2[1] from jsonTable"),
Row(Seq("1", "2", "3"), Seq("1.1", "2.1", "3.1"))
)
// Access elements of an array inside a filed with the type of ArrayType(ArrayType).
checkAnswer(
sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from jsonTable"),
Row("str2", "2.1")
)
// Access elements of an array of structs.
checkAnswer(
sql("select arrayOfStruct[0], arrayOfStruct[1], arrayOfStruct[2], arrayOfStruct[3] " +
"from jsonTable"),
Row(
Row("true", "str1", null),
Row("false", null, null),
Row(null, null, null),
null)
)
// Access a struct and fields inside of it.
checkAnswer(
sql("select struct, struct.field1, struct.field2 from jsonTable"),
Row(
Row("true", "92233720368547758070"),
"true",
"92233720368547758070") :: Nil
)
// Access an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1, structWithArrayFields.field2 from jsonTable"),
Row(Seq("4", "5", "6"), Seq("str1", "str2"))
)
// Access elements of an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] from " +
"jsonTable"),
Row("5", null)
)
}
}
test("Loading a JSON dataset prefersDecimal returns schema with float types as BigDecimal") {
withTempView("jsonTable") {
val jsonDF = spark.read.option("prefersDecimal", "true").json(primitiveFieldAndType)
val expectedSchema = StructType(
StructField("bigInteger", DecimalType(20, 0), true) ::
StructField("boolean", BooleanType, true) ::
StructField("double", DecimalType(17, 16), true) ::
StructField("integer", LongType, true) ::
StructField("long", LongType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(BigDecimal("92233720368547758070"),
true,
BigDecimal("1.7976931348623157"),
10,
21474836470L,
null,
"this is a simple string.")
)
}
}
test("Find compatible types even if inferred DecimalType is not capable of other IntegralType") {
val mixedIntegerAndDoubleRecords = Seq(
"""{"a": 3, "b": 1.1}""",
s"""{"a": 3.1, "b": 0.${"0" * 38}1}""").toDS()
val jsonDF = spark.read
.option("prefersDecimal", "true")
.json(mixedIntegerAndDoubleRecords)
// The values in `a` field will be decimals as they fit in decimal. For `b` field,
// they will be doubles as `1.0E-39D` does not fit.
val expectedSchema = StructType(
StructField("a", DecimalType(21, 1), true) ::
StructField("b", DoubleType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
checkAnswer(
jsonDF,
Row(BigDecimal("3"), 1.1D) ::
Row(BigDecimal("3.1"), 1.0E-39D) :: Nil
)
}
test("Infer big integers correctly even when it does not fit in decimal") {
val jsonDF = spark.read
.json(bigIntegerRecords)
// The value in `a` field will be a double as it does not fit in decimal. For `b` field,
// it will be a decimal as `92233720368547758070`.
val expectedSchema = StructType(
StructField("a", DoubleType, true) ::
StructField("b", DecimalType(20, 0), true) :: Nil)
assert(expectedSchema === jsonDF.schema)
checkAnswer(jsonDF, Row(1.0E38D, BigDecimal("92233720368547758070")))
}
test("Infer floating-point values correctly even when it does not fit in decimal") {
val jsonDF = spark.read
.option("prefersDecimal", "true")
.json(floatingValueRecords)
// The value in `a` field will be a double as it does not fit in decimal. For `b` field,
// it will be a decimal as `0.01` by having a precision equal to the scale.
val expectedSchema = StructType(
StructField("a", DoubleType, true) ::
StructField("b", DecimalType(2, 2), true):: Nil)
assert(expectedSchema === jsonDF.schema)
checkAnswer(jsonDF, Row(1.0E-39D, BigDecimal("0.01")))
val mergedJsonDF = spark.read
.option("prefersDecimal", "true")
.json(floatingValueRecords.union(bigIntegerRecords))
val expectedMergedSchema = StructType(
StructField("a", DoubleType, true) ::
StructField("b", DecimalType(22, 2), true):: Nil)
assert(expectedMergedSchema === mergedJsonDF.schema)
checkAnswer(
mergedJsonDF,
Row(1.0E-39D, BigDecimal("0.01")) ::
Row(1.0E38D, BigDecimal("92233720368547758070")) :: Nil
)
}
test("Loading a JSON dataset from a text file with SQL") {
val dir = Utils.createTempDir()
dir.delete()
val path = dir.toURI.toString
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
sql(
s"""
|CREATE TEMPORARY VIEW jsonTableSQL
|USING org.apache.spark.sql.json
|OPTIONS (
| path '$path'
|)
""".stripMargin)
checkAnswer(
sql("select * from jsonTableSQL"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157,
10,
21474836470L,
null,
"this is a simple string.")
)
}
test("Applying schemas") {
withTempView("jsonTable1", "jsonTable2") {
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val schema = StructType(
StructField("bigInteger", DecimalType.SYSTEM_DEFAULT, true) ::
StructField("boolean", BooleanType, true) ::
StructField("double", DoubleType, true) ::
StructField("integer", IntegerType, true) ::
StructField("long", LongType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
val jsonDF1 = spark.read.schema(schema).json(path)
assert(schema === jsonDF1.schema)
jsonDF1.createOrReplaceTempView("jsonTable1")
checkAnswer(
sql("select * from jsonTable1"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157,
10,
21474836470L,
null,
"this is a simple string.")
)
val jsonDF2 = spark.read.schema(schema).json(primitiveFieldAndType)
assert(schema === jsonDF2.schema)
jsonDF2.createOrReplaceTempView("jsonTable2")
checkAnswer(
sql("select * from jsonTable2"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157,
10,
21474836470L,
null,
"this is a simple string.")
)
}
}
test("Applying schemas with MapType") {
withTempView("jsonWithSimpleMap", "jsonWithComplexMap") {
val schemaWithSimpleMap = StructType(
StructField("map", MapType(StringType, IntegerType, true), false) :: Nil)
val jsonWithSimpleMap = spark.read.schema(schemaWithSimpleMap).json(mapType1)
jsonWithSimpleMap.createOrReplaceTempView("jsonWithSimpleMap")
checkAnswer(
sql("select `map` from jsonWithSimpleMap"),
Row(Map("a" -> 1)) ::
Row(Map("b" -> 2)) ::
Row(Map("c" -> 3)) ::
Row(Map("c" -> 1, "d" -> 4)) ::
Row(Map("e" -> null)) :: Nil
)
withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
checkAnswer(
sql("select `map`['c'] from jsonWithSimpleMap"),
Row(null) ::
Row(null) ::
Row(3) ::
Row(1) ::
Row(null) :: Nil
)
}
val innerStruct = StructType(
StructField("field1", ArrayType(IntegerType, true), true) ::
StructField("field2", IntegerType, true) :: Nil)
val schemaWithComplexMap = StructType(
StructField("map", MapType(StringType, innerStruct, true), false) :: Nil)
val jsonWithComplexMap = spark.read.schema(schemaWithComplexMap).json(mapType2)
jsonWithComplexMap.createOrReplaceTempView("jsonWithComplexMap")
checkAnswer(
sql("select `map` from jsonWithComplexMap"),
Row(Map("a" -> Row(Seq(1, 2, 3, null), null))) ::
Row(Map("b" -> Row(null, 2))) ::
Row(Map("c" -> Row(Seq(), 4))) ::
Row(Map("c" -> Row(null, 3), "d" -> Row(Seq(null), null))) ::
Row(Map("e" -> null)) ::
Row(Map("f" -> Row(null, null))) :: Nil
)
withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
checkAnswer(
sql("select `map`['a'].field1, `map`['c'].field2 from jsonWithComplexMap"),
Row(Seq(1, 2, 3, null), null) ::
Row(null, null) ::
Row(null, 4) ::
Row(null, 3) ::
Row(null, null) ::
Row(null, null) :: Nil
)
}
}
}
test("SPARK-2096 Correctly parse dot notations") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(complexFieldAndType2)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select arrayOfStruct[0].field1, arrayOfStruct[0].field2 from jsonTable"),
Row(true, "str1")
)
checkAnswer(
sql(
"""
|select complexArrayOfStruct[0].field1[1].inner2[0],
|complexArrayOfStruct[1].field2[0][1]
|from jsonTable
""".stripMargin),
Row("str2", 6)
)
}
}
test("SPARK-3390 Complex arrays") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(complexFieldAndType2)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql(
"""
|select arrayOfArray1[0][0][0], arrayOfArray1[1][0][1], arrayOfArray1[1][1][0]
|from jsonTable
""".stripMargin),
Row(5, 7, 8)
)
checkAnswer(
sql(
"""
|select arrayOfArray2[0][0][0].inner1, arrayOfArray2[1][0],
|arrayOfArray2[1][1][1].inner2[0], arrayOfArray2[2][0][0].inner3[0][0].inner4
|from jsonTable
""".stripMargin),
Row("str1", Nil, "str4", 2)
)
}
}
test("SPARK-3308 Read top level JSON arrays") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(jsonArray)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql(
"""
|select a, b, c
|from jsonTable
""".stripMargin),
Row("str_a_1", null, null) ::
Row("str_a_2", null, null) ::
Row(null, "str_b_3", null) ::
Row("str_a_4", "str_b_4", "str_c_4") :: Nil
)
}
}
test("Corrupt records: FAILFAST mode") {
// `FAILFAST` mode should throw an exception for corrupt records.
val exceptionOne = intercept[SparkException] {
spark.read
.option("mode", "FAILFAST")
.json(corruptRecords)
}.getMessage
assert(exceptionOne.contains(
"Malformed records are detected in schema inference. Parse Mode: FAILFAST."))
val exceptionTwo = intercept[SparkException] {
spark.read
.option("mode", "FAILFAST")
.schema("a string")
.json(corruptRecords)
.collect()
}.getMessage
assert(exceptionTwo.contains(
"Malformed records are detected in record parsing. Parse Mode: FAILFAST."))
}
test("Corrupt records: DROPMALFORMED mode") {
val schemaOne = StructType(
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
val schemaTwo = StructType(
StructField("a", StringType, true) :: Nil)
// `DROPMALFORMED` mode should skip corrupt records
val jsonDFOne = spark.read
.option("mode", "DROPMALFORMED")
.json(corruptRecords)
checkAnswer(
jsonDFOne,
Row("str_a_4", "str_b_4", "str_c_4") :: Nil
)
assert(jsonDFOne.schema === schemaOne)
val jsonDFTwo = spark.read
.option("mode", "DROPMALFORMED")
.schema(schemaTwo)
.json(corruptRecords)
checkAnswer(
jsonDFTwo,
Row("str_a_4") :: Nil)
assert(jsonDFTwo.schema === schemaTwo)
}
test("SPARK-19641: Additional corrupt records: DROPMALFORMED mode") {
val schema = new StructType().add("dummy", StringType)
// `DROPMALFORMED` mode should skip corrupt records
val jsonDF = spark.read
.option("mode", "DROPMALFORMED")
.json(additionalCorruptRecords)
checkAnswer(
jsonDF,
Row("test"))
assert(jsonDF.schema === schema)
}
test("Corrupt records: PERMISSIVE mode, without designated column for malformed records") {
val schema = StructType(
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
val jsonDF = spark.read.schema(schema).json(corruptRecords)
checkAnswer(
jsonDF.select($"a", $"b", $"c"),
Seq(
// Corrupted records are replaced with null
Row(null, null, null),
Row(null, null, null),
Row(null, null, null),
Row("str_a_4", "str_b_4", "str_c_4"),
Row(null, null, null))
)
}
test("Corrupt records: PERMISSIVE mode, with designated column for malformed records") {
// Test if we can query corrupt records.
withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
val jsonDF = spark.read.json(corruptRecords)
val schema = StructType(
StructField("_unparsed", StringType, true) ::
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
assert(schema === jsonDF.schema)
// In HiveContext, backticks should be used to access columns starting with a underscore.
checkAnswer(
jsonDF.select($"a", $"b", $"c", $"_unparsed"),
Row(null, null, null, "{") ::
Row(null, null, null, """{"a":1, b:2}""") ::
Row(null, null, null, """{"a":{, b:3}""") ::
Row("str_a_4", "str_b_4", "str_c_4", null) ::
Row(null, null, null, "]") :: Nil
)
checkAnswer(
jsonDF.filter($"_unparsed".isNull).select($"a", $"b", $"c"),
Row("str_a_4", "str_b_4", "str_c_4")
)
checkAnswer(
jsonDF.filter($"_unparsed".isNotNull).select($"_unparsed"),
Row("{") ::
Row("""{"a":1, b:2}""") ::
Row("""{"a":{, b:3}""") ::
Row("]") :: Nil
)
}
}
test("SPARK-13953 Rename the corrupt record field via option") {
val jsonDF = spark.read
.option("columnNameOfCorruptRecord", "_malformed")
.json(corruptRecords)
val schema = StructType(
StructField("_malformed", StringType, true) ::
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
assert(schema === jsonDF.schema)
checkAnswer(
jsonDF.selectExpr("a", "b", "c", "_malformed"),
Row(null, null, null, "{") ::
Row(null, null, null, """{"a":1, b:2}""") ::
Row(null, null, null, """{"a":{, b:3}""") ::
Row("str_a_4", "str_b_4", "str_c_4", null) ::
Row(null, null, null, "]") :: Nil
)
}
test("SPARK-4068: nulls in arrays") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(nullsInArrays)
jsonDF.createOrReplaceTempView("jsonTable")
val schema = StructType(
StructField("field1",
ArrayType(ArrayType(ArrayType(ArrayType(StringType, true), true), true), true), true) ::
StructField("field2",
ArrayType(ArrayType(
StructType(StructField("Test", LongType, true) :: Nil), true), true), true) ::
StructField("field3",
ArrayType(ArrayType(
StructType(StructField("Test", StringType, true) :: Nil), true), true), true) ::
StructField("field4",
ArrayType(ArrayType(ArrayType(LongType, true), true), true), true) :: Nil)
assert(schema === jsonDF.schema)
checkAnswer(
sql(
"""
|SELECT field1, field2, field3, field4
|FROM jsonTable
""".stripMargin),
Row(Seq(Seq(null), Seq(Seq(Seq("Test")))), null, null, null) ::
Row(null, Seq(null, Seq(Row(1))), null, null) ::
Row(null, null, Seq(Seq(null), Seq(Row("2"))), null) ::
Row(null, null, null, Seq(Seq(null, Seq(1, 2, 3)))) :: Nil
)
}
}
// SPARK-4228: Dataset.toJSON must serialize rows of primitive, array, struct
// and map columns to JSON text, and that text must be readable back by the
// JSON data source (round-trip), including element/field access through SQL.
test("SPARK-4228 DataFrame to JSON") {
  withTempView("applySchema1", "applySchema2", "primitiveTable", "complexTable") {
    // --- Flat schema with an array column and a nullable int parsed from text.
    val schema1 = StructType(
      StructField("f1", IntegerType, false) ::
      StructField("f2", StringType, false) ::
      StructField("f3", BooleanType, false) ::
      StructField("f4", ArrayType(StringType), nullable = true) ::
      StructField("f5", IntegerType, true) :: Nil)
    val rowRDD1 = unparsedStrings.map { r =>
      val values = r.split(",").map(_.trim)
      // Fourth column may not be numeric; map parse failure to null (f5 is nullable).
      val v5 = try values(3).toInt catch {
        case _: NumberFormatException => null
      }
      Row(values(0).toInt, values(1), values(2).toBoolean, r.split(",").toList, v5)
    }
    val df1 = spark.createDataFrame(rowRDD1, schema1)
    df1.createOrReplaceTempView("applySchema1")
    val df2 = df1.toDF
    val result = df2.toJSON.collect()
    // Null fields (f5 of the first row) are omitted from the JSON output.
    // scalastyle:off
    assert(result(0) === "{\\"f1\\":1,\\"f2\\":\\"A1\\",\\"f3\\":true,\\"f4\\":[\\"1\\",\\" A1\\",\\" true\\",\\" null\\"]}")
    assert(result(3) === "{\\"f1\\":4,\\"f2\\":\\"D4\\",\\"f3\\":true,\\"f4\\":[\\"4\\",\\" D4\\",\\" true\\",\\" 2147483644\\"],\\"f5\\":2147483644}")
    // scalastyle:on
    // --- Nested struct plus a map column.
    val schema2 = StructType(
      StructField("f1", StructType(
        StructField("f11", IntegerType, false) ::
        StructField("f12", BooleanType, false) :: Nil), false) ::
      StructField("f2", MapType(StringType, IntegerType, true), false) :: Nil)
    val rowRDD2 = unparsedStrings.map { r =>
      val values = r.split(",").map(_.trim)
      val v4 = try values(3).toInt catch {
        case _: NumberFormatException => null
      }
      Row(Row(values(0).toInt, values(2).toBoolean), Map(values(1) -> v4))
    }
    val df3 = spark.createDataFrame(rowRDD2, schema2)
    df3.createOrReplaceTempView("applySchema2")
    val df4 = df3.toDF
    val result2 = df4.toJSON.collect()
    assert(result2(1) === "{\\"f1\\":{\\"f11\\":2,\\"f12\\":false},\\"f2\\":{\\"B2\\":null}}")
    assert(result2(3) === "{\\"f1\\":{\\"f11\\":4,\\"f12\\":true},\\"f2\\":{\\"D4\\":2147483644}}")
    // --- Round trip: read fixture -> toJSON -> read again, then query.
    val jsonDF = spark.read.json(primitiveFieldAndType)
    val primTable = spark.read.json(jsonDF.toJSON)
    primTable.createOrReplaceTempView("primitiveTable")
    checkAnswer(
      sql("select * from primitiveTable"),
      Row(new java.math.BigDecimal("92233720368547758070"),
        true,
        1.7976931348623157,
        10,
        21474836470L,
        "this is a simple string.")
    )
    val complexJsonDF = spark.read.json(complexFieldAndType1)
    val compTable = spark.read.json(complexJsonDF.toJSON)
    compTable.createOrReplaceTempView("complexTable")
    // Access elements of a primitive array.
    checkAnswer(
      sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from complexTable"),
      Row("str1", "str2", null)
    )
    // Access an array of null values.
    checkAnswer(
      sql("select arrayOfNull from complexTable"),
      Row(Seq(null, null, null, null))
    )
    // Access elements of a BigInteger array (we use DecimalType internally).
    checkAnswer(
      sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] " +
        " from complexTable"),
      Row(new java.math.BigDecimal("922337203685477580700"),
        new java.math.BigDecimal("-922337203685477580800"), null)
    )
    // Access elements of an array of arrays.
    checkAnswer(
      sql("select arrayOfArray1[0], arrayOfArray1[1] from complexTable"),
      Row(Seq("1", "2", "3"), Seq("str1", "str2"))
    )
    // Access elements of an array of arrays.
    checkAnswer(
      sql("select arrayOfArray2[0], arrayOfArray2[1] from complexTable"),
      Row(Seq(1.0, 2.0, 3.0), Seq(1.1, 2.1, 3.1))
    )
    // Access elements of an array inside a field with the type of ArrayType(ArrayType).
    checkAnswer(
      sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from complexTable"),
      Row("str2", 2.1)
    )
    // Access a struct and fields inside of it.
    checkAnswer(
      sql("select struct, struct.field1, struct.field2 from complexTable"),
      Row(
        Row(true, new java.math.BigDecimal("92233720368547758070")),
        true,
        new java.math.BigDecimal("92233720368547758070")) :: Nil
    )
    // Access an array field of a struct.
    checkAnswer(
      sql("select structWithArrayFields.field1, structWithArrayFields.field2 from complexTable"),
      Row(Seq(4, 5, 6), Seq("str1", "str2"))
    )
    // Access elements of an array field of a struct.
    checkAnswer(
      sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] " +
        "from complexTable"),
      Row(5, null)
    )
  }
}
// toJSON should stay within the logical plan rather than eagerly materializing
// an RDD, so the resulting plan must contain no ExternalRDD node.
test("Dataset toJSON doesn't construct rdd") {
  val logicalPlan = spark.emptyDataFrame.toJSON.queryExecution.logical
  val rddNode = logicalPlan.find {
    case ExternalRDD(_, _) => true
    case _ => false
  }
  assert(rddNode.isEmpty, "Expected logical plan of toJSON to not contain an RDD")
}
// Two independently-resolved JSON relations over the same path and options
// must compare equal (relation equality is used e.g. for caching lookups).
test("JSONRelation equality test") {
  withTempPath(dir => {
    val path = dir.getCanonicalFile.toURI.toString
    // Write 100 one-line JSON records as plain text for the data source to read.
    sparkContext.parallelize(1 to 100)
      .map(i => s"""{"a": 1, "b": "str$i"}""").saveAsTextFile(path)
    val d1 = DataSource(
      spark,
      userSpecifiedSchema = None,
      partitionColumns = Array.empty[String],
      bucketSpec = None,
      className = classOf[JsonFileFormat].getCanonicalName,
      options = Map("path" -> path)).resolveRelation()
    val d2 = DataSource(
      spark,
      userSpecifiedSchema = None,
      partitionColumns = Array.empty[String],
      bucketSpec = None,
      className = classOf[JsonFileFormat].getCanonicalName,
      options = Map("path" -> path)).resolveRelation()
    assert(d1 === d2)
  })
}
test("SPARK-6245 JsonInferSchema.infer on empty RDD") {
  // Primarily a no-exception check: inferring the schema of an empty RDD must
  // simply produce an empty struct rather than failing.
  val parseOptions = new JSONOptions(Map.empty[String, String], "UTC")
  val inferred = new JsonInferSchema(parseOptions).infer(empty.rdd, CreateJacksonParser.string)
  assert(inferred === StructType(Nil))
}
// SPARK-7565: DataFrames with a MapType column read from JSON must be writable
// to (and readable from) Parquet, including when the JSON input also produces
// a corrupt-record column.
test("SPARK-7565 MapType in JsonRDD") {
  withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
    withTempDir { dir =>
      val schemaWithSimpleMap = StructType(
        StructField("map", MapType(StringType, IntegerType, true), false) :: Nil)
      val df = spark.read.schema(schemaWithSimpleMap).json(mapType1)
      val path = dir.getAbsolutePath
      df.write.mode("overwrite").parquet(path)
      // order of MapType is not defined
      assert(spark.read.parquet(path).count() == 5)
      // Round-trip a DataFrame that includes the "_unparsed" corrupt column.
      val df2 = spark.read.json(corruptRecords)
      df2.write.mode("overwrite").parquet(path)
      checkAnswer(spark.read.parquet(path), df2.collect())
    }
  }
}
test("SPARK-8093 Erase empty structs") {
  // Records that contain only empty structs should be erased during schema
  // inference, leaving an empty top-level schema.
  val parseOptions = new JSONOptions(Map.empty[String, String], "UTC")
  val inferred = new JsonInferSchema(parseOptions).infer(
    emptyRecords.rdd, CreateJacksonParser.string)
  assert(inferred === StructType(Nil))
}
// Partition-directory discovery: JSON files laid out under d1=1/col1=<v>
// must expose `d1` and `col1` as queryable partition columns.
test("JSON with Partition") {
  // Writes `rdd` as text under parent/<partName>=<partValue> so the reader can
  // discover `partName` as a partition column; returns the created directory.
  def makePartition(rdd: RDD[String], parent: File, partName: String, partValue: Any): File = {
    val p = new File(parent, s"$partName=${partValue.toString}")
    rdd.saveAsTextFile(p.getCanonicalPath)
    p
  }
  withTempPath(root => {
    withTempView("test_myjson_with_part") {
      val d1 = new File(root, "d1=1")
      // root/d1=1/col1=abc : 4 records (previously bound to an unused local)
      makePartition(
        sparkContext.parallelize(2 to 5).map(i => s"""{"a": 1, "b": "str$i"}"""),
        d1,
        "col1",
        "abc")
      // root/d1=1/col1=abd : 5 records
      makePartition(
        sparkContext.parallelize(6 to 10).map(i => s"""{"a": 1, "b": "str$i"}"""),
        d1,
        "col1",
        "abd")
      spark.read.json(root.getAbsolutePath).createOrReplaceTempView("test_myjson_with_part")
      checkAnswer(sql(
        "SELECT count(a) FROM test_myjson_with_part where d1 = 1 and col1='abc'"), Row(4))
      checkAnswer(sql(
        "SELECT count(a) FROM test_myjson_with_part where d1 = 1 and col1='abd'"), Row(5))
      checkAnswer(sql(
        "SELECT count(a) FROM test_myjson_with_part where d1 = 1"), Row(9))
    }
  })
}
test("backward compatibility") {
  // This test we make sure our JSON support can read JSON data generated by previous version
  // of Spark generated through toJSON method and JSON data source.
  // The data is generated by the following program.
  // Here are a few notes:
  //   - Spark 1.5.0 cannot save timestamp data. So, we manually added timestamp field (col13)
  //     in the JSON object.
  //   - For Spark before 1.5.1, we do not generate UDTs. So, we manually added the UDT value to
  //     JSON objects generated by those Spark versions (col17).
  //   - If the type is NullType, we do not write data out.
  // Create the schema: one column per supported data type (col0..col17).
  val struct =
    StructType(
      StructField("f1", FloatType, true) ::
      StructField("f2", ArrayType(BooleanType), true) :: Nil)
  val dataTypes =
    Seq(
      StringType, BinaryType, NullType, BooleanType,
      ByteType, ShortType, IntegerType, LongType,
      FloatType, DoubleType, DecimalType(25, 5), DecimalType(6, 5),
      DateType, TimestampType,
      ArrayType(IntegerType), MapType(StringType, LongType), struct,
      new MyDenseVectorUDT())
  val fields = dataTypes.zipWithIndex.map { case (dataType, index) =>
    StructField(s"col$index", dataType, nullable = true)
  }
  val schema = StructType(fields)
  // One fixed value per column (col0, the version string, is prepended below).
  val constantValues =
    Seq(
      "a string in binary".getBytes(StandardCharsets.UTF_8),
      null,
      true,
      1.toByte,
      2.toShort,
      3,
      Long.MaxValue,
      0.25.toFloat,
      0.75,
      new java.math.BigDecimal(s"1234.23456"),
      new java.math.BigDecimal(s"1.23456"),
      java.sql.Date.valueOf("2015-01-01"),
      java.sql.Timestamp.valueOf("2015-01-01 23:50:59.123"),
      Seq(2, 3, 4),
      Map("a string" -> 2000L),
      Row(4.75.toFloat, Seq(false, true)),
      new MyDenseVector(Array(0.25, 2.25, 4.25)))
  val data =
    Row.fromSeq(Seq("Spark " + spark.sparkContext.version) ++ constantValues) :: Nil
  // Data generated by previous versions.
  // scalastyle:off
  val existingJSONData =
    """{"col0":"Spark 1.2.2","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
    """{"col0":"Spark 1.3.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
    """{"col0":"Spark 1.3.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
    """{"col0":"Spark 1.4.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
    """{"col0":"Spark 1.4.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
    """{"col0":"Spark 1.5.0","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
    """{"col0":"Spark 1.5.0","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"16436","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" :: Nil
  // scalastyle:on
  // Generate data for the current version.
  val df = spark.createDataFrame(spark.sparkContext.parallelize(data, 1), schema)
  withTempPath { path =>
    df.write.format("json").mode("overwrite").save(path.getCanonicalPath)
    // df.toJSON will convert internal rows to external rows first and then generate
    // JSON objects. While, df.write.format("json") will write internal rows directly.
    // Concatenate the legacy data with both current-version encodings and write
    // the combined set back out so one read covers every producer.
    val allJSON =
      existingJSONData ++
        df.toJSON.collect() ++
        sparkContext.textFile(path.getCanonicalPath).collect()
    Utils.deleteRecursively(path)
    sparkContext.parallelize(allJSON, 1).saveAsTextFile(path.getCanonicalPath)
    // Read data back with the schema specified.
    val col0Values =
      Seq(
        "Spark 1.2.2",
        "Spark 1.3.1",
        "Spark 1.3.1",
        "Spark 1.4.1",
        "Spark 1.4.1",
        "Spark 1.5.0",
        "Spark 1.5.0",
        "Spark " + spark.sparkContext.version,
        "Spark " + spark.sparkContext.version)
    val expectedResult = col0Values.map { v =>
      Row.fromSeq(Seq(v) ++ constantValues)
    }
    checkAnswer(
      spark.read.format("json").schema(schema).load(path.getCanonicalPath),
      expectedResult
    )
  }
}
// SPARK-11544: Hadoop path-filter settings supplied as reader options must be
// honored, so filtered-out partition files are excluded from the read.
test("SPARK-11544 test pathfilter") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    val df = spark.range(2)
    df.write.json(path + "/p=1")
    df.write.json(path + "/p=2")
    // Without a filter both partitions (2 rows each) are visible.
    assert(spark.read.json(path).count() === 4)
    val extraOptions = Map(
      "mapred.input.pathFilter.class" -> classOf[TestFileFilter].getName,
      "mapreduce.input.pathFilter.class" -> classOf[TestFileFilter].getName
    )
    // TestFileFilter drops one partition, leaving 2 rows.
    assert(spark.read.options(extraOptions).json(path).count() === 2)
    withClue("SPARK-32621: 'path' option can cause issues while inferring schema") {
      // During infer, "path" option is used again on top of the paths that have already been
      // listed. When a partition is removed by TestFileFilter, this will cause a conflict while
      // inferring partitions because the original path in the "path" option will list the
      // partition directory that has been removed.
      assert(
        spark.read.options(extraOptions).format("json").option("path", path).load.count() === 2)
    }
  }
}
// SPARK-12057: corrupt records must be captured in the configured corrupt-record
// column ("_unparsed" here) instead of raising during inference or querying.
test("SPARK-12057 additional corrupt records do not throw exceptions") {
  // Test if we can query corrupt records.
  withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
    withTempView("jsonTable") {
      val schema = StructType(
        StructField("_unparsed", StringType, true) ::
        StructField("dummy", StringType, true) :: Nil)
      {
        // We need to make sure we can infer the schema.
        val jsonDF = spark.read.json(additionalCorruptRecords)
        assert(jsonDF.schema === schema)
      }
      {
        val jsonDF = spark.read.schema(schema).json(additionalCorruptRecords)
        jsonDF.createOrReplaceTempView("jsonTable")
        // In HiveContext, backticks should be used to access columns starting with an underscore.
        // Exactly one record parses; the other four land verbatim in _unparsed.
        checkAnswer(
          sql(
            """
              |SELECT dummy, _unparsed
              |FROM jsonTable
            """.stripMargin),
          Row("test", null) ::
            Row(null, """[1,2,3]""") ::
            Row(null, """":"test", "a":1}""") ::
            Row(null, """42""") ::
            Row(null, """     ","ian":"test"}""") :: Nil
        )
      }
    }
  }
}
// Rows where the same field holds a struct in one record and an array in
// another: reading with a user-supplied struct schema must still parse both rows.
test("Parse JSON rows having an array type and a struct type in the same field.") {
  withTempDir { dir =>
    // `withTempDir` hands us an existing empty directory; remove it so that
    // `write.text` can create the path itself. (Previously a second temp dir
    // was created here, shadowing `dir` and escaping withTempDir's cleanup.)
    dir.delete()
    val path = dir.getCanonicalPath
    arrayAndStructRecords.map(record => record.replaceAll("\\n", " ")).write.text(path)
    val schema =
      StructType(
        StructField("a", StructType(
          StructField("b", StringType) :: Nil
        )) :: Nil)
    val jsonDF = spark.read.schema(schema).json(path)
    assert(jsonDF.count() == 2)
  }
}
// SPARK-12872: the writer "compression" option is case-insensitive ("gZiP")
// and compressed output must round-trip through the JSON reader.
test("SPARK-12872 Support to specify the option for compression codec") {
  withTempDir { dir =>
    // `withTempDir` hands us an existing empty directory; remove it so that
    // `write.text` can create the path itself. (Previously a second temp dir
    // was created here, shadowing `dir` and escaping withTempDir's cleanup.)
    dir.delete()
    val path = dir.getCanonicalPath
    primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
    val jsonDF = spark.read.json(path)
    val jsonDir = new File(dir, "json").getCanonicalPath
    jsonDF.coalesce(1).write
      .format("json")
      .option("compression", "gZiP")
      .save(jsonDir)
    // Gzip output keeps the ".json.gz" suffix.
    val compressedFiles = new File(jsonDir).listFiles()
    assert(compressedFiles.exists(_.getName.endsWith(".json.gz")))
    val jsonCopy = spark.read
      .format("json")
      .load(jsonDir)
    assert(jsonCopy.count == jsonDF.count)
    val jsonCopySome = jsonCopy.selectExpr("string", "long", "boolean")
    val jsonDFSome = jsonDF.selectExpr("string", "long", "boolean")
    checkAnswer(jsonCopySome, jsonDFSome)
  }
}
// SPARK-13543: an explicit compression=none option must override Hadoop-level
// compression settings, producing uncompressed .json output.
test("SPARK-13543 Write the output as uncompressed via option()") {
  // Hadoop settings that would otherwise force gzip-compressed output.
  val extraOptions = Map[String, String](
    "mapreduce.output.fileoutputformat.compress" -> "true",
    "mapreduce.output.fileoutputformat.compress.type" -> CompressionType.BLOCK.toString,
    "mapreduce.output.fileoutputformat.compress.codec" -> classOf[GzipCodec].getName,
    "mapreduce.map.output.compress" -> "true",
    "mapreduce.map.output.compress.codec" -> classOf[GzipCodec].getName
  )
  withTempDir { dir =>
    // `withTempDir` hands us an existing empty directory; remove it so that
    // `write.text` can create the path itself. (Previously a second temp dir
    // was created here, shadowing `dir` and escaping withTempDir's cleanup.)
    dir.delete()
    val path = dir.getCanonicalPath
    primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
    val jsonDF = spark.read.json(path)
    val jsonDir = new File(dir, "json").getCanonicalPath
    jsonDF.coalesce(1).write
      .format("json")
      .option("compression", "none")
      .options(extraOptions)
      .save(jsonDir)
    val compressedFiles = new File(jsonDir).listFiles()
    assert(compressedFiles.exists(!_.getName.endsWith(".json.gz")))
    val jsonCopy = spark.read
      .format("json")
      .options(extraOptions)
      .load(jsonDir)
    assert(jsonCopy.count == jsonDF.count)
    val jsonCopySome = jsonCopy.selectExpr("string", "long", "boolean")
    val jsonDFSome = jsonDF.selectExpr("string", "long", "boolean")
    checkAnswer(jsonCopySome, jsonDFSome)
  }
}
test("Casting long as timestamp") {
  withTempView("jsonTable") {
    // With a user-supplied TimestampType column, a JSON long value must be
    // converted to the corresponding timestamp.
    val tsSchema = (new StructType).add("ts", TimestampType)
    val jsonDF = spark.read.schema(tsSchema).json(timestampAsLong)
    jsonDF.createOrReplaceTempView("jsonTable")
    val result = sql("select ts from jsonTable")
    checkAnswer(result, Row(java.sql.Timestamp.valueOf("2016-01-02 03:04:05")))
  }
}
test("wide nested json table") {
  // Build a record whose two fields each contain an array of one struct with
  // 100 columns, then make sure such a wide nested schema can be inferred and
  // the data actually read.
  val innerFields = (1 to 100)
    .map { i =>
      s"""
         |"c$i": $i
       """.stripMargin
    }
    .mkString(", ")
  val json = s"""
     |{"a": [{$innerFields}], "b": [{$innerFields}]}
   """.stripMargin
  val df = spark.read.json(Seq(json).toDS())
  assert(df.schema.size === 2)
  df.collect()
}
// Dates read with one custom dateFormat must be written out with another,
// verified by re-reading the written files as raw strings.
test("Write dates correctly with dateFormat option") {
  val customSchema = new StructType(Array(StructField("date", DateType, true)))
  withTempDir { dir =>
    // With dateFormat option.
    val datesWithFormatPath = s"${dir.getCanonicalPath}/datesWithFormat.json"
    val datesWithFormat = spark.read
      .schema(customSchema)
      .option("dateFormat", "dd/MM/yyyy HH:mm")
      .json(datesRecords)
    datesWithFormat.write
      .format("json")
      .option("dateFormat", "yyyy/MM/dd")
      .save(datesWithFormatPath)
    // This will load back the dates as string.
    val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
    val stringDatesWithFormat = spark.read
      .schema(stringSchema)
      .json(datesWithFormatPath)
    val expectedStringDatesWithFormat = Seq(
      Row("2015/08/26"),
      Row("2014/10/27"),
      Row("2016/01/28"))
    checkAnswer(stringDatesWithFormat, expectedStringDatesWithFormat)
  }
}
// Timestamps read with one custom timestampFormat must be written out with
// another, verified by re-reading the written files as raw strings.
test("Write timestamps correctly with timestampFormat option") {
  val customSchema = new StructType(Array(StructField("date", TimestampType, true)))
  withTempDir { dir =>
    // With timestampFormat option.
    val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.json"
    val timestampsWithFormat = spark.read
      .schema(customSchema)
      .option("timestampFormat", "dd/MM/yyyy HH:mm")
      .json(datesRecords)
    timestampsWithFormat.write
      .format("json")
      .option("timestampFormat", "yyyy/MM/dd HH:mm")
      .save(timestampsWithFormatPath)
    // This will load back the timestamps as string.
    val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
    val stringTimestampsWithFormat = spark.read
      .schema(stringSchema)
      .json(timestampsWithFormatPath)
    val expectedStringDatesWithFormat = Seq(
      Row("2015/08/26 18:00"),
      Row("2014/10/27 18:30"),
      Row("2016/01/28 20:00"))
    checkAnswer(stringTimestampsWithFormat, expectedStringDatesWithFormat)
  }
}
// As above, but writing with an explicit timeZone option (UTC): the rendered
// strings shift accordingly, and reading back with the same options round-trips.
test("Write timestamps correctly with timestampFormat option and timeZone option") {
  val customSchema = new StructType(Array(StructField("date", TimestampType, true)))
  withTempDir { dir =>
    // With timestampFormat option and timeZone option.
    val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.json"
    val timestampsWithFormat = spark.read
      .schema(customSchema)
      .option("timestampFormat", "dd/MM/yyyy HH:mm")
      .json(datesRecords)
    timestampsWithFormat.write
      .format("json")
      .option("timestampFormat", "yyyy/MM/dd HH:mm")
      .option(DateTimeUtils.TIMEZONE_OPTION, "UTC")
      .save(timestampsWithFormatPath)
    // This will load back the timestamps as string.
    val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
    val stringTimestampsWithFormat = spark.read
      .schema(stringSchema)
      .json(timestampsWithFormatPath)
    val expectedStringDatesWithFormat = Seq(
      Row("2015/08/27 01:00"),
      Row("2014/10/28 01:30"),
      Row("2016/01/29 04:00"))
    checkAnswer(stringTimestampsWithFormat, expectedStringDatesWithFormat)
    // Reading back with the same format and time zone must reproduce the
    // original timestamps exactly.
    val readBack = spark.read
      .schema(customSchema)
      .option("timestampFormat", "yyyy/MM/dd HH:mm")
      .option(DateTimeUtils.TIMEZONE_OPTION, "UTC")
      .json(timestampsWithFormatPath)
    checkAnswer(readBack, timestampsWithFormat)
  }
}
test("SPARK-18433: Improve DataSource option keys to be more case-insensitive") {
  // The same option spelled with different letter casing must be honored
  // identically by the JSON reader.
  val records = Seq("""{"a": 3, "b": 1.1}""", """{"a": 3.1, "b": 0.000001}""").toDS()
  val expectedSchema = StructType(
    StructField("a", DecimalType(21, 1), true) ::
    StructField("b", DecimalType(7, 6), true) :: Nil)
  Seq("prefersDecimal", "PREfersdecimaL").foreach { optionKey =>
    val df = spark.read.option(optionKey, "true").json(records)
    assert(df.schema == expectedSchema)
  }
}
// SPARK-18352: multiLine mode must parse gzip-compressed whole-file JSON
// documents, and multiLine output can itself be written compressed.
test("SPARK-18352: Parse normal multi-line JSON files (compressed)") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    // Write the multi-line document as gzip-compressed text (option casing is
    // deliberately mixed to exercise case-insensitivity).
    primitiveFieldAndType
      .toDF("value")
      .write
      .option("compression", "GzIp")
      .text(path)
    assert(new File(path).listFiles().exists(_.getName.endsWith(".gz")))
    val jsonDF = spark.read.option("multiLine", true).json(path)
    val jsonDir = new File(dir, "json").getCanonicalPath
    jsonDF.coalesce(1).write
      .option("compression", "gZiP")
      .json(jsonDir)
    assert(new File(jsonDir).listFiles().exists(_.getName.endsWith(".json.gz")))
    // Both the multiLine read and the compressed round-trip must match the
    // single-line fixture read.
    val originalData = spark.read.json(primitiveFieldAndType)
    checkAnswer(jsonDF, originalData)
    checkAnswer(spark.read.schema(originalData.schema).json(jsonDir), originalData)
  }
}
test("SPARK-18352: Parse normal multi-line JSON files (uncompressed)") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    // Write the multi-line JSON document as plain text, then parse it back in
    // multiLine mode.
    primitiveFieldAndType
      .toDF("value")
      .write
      .text(path)
    val jsonDF = spark.read.option("multiLine", true).json(path)
    val jsonDir = new File(dir, "json").getCanonicalPath
    jsonDF.coalesce(1).write.json(jsonDir)
    // Without a compression option the output keeps a bare .json suffix.
    val writtenFiles = new File(jsonDir).listFiles()
    assert(writtenFiles.exists(_.getName.endsWith(".json")))
    val originalData = spark.read.json(primitiveFieldAndType)
    checkAnswer(jsonDF, originalData)
    checkAnswer(spark.read.schema(originalData.schema).json(jsonDir), originalData)
  }
}
test("SPARK-18352: Expect one JSON document per file") {
  // the json parser terminates as soon as it sees a matching END_OBJECT or END_ARRAY token.
  // this might not be the optimal behavior but this test verifies that only the first value
  // is parsed and the rest are discarded.
  // alternatively the parser could continue parsing following objects, which may further reduce
  // allocations by skipping the line reader entirely
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    spark
      .createDataFrame(Seq(Tuple1("{}{invalid}")))
      .coalesce(1)
      .write
      .text(path)
    val jsonDF = spark.read.option("multiLine", true).json(path)
    // no corrupt record column should be created
    assert(jsonDF.schema === StructType(Seq()))
    // only the first object should be read
    assert(jsonDF.count() === 1)
  }
}
// PERMISSIVE mode over one-document-per-file input: corrupt documents land in
// _corrupt_record while the single valid document parses normally.
test("SPARK-18352: Handle multi-line corrupt documents (PERMISSIVE)") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    val corruptRecordCount = additionalCorruptRecords.count().toInt
    assert(corruptRecordCount === 5)
    additionalCorruptRecords
      .toDF("value")
      // this is the minimum partition count that avoids hash collisions
      .repartition(corruptRecordCount * 4, F.hash($"value"))
      .write
      .text(path)
    val jsonDF = spark.read.option("multiLine", true).option("mode", "PERMISSIVE").json(path)
    assert(jsonDF.count() === corruptRecordCount)
    assert(jsonDF.schema === new StructType()
      .add("_corrupt_record", StringType)
      .add("dummy", StringType))
    // Join parsed output back to the raw records (whitespace-normalized) and
    // count valid vs corrupt rows.
    val counts = jsonDF
      .join(
        additionalCorruptRecords.toDF("value"),
        F.regexp_replace($"_corrupt_record", "(^\\\\s+|\\\\s+$)", "") === F.trim($"value"),
        "outer")
      .agg(
        F.count($"dummy").as("valid"),
        F.count($"_corrupt_record").as("corrupt"),
        F.count("*").as("count"))
    checkAnswer(counts, Row(1, 4, 6))
  }
}
// DROPMALFORMED mode over the same corpus: the four corrupt documents are
// silently dropped, leaving only the single valid record.
test("SPARK-19641: Handle multi-line corrupt documents (DROPMALFORMED)") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    val corruptRecordCount = additionalCorruptRecords.count().toInt
    assert(corruptRecordCount === 5)
    additionalCorruptRecords
      .toDF("value")
      // this is the minimum partition count that avoids hash collisions
      .repartition(corruptRecordCount * 4, F.hash($"value"))
      .write
      .text(path)
    val jsonDF = spark.read.option("multiLine", true).option("mode", "DROPMALFORMED").json(path)
    checkAnswer(jsonDF, Seq(Row("test")))
  }
}
// FAILFAST mode over the same corpus: both schema inference and record parsing
// must fail fast with a descriptive message when a corrupt document is hit.
test("SPARK-18352: Handle multi-line corrupt documents (FAILFAST)") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    val corruptRecordCount = additionalCorruptRecords.count().toInt
    assert(corruptRecordCount === 5)
    additionalCorruptRecords
      .toDF("value")
      // this is the minimum partition count that avoids hash collisions
      .repartition(corruptRecordCount * 4, F.hash($"value"))
      .write
      .text(path)
    val schema = new StructType().add("dummy", StringType)
    // `FAILFAST` mode should throw an exception for corrupt records.
    // Case 1: failure during schema inference.
    val exceptionOne = intercept[SparkException] {
      spark.read
        .option("multiLine", true)
        .option("mode", "FAILFAST")
        .json(path)
    }
    assert(exceptionOne.getMessage.contains("Malformed records are detected in schema " +
      "inference. Parse Mode: FAILFAST."))
    // Case 2: failure during record parsing against an explicit schema.
    val exceptionTwo = intercept[SparkException] {
      spark.read
        .option("multiLine", true)
        .option("mode", "FAILFAST")
        .schema(schema)
        .json(path)
        .collect()
    }
    assert(exceptionTwo.getMessage.contains("Malformed records are detected in record " +
      "parsing. Parse Mode: FAILFAST."))
  }
}
// The corrupt-record column must be a nullable StringType: declaring it as
// IntegerType is rejected, both with a valid mode and with an invalid mode
// string (which falls back to PERMISSIVE).
test("Throw an exception if a `columnNameOfCorruptRecord` field violates requirements") {
  val columnNameOfCorruptRecord = "_unparsed"
  // Deliberately wrong: the corrupt-record field is declared IntegerType.
  val schema = StructType(
    StructField(columnNameOfCorruptRecord, IntegerType, true) ::
    StructField("a", StringType, true) ::
    StructField("b", StringType, true) ::
    StructField("c", StringType, true) :: Nil)
  val errMsg = intercept[AnalysisException] {
    spark.read
      .option("mode", "Permissive")
      .option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
      .schema(schema)
      .json(corruptRecords)
  }.getMessage
  assert(errMsg.startsWith("The field for corrupt records must be string type and nullable"))
  // We use `PERMISSIVE` mode by default if invalid string is given.
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    corruptRecords.toDF("value").write.text(path)
    val errMsg = intercept[AnalysisException] {
      spark.read
        .option("mode", "permm")
        .option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
        .schema(schema)
        .json(path)
        .collect
    }.getMessage
    assert(errMsg.startsWith("The field for corrupt records must be string type and nullable"))
  }
}
// SPARK-18772: the special float spellings "NaN"/"Infinity"/"-Infinity" must
// parse for both FloatType and DoubleType, and only with that exact casing.
test("SPARK-18772: Parse special floats correctly") {
  val jsons = Seq(
    """{"a": "NaN"}""",
    """{"a": "Infinity"}""",
    """{"a": "-Infinity"}""")
  // positive cases: one predicate per input, in the same order as `jsons`.
  val checks: Seq[Double => Boolean] = Seq(
    _.isNaN,
    _.isPosInfinity,
    _.isNegInfinity)
  Seq(FloatType, DoubleType).foreach { dt =>
    jsons.zip(checks).foreach { case (json, check) =>
      val ds = spark.read
        .schema(StructType(Seq(StructField("a", dt))))
        .json(Seq(json).toDS())
        .select($"a".cast(DoubleType)).as[Double]
      assert(check(ds.first()))
    }
  }
  // negative cases
  Seq(FloatType, DoubleType).foreach { dt =>
    val lowerCasedJsons = jsons.map(_.toLowerCase(Locale.ROOT))
    // The special floats are case-sensitive so these cases below throw exceptions.
    lowerCasedJsons.foreach { lowerCasedJson =>
      val e = intercept[SparkException] {
        spark.read
          .option("mode", "FAILFAST")
          .schema(StructType(Seq(StructField("a", dt))))
          .json(Seq(lowerCasedJson).toDS())
          .collect()
      }
      assert(e.getMessage.contains("Cannot parse"))
    }
  }
}
// SPARK-21610: selecting only the internal corrupt-record column from a
// file-based read is disallowed; caching the full DataFrame first is the
// documented workaround.
test("SPARK-21610: Corrupt records are not handled properly when creating a dataframe " +
  "from a file") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    // Third record has a string where a ByteType is expected -> corrupt.
    val data =
      """{"field": 1}
        |{"field": 2}
        |{"field": "3"}""".stripMargin
    Seq(data).toDF().repartition(1).write.text(path)
    val schema = new StructType().add("field", ByteType).add("_corrupt_record", StringType)
    // negative cases
    val msg = intercept[AnalysisException] {
      spark.read.schema(schema).json(path).select("_corrupt_record").collect()
    }.getMessage
    assert(msg.contains("only include the internal corrupt record column"))
    // workaround
    val df = spark.read.schema(schema).json(path).cache()
    assert(df.filter($"_corrupt_record".isNotNull).count() == 1)
    assert(df.filter($"_corrupt_record".isNull).count() == 2)
    checkAnswer(
      df.select("_corrupt_record"),
      Row(null) :: Row(null) :: Row("{\\"field\\": \\"3\\"}") :: Nil
    )
  }
}
// Registers one read/write/round-trip test for a given custom line separator
// (SPARK-21289). Invoked below for a range of separator strings.
def testLineSeparator(lineSep: String): Unit = {
  test(s"SPARK-21289: Support line separator - lineSep: '$lineSep'") {
    // Read: records may themselves contain newlines; only `lineSep` delimits them.
    val data =
      s"""
        |  {"f":
        |"a", "f0": 1}$lineSep{"f":
        |
        |"c",  "f0": 2}$lineSep{"f": "d",  "f0": 3}
      """.stripMargin
    val dataWithTrailingLineSep = s"$data$lineSep"
    // A trailing separator must not produce an extra (empty) record.
    Seq(data, dataWithTrailingLineSep).foreach { lines =>
      withTempPath { path =>
        Files.write(path.toPath, lines.getBytes(StandardCharsets.UTF_8))
        val df = spark.read.option("lineSep", lineSep).json(path.getAbsolutePath)
        val expectedSchema =
          StructType(StructField("f", StringType) :: StructField("f0", LongType) :: Nil)
        checkAnswer(df, Seq(("a", 1), ("c", 2), ("d", 3)).toDF())
        assert(df.schema === expectedSchema)
      }
    }
    // Write: output records are joined (and terminated) by `lineSep`.
    withTempPath { path =>
      Seq("a", "b", "c").toDF("value").coalesce(1)
        .write.option("lineSep", lineSep).json(path.getAbsolutePath)
      val partFile = TestUtils.recursiveList(path).filter(f => f.getName.startsWith("part-")).head
      val readBack = new String(Files.readAllBytes(partFile.toPath), StandardCharsets.UTF_8)
      assert(
        readBack === s"""{"value":"a"}$lineSep{"value":"b"}$lineSep{"value":"c"}$lineSep""")
    }
    // Roundtrip
    withTempPath { path =>
      val df = Seq("a", "b", "c").toDF()
      df.write.option("lineSep", lineSep).json(path.getAbsolutePath)
      val readBack = spark.read.option("lineSep", lineSep).json(path.getAbsolutePath)
      checkAnswer(df, readBack)
    }
  }
}
// scalastyle:off nonascii
// Exercise a spread of separators: single ASCII characters, multi-character
// strings, the 0x1E control character, and a non-ASCII character.
Seq("|", "^", "::", "!!!@3", 0x1E.toChar.toString, "아").foreach { lineSep =>
  testLineSeparator(lineSep)
}
// scalastyle:on nonascii
// Without a lineSep option, \\r, \\r\\n and \\n are all accepted as record
// separators within the same file.
test("""SPARK-21289: Support line separator - default value \\r, \\r\\n and \\n""") {
  val data =
    "{\\"f\\": \\"a\\", \\"f0\\": 1}\\r{\\"f\\": \\"c\\", \\"f0\\": 2}\\r\\n{\\"f\\": \\"d\\", \\"f0\\": 3}\\n"
  withTempPath { path =>
    Files.write(path.toPath, data.getBytes(StandardCharsets.UTF_8))
    val df = spark.read.json(path.getAbsolutePath)
    val expectedSchema =
      StructType(StructField("f", StringType) :: StructField("f0", LongType) :: Nil)
    checkAnswer(df, Seq(("a", 1), ("c", 2), ("d", 3)).toDF())
    assert(df.schema === expectedSchema)
  }
}
// SPARK-23849: with samplingRatio < 1.0, schema inference over a file reads a
// sample yet still infers the expected schema; also covers the SPARK-32621
// interaction with an explicit "path" option.
test("SPARK-23849: schema inferring touches less data if samplingRatio < 1.0") {
  // Set default values for the DataSource parameters to make sure
  // that whole test file is mapped to only one partition. This will guarantee
  // reliable sampling of the input file.
  withSQLConf(
    SQLConf.FILES_MAX_PARTITION_BYTES.key -> (128 * 1024 * 1024).toString,
    SQLConf.FILES_OPEN_COST_IN_BYTES.key -> (4 * 1024 * 1024).toString
  )(withTempPath { path =>
    val ds = sampledTestData.coalesce(1)
    ds.write.text(path.getAbsolutePath)
    val readback1 = spark.read.option("samplingRatio", 0.1).json(path.getCanonicalPath)
    assert(readback1.schema == new StructType().add("f1", LongType))
    withClue("SPARK-32621: 'path' option can cause issues while inferring schema") {
      // During infer, "path" option gets added again to the paths that have already been listed.
      // This results in reading more data than necessary and causes different schema to be
      // inferred when sampling ratio is involved.
      val readback2 = spark.read
        .option("samplingRatio", 0.1).option("path", path.getCanonicalPath)
        .format("json").load
      assert(readback2.schema == new StructType().add("f1", LongType))
    }
  })
}
test("SPARK-23849: usage of samplingRatio while parsing a dataset of strings") {
val ds = sampledTestData.coalesce(1)
val readback = spark.read.option("samplingRatio", 0.1).json(ds)
assert(readback.schema == new StructType().add("f1", LongType))
}
test("SPARK-23849: samplingRatio is out of the range (0, 1.0]") {
val ds = spark.range(0, 100, 1, 1).map(_.toString)
val errorMsg0 = intercept[IllegalArgumentException] {
spark.read.option("samplingRatio", -1).json(ds)
}.getMessage
assert(errorMsg0.contains("samplingRatio (-1.0) should be greater than 0"))
val errorMsg1 = intercept[IllegalArgumentException] {
spark.read.option("samplingRatio", 0).json(ds)
}.getMessage
assert(errorMsg1.contains("samplingRatio (0.0) should be greater than 0"))
val sampled = spark.read.option("samplingRatio", 1.0).json(ds)
assert(sampled.count() == ds.count())
}
test("SPARK-23723: json in UTF-16 with BOM") {
val fileName = "test-data/utf16WithBOM.json"
val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
val jsonDF = spark.read.schema(schema)
.option("multiline", "true")
.option("encoding", "UTF-16")
.json(testFile(fileName))
checkAnswer(jsonDF, Seq(Row("Chris", "Baird"), Row("Doug", "Rood")))
}
test("SPARK-23723: multi-line json in UTF-32BE with BOM") {
val fileName = "test-data/utf32BEWithBOM.json"
val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
val jsonDF = spark.read.schema(schema)
.option("multiline", "true")
.json(testFile(fileName))
checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
}
test("SPARK-23723: Use user's encoding in reading of multi-line json in UTF-16LE") {
val fileName = "test-data/utf16LE.json"
val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
val jsonDF = spark.read.schema(schema)
.option("multiline", "true")
.options(Map("encoding" -> "UTF-16LE"))
.json(testFile(fileName))
checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
}
test("SPARK-23723: Unsupported encoding name") {
val invalidCharset = "UTF-128"
val exception = intercept[UnsupportedCharsetException] {
spark.read
.options(Map("encoding" -> invalidCharset, "lineSep" -> "\\n"))
.json(testFile("test-data/utf16LE.json"))
.count()
}
assert(exception.getMessage.contains(invalidCharset))
}
test("SPARK-23723: checking that the encoding option is case agnostic") {
val fileName = "test-data/utf16LE.json"
val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
val jsonDF = spark.read.schema(schema)
.option("multiline", "true")
.options(Map("encoding" -> "uTf-16lE"))
.json(testFile(fileName))
checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
}
test("SPARK-23723: specified encoding is not matched to actual encoding") {
val fileName = "test-data/utf16LE.json"
val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
val exception = intercept[SparkException] {
spark.read.schema(schema)
.option("mode", "FAILFAST")
.option("multiline", "true")
.options(Map("encoding" -> "UTF-16BE"))
.json(testFile(fileName))
.count()
}
assert(exception.getMessage.contains("Malformed records are detected in record parsing"))
}
def checkEncoding(expectedEncoding: String, pathToJsonFiles: String,
expectedContent: String): Unit = {
val jsonFiles = new File(pathToJsonFiles)
.listFiles()
.filter(_.isFile)
.filter(_.getName.endsWith("json"))
val actualContent = jsonFiles.map { file =>
new String(Files.readAllBytes(file.toPath), expectedEncoding)
}.mkString.trim
assert(actualContent == expectedContent)
}
test("SPARK-23723: save json in UTF-32BE") {
val encoding = "UTF-32BE"
withTempPath { path =>
val df = spark.createDataset(Seq(("Dog", 42)))
df.write
.options(Map("encoding" -> encoding))
.json(path.getCanonicalPath)
checkEncoding(
expectedEncoding = encoding,
pathToJsonFiles = path.getCanonicalPath,
expectedContent = """{"_1":"Dog","_2":42}""")
}
}
test("SPARK-23723: save json in default encoding - UTF-8") {
withTempPath { path =>
val df = spark.createDataset(Seq(("Dog", 42)))
df.write.json(path.getCanonicalPath)
checkEncoding(
expectedEncoding = "UTF-8",
pathToJsonFiles = path.getCanonicalPath,
expectedContent = """{"_1":"Dog","_2":42}""")
}
}
  test("SPARK-23723: wrong output encoding") {
    // A charset name no JVM provider supports.
    val encoding = "UTF-128"
    val exception = intercept[SparkException] {
      withTempPath { path =>
        val df = spark.createDataset(Seq((0)))
        df.write
          .options(Map("encoding" -> encoding))
          .json(path.getCanonicalPath)
      }
    }
    // The UnsupportedCharsetException is nested in the cause chain, so render
    // the full stack trace into a buffer and search it rather than walking
    // getCause manually.
    val baos = new ByteArrayOutputStream()
    val ps = new PrintStream(baos, true, StandardCharsets.UTF_8.name())
    exception.printStackTrace(ps)
    ps.flush()
    assert(baos.toString.contains(
      "java.nio.charset.UnsupportedCharsetException: UTF-128"))
  }
test("SPARK-23723: read back json in UTF-16LE") {
val options = Map("encoding" -> "UTF-16LE", "lineSep" -> "\\n")
withTempPath { path =>
val ds = spark.createDataset(Seq(("a", 1), ("b", 2), ("c", 3))).repartition(2)
ds.write.options(options).json(path.getCanonicalPath)
val readBack = spark
.read
.options(options)
.json(path.getCanonicalPath)
checkAnswer(readBack.toDF(), ds.toDF())
}
}
test("SPARK-23723: write json in UTF-16/32 with multiline off") {
Seq("UTF-16", "UTF-32").foreach { encoding =>
withTempPath { path =>
val ds = spark.createDataset(Seq(("a", 1))).repartition(1)
ds.write
.option("encoding", encoding)
.option("multiline", false)
.json(path.getCanonicalPath)
val jsonFiles = path.listFiles().filter(_.getName.endsWith("json"))
jsonFiles.foreach { jsonFile =>
val readback = Files.readAllBytes(jsonFile.toPath)
val expected = ("""{"_1":"a","_2":1}""" + "\\n").getBytes(Charset.forName(encoding))
assert(readback === expected)
}
}
}
}
  // Generates one test case per (lineSep, encoding) combination: writes two
  // records joined by the raw separator bytes in the given encoding, then
  // reads them back with the same options and checks the round trip.
  def checkReadJson(lineSep: String, encoding: String, inferSchema: Boolean, id: Int): Unit = {
    test(s"SPARK-23724: checks reading json in ${encoding} #${id}") {
      val schema = new StructType().add("f1", StringType).add("f2", IntegerType)
      withTempPath { path =>
        val records = List(("a", 1), ("b", 2))
        // Encode each record separately and splice the separator bytes between
        // them — no trailing separator is written.
        val data = records
          .map(rec => s"""{"f1":"${rec._1}", "f2":${rec._2}}""".getBytes(encoding))
          .reduce((a1, a2) => a1 ++ lineSep.getBytes(encoding) ++ a2)
        val os = new FileOutputStream(path)
        os.write(data)
        os.close()
        // Either infer the schema from data or use the explicit one above.
        val reader = if (inferSchema) {
          spark.read
        } else {
          spark.read.schema(schema)
        }
        val readBack = reader
          .option("encoding", encoding)
          .option("lineSep", lineSep)
          .json(path.getCanonicalPath)
        checkAnswer(readBack, records.map(rec => Row(rec._1, rec._2)))
      }
    }
  }
// scalastyle:off nonascii
List(
(0, "|", "UTF-8", false),
(1, "^", "UTF-16BE", true),
(2, "::", "ISO-8859-1", true),
(3, "!!!@3", "UTF-32LE", false),
(4, 0x1E.toChar.toString, "UTF-8", true),
(5, "아", "UTF-32BE", false),
(6, "куку", "CP1251", true),
(7, "sep", "utf-8", false),
(8, "\\r\\n", "UTF-16LE", false),
(9, "\\r\\n", "utf-16be", true),
(10, "\\u000d\\u000a", "UTF-32BE", false),
(11, "\\u000a\\u000d", "UTF-8", true),
(12, "===", "US-ASCII", false),
(13, "$^+", "utf-32le", true)
).foreach {
case (testNum, sep, encoding, inferSchema) => checkReadJson(sep, encoding, inferSchema, testNum)
}
// scalastyle:on nonascii
test("SPARK-23724: lineSep should be set if encoding if different from UTF-8") {
val encoding = "UTF-16LE"
val exception = intercept[IllegalArgumentException] {
spark.read
.options(Map("encoding" -> encoding))
.json(testFile("test-data/utf16LE.json"))
.count()
}
assert(exception.getMessage.contains(
s"""The lineSep option must be specified for the $encoding encoding"""))
}
private val badJson = "\\u0000\\u0000\\u0000A\\u0001AAA"
test("SPARK-23094: permissively read JSON file with leading nulls when multiLine is enabled") {
withTempPath { tempDir =>
val path = tempDir.getAbsolutePath
Seq(badJson + """{"a":1}""").toDS().write.text(path)
val expected = s"""${badJson}{"a":1}\\n"""
val schema = new StructType().add("a", IntegerType).add("_corrupt_record", StringType)
val df = spark.read.format("json")
.option("mode", "PERMISSIVE")
.option("multiLine", true)
.option("encoding", "UTF-8")
.schema(schema).load(path)
checkAnswer(df, Row(null, expected))
}
}
test("SPARK-23094: permissively read JSON file with leading nulls when multiLine is disabled") {
withTempPath { tempDir =>
val path = tempDir.getAbsolutePath
Seq(badJson, """{"a":1}""").toDS().write.text(path)
val schema = new StructType().add("a", IntegerType).add("_corrupt_record", StringType)
val df = spark.read.format("json")
.option("mode", "PERMISSIVE")
.option("multiLine", false)
.option("encoding", "UTF-8")
.schema(schema).load(path)
checkAnswer(df, Seq(Row(1, null), Row(null, badJson)))
}
}
test("SPARK-23094: permissively parse a dataset contains JSON with leading nulls") {
checkAnswer(
spark.read.option("mode", "PERMISSIVE").option("encoding", "UTF-8").json(Seq(badJson).toDS()),
Row(badJson))
}
test("SPARK-23772 ignore column of all null values or empty array during schema inference") {
withTempPath { tempDir =>
val path = tempDir.getAbsolutePath
// primitive types
Seq(
"""{"a":null, "b":1, "c":3.0}""",
"""{"a":null, "b":null, "c":"string"}""",
"""{"a":null, "b":null, "c":null}""")
.toDS().write.text(path)
var df = spark.read.format("json")
.option("dropFieldIfAllNull", true)
.load(path)
var expectedSchema = new StructType()
.add("b", LongType).add("c", StringType)
assert(df.schema === expectedSchema)
checkAnswer(df, Row(1, "3.0") :: Row(null, "string") :: Row(null, null) :: Nil)
// arrays
Seq(
"""{"a":[2, 1], "b":[null, null], "c":null, "d":[[], [null]], "e":[[], null, [[]]]}""",
"""{"a":[null], "b":[null], "c":[], "d":[null, []], "e":null}""",
"""{"a":null, "b":null, "c":[], "d":null, "e":[null, [], null]}""")
.toDS().write.mode("overwrite").text(path)
df = spark.read.format("json")
.option("dropFieldIfAllNull", true)
.load(path)
expectedSchema = new StructType()
.add("a", ArrayType(LongType))
assert(df.schema === expectedSchema)
checkAnswer(df, Row(Array(2, 1)) :: Row(Array(null)) :: Row(null) :: Nil)
// structs
Seq(
"""{"a":{"a1": 1, "a2":"string"}, "b":{}}""",
"""{"a":{"a1": 2, "a2":null}, "b":{"b1":[null]}}""",
"""{"a":null, "b":null}""")
.toDS().write.mode("overwrite").text(path)
df = spark.read.format("json")
.option("dropFieldIfAllNull", true)
.load(path)
expectedSchema = new StructType()
.add("a", StructType(StructField("a1", LongType) :: StructField("a2", StringType)
:: Nil))
assert(df.schema === expectedSchema)
checkAnswer(df, Row(Row(1, "string")) :: Row(Row(2, null)) :: Row(null) :: Nil)
}
}
test("SPARK-24190: restrictions for JSONOptions in read") {
for (encoding <- Set("UTF-16", "UTF-32")) {
val exception = intercept[IllegalArgumentException] {
spark.read
.option("encoding", encoding)
.option("multiLine", false)
.json(testFile("test-data/utf16LE.json"))
.count()
}
assert(exception.getMessage.contains("encoding must not be included in the denyList"))
}
}
  test("count() for malformed input") {
    // Reads `input` with an explicit schema and checks how many rows survive.
    def countForMalformedJSON(expected: Long, input: Seq[String]): Unit = {
      val schema = new StructType().add("a", StringType)
      val strings = spark.createDataset(input)
      val df = spark.read.schema(schema).json(strings)
      assert(df.count() == expected)
    }
    def checkCount(expected: Long): Unit = {
      val validRec = """{"a":"b"}"""
      // Each input mixes one well-formed record with one malformed or
      // type-mismatched record.
      val inputs = Seq(
        Seq("{-}", validRec),
        Seq(validRec, "?"),
        Seq("}", validRec),
        Seq(validRec, """{"a": [1, 2, 3]}"""),
        Seq("""{"a": {"a": "b"}}""", validRec)
      )
      inputs.foreach { input =>
        countForMalformedJSON(expected, input)
      }
    }
    checkCount(2)
    // An empty string produces no rows at all.
    countForMalformedJSON(0, Seq(""))
  }
test("SPARK-26745: count() for non-multiline input with empty lines") {
withTempPath { tempPath =>
val path = tempPath.getCanonicalPath
Seq("""{ "a" : 1 }""", "", """ { "a" : 2 }""", " \\t ")
.toDS()
.repartition(1)
.write
.text(path)
assert(spark.read.json(path).count() === 2)
}
}
  // Expects parsing of {"a":""} to fail in FAILFAST mode for `dataType`,
  // because an empty string is not a valid value for non-string types.
  private def failedOnEmptyString(dataType: DataType): Unit = {
    val df = spark.read.schema(s"a ${dataType.catalogString}")
      .option("mode", "FAILFAST").json(Seq("""{"a":""}""").toDS)
    val errMessage = intercept[SparkException] {
      df.collect()
    }.getMessage
    assert(errMessage.contains(
      s"Failed to parse an empty string for data type ${dataType.catalogString}"))
  }
  // Expects parsing of {"a":""} to succeed for `dataType` and yield `expected`
  // (used for string/binary types, which accept empty strings).
  private def emptyString(dataType: DataType, expected: Any): Unit = {
    val df = spark.read.schema(s"a ${dataType.catalogString}")
      .option("mode", "FAILFAST").json(Seq("""{"a":""}""").toDS)
    checkAnswer(df, Row(expected) :: Nil)
  }
test("SPARK-25040: empty strings should be disallowed") {
failedOnEmptyString(BooleanType)
failedOnEmptyString(ByteType)
failedOnEmptyString(ShortType)
failedOnEmptyString(IntegerType)
failedOnEmptyString(LongType)
failedOnEmptyString(FloatType)
failedOnEmptyString(DoubleType)
failedOnEmptyString(DecimalType.SYSTEM_DEFAULT)
failedOnEmptyString(TimestampType)
failedOnEmptyString(DateType)
failedOnEmptyString(ArrayType(IntegerType))
failedOnEmptyString(MapType(StringType, IntegerType, true))
failedOnEmptyString(StructType(StructField("f1", IntegerType, true) :: Nil))
emptyString(StringType, "")
emptyString(BinaryType, "".getBytes(StandardCharsets.UTF_8))
}
test("SPARK-25040: allowing empty strings when legacy config is enabled") {
def emptyStringAsNull(dataType: DataType): Unit = {
val df = spark.read.schema(s"a ${dataType.catalogString}")
.option("mode", "FAILFAST").json(Seq("""{"a":""}""").toDS)
checkAnswer(df, Row(null) :: Nil)
}
// Legacy mode prior to Spark 3.0.0
withSQLConf(SQLConf.LEGACY_ALLOW_EMPTY_STRING_IN_JSON.key -> "true") {
emptyStringAsNull(BooleanType)
emptyStringAsNull(ByteType)
emptyStringAsNull(ShortType)
emptyStringAsNull(IntegerType)
emptyStringAsNull(LongType)
failedOnEmptyString(FloatType)
failedOnEmptyString(DoubleType)
failedOnEmptyString(TimestampType)
failedOnEmptyString(DateType)
emptyStringAsNull(DecimalType.SYSTEM_DEFAULT)
emptyStringAsNull(ArrayType(IntegerType))
emptyStringAsNull(MapType(StringType, IntegerType, true))
emptyStringAsNull(StructType(StructField("f1", IntegerType, true) :: Nil))
emptyString(StringType, "")
emptyString(BinaryType, "".getBytes(StandardCharsets.UTF_8))
}
}
test("return partial result for bad records") {
val schema = "a double, b array<int>, c string, _corrupt_record string"
val badRecords = Seq(
"""{"a":"-","b":[0, 1, 2],"c":"abc"}""",
"""{"a":0.1,"b":{},"c":"def"}""").toDS()
val df = spark.read.schema(schema).json(badRecords)
checkAnswer(
df,
Row(null, Array(0, 1, 2), "abc", """{"a":"-","b":[0, 1, 2],"c":"abc"}""") ::
Row(0.1, null, "def", """{"a":0.1,"b":{},"c":"def"}""") :: Nil)
}
test("inferring timestamp type") {
def schemaOf(jsons: String*): StructType = {
spark.read.option("inferTimestamp", true).json(jsons.toDS).schema
}
assert(schemaOf(
"""{"a":"2018-12-17T10:11:12.123-01:00"}""",
"""{"a":"2018-12-16T22:23:24.123-02:00"}""") === fromDDL("a timestamp"))
assert(schemaOf("""{"a":"2018-12-17T10:11:12.123-01:00"}""", """{"a":1}""")
=== fromDDL("a string"))
assert(schemaOf("""{"a":"2018-12-17T10:11:12.123-01:00"}""", """{"a":"123"}""")
=== fromDDL("a string"))
assert(schemaOf("""{"a":"2018-12-17T10:11:12.123-01:00"}""", """{"a":null}""")
=== fromDDL("a timestamp"))
assert(schemaOf("""{"a":null}""", """{"a":"2018-12-17T10:11:12.123-01:00"}""")
=== fromDDL("a timestamp"))
}
test("roundtrip for timestamp type inferring") {
val customSchema = new StructType().add("date", TimestampType)
withTempDir { dir =>
val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.json"
val timestampsWithFormat = spark.read
.option("timestampFormat", "dd/MM/yyyy HH:mm")
.option("inferTimestamp", true)
.json(datesRecords)
assert(timestampsWithFormat.schema === customSchema)
timestampsWithFormat.write
.format("json")
.option("timestampFormat", "yyyy-MM-dd HH:mm:ss")
.option(DateTimeUtils.TIMEZONE_OPTION, "UTC")
.save(timestampsWithFormatPath)
val readBack = spark.read
.option("timestampFormat", "yyyy-MM-dd HH:mm:ss")
.option(DateTimeUtils.TIMEZONE_OPTION, "UTC")
.option("inferTimestamp", true)
.json(timestampsWithFormatPath)
assert(readBack.schema === customSchema)
checkAnswer(readBack, timestampsWithFormat)
}
}
test("SPARK-30960, SPARK-31641: parse date/timestamp string with legacy format") {
val julianDay = -141704 // 1582-01-01 in Julian calendar
val ds = Seq(
s"{'t': '2020-1-12 3:23:34.12', 'd': '2020-1-12 T', 'd2': '12345', 'd3': '$julianDay'}"
).toDS()
val json = spark.read.schema("t timestamp, d date, d2 date, d3 date").json(ds)
checkAnswer(json, Row(
Timestamp.valueOf("2020-1-12 3:23:34.12"),
Date.valueOf("2020-1-12"),
Date.valueOf(LocalDate.ofEpochDay(12345)),
Date.valueOf("1582-01-01")))
}
test("exception mode for parsing date/timestamp string") {
val ds = Seq("{'t': '2020-01-27T20:06:11.847-0800'}").toDS()
val json = spark.read
.schema("t timestamp")
.option("timestampFormat", "yyyy-MM-dd'T'HH:mm:ss.SSSz")
.json(ds)
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> "exception") {
val msg = intercept[SparkException] {
json.collect()
}.getCause.getMessage
assert(msg.contains("Fail to parse"))
}
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> "legacy") {
checkAnswer(json, Row(Timestamp.valueOf("2020-01-27 20:06:11.847")))
}
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> "corrected") {
checkAnswer(json, Row(null))
}
}
test("filters push down") {
withTempPath { path =>
val t = "2019-12-17 00:01:02"
Seq(
"""{"c0": "abc", "c1": {"c2": 1, "c3": "2019-11-14 20:35:30"}}""",
s"""{"c0": "def", "c1": {"c2": 2, "c3": "$t"}}""",
s"""{"c0": "defa", "c1": {"c2": 3, "c3": "$t"}}""",
s"""{"c0": "define", "c1": {"c2": 2, "c3": "$t"}}""").toDF("data")
.repartition(1)
.write.text(path.getAbsolutePath)
Seq(true, false).foreach { filterPushdown =>
withSQLConf(SQLConf.JSON_FILTER_PUSHDOWN_ENABLED.key -> filterPushdown.toString) {
Seq("PERMISSIVE", "DROPMALFORMED", "FAILFAST").foreach { mode =>
val readback = spark.read
.option("mode", mode)
.option("timestampFormat", "yyyy-MM-dd HH:mm:ss")
.schema("c0 string, c1 struct<c2:integer,c3:timestamp>")
.json(path.getAbsolutePath)
.where($"c1.c2" === 2 && $"c0".startsWith("def"))
.select($"c1.c3")
assert(readback.count() === 2)
checkAnswer(readback, Seq(Row(Timestamp.valueOf(t)), Row(Timestamp.valueOf(t))))
}
}
}
}
}
test("apply filters to malformed rows") {
withSQLConf(SQLConf.JSON_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withTempPath { path =>
Seq(
"{}",
"""{"invalid": 0}""",
"""{"i":}""",
"""{"i": 0}""",
"""{"i": 1, "t": "2020-01-28 01:00:00"}""",
"""{"t": "2020-01-28 02:00:00"}""",
"""{"i": "abc", "t": "2020-01-28 03:00:00"}""",
"""{"i": 2, "t": "2020-01-28 04:00:00", "d": 3.14}""").toDF("data")
.repartition(1)
.write.text(path.getAbsolutePath)
val schema = "i INTEGER, t TIMESTAMP"
val readback = spark.read
.schema(schema)
.option("timestampFormat", "yyyy-MM-dd HH:mm:ss")
.json(path.getAbsolutePath)
// readback:
// +----+-------------------+
// |i |t |
// +----+-------------------+
// |null|null |
// |null|null |
// |null|null |
// |0 |null |
// |1 |2020-01-28 01:00:00|
// |null|2020-01-28 02:00:00|
// |null|2020-01-28 03:00:00|
// |2 |2020-01-28 04:00:00|
// +----+-------------------+
checkAnswer(
readback.where($"i".isNull && $"t".isNotNull),
Seq(
Row(null, Timestamp.valueOf("2020-01-28 02:00:00")),
Row(null, Timestamp.valueOf("2020-01-28 03:00:00"))))
checkAnswer(
readback.where($"i" >= 0 && $"t" > "2020-01-28 00:00:00"),
Seq(
Row(1, Timestamp.valueOf("2020-01-28 01:00:00")),
Row(2, Timestamp.valueOf("2020-01-28 04:00:00"))))
checkAnswer(
readback.where($"t".isNull).select($"i"),
Seq(Row(null), Row(null), Row(null), Row(0)))
}
}
}
test("case sensitivity of filters references") {
Seq(true, false).foreach { filterPushdown =>
withSQLConf(SQLConf.JSON_FILTER_PUSHDOWN_ENABLED.key -> filterPushdown.toString) {
withTempPath { path =>
Seq(
"""{"aaa": 0, "BBB": 1}""",
"""{"AAA": 2, "bbb": 3}""").toDF().write.text(path.getCanonicalPath)
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
val readback = spark.read.schema("aaa integer, BBB integer")
.json(path.getCanonicalPath)
checkAnswer(readback, Seq(Row(null, null), Row(0, 1)))
checkAnswer(readback.filter($"AAA" === 0 && $"bbb" === 1), Seq(Row(0, 1)))
checkAnswer(readback.filter($"AAA" === 2 && $"bbb" === 3), Seq())
// Schema inferring
val errorMsg = intercept[AnalysisException] {
spark.read.json(path.getCanonicalPath).collect()
}.getMessage
assert(errorMsg.contains("Found duplicate column(s) in the data schema"))
}
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
val readback = spark.read.schema("aaa integer, BBB integer")
.json(path.getCanonicalPath)
checkAnswer(readback, Seq(Row(null, null), Row(0, 1)))
val errorMsg = intercept[AnalysisException] {
readback.filter($"AAA" === 0 && $"bbb" === 1).collect()
}.getMessage
assert(errorMsg.contains("cannot resolve 'AAA'"))
// Schema inferring
val readback2 = spark.read.json(path.getCanonicalPath)
checkAnswer(
readback2.filter($"AAA" === 2).select($"AAA", $"bbb"),
Seq(Row(2, 3)))
checkAnswer(readback2.filter($"aaa" === 2).select($"AAA", $"bbb"), Seq())
}
}
}
}
}
test("SPARK-32810: JSON data source should be able to read files with " +
"escaped glob metacharacter in the paths") {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
// test JSON writer / reader without specifying schema
val jsonTableName = "{def}"
spark.range(3).coalesce(1).write.json(s"$basePath/$jsonTableName")
val readback = spark.read
.json(s"$basePath/${"""(\\[|\\]|\\{|\\})""".r.replaceAllIn(jsonTableName, """\\\\$1""")}")
assert(readback.collect sameElements Array(Row(0), Row(1), Row(2)))
}
}
test("SPARK-35047: Write Non-ASCII character as codepoint") {
// scalastyle:off nonascii
withTempPaths(2) { paths =>
paths.foreach(_.delete())
val seq = Seq("a", "\\n", "\\u3042")
val df = seq.toDF
val basePath1 = paths(0).getCanonicalPath
df.write.option("writeNonAsciiCharacterAsCodePoint", "true")
.option("pretty", "false").json(basePath1)
val actualText1 = spark.read.option("wholetext", "true").text(basePath1)
.sort("value").map(_.getString(0)).collect().mkString
val expectedText1 =
s"""{"value":"\\\\n"}
|{"value":"\\\\u3042"}
|{"value":"a"}
|""".stripMargin
assert(actualText1 === expectedText1)
val actualJson1 = spark.read.json(basePath1)
.sort("value").map(_.getString(0)).collect().mkString
val expectedJson1 = "\\na\\u3042"
assert(actualJson1 === expectedJson1)
// Test for pretty printed JSON.
// If multiLine option is set to true, the format should be should be
// one JSON record per file. So LEAF_NODE_DEFAULT_PARALLELISM is set here.
withSQLConf(SQLConf.LEAF_NODE_DEFAULT_PARALLELISM.key -> s"${seq.length}") {
val basePath2 = paths(1).getCanonicalPath
df.write.option("writeNonAsciiCharacterAsCodePoint", "true")
.option("pretty", "true").json(basePath2)
val actualText2 = spark.read.option("wholetext", "true").text(basePath2)
.sort("value").map(_.getString(0)).collect().mkString
val expectedText2 =
s"""{
| "value" : "\\\\n"
|}
|{
| "value" : "\\\\u3042"
|}
|{
| "value" : "a"
|}
|""".stripMargin
assert(actualText2 === expectedText2)
val actualJson2 = spark.read.option("multiLine", "true").json(basePath2)
.sort("value").map(_.getString(0)).collect().mkString
val expectedJson2 = "\\na\\u3042"
assert(actualJson2 === expectedJson2)
}
}
// scalastyle:on nonascii
}
test("SPARK-35104: Fix wrong indentation for multiple JSON even if `pretty` option is true") {
withSQLConf(SQLConf.LEAF_NODE_DEFAULT_PARALLELISM.key -> "1") {
withTempPath { path =>
val basePath = path.getCanonicalPath
val df = Seq("a", "b", "c").toDF
df.write.option("pretty", "true").json(basePath)
val expectedText =
s"""{
| "value" : "a"
|}
|{
| "value" : "b"
|}
|{
| "value" : "c"
|}
|""".stripMargin
val actualText = spark.read.option("wholetext", "true")
.text(basePath).map(_.getString(0)).collect().mkString
assert(actualText === expectedText)
}
}
}
}
/** Runs the whole JSON suite against the DataSource V1 file source. */
class JsonV1Suite extends JsonSuite {
  override protected def sparkConf: SparkConf = {
    val conf = super.sparkConf
    conf.set(SQLConf.USE_V1_SOURCE_LIST, "json")
  }
}
// Runs the whole JSON suite against the DataSource V2 implementation
// (an empty USE_V1_SOURCE_LIST disables the V1 fallback for json).
class JsonV2Suite extends JsonSuite {
  override protected def sparkConf: SparkConf =
    super
      .sparkConf
      .set(SQLConf.USE_V1_SOURCE_LIST, "")

  test("get pushed filters") {
    val attr = "col"
    // Builds a JsonScanBuilder over a path; only the filter-pushdown API is
    // exercised, so the file itself is never read.
    def getBuilder(path: String): JsonScanBuilder = {
      val fileIndex = new InMemoryFileIndex(
        spark,
        Seq(new org.apache.hadoop.fs.Path(path, "file.json")),
        Map.empty,
        None,
        NoopCache)
      val schema = new StructType().add(attr, IntegerType)
      val options = CaseInsensitiveStringMap.empty()
      new JsonScanBuilder(spark, fileIndex, schema, schema, options)
    }
    val filters: Array[sources.Filter] = Array(sources.IsNotNull(attr))
    // With pushdown enabled the builder records the pushed filters.
    withSQLConf(SQLConf.JSON_FILTER_PUSHDOWN_ENABLED.key -> "true") {
      withTempPath { file =>
        val scanBuilder = getBuilder(file.getCanonicalPath)
        assert(scanBuilder.pushFilters(filters) === filters)
        assert(scanBuilder.pushedFilters() === filters)
      }
    }
    // With pushdown disabled pushFilters still returns its input, but nothing
    // is actually recorded as pushed.
    withSQLConf(SQLConf.JSON_FILTER_PUSHDOWN_ENABLED.key -> "false") {
      withTempPath { file =>
        val scanBuilder = getBuilder(file.getCanonicalPath)
        assert(scanBuilder.pushFilters(filters) === filters)
        assert(scanBuilder.pushedFilters() === Array.empty[sources.Filter])
      }
    }
  }
}
/** Re-runs the JSON suite with the legacy (pre-3.0) time parser policy. */
class JsonLegacyTimeParserSuite extends JsonSuite {
  override protected def sparkConf: SparkConf = {
    val conf = super.sparkConf
    conf.set(SQLConf.LEGACY_TIME_PARSER_POLICY, "legacy")
  }
}
| wangmiao1981/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala | Scala | apache-2.0 | 111,559 |
/** Copyright 2015 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prediction.controller
import io.prediction.core.BaseAlgorithm
import io.prediction.workflow.PersistentModelManifest
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import scala.reflect._
/** Base class of a local algorithm.
*
* A local algorithm runs locally within a single machine and produces a model
* that can fit within a single machine.
*
* If your input query class requires custom JSON4S serialization, the most
* idiomatic way is to implement a trait that extends [[CustomQuerySerializer]],
* and mix that into your algorithm class, instead of overriding
* [[querySerializer]] directly.
*
* @tparam PD Prepared data class.
* @tparam M Trained model class.
* @tparam Q Input query class.
* @tparam P Output prediction class.
* @group Algorithm
*/
abstract class LAlgorithm[PD, M : ClassTag, Q : Manifest, P]
  extends BaseAlgorithm[RDD[PD], RDD[M], Q, P] {

  /** Do not use directly or override this method, as this is called by
    * PredictionIO workflow to train a model.
    */
  private[prediction]
  def trainBase(sc: SparkContext, pd: RDD[PD]): RDD[M] = pd.map(train)

  /** Implement this method to produce a model from prepared data.
    *
    * @param pd Prepared data for model training.
    * @return Trained model.
    */
  def train(pd: PD): M

  // Workflow entry point for batch prediction. `bm` is the model produced by
  // trainBase, carried as Any by the base class; it is cast back to RDD[M].
  private[prediction]
  def batchPredictBase(sc: SparkContext, bm: Any, qs: RDD[(Long, Q)])
  : RDD[(Long, P)] = {
    val mRDD = bm.asInstanceOf[RDD[M]]
    batchPredict(mRDD, qs)
  }

  // Pairs the model RDD with every glommed partition of queries via a
  // cartesian product, then applies predict to each (index, query) pair.
  // NOTE(review): this presumes mRDD holds a single model — confirm upstream.
  private[prediction]
  def batchPredict(mRDD: RDD[M], qs: RDD[(Long, Q)]): RDD[(Long, P)] = {
    val glomQs: RDD[Array[(Long, Q)]] = qs.glom()
    val cartesian: RDD[(M, Array[(Long, Q)])] = mRDD.cartesian(glomQs)
    cartesian.flatMap { case (m, qArray) => {
      qArray.map { case (qx, q) => (qx, predict(m, q)) }
    }}
  }

  // Single-query entry point; casts the locally deployed model back to M.
  private[prediction]
  def predictBase(localBaseModel: Any, q: Q): P = {
    predict(localBaseModel.asInstanceOf[M], q)
  }

  /** Implement this method to produce a prediction from a query and trained
    * model.
    *
    * @param m Trained model produced by [[train]].
    * @param q An input query.
    * @return A prediction.
    */
  def predict(m: M, q: Q): P

  private[prediction]
  override
  def makePersistentModel(
    sc: SparkContext,
    modelId: String,
    algoParams: Params,
    bm: Any): Any = {
    // LAlgo has local model. By default, the model is serialized into our
    // storage automatically. User can override this by implementing the
    // IPersistentModel trait, then we call the save method, upon successful, we
    // return the Manifest, otherwise, Unit.

    // Check RDD[M].count == 1
    val m = bm.asInstanceOf[RDD[M]].first
    if (m.isInstanceOf[PersistentModel[_]]) {
      if (m.asInstanceOf[PersistentModel[Params]].save(
        modelId, algoParams, sc)) {
        PersistentModelManifest(className = m.getClass.getName)
      } else {
        Unit
      }
    } else {
      m
    }
  }
}
| ydanilenko/PredictionIO | core/src/main/scala/io/prediction/controller/LAlgorithm.scala | Scala | apache-2.0 | 3,604 |
package com.twitter.finagle.memcached
import com.twitter.finagle.memcached.protocol.{ClientError, Value}
import com.twitter.io.Buf
import com.twitter.util.{Future, Time}
import scala.collection.mutable
import _root_.java.lang.{Boolean => JBoolean, Long => JLong}
/**
* Map-based mock client for testing
*
* Note: expiry and flags are ignored on update operations.
*/
/**
 * Map-based mock memcached client for testing.
 *
 * All reads and writes of the backing `map` are guarded by `map.synchronized`,
 * so a single instance may be shared across threads in tests.
 *
 * Note: expiry and flags are ignored on update operations.
 */
class MockClient(val map: mutable.Map[String, Buf]) extends Client {

  def this() = this(mutable.Map[String, Buf]())

  def this(contents: Map[String, Array[Byte]]) =
    this(mutable.Map[String, Buf]() ++ (contents mapValues { v => Buf.ByteArray.Owned(v) }))

  // The implicit Manifest only gives this overload a distinct signature from
  // the Array[Byte] variant after erasure; it is otherwise unused.
  def this(contents: Map[String, String])(implicit m: Manifest[String]) =
    this(contents mapValues { _.getBytes })

  /** Looks up all `keys`, partitioning them into hits (each carrying a
   *  generated cas unique) and misses.
   */
  protected def _get(keys: Iterable[String]): GetResult = {
    val hits = mutable.Map[String, Value]()
    val misses = mutable.Set[String]()

    map.synchronized {
      keys foreach { key =>
        map.get(key) match {
          case Some(v: Buf) =>
            hits += (key -> Value(Buf.Utf8(key), v, Some(Interpreter.generateCasUnique(v))))
          case _ =>
            misses += key
        }
      }
    }
    GetResult(hits.toMap, misses.toSet)
  }

  def getResult(keys: Iterable[String]): Future[GetResult] =
    Future.value(_get(keys))

  def getsResult(keys: Iterable[String]): Future[GetsResult] =
    Future.value(GetsResult(_get(keys)))

  /**
   * Unconditionally stores `value` under `key`.
   *
   * Note: expiry and flags are ignored.
   */
  def set(key: String, flags: Int, expiry: Time, value: Buf) = {
    map.synchronized { map(key) = value }
    Future.Unit
  }

  /**
   * Stores `value` only if `key` is absent; returns whether the store happened.
   *
   * Note: expiry and flags are ignored.
   */
  def add(key: String, flags: Int, expiry: Time, value: Buf): Future[JBoolean] =
    Future.value(
      map.synchronized {
        if (!map.contains(key)) {
          map(key) = value
          true
        } else {
          false
        }
      }
    )

  /**
   * Appends `value` to an existing entry; returns false if `key` is absent.
   *
   * Note: expiry and flags are ignored.
   */
  def append(key: String, flags: Int, expiry: Time, value: Buf): Future[JBoolean] =
    Future.value(
      map.synchronized {
        map.get(key) match {
          case Some(previousValue) =>
            map(key) = previousValue.concat(value)
            true
          case None =>
            false
        }
      }
    )

  /**
   * Prepends `value` to an existing entry; returns false if `key` is absent.
   *
   * Note: expiry and flags are ignored.
   */
  def prepend(key: String, flags: Int, expiry: Time, value: Buf): Future[JBoolean] =
    Future.value(
      map.synchronized {
        map.get(key) match {
          case Some(previousValue) =>
            map(key) = value.concat(previousValue)
            true
          case None =>
            false
        }
      }
    )

  /**
   * Replaces the value of an existing entry; returns false if `key` is absent.
   *
   * Note: expiry and flags are ignored.
   */
  def replace(key: String, flags: Int, expiry: Time, value: Buf): Future[JBoolean] =
    Future.value(
      map.synchronized {
        if (map.contains(key)) {
          map(key) = value
          true
        } else {
          false
        }
      }
    )

  /**
   * Compare-and-swap: stores `value` only if the entry's current cas unique
   * matches `casUnique`; otherwise reports Exists/NotFound.
   *
   * Note: expiry and flags are ignored.
   */
  def checkAndSet(
    key: String,
    flags: Int,
    expiry: Time,
    value: Buf,
    casUnique: Buf
  ): Future[CasResult] =
    Future.value(
      map.synchronized {
        map.get(key) match {
          case Some(previousValue) if Interpreter.generateCasUnique(previousValue) == casUnique =>
            map(key) = value
            CasResult.Stored
          case Some(_) => CasResult.Exists
          case None => CasResult.NotFound
        }
      }
    )

  /** Removes `key`; returns whether an entry was actually removed. */
  def delete(key: String): Future[JBoolean] =
    Future.value(
      map.synchronized {
        if (map.contains(key)) {
          map.remove(key)
          true
        } else {
          false
        }
      }
    )

  /**
   * Increments a numeric entry by `delta`, clamping at 0; returns None if the
   * key is absent and fails the call if the stored value is non-numeric.
   */
  def incr(key: String, delta: Long): Future[Option[JLong]] =
    Future.value(
      map.synchronized {
        map.get(key) match {
          case Some(value: Buf) =>
            try {
              val Buf.Utf8(valStr) = value
              val newValue = math.max(valStr.toLong + delta, 0L)
              map(key) = Buf.Utf8(newValue.toString)
              Some(newValue)
            } catch {
              case _: NumberFormatException =>
                throw new ClientError("cannot increment or decrement non-numeric value")
            }
          case None =>
            None
        }
      }
    )

  def decr(key: String, delta: Long): Future[Option[JLong]] =
    incr(key, -delta)

  /** Stats are not simulated; always returns an empty sequence. */
  def stats(args: Option[String]): Future[Seq[String]] = Future.Nil

  // No resources to free; explicit result type replaces deprecated procedure syntax.
  def release(): Unit = {}

  override def toString = {
    "MockClient(" + map.toString + ")"
  }

  /** Returns an immutable copy of the current cache. */
  def contents: Map[String, Buf] = {
    map.synchronized {
      Map(map.toSeq: _*)
    }
  }
}
| adriancole/finagle | finagle-memcached/src/main/scala/com/twitter/finagle/memcached/MockClient.scala | Scala | apache-2.0 | 4,885 |
package lolchat.util
import scala.util.Try
object parsing {

  /** Extracts the first run of ASCII digits from `txt`, if any. */
  def parseId(txt: String): Option[String] = "[0-9]+".r.findFirstIn(txt)

  /**
   * Extracts the text content of the first `<value>...</value>` element in
   * `xml` and converts it with `as`.
   *
   * Returns None when the element is absent or the conversion throws.
   *
   * The tag name is regex-quoted so names containing regex metacharacters are
   * matched literally instead of corrupting the pattern (previously an
   * unescaped name could make the lookaround expressions invalid or wrong).
   */
  def parseXml[A](xml: String)(value: String)(as: String => A): Option[A] = {
    val tag = java.util.regex.Pattern.quote(value)
    // Lookbehind/lookahead keep only the element's inner text in the match.
    val pattern = s"(?<=$tag>).*?(?=</$tag)"
    pattern.r.findFirstIn(xml).flatMap(raw => Try(as(raw)).toOption)
  }
}
| Thangiee/League-of-Legend-Chat-Lib-Scala | lib/src/main/scala/lolchat/util/parsing.scala | Scala | mit | 344 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset down to a sample of Scala code snippets that match specific criteria, giving a quick overview of the dataset's contents without deeper analysis.