code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package org.jetbrains.plugins.scala.codeInspection.unusedInspections
import com.intellij.codeHighlighting._
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.psi.PsiFile
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
/**
* Created by Svyatoslav Ilinskiy on 11.07.16.
*/
/**
 * Registers and creates the highlighting pass that detects local `var`s
 * which could be declared as `val`.
 */
class ScalaLocalVarCouldBeValPassFactory
  extends TextEditorHighlightingPassFactory
    with TextEditorHighlightingPassFactoryRegistrar {

  // A pass is only produced for Scala files; for any other file type the
  // platform contract allows returning null (meaning "no pass").
  override def createHighlightingPass(file: PsiFile, editor: Editor): TextEditorHighlightingPass =
    file match {
      case sf: ScalaFile => new ScalaLocalVarCouldBeValPass(sf, Option(editor.getDocument))
      case _             => null
    }

  // Run after the general UPDATE_ALL pass; -1 means no forced pass ordering id.
  override def registerHighlightingPassFactory(registrar: TextEditorHighlightingPassRegistrar, project: Project): Unit =
    registrar.registerTextEditorHighlightingPass(this, Array[Int](Pass.UPDATE_ALL), null, false, -1)
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/codeInspection/unusedInspections/ScalaLocalVarCouldBeValPassFactory.scala | Scala | apache-2.0 | 953 |
/*
* Copyright 2013-2016 Tsukasa Kitachi
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package configs.testutil.instance
import scalaz.{Equal, std}
/**
 * scalaz `Equal` instances for tuples of arity 2 through 22, for use in tests.
 *
 * scalaz's `std.tuple` module only ships instances up to arity 8, so the
 * larger arities are derived via `Equal.equalBy`: each big tuple is mapped
 * onto an equivalent structure built from nested tuples of at most 8
 * elements, for which instances already exist.
 */
object tuple {

  // Arities 2-8: delegate directly to the instances provided by scalaz.
  implicit def tuple2Equal[A: Equal, B: Equal]: Equal[(A, B)] =
    std.tuple.tuple2Equal[A, B]
  implicit def tuple3Equal[A: Equal, B: Equal, C: Equal]: Equal[(A, B, C)] =
    std.tuple.tuple3Equal[A, B, C]
  implicit def tuple4Equal[A: Equal, B: Equal, C: Equal, D: Equal]: Equal[(A, B, C, D)] =
    std.tuple.tuple4Equal[A, B, C, D]
  implicit def tuple5Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal]: Equal[(A, B, C, D, E)] =
    std.tuple.tuple5Equal[A, B, C, D, E]
  implicit def tuple6Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal, F: Equal]: Equal[(A, B, C, D, E, F)] =
    std.tuple.tuple6Equal[A, B, C, D, E, F]
  implicit def tuple7Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal, F: Equal, G: Equal]: Equal[(A, B, C, D, E, F, G)] =
    std.tuple.tuple7Equal[A, B, C, D, E, F, G]
  implicit def tuple8Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal, F: Equal, G: Equal, H: Equal]: Equal[(A, B, C, D, E, F, G, H)] =
    std.tuple.tuple8Equal[A, B, C, D, E, F, G, H]

  // Arities 9-15: group the first 8 elements into one nested tuple and append
  // the remainder, then compare with the smaller derived instances.
  implicit def tuple9Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal, F: Equal, G: Equal, H: Equal, I: Equal]: Equal[(A, B, C, D, E, F, G, H, I)] =
    Equal.equalBy {
      case (a, b, c, d, e, f, g, h, i) => ((a, b, c, d, e, f, g, h), i)
    }
  implicit def tuple10Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal, F: Equal, G: Equal, H: Equal, I: Equal, J: Equal]: Equal[(A, B, C, D, E, F, G, H, I, J)] =
    Equal.equalBy {
      case (a, b, c, d, e, f, g, h, i, j) => ((a, b, c, d, e, f, g, h), i, j)
    }
  implicit def tuple11Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal, F: Equal, G: Equal, H: Equal, I: Equal, J: Equal, K: Equal]: Equal[(A, B, C, D, E, F, G, H, I, J, K)] =
    Equal.equalBy {
      case (a, b, c, d, e, f, g, h, i, j, k) => ((a, b, c, d, e, f, g, h), i, j, k)
    }
  implicit def tuple12Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal, F: Equal, G: Equal, H: Equal, I: Equal, J: Equal, K: Equal, L: Equal]: Equal[(A, B, C, D, E, F, G, H, I, J, K, L)] =
    Equal.equalBy {
      case (a, b, c, d, e, f, g, h, i, j, k, l) => ((a, b, c, d, e, f, g, h), i, j, k, l)
    }
  implicit def tuple13Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal, F: Equal, G: Equal, H: Equal, I: Equal, J: Equal, K: Equal, L: Equal, M: Equal]: Equal[(A, B, C, D, E, F, G, H, I, J, K, L, M)] =
    Equal.equalBy {
      case (a, b, c, d, e, f, g, h, i, j, k, l, m) => ((a, b, c, d, e, f, g, h), i, j, k, l, m)
    }
  implicit def tuple14Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal, F: Equal, G: Equal, H: Equal, I: Equal, J: Equal, K: Equal, L: Equal, M: Equal, N: Equal]: Equal[(A, B, C, D, E, F, G, H, I, J, K, L, M, N)] =
    Equal.equalBy {
      case (a, b, c, d, e, f, g, h, i, j, k, l, m, n) => ((a, b, c, d, e, f, g, h), i, j, k, l, m, n)
    }
  implicit def tuple15Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal, F: Equal, G: Equal, H: Equal, I: Equal, J: Equal, K: Equal, L: Equal, M: Equal, N: Equal, O: Equal]: Equal[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O)] =
    Equal.equalBy {
      case (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o) => ((a, b, c, d, e, f, g, h), i, j, k, l, m, n, o)
    }

  // Arities 16-22: two nested groups of 8 plus the remaining elements, so the
  // mapped shape never exceeds 8 components.
  implicit def tuple16Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal, F: Equal, G: Equal, H: Equal, I: Equal, J: Equal, K: Equal, L: Equal, M: Equal, N: Equal, O: Equal, P: Equal]: Equal[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P)] =
    Equal.equalBy {
      case (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) => ((a, b, c, d, e, f, g, h), (i, j, k, l, m, n, o, p))
    }
  implicit def tuple17Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal, F: Equal, G: Equal, H: Equal, I: Equal, J: Equal, K: Equal, L: Equal, M: Equal, N: Equal, O: Equal, P: Equal, Q: Equal]: Equal[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q)] =
    Equal.equalBy {
      case (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q) => ((a, b, c, d, e, f, g, h), (i, j, k, l, m, n, o, p), q)
    }
  implicit def tuple18Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal, F: Equal, G: Equal, H: Equal, I: Equal, J: Equal, K: Equal, L: Equal, M: Equal, N: Equal, O: Equal, P: Equal, Q: Equal, R: Equal]: Equal[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R)] =
    Equal.equalBy {
      case (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r) => ((a, b, c, d, e, f, g, h), (i, j, k, l, m, n, o, p), q, r)
    }
  implicit def tuple19Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal, F: Equal, G: Equal, H: Equal, I: Equal, J: Equal, K: Equal, L: Equal, M: Equal, N: Equal, O: Equal, P: Equal, Q: Equal, R: Equal, S: Equal]: Equal[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S)] =
    Equal.equalBy {
      case (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s) => ((a, b, c, d, e, f, g, h), (i, j, k, l, m, n, o, p), q, r, s)
    }
  implicit def tuple20Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal, F: Equal, G: Equal, H: Equal, I: Equal, J: Equal, K: Equal, L: Equal, M: Equal, N: Equal, O: Equal, P: Equal, Q: Equal, R: Equal, S: Equal, T: Equal]: Equal[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T)] =
    Equal.equalBy {
      case (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) => ((a, b, c, d, e, f, g, h), (i, j, k, l, m, n, o, p), q, r, s, t)
    }
  implicit def tuple21Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal, F: Equal, G: Equal, H: Equal, I: Equal, J: Equal, K: Equal, L: Equal, M: Equal, N: Equal, O: Equal, P: Equal, Q: Equal, R: Equal, S: Equal, T: Equal, U: Equal]: Equal[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U)] =
    Equal.equalBy {
      case (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u) => ((a, b, c, d, e, f, g, h), (i, j, k, l, m, n, o, p), q, r, s, t, u)
    }
  implicit def tuple22Equal[A: Equal, B: Equal, C: Equal, D: Equal, E: Equal, F: Equal, G: Equal, H: Equal, I: Equal, J: Equal, K: Equal, L: Equal, M: Equal, N: Equal, O: Equal, P: Equal, Q: Equal, R: Equal, S: Equal, T: Equal, U: Equal, V: Equal]: Equal[(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V)] =
    Equal.equalBy {
      case (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v) => ((a, b, c, d, e, f, g, h), (i, j, k, l, m, n, o, p), q, r, s, t, u, v)
    }
}
| kxbmap/configs | core/src/test/scala/configs/testutil/instance/tuple.scala | Scala | apache-2.0 | 7,013 |
/*
* Copyright 2014 Databricks
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package hydrograph.engine.spark.datasource.xml
import org.apache.spark.sql.sources.DataSourceRegister
/**
* The Class DefaultSource15.
*
* @author Bitwise
*
*/
class DefaultSource15 extends DefaultSource with DataSourceRegister {
/**
* Short alias for spark-xml data source.
*/
override def shortName(): String = "xml"
}
| capitalone/Hydrograph | hydrograph.engine/hydrograph.engine.spark/src/main/scala/hydrograph/engine/spark/datasource/xml/DefaultSource15.scala | Scala | apache-2.0 | 938 |
package com.shorrockin.cascal
import org.junit.{Assert, Test}
import com.shorrockin.cascal.utils.Conversions._
import Assert._
/**
 * Integration tests for Cassandra secondary-index queries (Eq/Gt/Gte/Lt/Lte
 * expressions, combined predicates, and result limits) run against an
 * embedded Cassandra instance.
 *
 * NOTE(review): the class name misspells "Secondary" ("Secondery"); kept
 * as-is because renaming would change the public identifier.
 */
class TestSeconderyIndex extends EmbeddedCassandra {
  // Column family with a secondary index, plus two row keys used by all tests.
  val family = "Test" \\ "StandardIndexed"
  val key1 = family \\ "key1"
  val key2 = family \\ "key2"

  // Eq on an indexed column matches the rows under both keys.
  @Test def eqExpression = borrow { (session) =>
    session.insert(key1 \\ "column1" \\ "b")
    session.insert(key1 \\ "column2" \\ "c")
    session.insert(key2 \\ "column1" \\ "b")
    val query = family where "column1" Eq "b" startAt "a"
    val rows = session.list(query)
    assertEquals(2, rows.size)
  }

  // Eq also works for non-string (long) column values.
  @Test def eqExpressionForLongType = borrow { (session) =>
    session.insert(key1 \\ "longColumn" \\ 1L)
    session.insert(key2 \\ "longColumn" \\ 2L)
    val query = family where "longColumn" Eq 1L startAt 0
    val rows = session.list(query)
    assertEquals(1, rows.size)
  }

  // Eq combined with a strict greater-than on a second column.
  @Test def eqAndGtExpression = borrow { (session) =>
    session.insert(key1 \\ "column1" \\ "b")
    session.insert(key1 \\ "column2" \\ "c")
    session.insert(key2 \\ "column1" \\ "b")
    session.insert(key2 \\ "column2" \\ "b")
    val query = family where "column1" Eq "b" and "column2" Gt "b" startAt "a"
    val rows = session.list(query)
    assertEquals(1, rows.size)
  }

  // Gte includes the boundary value, so both rows match here.
  @Test def eqAndGteExpression = borrow { (session) =>
    session.insert(key1 \\ "column1" \\ "b")
    session.insert(key1 \\ "column2" \\ "c")
    session.insert(key2 \\ "column1" \\ "b")
    session.insert(key2 \\ "column2" \\ "b")
    val query = family where "column1" Eq "b" and "column2" Gte "b" startAt "a"
    val rows = session.list(query)
    assertEquals(2, rows.size)
  }

  // Eq combined with a strict less-than on a second column.
  @Test def eqAndLtExpression = borrow { (session) =>
    session.insert(key1 \\ "column1" \\ "b")
    session.insert(key1 \\ "column2" \\ "a")
    session.insert(key2 \\ "column1" \\ "b")
    session.insert(key2 \\ "column2" \\ "b")
    val query = family where "column1" Eq "b" and "column2" Lt "b" startAt "a"
    val rows = session.list(query)
    assertEquals(1, rows.size)
  }

  // Lte includes the boundary value, so both rows match here.
  @Test def eqAndLteExpression = borrow { (session) =>
    session.insert(key1 \\ "column1" \\ "b")
    session.insert(key1 \\ "column2" \\ "a")
    session.insert(key2 \\ "column1" \\ "b")
    session.insert(key2 \\ "column2" \\ "b")
    val query = family where "column1" Eq "b" and "column2" Lte "b" startAt "a"
    val rows = session.list(query)
    assertEquals(2, rows.size)
  }

  // A limit clause caps the result set even when more rows match.
  @Test def limitNumberOfResult = borrow { (session) =>
    session.insert(key1 \\ "column1" \\ "b")
    session.insert(key1 \\ "column2" \\ "a")
    session.insert(key2 \\ "column1" \\ "b")
    session.insert(key2 \\ "column2" \\ "b")
    val query = family where "column1" Eq "b" and "column2" Lte "b" startAt "a" limit 1
    val rows = session.list(query)
    assertEquals(1, rows.size)
  }
}
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.plugin.wms
import java.awt.color.ColorSpace
import java.awt.image._
import java.awt.{Point, Transparency}
import org.locationtech.geomesa.utils.geohash.{GeoHash, TwoGeoHashBoundingBox}
/**
 * Renders (GeoHash, value) pairs into a single-band grayscale tile image,
 * one pixel per geohash cell covered by the tile's bounding box.
 */
object ImageUtils {

  /**
   * Builds the tile image for the given bounding box from an iterator of
   * geohash/value pairs. Values are assumed to lie in [0, 1] and are scaled
   * to byte intensities; cells with no data stay black (0).
   */
  def createTileImage(ghBbox: TwoGeoHashBoundingBox,
                      ghvs: Iterator[(GeoHash, Double)]): RenderedImage = {
    // Pixel dimensions: number of geohash-sized cells spanned by the tile
    // bounding box in each direction (the -1.0 compensates for rounding of
    // the cell count, preserved from the original computation).
    val xdim: Int = math.round(ghBbox.bbox.longitudeSize / ghBbox.ur.bbox.longitudeSize - 1.0).toInt
    val ydim: Int = math.round(ghBbox.bbox.latitudeSize / ghBbox.ur.bbox.latitudeSize - 1.0).toInt
    val buffer = createImageBuffer(xdim, ydim)
    setImagePixels(ghvs, buffer, xdim, ydim, ghBbox)
    drawImage(buffer, xdim, ydim)
  }

  /**
   * Allocates a single-band byte buffer for the tile, zero-filled.
   * Zero renders as black in the opaque gray color model used by drawImage
   * (black won't work with a transparent model - it has full transparency).
   */
  private def createImageBuffer(xdim: Int, ydim: Int): Array[Array[Byte]] =
    // JVM arrays are zero-initialized on allocation, so no explicit fill
    // loop is needed to set the default (black) color.
    Array.ofDim[Byte](1, xdim * ydim)

  /**
   * Writes one pixel per geohash that falls inside the tile bounding box,
   * mapping the value in [0, 1] to a byte intensity.
   */
  private def setImagePixels(ghvs: Iterator[(GeoHash, Double)],
                             buffer: Array[Array[Byte]],
                             xdim: Int,
                             ydim: Int,
                             ghBbox: TwoGeoHashBoundingBox): Unit = {
    // Size (in degrees) of a single geohash cell, i.e. of one pixel.
    val dxDegrees = ghBbox.ur.bbox.longitudeSize
    val dyDegrees = ghBbox.ur.bbox.latitudeSize
    for ((gh, value) <- ghvs) {
      if (ghBbox.bbox.covers(gh.bbox)) { // @todo check by index
        val x: Int = Math.round((gh.x - ghBbox.ll.x) / dxDegrees).toInt
        // Image rows grow downward, so flip the y axis; clamp at 0.
        val y: Int = Math.max(ydim - Math.round((gh.y - ghBbox.ll.y) / dyDegrees).toInt - 1, 0)
        // Clamp the linear index so edge cells cannot write past the buffer.
        val idx: Int = math.min(y * xdim + x, xdim * ydim - 1)
        buffer(0)(idx) = (value * 255).toByte
      }
    }
  }

  /**
   * Wraps the raw byte buffer in a BufferedImage using an opaque, single-band
   * grayscale color model.
   */
  def drawImage(buffer: Array[Array[Byte]], xdim: Int, ydim: Int): BufferedImage = {
    val dbuffer: DataBufferByte = new DataBufferByte(buffer, xdim * ydim)
    val sampleModel = new BandedSampleModel(DataBuffer.TYPE_BYTE, xdim, ydim, 1)
    val colorModel = new ComponentColorModel(
      ColorSpace.getInstance(ColorSpace.CS_GRAY),
      null,
      false,
      false,
      Transparency.OPAQUE,
      DataBuffer.TYPE_BYTE)
    val raster = Raster.createWritableRaster(sampleModel, dbuffer, new Point(0, 0))
    new BufferedImage(colorModel, raster, false, null)
  }
}
| jnh5y/geomesa | geomesa-plugin/src/main/scala/org/locationtech/geomesa/plugin/wms/ImageUtils.scala | Scala | apache-2.0 | 4,022 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.execution.Ack
import monix.execution.Ack.Continue
import monix.reactive.Observable.Operator
import monix.reactive.observers.Subscriber
import scala.concurrent.Future
/** Forwards every n-th element of the stream downstream, dropping the rest. */
private[reactive] final class TakeEveryNthOperator[A](n: Int) extends Operator[A, A] {
  require(n > 0, "n must be strictly positive")

  def apply(out: Subscriber[A]): Subscriber[A] =
    new Subscriber[A] {
      implicit val scheduler = out.scheduler
      // Countdown to the next element to emit; the element that drives it to
      // zero is forwarded and the counter is reset to n.
      private[this] var remaining = n

      def onNext(elem: A): Future[Ack] = {
        remaining -= 1
        if (remaining == 0) {
          remaining = n
          out.onNext(elem)
        } else {
          Continue
        }
      }

      def onError(ex: Throwable) =
        out.onError(ex)

      def onComplete() =
        out.onComplete()
    }
}
| alexandru/monifu | monix-reactive/shared/src/main/scala/monix/reactive/internal/operators/TakeEveryNthOperator.scala | Scala | apache-2.0 | 1,491 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.algebird
import org.scalacheck.{Arbitrary, Prop}
import org.scalacheck.Prop.forAll
import scala.math.Equiv
/**
* Properties useful for testing instances of Metric[T].
*/
trait MetricProperties {

  // Two doubles are "close" when equal, within a relative tolerance of the
  // first argument, or both infinite.
  private def approxEqual(a: Double, b: Double, eps: Double = 1e-10): Boolean =
    a == b || (math.abs(a - b) / math.abs(a)) < eps || (a.isInfinite && b.isInfinite)

  // a is "greater" than b up to tolerance, or both are infinite.
  private def approxGreater(a: Double, b: Double, eps: Double = 1e-10): Boolean =
    (a.isInfinite && b.isInfinite) || a > b - eps

  /** d(a, b) >= 0 for all a, b. */
  def isNonNegative[T: Metric: Arbitrary]: Prop =
    forAll { (a: T, b: T) =>
      val d = Metric(a, b)
      approxGreater(d, 0.0) || approxEqual(d, 0.0)
    }

  /** d(a, b) == 0 exactly when a and b are equivalent. */
  def isEqualIffZero[T: Metric: Arbitrary: Equiv]: Prop =
    forAll { (a: T, b: T) =>
      approxEqual(Metric(a, b), 0.0) == Equiv[T].equiv(a, b)
    }

  /** d(a, b) == d(b, a). */
  def isSymmetric[T: Metric: Arbitrary]: Prop =
    forAll { (a: T, b: T) =>
      approxEqual(Metric(a, b), Metric(b, a))
    }

  /** d(a, c) <= d(a, b) + d(b, c). */
  def satisfiesTriangleInequality[T: Metric: Arbitrary]: Prop =
    forAll { (a: T, b: T, c: T) =>
      val viaB = Metric(a, b) + Metric(b, c)
      val direct = Metric(a, c)
      approxGreater(viaB, direct) || approxEqual(viaB, direct)
    }

  /** All four metric laws combined. */
  def metricLaws[T: Metric: Arbitrary: Equiv]: Prop =
    isNonNegative[T] && isEqualIffZero[T] && isSymmetric[T] && satisfiesTriangleInequality[T]
}
| nevillelyh/algebird | algebird-test/src/main/scala/com/twitter/algebird/MetricProperties.scala | Scala | apache-2.0 | 1,944 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import SavesConfigMapSuite.theConfigMap
import java.util.UUID
/**
 * Suite used to verify that ConfigMapWrapperSuite passes the config map into
 * the wrapped suite's constructor: the received map is captured into the
 * companion object as a side effect of construction, where tests can read it
 * back via SavesConfigMapSuite.savedConfigMap.
 */
@WrapWith(classOf[ConfigMapWrapperSuite])
class SavesConfigMapSuite(configMap: Map[String, Any]) extends FunSuite {
  // Record the constructor-supplied config map for later inspection.
  theConfigMap = Some(configMap)
  test("one test") {}
  test("two test") {}
  test("red test") {}
  // Tagged test, used to exercise tag-based filtering.
  test("blue test", org.scalatest.mytags.FastAsLight) {}
  ignore("ignore me") {}
  // Nested suite with a random, unique suiteId so each instance is distinct.
  class NSuite extends Suite {
    override val suiteId = getClass.getName + "-" + UUID.randomUUID.toString
  }
  override def nestedSuites: Vector[Suite] = Vector(new NSuite, new NSuite, new NSuite)
}
/** Companion holding the config map captured by the most recent suite instance. */
object SavesConfigMapSuite {
  private var theConfigMap: Option[Map[String, Any]] = None
  /** The last captured config map, or None if no suite ran or it was reset. */
  def savedConfigMap: Option[Map[String, Any]] = theConfigMap
  /** Clears the captured config map between test runs. */
  def resetConfigMap(): Unit = { theConfigMap = None }
}
| travisbrown/scalatest | src/test/scala/org/scalatest/SavesConfigMapSuite.scala | Scala | apache-2.0 | 1,361 |
/*
* Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.wegtam.tensei.agent.transformers
import com.wegtam.tensei.agent.transformers.BaseTransformer.{
StartTransformation,
TransformerResponse
}
import akka.actor.Props
import akka.util.ByteString
import scala.util.Try
object Split {
  // Factory for the Props needed to create a Split transformer actor.
  def props: Props = Props(new Split())
}
/**
* A simple transformer that splits a source.
*
* The transformer accepts the following parameters:
* - `pattern` The pattern that is used to split the element.
* - `limit` Return only the first x elements of the split array. (Default -1 for all)
* - `selected` Return only the elements at the given positions. (Comma separated list of Int, starting with 0)
*/
class Split extends BaseTransformer {
  override def transform: Receive = {
    case msg: StartTransformation =>
      log.debug("Starting splitting of source: {}", msg.src)
      val params = msg.options.params
      // `pattern`: regex used to split; an empty pattern means "no split".
      val pattern = paramValue("pattern")(params)
      // `limit`: keep only the first x parts; any value < 1 keeps all parts.
      val limit: Int = paramValueO("limit")(params).map(_.toInt).getOrElse(-1)
      // `selected`: zero-based positions to return, as a comma-separated list.
      val selected: Seq[Int] = paramValue("selected")(params) match {
        case "" => List.empty[Int]
        case is => is.split(",").toList.map(_.trim.toInt)
      }
      val splittedSource: List[ByteString] =
        if (msg.src.nonEmpty) {
          // Concatenate all source values into one string before splitting.
          val concatenatedString = msg.src.map {
            case bs: ByteString => bs.utf8String
            case otherData      => otherData.toString
          }.mkString
          val parts: List[String] =
            if (pattern.nonEmpty)
              concatenatedString.split(pattern).map(_.trim).toList
            else
              List(concatenatedString)
          parts match {
            case p1 :: p2 :: ps =>
              // We have at least 2 entries.
              // NOTE: `limit` and `selected` only take effect in this branch,
              // i.e. when the split produced two or more parts.
              val sliced =
                if (limit > 0 && parts.size > limit)
                  parts.slice(0, limit)
                else
                  parts
              if (selected.nonEmpty) {
                // Pick only the requested positions; out-of-range positions
                // yield an empty ByteString instead of failing.
                val candidates = sliced.toVector
                selected
                  .map(
                    pos =>
                      Try(candidates(pos)) match {
                        case scala.util.Failure(e) => ByteString("")
                        case scala.util.Success(c) => ByteString(c)
                      }
                  )
                  .toList
              } else
                sliced.map(p => ByteString(p))
            // Zero or one part: return as-is, ignoring limit/selected.
            case _ => parts.map(p => ByteString(p))
          }
        } else
          // No input at all yields a single empty ByteString.
          List(ByteString(""))
      log.debug("Finished splitting of source.")
      log.debug("Splitted {} into {}. -> {} elements",
                msg.src,
                splittedSource,
                splittedSource.length)
      // Revert to the idle behaviour and report the split parts (as strings).
      context become receive
      sender() ! TransformerResponse(splittedSource, classOf[String])
  }
}
| Tensei-Data/tensei-agent | src/main/scala/com/wegtam/tensei/agent/transformers/Split.scala | Scala | agpl-3.0 | 3,568 |
package org.embulk.parser.twitter_ads_stats
import org.embulk.spi.`type`.Types
import org.embulk.spi.{Column => EmbulkColumn}
class ColumnSpec extends UnitSpec {
  "メトリクスグループのJSONがアルファベット順で JSON 型で生成される" in {
    // Metric groups deliberately given in non-alphabetical order.
    val names = MetricElementNames(
      Map(
        "video"   -> Seq("video_total_views", "video_views_25"),
        "billing" -> Seq("billed_engagements", "billed_charge_local_micro")
      )
    )

    // Fixed columns first, then one JSON column per metric group,
    // sorted alphabetically ("billing" before "video").
    val expected = Seq(
      new EmbulkColumn(0, "id", Types.STRING),
      new EmbulkColumn(1, "date", Types.STRING),
      new EmbulkColumn(2, "segment", Types.JSON),
      new EmbulkColumn(3, "placement", Types.STRING),
      new EmbulkColumn(4, "billing", Types.JSON),
      new EmbulkColumn(5, "video", Types.JSON)
    )

    assert(Column.createEmbulkColumns(names) == expected)
  }
}
| septeni-original/embulk-parser-twitter_ads_stats | src/test/scala/org/embulk/parser/twitter_ads_stats/ColumnSpec.scala | Scala | mit | 969 |
package controllers
import javax.inject.Inject
import com.mohiva.play.silhouette.api.{Environment, Silhouette}
import com.mohiva.play.silhouette.impl.authenticators.SessionAuthenticator
import controllers.headers.ProvidesHeader
import models.user.User
import models.street.StreetEdgePriorityTable
import scala.concurrent.Future
/**
* Holds the HTTP requests associated with maintaining the audit priority value for streets.
*
* @param env The Silhouette environment.
*/
class AuditPriorityController @Inject() (implicit val env: Environment[User, SessionAuthenticator])
  extends Silhouette[User, SessionAuthenticator] with ProvidesHeader {

  /**
   * Returns true when a user is signed in with the "Administrator" or "Owner" role.
   *
   * @param user the current user, or None when anonymous
   */
  def isAdmin(user: Option[User]): Boolean = user match {
    case Some(u) =>
      // Evaluate the role once instead of twice, and return the boolean
      // expression directly (no `if (cond) true else false`).
      val role = u.role.getOrElse("")
      role == "Administrator" || role == "Owner"
    case _ => false
  }

  /**
   * Recalculates street edge priority for all streets.
   *
   * Admin-only endpoint: non-admin (or anonymous) users are redirected home.
   */
  def recalculateStreetPriority = UserAwareAction.async { implicit request =>
    if (isAdmin(request.identity)) {
      StreetEdgePriorityTable.recalculateStreetPriority
      Future.successful(Ok("Successfully recalculated street priorities"))
    } else {
      Future.successful(Redirect("/"))
    }
  }
}
| ProjectSidewalk/SidewalkWebpage | app/controllers/AuditPriorityController.scala | Scala | mit | 1,242 |
package com.datawizards.sparklocal.rdd
import com.datawizards.sparklocal.SparkLocalBaseTest
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class IsEmptyTest extends SparkLocalBaseTest {

  // isEmpty on the local RDD API implementation directly.
  test("isEmpty result") {
    assert(!RDDAPI(Seq(1,2,3)).isEmpty)
    assert(RDDAPI(Seq()).isEmpty)
  }

  // isEmpty must agree between the local implementation and real Spark,
  // for both non-empty and empty input.
  test("isEmpty equal") {
    assertRDDOperationReturnsSameResult[Int, Boolean](Seq(1,2,3)) {
      d => d.isEmpty
    }
    assertRDDOperationReturnsSameResult[Int, Boolean](Seq()) {
      d => d.isEmpty
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datastax.killrweather
import akka.actor.{ ActorLogging, Actor, ActorRef }
import akka.pattern.pipe
import com.datastax.spark.connector._
import com.scaledaction.core.cassandra.CassandraConfig
import org.apache.spark.SparkContext
import org.apache.spark.util.StatCounter
/**
* The TemperatureActor reads the daily temperature rollup data from Cassandra,
* and TODO: for a given weather station, computes temperature statistics by
* month for a given year.
*/
//class TemperatureActor(sc: SparkContext, settings: WeatherSettings)
class TemperatureActor(sc: SparkContext, cassandraConfig: CassandraConfig)
  extends AggregationActor with ActorLogging {

  import WeatherEvent._
  import Weather._

  val keyspace = cassandraConfig.keyspace
  //TODO - Add a WeatherServiceAppConfig and replace the hard-coded "dailytable" and "rawtable" values
  //    cassandra {
  //      table.raw = "raw_weather_data"
  //      table.daily.temperature = "daily_aggregate_temperature"
  //    }
  //  import settings.{CassandraTableDailyTemp => dailytable}
  val dailyTable = "daily_aggregate_temperature"
  //  import settings.{CassandraTableRaw => rawtable}
  val rawTable = "raw_weather_data"
  val monthlyTable = "monthly_aggregate_temperature"

  // Query messages trigger a Cassandra read (with on-demand aggregation from
  // raw data on a cache miss); the *Temperature messages persist aggregates
  // that this actor sends to itself from toDaily/toMonthly.
  def receive: Actor.Receive = {
    //case e: GetDailyTemperature => daily(e.day, sender)
    case e: GetDailyTemperature => daily(Day(e.wsid, e.year, e.month, e.day), sender)
    case e: GetMonthlyTemperature => highLow(e, sender)
    case e: DailyTemperature => storeDaily(e)
    case e: MonthlyTemperature => storeMonthly(e)
  }

  // Tries the precomputed daily table first; falls back to aggregating the
  // raw data when no stored aggregate exists.
  private def daily(day: Day, requester: ActorRef): Unit =
    sc.cassandraTable[DailyTemperature](keyspace, dailyTable)
      .where("wsid = ? AND year = ? AND month = ? AND day = ?",
        day.wsid, day.year, day.month, day.day)
      .collectAsync // TODO: write custom sum that checks for no data
      .map(seqDT => seqDT.headOption match {
        case None => aggregateDaily(day, requester)
        case Some(dailyTemperature) => requester ! dailyTemperature
      })

  // Aggregates one day's raw temperature readings and pipes the result
  // (DailyTemperature or NoDataAvailable) back to the requester.
  private def aggregateDaily(day: Day, requester: ActorRef): Unit = {
    sc.cassandraTable[Double](keyspace, rawTable)
      .select("temperature").where(
        "wsid = ? AND year = ? AND month = ? AND day = ?",
        day.wsid, day.year, day.month, day.day
      )
      .collectAsync() // TODO: write custom sum that checks for no data
      .map(toDaily(_, day)) pipeTo requester
  }

  /**
   * Checks for stored monthly aggregation and if not exists, passes
   * to the aggregateMonth method.
   */
  private def highLow(
    e: GetMonthlyTemperature, requester: ActorRef)
  : Unit = {
    sc.cassandraTable[MonthlyTemperature](keyspace, monthlyTable)
      .where("wsid = ? AND year = ? AND month = ?",
        e.wsid, e.year, e.month)
      .collectAsync // TODO: write custom sum that checks for no data
      .map(seqDT => seqDT.headOption match {
        case None => aggregateMonth(e, requester)
        case Some(monthlyTemperature) => requester ! monthlyTemperature
      })
  }

  // Aggregates a month's raw readings and pipes the result to the requester.
  private def aggregateMonth(
    e: GetMonthlyTemperature, requester: ActorRef
  ): Unit =
    sc.cassandraTable[RawWeatherData](keyspace, rawTable)
      .where("wsid = ? AND year = ? AND month = ?", e.wsid, e.year, e.month)
      .collectAsync() // TODO: write custom sum that checks for no data
      .map(toMonthly(_, e.wsid, e.year, e.month)) pipeTo requester

  // Persists a daily aggregate via a one-element RDD write.
  private def storeDaily(e: DailyTemperature): Unit =
    sc.parallelize(Seq(e)).saveToCassandra(keyspace, dailyTable)

  // Persists a monthly aggregate via a one-element RDD write.
  private def storeMonthly(e: MonthlyTemperature): Unit =
    sc.parallelize(Seq(e)).saveToCassandra(keyspace, monthlyTable)

  // Builds a DailyTemperature from raw readings; the aggregate is sent back
  // to self for storage only when the day is already in the past.
  // NOTE(review): `timestamp` is presumably supplied by AggregationActor
  // (the "current time") — confirm against that trait.
  private def toDaily(aggregate: Seq[Double], key: Day): WeatherAggregate =
    if (aggregate.nonEmpty) {
      val data = toDailyTemperature(key, StatCounter(aggregate))
      if(timestamp.getYear > key.year
        || timestamp.getMonthOfYear > key.month) self ! data
      data
    } else {
      log.info("TemperatureActor.toDaily NoDataAvailable")
      NoDataAvailable(key.wsid, key.year, classOf[DailyTemperature])
    }

  // Builds a MonthlyTemperature (max/min) from raw readings; sent to self for
  // storage only when the month is already in the past.
  private def toMonthly(
    aggregate: Seq[RawWeatherData], wsid: String, year: Int, month: Int)
  : WeatherAggregate = {
    if (aggregate.nonEmpty) {
      val data = MonthlyTemperature(wsid, year, month,
        aggregate.map(rwd => rwd.temperature).max,
        aggregate.map(rwd => rwd.temperature).min)
      if(timestamp.getYear > year || timestamp.getMonthOfYear > month)
        self ! data
      data
    }
    else NoDataAvailable(wsid, year, classOf[MonthlyTemperature])
  }

  // Summarizes a day's readings (high/low/mean/variance/stdev) from StatCounter.
  private def toDailyTemperature(key: Day, stats: StatCounter) =
    DailyTemperature(key.wsid, key.year, key.month, key.day,
      high = stats.max, low = stats.min, mean = stats.mean,
      variance = stats.variance, stdev = stats.stdev)
}
| scaledaction/weather-service | query-service/src/main/scala/com/datastax/killrweather/TemperatureActor.scala | Scala | apache-2.0 | 5,978 |
package dao
import com.vividsolutions.jts.geom.{GeometryFactory, Coordinate, Point}
import utils.driver.pgSlickDriver.simple._
import scala.slick.jdbc.{GetResult, StaticQuery => Q}
import models.Feature
/**
* Feature table definition.
* For more detailed information about rows and stored data, @see{models.Feature}.
*
* @author Amadeusz Kosik <akosik@semantive.com>
* @author Piotr Jędruszuk <pjedruszuk@semantive.com>
*/
object Features extends Table[Feature]("feature") with DAO[Feature] {

  // <editor-fold desc="Row definitions">

  def geonameId = column[Int]("geoname_id", O.PrimaryKey)
  def defaultName = column[String]("default_name", O.DBType("VARCHAR(200)"))
  def featureClass = column[String]("feature_class", O.DBType("CHAR(1)"))
  def featureCode = column[String]("feature_code", O.DBType("VARCHAR(10)"))
  def admCode = column[String]("adm_code", O.DBType("VARCHAR(40)"), O.Nullable)
  def countryId = column[Int]("country_id", O.Nullable)
  def adm1Id = column[Int]("adm1_id", O.Nullable)
  def adm2Id = column[Int]("adm2_id", O.Nullable)
  def adm3Id = column[Int]("adm3_id", O.Nullable)
  def adm4Id = column[Int]("adm4_id", O.Nullable)
  def parentId = column[Int]("parent_id", O.Nullable)
  def timezoneId = column[Int]("timezone_id", O.Nullable)
  def population = column[Long]("population", O.Nullable)
  def location = column[Point]("location")
  def wikiLink = column[String]("wiki_link", O.Nullable)

  // </editor-fold>

  // <editor-fold desc="Foreign keys">

  /** REFERENCES key on country.id. */
  def fkCountry = foreignKey("fk_feature_country", countryId, Countries)(_.geonameId)

  /** REFERENCES key on adm1.id. */
  def fkAdm1 = foreignKey("fk_feature_adm1", adm1Id, Features)(_.geonameId)

  /** REFERENCES key on adm2.id. */
  def fkAdm2 = foreignKey("fk_feature_adm2", adm2Id, Features)(_.geonameId)

  /** REFERENCES key on adm3.id. */
  def fkAdm3 = foreignKey("fk_feature_adm3", adm3Id, Features)(_.geonameId)

  /** REFERENCES key on adm4.id. */
  def fkAdm4 = foreignKey("fk_feature_adm4", adm4Id, Features)(_.geonameId)

  /** Self-REFERENCES key on feature.geoname_id (parent feature). */
  def fkParentId = foreignKey("fk_feature_parent_id", parentId, Features)(_.geonameId)

  /** REFERENCES key on timezone.id. */
  def fkTimezone = foreignKey("fk_feature_timezone", timezoneId, Timezones)(_.id)

  // </editor-fold>

  // <editor-fold desc="Projections">

  /** Default projection. */
  def * = geonameId ~ defaultName ~ featureClass ~ featureCode ~ admCode.? ~ countryId.? ~ adm1Id.? ~ adm2Id.? ~ adm3Id.? ~ adm4Id.? ~ parentId.? ~ timezoneId.? ~ population.? ~ location ~ wikiLink.? <> (Feature.apply _, Feature.unapply _)

  /** Reduced projection used when inserting first-level administrative divisions. */
  def adm1insertion = geonameId ~ featureClass ~ featureCode ~ admCode.? ~ countryId.? <>(f => new Feature(f._1, f._2, f._3, f._4, f._5), (f : Feature) => Some(f.geonameId, f.featureClass, f.featureCode, f.admCode, f.countryId))

  /** Reduced projection used when inserting second-level administrative divisions. */
  def adm2insertion = geonameId ~ featureClass ~ featureCode ~ admCode.? ~ countryId.? ~ adm1Id.? <>(f => new Feature(f._1, f._2, f._3, f._4, f._5, f._6), (f : Feature) => Some(f.geonameId, f.featureClass, f.featureCode, f.admCode, f.countryId, f.adm1Id))

  // </editor-fold>

  // <editor-fold desc="Retrieve methods">

  /**
   * Fetches a single feature by id, together with its name in the requested language.
   *
   * @param geonameId geoname id of the feature to fetch
   * @param lang preferred language of the returned name
   * @param session implicit database session
   * @return the feature paired with its (optional) translated name, or None when not found
   */
  def getWithName(geonameId: Int, lang: String)(implicit session: Session): Option[(Feature, Option[String])] =
    matchFeatureWithName(lang).filter(_._1.geonameId === geonameId).firstOption

  /**
   * Finds features near a point, ordered by spherical distance from that point.
   *
   * @param latitude latitude of the query point
   * @param longitude longitude of the query point
   * @param language preferred language of the returned names
   * @param radius search radius passed to st_dwithin
   *               (NOTE(review): units depend on the column's SRID — confirm degrees vs meters)
   * @param limit maximum number of rows returned
   * @param featureClass optional filter on feature class
   * @param featureCode optional filter on feature code
   * @param countryBias optional ISO2 country code whose features sort first
   * @param session implicit database session
   * @return features with optional translated name and optional ISO2 country code
   */
  def getByPoint(latitude: Double, longitude: Double, language: String, radius: Double, limit: Int, featureClass: Option[String], featureCode: Option[String], countryBias: Option[String])
                (implicit session: Session): List[(Feature, Option[String], Option[String])] = {
    val geometryFactory = new GeometryFactory()
    // JTS points are (x, y) = (longitude, latitude).
    val inputPoint = geometryFactory.createPoint(new Coordinate(longitude, latitude))

    var query = (for {
      ((f, n), c) <- joinFeaturesWithNames(language) leftJoin Countries on(_._1.countryId === _.geonameId)
      if st_dwithin(f.location, inputPoint, radius)
    } yield (f, n.name.?, c.iso2Code.?)).sortBy(tpl => st_distance_sphere(tpl._1.location, inputPoint))

    if(! featureClass.isEmpty)
      query = query.filter(t => t._1.featureClass === featureClass.get)
    if(! featureCode.isEmpty)
      query = query.filter(t => t._1.featureCode === featureCode.get)
    if(! countryBias.isEmpty)
      // Sorting by (iso2Code =!= bias) puts matching-country rows (false) first.
      query = query.sortBy(t => t._3 =!= countryBias)

    query.take(limit).list
  }

  /**
   * Lists the direct children of a feature.
   *
   * @param geonameId geoname id of the parent feature
   * @param lang preferred language of the returned names
   * @param session implicit database session
   * @return child features paired with their (optional) translated names
   */
  def getChildren(geonameId: Int, lang: String)(implicit session: Session): List[(Feature, Option[String])] =
    matchFeatureWithName(lang).filter(_._1.parentId === geonameId).list

  /**
   * Returns hierarchy of the feature - the feature itself and all of it's parents.
   *
   * The recursive CTE walks down from root features (parent_id IS NULL), accumulating
   * the id path; the outer query then selects every feature on the path of the given id
   * and joins the name translation for the requested language.
   *
   * @param geonameId id of the feature to search for
   * @param lang preferred language of the names in the output
   * @return list of parent features, including given feature
   */
  def getHierarchy(geonameId: Int, lang: String)(implicit session: Session): List[(Feature, Option[String])] = {
    implicit val getFeatureResult = GetResult(r => Feature(r.<<, r.<<, r.<<, r.<<,
      r.<<, r.<<, r.<<, r.<<, r.<<, r.<<, r.<<, r.<<, r.<<, r.<<, r.<<))

    val query = Q.query[(Int, String), (Feature, Option[String])]("""
       WITH RECURSIVE
        parent_feature(geoname_id, parent_id, depth, path) AS (
            SELECT
              f.geoname_id, f.parent_id, 1::INT AS depth, ARRAY[f.geoname_id] AS path
            FROM
              feature AS f
            WHERE
              f.parent_id IS NULL
          UNION ALL
            SELECT
              f.geoname_id, f.parent_id, pf.depth + 1 AS depth, path || ARRAY[f.geoname_id]
            FROM
              parent_feature AS pf, feature AS f
            WHERE
              f.parent_id = pf.geoname_id
        )
        SELECT feature.*, name_translation.name FROM feature LEFT JOIN name_translation ON feature.geoname_id = name_translation.geoname_id
        WHERE feature.geoname_id = ANY((SELECT path FROM parent_feature AS f WHERE f.geoname_id = ?)::integer[])
          AND name_translation.language = ?
                                                                  """)
    query.list((geonameId, lang))
  }

  /**
   * Lists features that share a parent with the given feature.
   *
   * NOTE(review): the result also contains the feature itself — there is no
   * `geonameId =!= ...` exclusion in the filter. Confirm this is intended.
   *
   * @param geonameId geoname id of the feature whose siblings are requested
   * @param lang preferred language of the returned names
   * @param session implicit database session
   * @return sibling features paired with their (optional) translated names
   */
  def getSiblings(geonameId: Int, lang: String)(implicit session: Session): List[(Feature, Option[String])] = {
    (for {
      p <- Features
      (f, n) <- joinFeaturesWithNames(lang)
      if p.geonameId === geonameId && f.parentId === p.parentId
    } yield (f, n.name.?)).list
  }

  // </editor-fold>
}
| Semantive/geoys | app/dao/Features.scala | Scala | apache-2.0 | 6,930 |
/*
Copyright 2015 UCSC Computational Genomics Lab
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package edu.ucsc.cgl.conductor
import java.net.URI
import org.scalatest._
class ConductorSpec extends FlatSpec with Matchers
{
    val credentials = Credentials( )
    val partSize = 64 * 1024 * 1024
    val bigFileSize = 64 * 1024 * 1024 * 5 / 2
    private val src: URI = new URI( "s3://file/src" )
    private val dst: URI = new URI( "hdfs://file/dst" )

    val config = Config(
        s3PartSize = partSize,
        hdfsBlockSize = partSize,
        concat = true,
        src = src,
        dst = dst )

    val downloader = new Download( config, credentials )

    /**
     * Materializes a partitioning as (start, size) pairs so each case can be
     * asserted as a single whole-array comparison against a literal
     * (resolves the previous FIXME about element-by-element assertions).
     */
    private def spans( fileSize: Long ): Seq[(Long, Long)] =
        downloader.partition( fileSize ).toArray.map( p => (p.getStart.toLong, p.getSize.toLong) ).toSeq

    "The partition method" should "divide a file into pieces corresponding to the specified size" in {
        // Exactly one part when the file fits in a single part.
        assert( spans( partSize ) == Seq( (0L, partSize.toLong) ) )
        // One byte short of a part still yields a single (smaller) part.
        assert( spans( partSize - 1 ) == Seq( (0L, (partSize - 1).toLong) ) )
        // One byte over a part yields a full part plus a one-byte remainder.
        assert( spans( partSize + 1 ) == Seq( (0L, partSize.toLong), (partSize.toLong, 1L) ) )
        // A tiny file is a single one-byte part.
        assert( spans( 1 ) == Seq( (0L, 1L) ) )
        // 2.5 parts: two full parts followed by a half-size remainder.
        assert( spans( bigFileSize ) == Seq(
            (0L, partSize.toLong),
            (partSize.toLong, partSize.toLong),
            (2L * partSize, partSize / 2L) ) )
    }

    "A downloader" should "have a source URI in the S3 filesystem" in {
        a[AssertionError] should be thrownBy {
            new Download( Config( src = dst, dst = dst ), credentials )
        }
    }

    it should "have a destination URI in the HDFS filesystem" in {
        a[AssertionError] should be thrownBy {
            new Download( Config( src = src, dst = src ), credentials )
        }
    }

    "An uploader" should "have a source URI in the HDFS filesystem" in {
        a[AssertionError] should be thrownBy {
            new Upload( Config( src = src, dst = src ), credentials )
        }
    }

    it should "have a destination URI in the S3 filesystem" in {
        a[AssertionError] should be thrownBy {
            new Upload( Config( src = dst, dst = dst ), credentials )
        }
    }
}
| BD2KGenomics/conductor | conductor/src/test/scala/edu/ucsc/cgl/conductor/ConductorSpec.scala | Scala | apache-2.0 | 3,540 |
package text.search
/**
* @author ynupc
* Created on 2016/08/21
*/
object AlphaSkipSearch extends Search {
  /**
   * Index of the first occurrence of `target` in `source`, or -1 when absent.
   * The previous version was an unimplemented stub that always returned -1.
   *
   * NOTE(review): implemented with the standard library's `indexOfSlice`
   * (linear scan), not the actual Alpha Skip Search algorithm — replace with
   * the real algorithm if/when it is written.
   */
  override def indexOf[T](source: Array[T], target: Array[T]): Int = {
    source.indexOfSlice(target)
  }

  /**
   * Start indices of every (possibly overlapping) occurrence of `target`
   * in `source`. The previous version was a stub returning an empty array.
   * An empty `target` yields an empty result, matching the old stub.
   */
  override def indicesOf[T](source: Array[T], target: Array[T]): Array[Int] = {
    if (target.isEmpty) {
      Array()
    } else {
      val builder = Array.newBuilder[Int]
      var i = source.indexOfSlice(target)
      while (i >= 0) {
        builder += i
        i = source.indexOfSlice(target, i + 1)
      }
      builder.result()
    }
  }
}
| ynupc/scalastringcourseday6 | src/main/scala/text/search/AlphaSkipSearch.scala | Scala | apache-2.0 | 303 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frsse2008
import uk.gov.hmrc.ct.accounts.frsse2008.calculations.ProfitOrLossCalculator
import uk.gov.hmrc.ct.accounts.frsse2008.retriever.Frsse2008AccountsBoxRetriever
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalInteger}
/** CT accounts box AC17: the previous period's gross profit or loss (optional integer). */
case class AC17(value: Option[Int]) extends CtBoxIdentifier(name = "Previous Gross profit or loss") with CtOptionalInteger

object AC17 extends ProfitOrLossCalculator {
  /**
   * Derives AC17 from the previous-period boxes AC13 and AC15 pulled from the retriever.
   * The statutory-accounts-filing flag is forwarded to the calculator — presumably it
   * gates whether the value is produced; confirm in [[ProfitOrLossCalculator]].
   */
  def calculate(boxRetriever: Frsse2008AccountsBoxRetriever): AC17 = {
    calculatePreviousGrossProfitOrLoss(ac13 = boxRetriever.ac13(),
      ac15 = boxRetriever.ac15(),
      statutoryAccountsFiling = boxRetriever.filingAttributesBoxValueRetriever.statutoryAccountsFiling())
  }
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frsse2008/AC17.scala | Scala | apache-2.0 | 1,389 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.nn.abstractnn.AbstractModule
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.{T, Table}
import scala.reflect.ClassTag
/**
 * This is a simple table layer which takes a table of two tensors as input
 * and calculate the dot product between them as outputs
 *
 * For batched input of shape (batch, n) the output is a 1-D tensor of length
 * `batch` holding one dot product per row; 1-D inputs are treated as a batch
 * of size one.
 */
@SerialVersionUID(2455897411271580599L)
class DotProduct[T: ClassTag] (implicit ev: TensorNumeric[T])
  extends AbstractModule[Table, Tensor[T], T] {
  gradInput = T(Tensor[T](), Tensor[T]())
  // Scratch tensor for the element-wise product; @transient so it is not
  // serialized with the module and is lazily re-created after deserialization.
  @transient private var buffer: Tensor[T] = null

  override def updateOutput(input: Table): Tensor[T] = {
    var input1: Tensor[T] = input(1)
    var input2: Tensor[T] = input(2)

    // Promote 1-D vectors to a (1, n) batch so the same code path applies.
    if (input1.dim() == 1) {
      input1 = input1.view(1, input1.size(1))
      input2 = input2.view(1, input2.size(1))
    }

    if (buffer == null) {
      buffer = Tensor[T]()
    }

    // Dot product per row: element-wise multiply, then sum over dim 2.
    buffer.resizeAs(input1).cmul(input1, input2)
    output.sum(buffer, 2)
    output.resize(input1.size(1))
    output
  }

  override def updateGradInput(input: Table, gradOutput: Tensor[T]): Table = {
    var input1: Tensor[T] = input(1)
    var input2: Tensor[T] = input(2)
    var notBatch = false

    // Re-create missing gradInput slots (e.g. after deserialization).
    if (gradInput.length() != 2) {
      if (!gradInput.contains(1)) {
        gradInput.update(1, Tensor[T]())
      }
      if (!gradInput.contains(2)) {
        gradInput.update(2, Tensor[T]())
      }
    }

    if (input1.dim() == 1) {
      input1 = input1.view(1, input1.size(1))
      input2 = input2.view(1, input2.size(1))
      notBatch = true
    }

    // d(x . y)/dx = y and d(x . y)/dy = x, each scaled by the incoming gradient.
    val gw1: Tensor[T] = gradInput(1)
    val gw2: Tensor[T] = gradInput(2)
    gw1.resizeAs(input1).copy(input2)
    gw2.resizeAs(input2).copy(input1)

    // Broadcast the per-row output gradient across each row before scaling.
    val go = gradOutput.view(gradOutput.size(1), 1).expandAs(input1)
    gw1.cmul(go)
    gw2.cmul(go)

    // Undo the batch promotion for 1-D inputs.
    if (notBatch) {
      gradInput[Tensor[T]](1).set(gw1.select(1, 1))
      gradInput[Tensor[T]](2).set(gw2.select(1, 1))
    }

    gradInput
  }

  override def toString: String = {
    s"nn.DotProduct"
  }
}
object DotProduct {
  /** Factory for [[DotProduct]]; specialized for Float/Double to avoid boxing. */
  def apply[@specialized(Float, Double) T: ClassTag]()
      (implicit ev: TensorNumeric[T]) : DotProduct[T] = {
    new DotProduct[T]()
  }
}
| yiheng/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/DotProduct.scala | Scala | apache-2.0 | 2,906 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.parser
import java.util.Locale
import java.util.concurrent.TimeUnit
import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, Set}
import org.antlr.v4.runtime.{ParserRuleContext, Token}
import org.antlr.v4.runtime.tree.{ParseTree, RuleNode, TerminalNode}
import org.apache.commons.codec.DecoderException
import org.apache.commons.codec.binary.Hex
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.{FunctionIdentifier, SQLConfHelper, TableIdentifier}
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, FunctionResource, FunctionResourceType}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.{First, Last}
import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.trees.CurrentOrigin
import org.apache.spark.sql.catalyst.util.{CharVarcharUtils, DateTimeUtils, IntervalUtils}
import org.apache.spark.sql.catalyst.util.DateTimeUtils.{convertSpecialDate, convertSpecialTimestamp, convertSpecialTimestampNTZ, getZoneId, stringToDate, stringToTimestamp, stringToTimestampWithoutTimeZone}
import org.apache.spark.sql.connector.catalog.{SupportsNamespaces, TableCatalog}
import org.apache.spark.sql.connector.catalog.TableChange.ColumnPosition
import org.apache.spark.sql.connector.expressions.{ApplyTransform, BucketTransform, DaysTransform, Expression => V2Expression, FieldReference, HoursTransform, IdentityTransform, LiteralValue, MonthsTransform, Transform, YearsTransform}
import org.apache.spark.sql.errors.QueryParsingErrors
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}
import org.apache.spark.util.random.RandomSampler
/**
* The AstBuilder converts an ANTLR4 ParseTree into a catalyst Expression, LogicalPlan or
* TableIdentifier.
*/
class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with SQLConfHelper with Logging {
import ParserUtils._
  /** Visits `ctx` and casts the result to the expected type `T` (unchecked at runtime). */
  protected def typedVisit[T](ctx: ParseTree): T = {
    ctx.accept(this).asInstanceOf[T]
  }
/**
* Override the default behavior for all visit methods. This will only return a non-null result
* when the context has only one child. This is done because there is no generic method to
* combine the results of the context children. In all other cases null is returned.
*/
override def visitChildren(node: RuleNode): AnyRef = {
if (node.getChildCount == 1) {
node.getChild(0).accept(this)
} else {
null
}
}
  /** Entry point: parses a single top-level SQL statement into a LogicalPlan. */
  override def visitSingleStatement(ctx: SingleStatementContext): LogicalPlan = withOrigin(ctx) {
    visit(ctx.statement).asInstanceOf[LogicalPlan]
  }

  /** Entry point: parses a standalone expression. */
  override def visitSingleExpression(ctx: SingleExpressionContext): Expression = withOrigin(ctx) {
    visitNamedExpression(ctx.namedExpression)
  }

  /** Entry point: parses a standalone table identifier. */
  override def visitSingleTableIdentifier(
      ctx: SingleTableIdentifierContext): TableIdentifier = withOrigin(ctx) {
    visitTableIdentifier(ctx.tableIdentifier)
  }

  /** Entry point: parses a standalone function identifier. */
  override def visitSingleFunctionIdentifier(
      ctx: SingleFunctionIdentifierContext): FunctionIdentifier = withOrigin(ctx) {
    visitFunctionIdentifier(ctx.functionIdentifier)
  }

  /** Entry point: parses a standalone multi-part identifier into its name parts. */
  override def visitSingleMultipartIdentifier(
      ctx: SingleMultipartIdentifierContext): Seq[String] = withOrigin(ctx) {
    visitMultipartIdentifier(ctx.multipartIdentifier)
  }

  /** Entry point: parses a standalone data type. */
  override def visitSingleDataType(ctx: SingleDataTypeContext): DataType = withOrigin(ctx) {
    typedVisit[DataType](ctx.dataType)
  }

  /** Entry point: parses a standalone column list into a table schema. */
  override def visitSingleTableSchema(ctx: SingleTableSchemaContext): StructType = {
    val schema = StructType(visitColTypeList(ctx.colTypeList))
    withOrigin(ctx)(schema)
  }
  /* ********************************************************************************************
   * Plan parsing
   * ******************************************************************************************** */
  /** Visits `tree` expecting a LogicalPlan result. */
  protected def plan(tree: ParserRuleContext): LogicalPlan = typedVisit(tree)
  /**
   * Create a top-level plan with Common Table Expressions.
   */
  override def visitQuery(ctx: QueryContext): LogicalPlan = withOrigin(ctx) {
    // ORDER BY / LIMIT / etc. are applied first, then any WITH clause wraps the result.
    val query = plan(ctx.queryTerm).optionalMap(ctx.queryOrganization)(withQueryResultClauses)

    // Apply CTEs
    query.optionalMap(ctx.ctes)(withCTE)
  }
  /** Creates the plan for a DML statement, wrapping it in its WITH clause when present. */
  override def visitDmlStatement(ctx: DmlStatementContext): AnyRef = withOrigin(ctx) {
    val dmlStmt = plan(ctx.dmlStatementNoWith)
    // Apply CTEs
    dmlStmt.optionalMap(ctx.ctes)(withCTE)
  }
  /**
   * Wraps `plan` in an [[UnresolvedWith]] carrying the named queries of a WITH clause.
   * Rejects duplicate CTE names up front with a parse error.
   */
  private def withCTE(ctx: CtesContext, plan: LogicalPlan): LogicalPlan = {
    val ctes = ctx.namedQuery.asScala.map { nCtx =>
      val namedQuery = visitNamedQuery(nCtx)
      (namedQuery.alias, namedQuery)
    }
    // Check for duplicate names.
    val duplicates = ctes.groupBy(_._1).filter(_._2.size > 1).keys
    if (duplicates.nonEmpty) {
      throw QueryParsingErrors.duplicateCteDefinitionNamesError(
        duplicates.mkString("'", "', '", "'"), ctx)
    }
    UnresolvedWith(plan, ctes.toSeq)
  }
  /**
   * Create a logical query plan for a hive-style FROM statement body.
   *
   * A body is either a TRANSFORM (script transform) or a regular SELECT; both share
   * the same optional clauses (lateral views, WHERE, aggregation, HAVING, windows),
   * which are layered on top of the already-built `plan` for the FROM clause.
   */
  private def withFromStatementBody(
      ctx: FromStatementBodyContext, plan: LogicalPlan): LogicalPlan = withOrigin(ctx) {
    // two cases for transforms and selects
    if (ctx.transformClause != null) {
      withTransformQuerySpecification(
        ctx,
        ctx.transformClause,
        ctx.lateralView,
        ctx.whereClause,
        ctx.aggregationClause,
        ctx.havingClause,
        ctx.windowClause,
        plan
      )
    } else {
      withSelectQuerySpecification(
        ctx,
        ctx.selectClause,
        ctx.lateralView,
        ctx.whereClause,
        ctx.aggregationClause,
        ctx.havingClause,
        ctx.windowClause,
        plan
      )
    }
  }
  /**
   * Creates the plan for a hive-style `FROM src SELECT ...` statement. Multiple
   * SELECT bodies over the same FROM are combined with a UNION.
   */
  override def visitFromStatement(ctx: FromStatementContext): LogicalPlan = withOrigin(ctx) {
    val from = visitFromClause(ctx.fromClause)
    val selects = ctx.fromStatementBody.asScala.map { body =>
      withFromStatementBody(body, from).
        // Add organization statements.
        optionalMap(body.queryOrganization)(withQueryResultClauses)
    }
    // If there are multiple SELECT just UNION them together into one query.
    if (selects.length == 1) {
      selects.head
    } else {
      Union(selects.toSeq)
    }
  }
  /**
   * Create a named logical plan.
   *
   * This is only used for Common Table Expressions.
   */
  override def visitNamedQuery(ctx: NamedQueryContext): SubqueryAlias = withOrigin(ctx) {
    // An optional column alias list (e.g. "cte(a, b)") renames the subquery's output.
    val subQuery: LogicalPlan = plan(ctx.query).optionalMap(ctx.columnAliases)(
      (columnAliases, plan) =>
        UnresolvedSubqueryColumnAliases(visitIdentifierList(columnAliases), plan)
    )
    SubqueryAlias(ctx.name.getText, subQuery)
  }
  /**
   * Create a logical plan which allows for multiple inserts using one 'from' statement. These
   * queries have the following SQL form:
   * {{{
   *   [WITH cte...]?
   *   FROM src
   *   [INSERT INTO tbl1 SELECT *]+
   * }}}
   * For example:
   * {{{
   *   FROM db.tbl1 A
   *   INSERT INTO dbo.tbl1 SELECT * WHERE A.value = 10 LIMIT 5
   *   INSERT INTO dbo.tbl2 SELECT * WHERE A.value = 12
   * }}}
   * This (Hive) feature cannot be combined with set-operators.
   */
  override def visitMultiInsertQuery(ctx: MultiInsertQueryContext): LogicalPlan = withOrigin(ctx) {
    val from = visitFromClause(ctx.fromClause)

    // Build the insert clauses.
    val inserts = ctx.multiInsertQueryBody.asScala.map { body =>
      withInsertInto(body.insertInto,
        withFromStatementBody(body.fromStatementBody, from).
          optionalMap(body.fromStatementBody.queryOrganization)(withQueryResultClauses))
    }

    // If there are multiple INSERTS just UNION them together into one query.
    if (inserts.length == 1) {
      inserts.head
    } else {
      Union(inserts.toSeq)
    }
  }
  /**
   * Create a logical plan for a regular (single-insert) query.
   */
  override def visitSingleInsertQuery(
      ctx: SingleInsertQueryContext): LogicalPlan = withOrigin(ctx) {
    withInsertInto(
      ctx.insertInto(),
      plan(ctx.queryTerm).optionalMap(ctx.queryOrganization)(withQueryResultClauses))
  }
  /**
   * Parameters used for writing query to a table:
   * (UnresolvedRelation, tableColumnList, partitionKeys, ifPartitionNotExists).
   */
  type InsertTableParams = (UnresolvedRelation, Seq[String], Map[String, Option[String]], Boolean)

  /**
   * Parameters used for writing query to a directory: (isLocal, CatalogStorageFormat, provider).
   */
  type InsertDirParams = (Boolean, CatalogStorageFormat, Option[String])
  /**
   * Add an
   * {{{
   *   INSERT OVERWRITE TABLE tableIdentifier [partitionSpec [IF NOT EXISTS]]? [identifierList]
   *   INSERT INTO [TABLE] tableIdentifier [partitionSpec] [identifierList]
   *   INSERT OVERWRITE [LOCAL] DIRECTORY STRING [rowFormat] [createFileFormat]
   *   INSERT OVERWRITE [LOCAL] DIRECTORY [STRING] tableProvider [OPTIONS tablePropertyList]
   * }}}
   * operation to logical plan
   */
  private def withInsertInto(
      ctx: InsertIntoContext,
      query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
    // Dispatch on the concrete INSERT variant the grammar produced.
    ctx match {
      case table: InsertIntoTableContext =>
        val (relation, cols, partition, ifPartitionNotExists) = visitInsertIntoTable(table)
        InsertIntoStatement(
          relation,
          partition,
          cols,
          query,
          overwrite = false,
          ifPartitionNotExists)
      case table: InsertOverwriteTableContext =>
        val (relation, cols, partition, ifPartitionNotExists) = visitInsertOverwriteTable(table)
        InsertIntoStatement(
          relation,
          partition,
          cols,
          query,
          overwrite = true,
          ifPartitionNotExists)
      case dir: InsertOverwriteDirContext =>
        val (isLocal, storage, provider) = visitInsertOverwriteDir(dir)
        InsertIntoDir(isLocal, storage, provider, query, overwrite = true)
      case hiveDir: InsertOverwriteHiveDirContext =>
        val (isLocal, storage, provider) = visitInsertOverwriteHiveDir(hiveDir)
        InsertIntoDir(isLocal, storage, provider, query, overwrite = true)
      case _ =>
        throw QueryParsingErrors.invalidInsertIntoError(ctx)
    }
  }
  /**
   * Add an INSERT INTO TABLE operation to the logical plan.
   */
  override def visitInsertIntoTable(
      ctx: InsertIntoTableContext): InsertTableParams = withOrigin(ctx) {
    val cols = Option(ctx.identifierList()).map(visitIdentifierList).getOrElse(Nil)
    val partitionKeys = Option(ctx.partitionSpec).map(visitPartitionSpec).getOrElse(Map.empty)

    // IF NOT EXISTS is only meaningful for INSERT OVERWRITE.
    if (ctx.EXISTS != null) {
      operationNotAllowed("INSERT INTO ... IF NOT EXISTS", ctx)
    }

    (createUnresolvedRelation(ctx.multipartIdentifier), cols, partitionKeys, false)
  }
  /**
   * Add an INSERT OVERWRITE TABLE operation to the logical plan.
   */
  override def visitInsertOverwriteTable(
      ctx: InsertOverwriteTableContext): InsertTableParams = withOrigin(ctx) {
    assert(ctx.OVERWRITE() != null)
    val cols = Option(ctx.identifierList()).map(visitIdentifierList).getOrElse(Nil)
    val partitionKeys = Option(ctx.partitionSpec).map(visitPartitionSpec).getOrElse(Map.empty)

    // Partition keys without a value are dynamic; IF NOT EXISTS cannot be combined with them.
    val dynamicPartitionKeys: Map[String, Option[String]] = partitionKeys.filter(_._2.isEmpty)
    if (ctx.EXISTS != null && dynamicPartitionKeys.nonEmpty) {
      operationNotAllowed("IF NOT EXISTS with dynamic partitions: " +
        dynamicPartitionKeys.keys.mkString(", "), ctx)
    }

    (createUnresolvedRelation(ctx.multipartIdentifier), cols, partitionKeys, ctx.EXISTS() != null)
  }
  /**
   * Write to a directory, returning a [[InsertIntoDir]] logical plan.
   *
   * Unsupported at this layer; a subclass is expected to override it
   * (this builder always raises a parse error).
   */
  override def visitInsertOverwriteDir(
      ctx: InsertOverwriteDirContext): InsertDirParams = withOrigin(ctx) {
    throw QueryParsingErrors.insertOverwriteDirectoryUnsupportedError(ctx)
  }

  /**
   * Write to a directory, returning a [[InsertIntoDir]] logical plan.
   *
   * Unsupported at this layer; a subclass is expected to override it
   * (this builder always raises a parse error).
   */
  override def visitInsertOverwriteHiveDir(
      ctx: InsertOverwriteHiveDirContext): InsertDirParams = withOrigin(ctx) {
    throw QueryParsingErrors.insertOverwriteDirectoryUnsupportedError(ctx)
  }
private def getTableAliasWithoutColumnAlias(
ctx: TableAliasContext, op: String): Option[String] = {
if (ctx == null) {
None
} else {
val ident = ctx.strictIdentifier()
if (ctx.identifierList() != null) {
throw QueryParsingErrors.columnAliasInOperationNotAllowedError(op, ctx)
}
if (ident != null) Some(ident.getText) else None
}
}
  /** Creates a [[DeleteFromTable]] plan, with optional table alias and WHERE predicate. */
  override def visitDeleteFromTable(
      ctx: DeleteFromTableContext): LogicalPlan = withOrigin(ctx) {
    val table = createUnresolvedRelation(ctx.multipartIdentifier())
    val tableAlias = getTableAliasWithoutColumnAlias(ctx.tableAlias(), "DELETE")
    val aliasedTable = tableAlias.map(SubqueryAlias(_, table)).getOrElse(table)
    val predicate = if (ctx.whereClause() != null) {
      Some(expression(ctx.whereClause().booleanExpression()))
    } else {
      None
    }
    DeleteFromTable(aliasedTable, predicate)
  }
  /** Creates an [[UpdateTable]] plan from the SET assignments and optional WHERE predicate. */
  override def visitUpdateTable(ctx: UpdateTableContext): LogicalPlan = withOrigin(ctx) {
    val table = createUnresolvedRelation(ctx.multipartIdentifier())
    val tableAlias = getTableAliasWithoutColumnAlias(ctx.tableAlias(), "UPDATE")
    val aliasedTable = tableAlias.map(SubqueryAlias(_, table)).getOrElse(table)
    val assignments = withAssignments(ctx.setClause().assignmentList())
    val predicate = if (ctx.whereClause() != null) {
      Some(expression(ctx.whereClause().booleanExpression()))
    } else {
      None
    }

    UpdateTable(aliasedTable, assignments, predicate)
  }
  /** Converts a SET assignment list (`col = expr, ...`) into [[Assignment]] nodes. */
  private def withAssignments(assignCtx: SqlBaseParser.AssignmentListContext): Seq[Assignment] =
    withOrigin(assignCtx) {
      assignCtx.assignment().asScala.map { assign =>
        Assignment(UnresolvedAttribute(visitMultipartIdentifier(assign.key)),
          expression(assign.value))
      }.toSeq
    }
  /**
   * Creates a [[MergeIntoTable]] plan.
   *
   * Builds the (optionally aliased) target and source relations, the ON condition,
   * and the WHEN MATCHED / WHEN NOT MATCHED action lists, enforcing the grammar's
   * structural rules: at least one WHEN clause, and only the last clause of each
   * kind may omit its condition.
   */
  override def visitMergeIntoTable(ctx: MergeIntoTableContext): LogicalPlan = withOrigin(ctx) {
    val targetTable = createUnresolvedRelation(ctx.target)
    val targetTableAlias = getTableAliasWithoutColumnAlias(ctx.targetAlias, "MERGE")
    val aliasedTarget = targetTableAlias.map(SubqueryAlias(_, targetTable)).getOrElse(targetTable)

    // The source is either a table reference or an inline subquery.
    val sourceTableOrQuery = if (ctx.source != null) {
      createUnresolvedRelation(ctx.source)
    } else if (ctx.sourceQuery != null) {
      visitQuery(ctx.sourceQuery)
    } else {
      throw QueryParsingErrors.emptySourceForMergeError(ctx)
    }
    val sourceTableAlias = getTableAliasWithoutColumnAlias(ctx.sourceAlias, "MERGE")
    val aliasedSource =
      sourceTableAlias.map(SubqueryAlias(_, sourceTableOrQuery)).getOrElse(sourceTableOrQuery)

    val mergeCondition = expression(ctx.mergeCondition)

    // WHEN MATCHED: DELETE or UPDATE (the latter either `UPDATE SET *` or explicit assignments).
    val matchedActions = ctx.matchedClause().asScala.map {
      clause => {
        if (clause.matchedAction().DELETE() != null) {
          DeleteAction(Option(clause.matchedCond).map(expression))
        } else if (clause.matchedAction().UPDATE() != null) {
          val condition = Option(clause.matchedCond).map(expression)
          if (clause.matchedAction().ASTERISK() != null) {
            UpdateStarAction(condition)
          } else {
            UpdateAction(condition, withAssignments(clause.matchedAction().assignmentList()))
          }
        } else {
          // It should not be here.
          throw QueryParsingErrors.unrecognizedMatchedActionError(clause)
        }
      }
    }
    // WHEN NOT MATCHED: INSERT (either `INSERT *` or an explicit column/value list).
    val notMatchedActions = ctx.notMatchedClause().asScala.map {
      clause => {
        if (clause.notMatchedAction().INSERT() != null) {
          val condition = Option(clause.notMatchedCond).map(expression)
          if (clause.notMatchedAction().ASTERISK() != null) {
            InsertStarAction(condition)
          } else {
            val columns = clause.notMatchedAction().columns.multipartIdentifier()
              .asScala.map(attr => UnresolvedAttribute(visitMultipartIdentifier(attr)))
            val values = clause.notMatchedAction().expression().asScala.map(expression)
            if (columns.size != values.size) {
              throw QueryParsingErrors.insertedValueNumberNotMatchFieldNumberError(clause)
            }

            InsertAction(condition, columns.zip(values).map(kv => Assignment(kv._1, kv._2)).toSeq)
          }
        } else {
          // It should not be here.
          throw QueryParsingErrors.unrecognizedNotMatchedActionError(clause)
        }
      }
    }
    if (matchedActions.isEmpty && notMatchedActions.isEmpty) {
      throw QueryParsingErrors.mergeStatementWithoutWhenClauseError(ctx)
    }
    // children being empty means that the condition is not set
    val matchedActionSize = matchedActions.length
    if (matchedActionSize >= 2 && !matchedActions.init.forall(_.condition.nonEmpty)) {
      throw QueryParsingErrors.nonLastMatchedClauseOmitConditionError(ctx)
    }
    val notMatchedActionSize = notMatchedActions.length
    if (notMatchedActionSize >= 2 && !notMatchedActions.init.forall(_.condition.nonEmpty)) {
      throw QueryParsingErrors.nonLastNotMatchedClauseOmitConditionError(ctx)
    }

    MergeIntoTable(
      aliasedTarget,
      aliasedSource,
      mergeCondition,
      matchedActions.toSeq,
      notMatchedActions.toSeq)
  }
  /**
   * Create a partition specification map.
   *
   * Keys without a value (dynamic partitions) map to None. Values are rendered to
   * strings via [[visitStringConstant]]; NULL rendering follows the legacy flag.
   */
  override def visitPartitionSpec(
      ctx: PartitionSpecContext): Map[String, Option[String]] = withOrigin(ctx) {
    val legacyNullAsString =
      conf.getConf(SQLConf.LEGACY_PARSE_NULL_PARTITION_SPEC_AS_STRING_LITERAL)
    val parts = ctx.partitionVal.asScala.map { pVal =>
      val name = pVal.identifier.getText
      val value = Option(pVal.constant).map(v => visitStringConstant(v, legacyNullAsString))
      name -> value
    }
    // Before calling `toMap`, we check duplicated keys to avoid silently ignore partition values
    // in partition spec like PARTITION(a='1', b='2', a='3'). The real semantical check for
    // partition columns will be done in analyzer.
    if (conf.caseSensitiveAnalysis) {
      checkDuplicateKeys(parts.toSeq, ctx)
    } else {
      checkDuplicateKeys(parts.map(kv => kv._1.toLowerCase(Locale.ROOT) -> kv._2).toSeq, ctx)
    }
    parts.toMap
  }
  /**
   * Create a partition specification map without optional values.
   *
   * Raises a parse error when any partition key has no value (i.e. would be dynamic).
   */
  protected def visitNonOptionalPartitionSpec(
      ctx: PartitionSpecContext): Map[String, String] = withOrigin(ctx) {
    visitPartitionSpec(ctx).map {
      case (key, None) => throw QueryParsingErrors.emptyPartitionKeyError(key, ctx)
      case (key, Some(value)) => key -> value
    }
  }
  /**
   * Convert a constant of any type into a string. This is typically used in DDL commands, and its
   * main purpose is to prevent slight differences due to back to back conversions i.e.:
   * String -> Literal -> String.
   *
   * Only literal constants are accepted; anything else is rejected.
   */
  protected def visitStringConstant(
      ctx: ConstantContext,
      legacyNullAsString: Boolean): String = withOrigin(ctx) {
    expression(ctx) match {
      // NULL handling depends on the legacy flag: either a real null or the string "null".
      case Literal(null, _) if !legacyNullAsString => null
      case l @ Literal(null, _) => l.toString
      case l: Literal =>
        // TODO For v2 commands, we will cast the string back to its actual value,
        // which is a waste and can be improved in the future.
        Cast(l, StringType, Some(conf.sessionLocalTimeZone)).eval().toString
      case other =>
        throw new IllegalArgumentException(s"Only literals are allowed in the " +
          s"partition spec, but got ${other.sql}")
    }
  }
  /**
   * Add ORDER BY/SORT BY/CLUSTER BY/DISTRIBUTE BY/LIMIT/WINDOWS clauses to the logical plan. These
   * clauses determine the shape (ordering/partitioning/rows) of the query result.
   *
   * Only one of the ordering/distribution combinations below is legal; any other
   * mix raises a parse error.
   */
  private def withQueryResultClauses(
      ctx: QueryOrganizationContext,
      query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
    import ctx._

    // Handle ORDER BY, SORT BY, DISTRIBUTE BY, and CLUSTER BY clause.
    val withOrder = if (
      !order.isEmpty && sort.isEmpty && distributeBy.isEmpty && clusterBy.isEmpty) {
      // ORDER BY ...
      Sort(order.asScala.map(visitSortItem).toSeq, global = true, query)
    } else if (order.isEmpty && !sort.isEmpty && distributeBy.isEmpty && clusterBy.isEmpty) {
      // SORT BY ...
      Sort(sort.asScala.map(visitSortItem).toSeq, global = false, query)
    } else if (order.isEmpty && sort.isEmpty && !distributeBy.isEmpty && clusterBy.isEmpty) {
      // DISTRIBUTE BY ...
      withRepartitionByExpression(ctx, expressionList(distributeBy), query)
    } else if (order.isEmpty && !sort.isEmpty && !distributeBy.isEmpty && clusterBy.isEmpty) {
      // SORT BY ... DISTRIBUTE BY ...
      Sort(
        sort.asScala.map(visitSortItem).toSeq,
        global = false,
        withRepartitionByExpression(ctx, expressionList(distributeBy), query))
    } else if (order.isEmpty && sort.isEmpty && distributeBy.isEmpty && !clusterBy.isEmpty) {
      // CLUSTER BY ...
      // CLUSTER BY e  ==  DISTRIBUTE BY e SORT BY e ASC
      val expressions = expressionList(clusterBy)
      Sort(
        expressions.map(SortOrder(_, Ascending)),
        global = false,
        withRepartitionByExpression(ctx, expressions, query))
    } else if (order.isEmpty && sort.isEmpty && distributeBy.isEmpty && clusterBy.isEmpty) {
      // [EMPTY]
      query
    } else {
      throw QueryParsingErrors.combinationQueryResultClausesUnsupportedError(ctx)
    }

    // WINDOWS
    val withWindow = withOrder.optionalMap(windowClause)(withWindowClause)

    // LIMIT
    // - LIMIT ALL is the same as omitting the LIMIT clause
    withWindow.optional(limit) {
      Limit(typedVisit(limit), withWindow)
    }
  }
  /**
   * Create a clause for DISTRIBUTE BY.
   *
   * The base implementation always raises a parse error — presumably subclasses that support
   * repartitioning override this (TODO confirm against the concrete builder subclasses).
   */
  protected def withRepartitionByExpression(
      ctx: QueryOrganizationContext,
      expressions: Seq[Expression],
      query: LogicalPlan): LogicalPlan = {
    throw QueryParsingErrors.distributeByUnsupportedError(ctx)
  }
override def visitTransformQuerySpecification(
ctx: TransformQuerySpecificationContext): LogicalPlan = withOrigin(ctx) {
val from = OneRowRelation().optional(ctx.fromClause) {
visitFromClause(ctx.fromClause)
}
withTransformQuerySpecification(
ctx,
ctx.transformClause,
ctx.lateralView,
ctx.whereClause,
ctx.aggregationClause,
ctx.havingClause,
ctx.windowClause,
from
)
}
override def visitRegularQuerySpecification(
ctx: RegularQuerySpecificationContext): LogicalPlan = withOrigin(ctx) {
val from = OneRowRelation().optional(ctx.fromClause) {
visitFromClause(ctx.fromClause)
}
withSelectQuerySpecification(
ctx,
ctx.selectClause,
ctx.lateralView,
ctx.whereClause,
ctx.aggregationClause,
ctx.havingClause,
ctx.windowClause,
from
)
}
override def visitNamedExpressionSeq(
ctx: NamedExpressionSeqContext): Seq[Expression] = {
Option(ctx).toSeq
.flatMap(_.namedExpression.asScala)
.map(typedVisit[Expression])
}
override def visitExpressionSeq(ctx: ExpressionSeqContext): Seq[Expression] = {
Option(ctx).toSeq
.flatMap(_.expression.asScala)
.map(typedVisit[Expression])
}
/**
* Create a logical plan using a having clause.
*/
private def withHavingClause(
ctx: HavingClauseContext, plan: LogicalPlan): LogicalPlan = {
// Note that we add a cast to non-predicate expressions. If the expression itself is
// already boolean, the optimizer will get rid of the unnecessary cast.
val predicate = expression(ctx.booleanExpression) match {
case p: Predicate => p
case e => Cast(e, BooleanType)
}
UnresolvedHaving(predicate, plan)
}
/**
* Create a logical plan using a where clause.
*/
private def withWhereClause(ctx: WhereClauseContext, plan: LogicalPlan): LogicalPlan = {
Filter(expression(ctx.booleanExpression), plan)
}
  /**
   * Add a hive-style transform (SELECT TRANSFORM/MAP/REDUCE) query specification to a logical plan.
   *
   * The transform's output attributes come from the optional return-column list: typed columns,
   * untyped string columns, or — when no list is given — a schema-less (key, value) string pair.
   */
  private def withTransformQuerySpecification(
      ctx: ParserRuleContext,
      transformClause: TransformClauseContext,
      lateralView: java.util.List[LateralViewContext],
      whereClause: WhereClauseContext,
      aggregationClause: AggregationClauseContext,
      havingClause: HavingClauseContext,
      windowClause: WindowClauseContext,
      relation: LogicalPlan): LogicalPlan = withOrigin(ctx) {
    // DISTINCT/ALL quantifiers are not supported together with TRANSFORM.
    if (transformClause.setQuantifier != null) {
      throw QueryParsingErrors.transformNotSupportQuantifierError(transformClause.setQuantifier)
    }
    // Create the attributes.
    val (attributes, schemaLess) = if (transformClause.colTypeList != null) {
      // Typed return columns.
      (createSchema(transformClause.colTypeList).toAttributes, false)
    } else if (transformClause.identifierSeq != null) {
      // Untyped return columns.
      val attrs = visitIdentifierSeq(transformClause.identifierSeq).map { name =>
        AttributeReference(name, StringType, nullable = true)()
      }
      (attrs, false)
    } else {
      // No column list at all: fall back to a schema-less (key, value) string pair.
      (Seq(AttributeReference("key", StringType)(),
        AttributeReference("value", StringType)()), true)
    }
    // Build the child plan from the shared SELECT-clause machinery (no DISTINCT for TRANSFORM).
    val plan = visitCommonSelectQueryClausePlan(
      relation,
      visitExpressionSeq(transformClause.expressionSeq),
      lateralView,
      whereClause,
      aggregationClause,
      havingClause,
      windowClause,
      isDistinct = false)
    ScriptTransformation(
      string(transformClause.script),
      attributes,
      plan,
      withScriptIOSchema(
        ctx,
        transformClause.inRowFormat,
        transformClause.recordWriter,
        transformClause.outRowFormat,
        transformClause.recordReader,
        schemaLess
      )
    )
  }
  /**
   * Add a regular (SELECT) query specification to a logical plan. The query specification
   * is the core of the logical plan, this is where sourcing (FROM clause), projection (SELECT),
   * aggregation (GROUP BY ... HAVING ...) and filtering (WHERE) takes place.
   *
   * Note that query hints are ignored (both by the parser and the builder).
   */
  private def withSelectQuerySpecification(
      ctx: ParserRuleContext,
      selectClause: SelectClauseContext,
      lateralView: java.util.List[LateralViewContext],
      whereClause: WhereClauseContext,
      aggregationClause: AggregationClauseContext,
      havingClause: HavingClauseContext,
      windowClause: WindowClauseContext,
      relation: LogicalPlan): LogicalPlan = withOrigin(ctx) {
    // SELECT DISTINCT ... wraps the result in a Distinct node further down the chain.
    val isDistinct = selectClause.setQuantifier() != null &&
      selectClause.setQuantifier().DISTINCT() != null
    val plan = visitCommonSelectQueryClausePlan(
      relation,
      visitNamedExpressionSeq(selectClause.namedExpressionSeq),
      lateralView,
      whereClause,
      aggregationClause,
      havingClause,
      windowClause,
      isDistinct)
    // Hint
    selectClause.hints.asScala.foldRight(plan)(withHints)
  }
  /**
   * Assemble the operator stack shared by SELECT and TRANSFORM query specifications, in order:
   * lateral views -> WHERE filter -> aggregation/projection (with HAVING) -> DISTINCT -> WINDOW.
   * The stacking order is significant and must not be changed.
   */
  def visitCommonSelectQueryClausePlan(
      relation: LogicalPlan,
      expressions: Seq[Expression],
      lateralView: java.util.List[LateralViewContext],
      whereClause: WhereClauseContext,
      aggregationClause: AggregationClauseContext,
      havingClause: HavingClauseContext,
      windowClause: WindowClauseContext,
      isDistinct: Boolean): LogicalPlan = {
    // Add lateral views.
    val withLateralView = lateralView.asScala.foldLeft(relation)(withGenerate)
    // Add where.
    val withFilter = withLateralView.optionalMap(whereClause)(withWhereClause)
    // Add aggregation or a project.
    val namedExpressions = expressions.map {
      case e: NamedExpression => e
      case e: Expression => UnresolvedAlias(e)
    }
    // An empty SELECT list yields no Project node at all.
    def createProject() = if (namedExpressions.nonEmpty) {
      Project(namedExpressions, withFilter)
    } else {
      withFilter
    }
    val withProject = if (aggregationClause == null && havingClause != null) {
      if (conf.getConf(SQLConf.LEGACY_HAVING_WITHOUT_GROUP_BY_AS_WHERE)) {
        // If the legacy conf is set, treat HAVING without GROUP BY as WHERE.
        val predicate = expression(havingClause.booleanExpression) match {
          case p: Predicate => p
          case e => Cast(e, BooleanType)
        }
        Filter(predicate, createProject())
      } else {
        // According to SQL standard, HAVING without GROUP BY means global aggregate.
        withHavingClause(havingClause, Aggregate(Nil, namedExpressions, withFilter))
      }
    } else if (aggregationClause != null) {
      val aggregate = withAggregationClause(aggregationClause, namedExpressions, withFilter)
      aggregate.optionalMap(havingClause)(withHavingClause)
    } else {
      // When hitting this branch, `having` must be null.
      createProject()
    }
    // Distinct
    val withDistinct = if (isDistinct) {
      Distinct(withProject)
    } else {
      withProject
    }
    // Window
    val withWindow = withDistinct.optionalMap(windowClause)(withWindowClause)
    withWindow
  }
  // Script Transform's input/output format:
  // (row format properties, serde class, serde properties, record reader/writer class).
  type ScriptIOFormat =
    (Seq[(String, String)], Option[String], Seq[(String, String)], Option[String])
  /**
   * Extract the DELIMITED row-format properties for a script transform, keyed by the old
   * parsers' token names (serde class/properties and reader/writer are always empty here).
   */
  protected def getRowFormatDelimited(ctx: RowFormatDelimitedContext): ScriptIOFormat = {
    // TODO we should use the visitRowFormatDelimited function here. However HiveScriptIOSchema
    // expects a seq of pairs in which the old parsers' token names are used as keys.
    // Transforming the result of visitRowFormatDelimited would be quite a bit messier than
    // retrieving the key value pairs ourselves.
    val entries = entry("TOK_TABLEROWFORMATFIELD", ctx.fieldsTerminatedBy) ++
      entry("TOK_TABLEROWFORMATCOLLITEMS", ctx.collectionItemsTerminatedBy) ++
      entry("TOK_TABLEROWFORMATMAPKEYS", ctx.keysTerminatedBy) ++
      entry("TOK_TABLEROWFORMATNULL", ctx.nullDefinedAs) ++
      Option(ctx.linesSeparatedBy).toSeq.map { token =>
        val value = string(token)
        // Only '\n' is accepted as the line terminator.
        validate(
          value == "\\n",
          s"LINES TERMINATED BY only supports newline '\\\\n' right now: $value",
          ctx)
        "TOK_TABLEROWFORMATLINES" -> value
      }
    (entries, None, Seq.empty, None)
  }
  /**
   * Create a [[ScriptInputOutputSchema]].
   *
   * Resolves the input and output row formats independently; SERDE row formats are rejected,
   * and a missing row format falls back to the built-in default serde.
   */
  protected def withScriptIOSchema(
      ctx: ParserRuleContext,
      inRowFormat: RowFormatContext,
      recordWriter: Token,
      outRowFormat: RowFormatContext,
      recordReader: Token,
      schemaLess: Boolean): ScriptInputOutputSchema = {
    def format(fmt: RowFormatContext): ScriptIOFormat = fmt match {
      case c: RowFormatDelimitedContext =>
        getRowFormatDelimited(c)
      case c: RowFormatSerdeContext =>
        throw QueryParsingErrors.transformWithSerdeUnsupportedError(ctx)
      // SPARK-32106: When there is no definition about format, we return empty result
      // to use a built-in default Serde in SparkScriptTransformationExec.
      case null =>
        (Nil, None, Seq.empty, None)
    }
    val (inFormat, inSerdeClass, inSerdeProps, reader) = format(inRowFormat)
    val (outFormat, outSerdeClass, outSerdeProps, writer) = format(outRowFormat)
    ScriptInputOutputSchema(
      inFormat, outFormat,
      inSerdeClass, outSerdeClass,
      inSerdeProps, outSerdeProps,
      reader, writer,
      schemaLess)
  }
  /**
   * Create a logical plan for a given 'FROM' clause. Note that we support multiple (comma
   * separated) relations here, these get converted into a single plan by condition-less inner join.
   */
  override def visitFromClause(ctx: FromClauseContext): LogicalPlan = withOrigin(ctx) {
    // Fold the comma-separated relations left to right; the seed is null, so the first
    // relation passes through unchanged (optionalMap skips the join when left is null).
    val from = ctx.relation.asScala.foldLeft(null: LogicalPlan) { (left, relation) =>
      val right = plan(relation.relationPrimary)
      val join = right.optionalMap(left) { (left, right) =>
        if (relation.LATERAL != null) {
          // LATERAL is only valid in front of an aliased subquery.
          if (!relation.relationPrimary.isInstanceOf[AliasedQueryContext]) {
            throw QueryParsingErrors.invalidLateralJoinRelationError(relation.relationPrimary)
          }
          LateralJoin(left, LateralSubquery(right), Inner, None)
        } else {
          Join(left, right, Inner, None, JoinHint.NONE)
        }
      }
      // Apply explicit JOIN ... clauses attached to this relation.
      withJoinRelations(join, relation)
    }
    if (ctx.pivotClause() != null) {
      if (!ctx.lateralView.isEmpty) {
        throw QueryParsingErrors.lateralWithPivotInFromClauseNotAllowedError(ctx)
      }
      withPivot(ctx.pivotClause, from)
    } else {
      ctx.lateralView.asScala.foldLeft(from)(withGenerate)
    }
  }
/**
* Connect two queries by a Set operator.
*
* Supported Set operators are:
* - UNION [ DISTINCT | ALL ]
* - EXCEPT [ DISTINCT | ALL ]
* - MINUS [ DISTINCT | ALL ]
* - INTERSECT [DISTINCT | ALL]
*/
override def visitSetOperation(ctx: SetOperationContext): LogicalPlan = withOrigin(ctx) {
val left = plan(ctx.left)
val right = plan(ctx.right)
val all = Option(ctx.setQuantifier()).exists(_.ALL != null)
ctx.operator.getType match {
case SqlBaseParser.UNION if all =>
Union(left, right)
case SqlBaseParser.UNION =>
Distinct(Union(left, right))
case SqlBaseParser.INTERSECT if all =>
Intersect(left, right, isAll = true)
case SqlBaseParser.INTERSECT =>
Intersect(left, right, isAll = false)
case SqlBaseParser.EXCEPT if all =>
Except(left, right, isAll = true)
case SqlBaseParser.EXCEPT =>
Except(left, right, isAll = false)
case SqlBaseParser.SETMINUS if all =>
Except(left, right, isAll = true)
case SqlBaseParser.SETMINUS =>
Except(left, right, isAll = false)
}
}
  /**
   * Add a [[WithWindowDefinition]] operator to a logical plan.
   *
   * Validates that each window name is defined only once and resolves one level of
   * window references (`w2 AS w1`); a reference to a reference is rejected.
   */
  private def withWindowClause(
      ctx: WindowClauseContext,
      query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
    // Collect all window specifications defined in the WINDOW clause.
    val baseWindowTuples = ctx.namedWindow.asScala.map {
      wCtx =>
        (wCtx.name.getText, typedVisit[WindowSpec](wCtx.windowSpec))
    }
    // Reject duplicate window names.
    baseWindowTuples.groupBy(_._1).foreach { kv =>
      if (kv._2.size > 1) {
        throw QueryParsingErrors.repetitiveWindowDefinitionError(kv._1, ctx)
      }
    }
    val baseWindowMap = baseWindowTuples.toMap
    // Handle cases like
    // window w1 as (partition by p_mfgr order by p_name
    //               range between 2 preceding and 2 following),
    //        w2 as w1
    val windowMapView = baseWindowMap.mapValues {
      case WindowSpecReference(name) =>
        baseWindowMap.get(name) match {
          case Some(spec: WindowSpecDefinition) =>
            spec
          case Some(ref) =>
            // A reference that points at another reference is not allowed.
            throw QueryParsingErrors.invalidWindowReferenceError(name, ctx)
          case None =>
            throw QueryParsingErrors.cannotResolveWindowReferenceError(name, ctx)
        }
      case spec: WindowSpecDefinition => spec
    }
    // Note that mapValues creates a view instead of materialized map. We force materialization by
    // mapping over identity.
    WithWindowDefinition(windowMapView.map(identity).toMap, query)
  }
  /**
   * Add an [[Aggregate]] to a logical plan.
   *
   * Handles plain GROUP BY, GROUP BY ... GROUPING SETS, GROUP BY ... WITH CUBE/ROLLUP, and
   * the newer GROUP BY CUBE(...)/ROLLUP(...)/GROUPING SETS(...) analytics syntax.
   */
  private def withAggregationClause(
      ctx: AggregationClauseContext,
      selectExpressions: Seq[NamedExpression],
      query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
    if (ctx.groupingExpressionsWithGroupingAnalytics.isEmpty) {
      val groupByExpressions = expressionList(ctx.groupingExpressions)
      if (ctx.GROUPING != null) {
        // GROUP BY ... GROUPING SETS (...)
        // `groupByExpressions` can be non-empty for Hive compatibility. It may add extra grouping
        // expressions that do not exist in GROUPING SETS (...), and the value is always null.
        // For example, `SELECT a, b, c FROM ... GROUP BY a, b, c GROUPING SETS (a, b)`, the output
        // of column `c` is always null.
        val groupingSets =
          ctx.groupingSet.asScala.map(_.expression.asScala.map(e => expression(e)).toSeq)
        Aggregate(Seq(GroupingSets(groupingSets.toSeq, groupByExpressions)),
          selectExpressions, query)
      } else {
        // GROUP BY .... (WITH CUBE | WITH ROLLUP)?
        val mappedGroupByExpressions = if (ctx.CUBE != null) {
          Seq(Cube(groupByExpressions.map(Seq(_))))
        } else if (ctx.ROLLUP != null) {
          Seq(Rollup(groupByExpressions.map(Seq(_))))
        } else {
          groupByExpressions
        }
        Aggregate(mappedGroupByExpressions, selectExpressions, query)
      }
    } else {
      // Each grouping expression is either a plain expression or a CUBE/ROLLUP/GROUPING SETS call.
      val groupByExpressions =
        ctx.groupingExpressionsWithGroupingAnalytics.asScala
          .map(groupByExpr => {
            val groupingAnalytics = groupByExpr.groupingAnalytics
            if (groupingAnalytics != null) {
              visitGroupingAnalytics(groupingAnalytics)
            } else {
              expression(groupByExpr.expression)
            }
          })
      Aggregate(groupByExpressions.toSeq, selectExpressions, query)
    }
  }
  /**
   * Create a [[BaseGroupingSets]] (Cube, Rollup, or GroupingSets) from a grouping-analytics
   * clause. CUBE and ROLLUP reject empty grouping sets; GROUPING SETS may nest further
   * CUBE/ROLLUP/GROUPING SETS elements, which are expanded recursively.
   */
  override def visitGroupingAnalytics(
      groupingAnalytics: GroupingAnalyticsContext): BaseGroupingSets = {
    val groupingSets = groupingAnalytics.groupingSet.asScala
      .map(_.expression.asScala.map(e => expression(e)).toSeq)
    if (groupingAnalytics.CUBE != null) {
      // CUBE(A, B, (A, B), ()) is not supported.
      if (groupingSets.exists(_.isEmpty)) {
        throw QueryParsingErrors.invalidGroupingSetError("CUBE", groupingAnalytics)
      }
      Cube(groupingSets.toSeq)
    } else if (groupingAnalytics.ROLLUP != null) {
      // ROLLUP(A, B, (A, B), ()) is not supported.
      if (groupingSets.exists(_.isEmpty)) {
        throw QueryParsingErrors.invalidGroupingSetError("ROLLUP", groupingAnalytics)
      }
      Rollup(groupingSets.toSeq)
    } else {
      assert(groupingAnalytics.GROUPING != null && groupingAnalytics.SETS != null)
      // Flatten nested analytics elements into their constituent grouping sets.
      val groupingSets = groupingAnalytics.groupingElement.asScala.flatMap { expr =>
        val groupingAnalytics = expr.groupingAnalytics()
        if (groupingAnalytics != null) {
          visitGroupingAnalytics(groupingAnalytics).selectedGroupByExprs
        } else {
          Seq(expr.groupingSet().expression().asScala.map(e => expression(e)).toSeq)
        }
      }
      GroupingSets(groupingSets.toSeq)
    }
  }
/**
* Add [[UnresolvedHint]]s to a logical plan.
*/
private def withHints(
ctx: HintContext,
query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
var plan = query
ctx.hintStatements.asScala.reverse.foreach { stmt =>
plan = UnresolvedHint(stmt.hintName.getText,
stmt.parameters.asScala.map(expression).toSeq, plan)
}
plan
}
/**
* Add a [[Pivot]] to a logical plan.
*/
private def withPivot(
ctx: PivotClauseContext,
query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
val aggregates = Option(ctx.aggregates).toSeq
.flatMap(_.namedExpression.asScala)
.map(typedVisit[Expression])
val pivotColumn = if (ctx.pivotColumn.identifiers.size == 1) {
UnresolvedAttribute.quoted(ctx.pivotColumn.identifier.getText)
} else {
CreateStruct(
ctx.pivotColumn.identifiers.asScala.map(
identifier => UnresolvedAttribute.quoted(identifier.getText)).toSeq)
}
val pivotValues = ctx.pivotValues.asScala.map(visitPivotValue)
Pivot(None, pivotColumn, pivotValues.toSeq, aggregates, query)
}
/**
* Create a Pivot column value with or without an alias.
*/
override def visitPivotValue(ctx: PivotValueContext): Expression = withOrigin(ctx) {
val e = expression(ctx.expression)
if (ctx.identifier != null) {
Alias(e, ctx.identifier.getText)()
} else {
e
}
}
  /**
   * Add a [[Generate]] (Lateral View) to a logical plan.
   *
   * The generator function is left unresolved; OUTER keeps rows that generate no output, and
   * the table alias is lower-cased.
   */
  private def withGenerate(
      query: LogicalPlan,
      ctx: LateralViewContext): LogicalPlan = withOrigin(ctx) {
    val expressions = expressionList(ctx.expression)
    Generate(
      UnresolvedGenerator(visitFunctionName(ctx.qualifiedName), expressions),
      unrequiredChildIndex = Nil,
      outer = ctx.OUTER != null,
      // scalastyle:off caselocale
      Some(ctx.tblName.getText.toLowerCase),
      // scalastyle:on caselocale
      ctx.colName.asScala.map(_.getText).map(UnresolvedAttribute.quoted).toSeq,
      query)
  }
/**
* Create a single relation referenced in a FROM clause. This method is used when a part of the
* join condition is nested, for example:
* {{{
* select * from t1 join (t2 cross join t3) on col1 = col2
* }}}
*/
override def visitRelation(ctx: RelationContext): LogicalPlan = withOrigin(ctx) {
withJoinRelations(plan(ctx.relationPrimary), ctx)
}
  /**
   * Join one more [[LogicalPlan]]s to the current logical plan.
   *
   * For each JOIN clause: resolve the base join type from the keyword, validate LATERAL usage,
   * then combine with the join criteria (USING / ON / NATURAL / none).
   */
  private def withJoinRelations(base: LogicalPlan, ctx: RelationContext): LogicalPlan = {
    ctx.joinRelation.asScala.foldLeft(base) { (left, join) =>
      withOrigin(join) {
        val baseJoinType = join.joinType match {
          case null => Inner
          case jt if jt.CROSS != null => Cross
          case jt if jt.FULL != null => FullOuter
          case jt if jt.SEMI != null => LeftSemi
          case jt if jt.ANTI != null => LeftAnti
          case jt if jt.LEFT != null => LeftOuter
          case jt if jt.RIGHT != null => RightOuter
          case _ => Inner
        }
        // LATERAL is only valid in front of an aliased subquery.
        if (join.LATERAL != null && !join.right.isInstanceOf[AliasedQueryContext]) {
          throw QueryParsingErrors.invalidLateralJoinRelationError(join.right)
        }
        // Resolve the join type and join condition
        val (joinType, condition) = Option(join.joinCriteria) match {
          case Some(c) if c.USING != null =>
            if (join.LATERAL != null) {
              throw QueryParsingErrors.lateralJoinWithUsingJoinUnsupportedError(ctx)
            }
            (UsingJoin(baseJoinType, visitIdentifierList(c.identifierList)), None)
          case Some(c) if c.booleanExpression != null =>
            (baseJoinType, Option(expression(c.booleanExpression)))
          case Some(c) =>
            throw QueryParsingErrors.joinCriteriaUnimplementedError(c, ctx)
          case None if join.NATURAL != null =>
            if (join.LATERAL != null) {
              throw QueryParsingErrors.lateralJoinWithNaturalJoinUnsupportedError(ctx)
            }
            if (baseJoinType == Cross) {
              throw QueryParsingErrors.naturalCrossJoinUnsupportedError(ctx)
            }
            (NaturalJoin(baseJoinType), None)
          case None =>
            (baseJoinType, None)
        }
        if (join.LATERAL != null) {
          // Lateral joins support only inner, cross, and left-outer join types.
          if (!Seq(Inner, Cross, LeftOuter).contains(joinType)) {
            throw QueryParsingErrors.unsupportedLateralJoinTypeError(ctx, joinType.toString)
          }
          LateralJoin(left, LateralSubquery(plan(join.right)), joinType, condition)
        } else {
          Join(left, plan(join.right), joinType, condition, JoinHint.NONE)
        }
      }
    }
  }
  /**
   * Add a [[Sample]] to a logical plan.
   *
   * This currently supports the following sampling methods:
   * - TABLESAMPLE(x ROWS): Sample the table down to the given number of rows.
   * - TABLESAMPLE(x PERCENT): Sample the table down to the given percentage. Note that percentages
   * are defined as a number between 0 and 100.
   * - TABLESAMPLE(BUCKET x OUT OF y): Sample the table down to a 'x' divided by 'y' fraction.
   */
  private def withSample(ctx: SampleContext, query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
    // Create a sampled plan if we need one.
    def sample(fraction: Double): Sample = {
      // The range of fraction accepted by Sample is [0, 1]. Because Hive's block sampling
      // function takes X PERCENT as the input and the range of X is [0, 100], we need to
      // adjust the fraction.
      val eps = RandomSampler.roundingEpsilon
      validate(fraction >= 0.0 - eps && fraction <= 1.0 + eps,
        s"Sampling fraction ($fraction) must be on interval [0, 1]",
        ctx)
      // NOTE(review): the seed is chosen randomly here, so the sample is nondeterministic
      // across parses.
      Sample(0.0, fraction, withReplacement = false, (math.random * 1000).toInt, query)
    }
    if (ctx.sampleMethod() == null) {
      throw QueryParsingErrors.emptyInputForTableSampleError(ctx)
    }
    ctx.sampleMethod() match {
      case ctx: SampleByRowsContext =>
        // x ROWS is implemented as a plain LIMIT.
        Limit(expression(ctx.expression), query)
      case ctx: SampleByPercentileContext =>
        val fraction = ctx.percentage.getText.toDouble
        val sign = if (ctx.negativeSign == null) 1 else -1
        sample(sign * fraction / 100.0d)
      case ctx: SampleByBytesContext =>
        val bytesStr = ctx.bytes.getText
        if (bytesStr.matches("[0-9]+[bBkKmMgG]")) {
          throw QueryParsingErrors.tableSampleByBytesUnsupportedError("byteLengthLiteral", ctx)
        } else {
          throw QueryParsingErrors.invalidByteLengthLiteralError(bytesStr, ctx)
        }
      case ctx: SampleByBucketContext if ctx.ON() != null =>
        if (ctx.identifier != null) {
          throw QueryParsingErrors.tableSampleByBytesUnsupportedError(
            "BUCKET x OUT OF y ON colname", ctx)
        } else {
          throw QueryParsingErrors.tableSampleByBytesUnsupportedError(
            "BUCKET x OUT OF y ON function", ctx)
        }
      case ctx: SampleByBucketContext =>
        sample(ctx.numerator.getText.toDouble / ctx.denominator.getText.toDouble)
    }
  }
  /**
   * Create a logical plan for a sub-query: simply the plan of the nested query.
   */
  override def visitSubquery(ctx: SubqueryContext): LogicalPlan = withOrigin(ctx) {
    plan(ctx.query)
  }
/**
* Create an un-aliased table reference. This is typically used for top-level table references,
* for example:
* {{{
* INSERT INTO db.tbl2
* TABLE db.tbl1
* }}}
*/
override def visitTable(ctx: TableContext): LogicalPlan = withOrigin(ctx) {
UnresolvedRelation(visitMultipartIdentifier(ctx.multipartIdentifier))
}
/**
* Create an aliased table reference. This is typically used in FROM clauses.
*/
override def visitTableName(ctx: TableNameContext): LogicalPlan = withOrigin(ctx) {
val tableId = visitMultipartIdentifier(ctx.multipartIdentifier)
val table = mayApplyAliasPlan(ctx.tableAlias, UnresolvedRelation(tableId))
table.optionalMap(ctx.sample)(withSample)
}
/**
* Create a table-valued function call with arguments, e.g. range(1000)
*/
override def visitTableValuedFunction(ctx: TableValuedFunctionContext)
: LogicalPlan = withOrigin(ctx) {
val func = ctx.functionTable
val aliases = if (func.tableAlias.identifierList != null) {
visitIdentifierList(func.tableAlias.identifierList)
} else {
Seq.empty
}
val name = getFunctionIdentifier(func.functionName)
if (name.database.nonEmpty) {
operationNotAllowed(s"table valued function cannot specify database name: $name", ctx)
}
val tvf = UnresolvedTableValuedFunction(
name, func.expression.asScala.map(expression).toSeq, aliases)
tvf.optionalMap(func.tableAlias.strictIdentifier)(aliasPlan)
}
/**
* Create an inline table (a virtual table in Hive parlance).
*/
override def visitInlineTable(ctx: InlineTableContext): LogicalPlan = withOrigin(ctx) {
// Get the backing expressions.
val rows = ctx.expression.asScala.map { e =>
expression(e) match {
// inline table comes in two styles:
// style 1: values (1), (2), (3) -- multiple columns are supported
// style 2: values 1, 2, 3 -- only a single column is supported here
case struct: CreateNamedStruct => struct.valExprs // style 1
case child => Seq(child) // style 2
}
}
val aliases = if (ctx.tableAlias.identifierList != null) {
visitIdentifierList(ctx.tableAlias.identifierList)
} else {
Seq.tabulate(rows.head.size)(i => s"col${i + 1}")
}
val table = UnresolvedInlineTable(aliases, rows.toSeq)
table.optionalMap(ctx.tableAlias.strictIdentifier)(aliasPlan)
}
/**
* Create an alias (SubqueryAlias) for a join relation. This is practically the same as
* visitAliasedQuery and visitNamedExpression, ANTLR4 however requires us to use 3 different
* hooks. We could add alias names for output columns, for example:
* {{{
* SELECT a, b, c, d FROM (src1 s1 INNER JOIN src2 s2 ON s1.id = s2.id) dst(a, b, c, d)
* }}}
*/
override def visitAliasedRelation(ctx: AliasedRelationContext): LogicalPlan = withOrigin(ctx) {
val relation = plan(ctx.relation).optionalMap(ctx.sample)(withSample)
mayApplyAliasPlan(ctx.tableAlias, relation)
}
/**
* Create an alias (SubqueryAlias) for a sub-query. This is practically the same as
* visitAliasedRelation and visitNamedExpression, ANTLR4 however requires us to use 3 different
* hooks. We could add alias names for output columns, for example:
* {{{
* SELECT col1, col2 FROM testData AS t(col1, col2)
* }}}
*/
override def visitAliasedQuery(ctx: AliasedQueryContext): LogicalPlan = withOrigin(ctx) {
val relation = plan(ctx.query).optionalMap(ctx.sample)(withSample)
if (ctx.tableAlias.strictIdentifier == null) {
// For un-aliased subqueries, use a default alias name that is not likely to conflict with
// normal subquery names, so that parent operators can only access the columns in subquery by
// unqualified names. Users can still use this special qualifier to access columns if they
// know it, but that's not recommended.
SubqueryAlias("__auto_generated_subquery_name", relation)
} else {
mayApplyAliasPlan(ctx.tableAlias, relation)
}
}
  /**
   * Create an alias ([[SubqueryAlias]]) for a [[LogicalPlan]], using the context's text as the
   * alias name.
   */
  private def aliasPlan(alias: ParserRuleContext, plan: LogicalPlan): LogicalPlan = {
    SubqueryAlias(alias.getText, plan)
  }
/**
* If aliases specified in a FROM clause, create a subquery alias ([[SubqueryAlias]]) and
* column aliases for a [[LogicalPlan]].
*/
private def mayApplyAliasPlan(tableAlias: TableAliasContext, plan: LogicalPlan): LogicalPlan = {
if (tableAlias.strictIdentifier != null) {
val alias = tableAlias.strictIdentifier.getText
if (tableAlias.identifierList != null) {
val columnNames = visitIdentifierList(tableAlias.identifierList)
SubqueryAlias(alias, UnresolvedSubqueryColumnAliases(columnNames, plan))
} else {
SubqueryAlias(alias, plan)
}
} else {
plan
}
}
  /**
   * Create a Sequence of Strings for a parenthesis enclosed alias list. Delegates to
   * [[visitIdentifierSeq]] for the inner identifiers.
   */
  override def visitIdentifierList(ctx: IdentifierListContext): Seq[String] = withOrigin(ctx) {
    visitIdentifierSeq(ctx.identifierSeq)
  }
  /**
   * Create a Sequence of Strings for an identifier list, preserving source order.
   */
  override def visitIdentifierSeq(ctx: IdentifierSeqContext): Seq[String] = withOrigin(ctx) {
    ctx.ident.asScala.map(_.getText).toSeq
  }
/* ********************************************************************************************
* Table Identifier parsing
* ******************************************************************************************** */
/**
* Create a [[TableIdentifier]] from a 'tableName' or 'databaseName'.'tableName' pattern.
*/
override def visitTableIdentifier(
ctx: TableIdentifierContext): TableIdentifier = withOrigin(ctx) {
TableIdentifier(ctx.table.getText, Option(ctx.db).map(_.getText))
}
/**
* Create a [[FunctionIdentifier]] from a 'functionName' or 'databaseName'.'functionName' pattern.
*/
override def visitFunctionIdentifier(
ctx: FunctionIdentifierContext): FunctionIdentifier = withOrigin(ctx) {
FunctionIdentifier(ctx.function.getText, Option(ctx.db).map(_.getText))
}
  /**
   * Create a multi-part identifier as a sequence of name parts in source order.
   */
  override def visitMultipartIdentifier(ctx: MultipartIdentifierContext): Seq[String] =
    withOrigin(ctx) {
      ctx.parts.asScala.map(_.getText).toSeq
    }
/* ********************************************************************************************
* Expression parsing
* ******************************************************************************************** */
  /**
   * Create an expression from the given context. This method just passes the context on to the
   * visitor and only takes care of typing (We assume that the visitor returns an Expression here).
   */
  protected def expression(ctx: ParserRuleContext): Expression = typedVisit(ctx)
  /**
   * Create sequence of expressions from the given sequence of contexts, preserving order.
   */
  private def expressionList(trees: java.util.List[ExpressionContext]): Seq[Expression] = {
    trees.asScala.map(expression).toSeq
  }
  /**
   * Create a star (i.e. all) expression; this selects all elements (in the specified object).
   * Both un-targeted (global) and targeted aliases are supported: a missing qualified name
   * yields a global star.
   */
  override def visitStar(ctx: StarContext): Expression = withOrigin(ctx) {
    UnresolvedStar(Option(ctx.qualifiedName()).map(_.identifier.asScala.map(_.getText).toSeq))
  }
/**
* Create an aliased expression if an alias is specified. Both single and multi-aliases are
* supported.
*/
override def visitNamedExpression(ctx: NamedExpressionContext): Expression = withOrigin(ctx) {
val e = expression(ctx.expression)
if (ctx.name != null) {
Alias(e, ctx.name.getText)()
} else if (ctx.identifierList != null) {
MultiAlias(e, visitIdentifierList(ctx.identifierList))
} else {
e
}
}
  /**
   * Combine a number of boolean expressions into a balanced expression tree. These expressions are
   * either combined by a logical [[And]] or a logical [[Or]].
   *
   * A balanced binary tree is created because regular left recursive trees cause considerable
   * performance degradations and can cause stack overflows.
   */
  override def visitLogicalBinary(ctx: LogicalBinaryContext): Expression = withOrigin(ctx) {
    val expressionType = ctx.operator.getType
    val expressionCombiner = expressionType match {
      case SqlBaseParser.AND => And.apply _
      case SqlBaseParser.OR => Or.apply _
    }
    // Collect all similar left hand contexts.
    // Walk down the left spine of same-operator nodes, gathering operands right-to-left.
    val contexts = ArrayBuffer(ctx.right)
    var current = ctx.left
    def collectContexts: Boolean = current match {
      case lbc: LogicalBinaryContext if lbc.operator.getType == expressionType =>
        contexts += lbc.right
        current = lbc.left
        true
      case _ =>
        contexts += current
        false
    }
    while (collectContexts) {
      // No body - all updates take place in the collectContexts.
    }
    // Reverse the contexts to have them in the same sequence as in the SQL statement & turn them
    // into expressions.
    val expressions = contexts.reverseMap(expression)
    // Create a balanced tree by recursively splitting the operand range in half.
    def reduceToExpressionTree(low: Int, high: Int): Expression = high - low match {
      case 0 =>
        expressions(low)
      case 1 =>
        expressionCombiner(expressions(low), expressions(high))
      case x =>
        val mid = low + x / 2
        expressionCombiner(
          reduceToExpressionTree(low, mid),
          reduceToExpressionTree(mid + 1, high))
    }
    reduceToExpressionTree(0, expressions.size - 1)
  }
  /**
   * Invert a boolean expression by wrapping it in a [[Not]].
   */
  override def visitLogicalNot(ctx: LogicalNotContext): Expression = withOrigin(ctx) {
    Not(expression(ctx.booleanExpression()))
  }
  /**
   * Create a filtering correlated sub-query (EXISTS).
   */
  override def visitExists(ctx: ExistsContext): Expression = {
    Exists(plan(ctx.query))
  }
/**
* Create a comparison expression. This compares two expressions. The following comparison
* operators are supported:
* - Equal: '=' or '=='
* - Null-safe Equal: '<=>'
* - Not Equal: '<>' or '!='
* - Less than: '<'
* - Less then or Equal: '<='
* - Greater than: '>'
* - Greater then or Equal: '>='
*/
override def visitComparison(ctx: ComparisonContext): Expression = withOrigin(ctx) {
val left = expression(ctx.left)
val right = expression(ctx.right)
val operator = ctx.comparisonOperator().getChild(0).asInstanceOf[TerminalNode]
operator.getSymbol.getType match {
case SqlBaseParser.EQ =>
EqualTo(left, right)
case SqlBaseParser.NSEQ =>
EqualNullSafe(left, right)
case SqlBaseParser.NEQ | SqlBaseParser.NEQJ =>
Not(EqualTo(left, right))
case SqlBaseParser.LT =>
LessThan(left, right)
case SqlBaseParser.LTE =>
LessThanOrEqual(left, right)
case SqlBaseParser.GT =>
GreaterThan(left, right)
case SqlBaseParser.GTE =>
GreaterThanOrEqual(left, right)
}
}
/**
* Create a predicated expression. A predicated expression is a normal expression with a
* predicate attached to it, for example:
* {{{
* a + 1 IS NULL
* }}}
*/
override def visitPredicated(ctx: PredicatedContext): Expression = withOrigin(ctx) {
val e = expression(ctx.valueExpression)
if (ctx.predicate != null) {
withPredicate(e, ctx.predicate)
} else {
e
}
}
/**
* Add a predicate to the given expression. Supported expressions are:
* - (NOT) BETWEEN
* - (NOT) IN
* - (NOT) (LIKE | ILIKE) (ANY | SOME | ALL)
* - (NOT) RLIKE
* - IS (NOT) NULL.
* - IS (NOT) (TRUE | FALSE | UNKNOWN)
* - IS (NOT) DISTINCT FROM
*/
  private def withPredicate(e: Expression, ctx: PredicateContext): Expression = withOrigin(ctx) {
    // Invert a predicate if it has a valid NOT clause.
    def invertIfNotDefined(e: Expression): Expression = ctx.NOT match {
      case null => e
      case not => Not(e)
    }
    // IN over a struct-typed value compares per field, so expose the struct's value
    // expressions; any other expression is compared as a single value.
    def getValueExpressions(e: Expression): Seq[Expression] = e match {
      case c: CreateNamedStruct => c.valExprs
      case other => Seq(other)
    }
    // For the ILIKE ANY/SOME/ALL fast path, lower-case both the input and the patterns.
    def lowerLikeArgsIfNeeded(
        expr: Expression,
        patterns: Seq[UTF8String]): (Expression, Seq[UTF8String]) = ctx.kind.getType match {
      // scalastyle:off caselocale
      case SqlBaseParser.ILIKE => (Lower(expr), patterns.map(_.toLowerCase))
      // scalastyle:on caselocale
      case _ => (expr, patterns)
    }
    // Pick the LIKE variant matching the parsed keyword (LIKE vs ILIKE).
    def getLike(expr: Expression, pattern: Expression): Expression = ctx.kind.getType match {
      case SqlBaseParser.ILIKE => new ILike(expr, pattern)
      case _ => new Like(expr, pattern)
    }
    // Create the predicate.
    ctx.kind.getType match {
      case SqlBaseParser.BETWEEN =>
        // BETWEEN is translated to lower <= e && e <= upper
        invertIfNotDefined(And(
          GreaterThanOrEqual(e, expression(ctx.lower)),
          LessThanOrEqual(e, expression(ctx.upper))))
      case SqlBaseParser.IN if ctx.query != null =>
        // IN (<subquery>)
        invertIfNotDefined(InSubquery(getValueExpressions(e), ListQuery(plan(ctx.query))))
      case SqlBaseParser.IN =>
        // IN (<value list>)
        invertIfNotDefined(In(e, ctx.expression.asScala.map(expression).toSeq))
      case SqlBaseParser.LIKE | SqlBaseParser.ILIKE =>
        Option(ctx.quantifier).map(_.getType) match {
          case Some(SqlBaseParser.ANY) | Some(SqlBaseParser.SOME) =>
            validate(!ctx.expression.isEmpty, "Expected something between '(' and ')'.", ctx)
            val expressions = expressionList(ctx.expression)
            if (expressions.forall(_.foldable) && expressions.forall(_.dataType == StringType)) {
              // If there are many pattern expressions, will throw StackOverflowError.
              // So we use LikeAny or NotLikeAny instead.
              val patterns = expressions.map(_.eval(EmptyRow).asInstanceOf[UTF8String])
              val (expr, pat) = lowerLikeArgsIfNeeded(e, patterns)
              ctx.NOT match {
                case null => LikeAny(expr, pat)
                case _ => NotLikeAny(expr, pat)
              }
            } else {
              // Non-foldable or non-string patterns: expand into an OR chain of LIKEs.
              ctx.expression.asScala.map(expression)
                .map(p => invertIfNotDefined(getLike(e, p))).toSeq.reduceLeft(Or)
            }
          case Some(SqlBaseParser.ALL) =>
            validate(!ctx.expression.isEmpty, "Expected something between '(' and ')'.", ctx)
            val expressions = expressionList(ctx.expression)
            if (expressions.forall(_.foldable) && expressions.forall(_.dataType == StringType)) {
              // If there are many pattern expressions, will throw StackOverflowError.
              // So we use LikeAll or NotLikeAll instead.
              val patterns = expressions.map(_.eval(EmptyRow).asInstanceOf[UTF8String])
              val (expr, pat) = lowerLikeArgsIfNeeded(e, patterns)
              ctx.NOT match {
                case null => LikeAll(expr, pat)
                case _ => NotLikeAll(expr, pat)
              }
            } else {
              // Non-foldable or non-string patterns: expand into an AND chain of LIKEs.
              ctx.expression.asScala.map(expression)
                .map(p => invertIfNotDefined(getLike(e, p))).toSeq.reduceLeft(And)
            }
          case _ =>
            // Plain LIKE/ILIKE with an optional single-character ESCAPE clause.
            val escapeChar = Option(ctx.escapeChar).map(string).map { str =>
              if (str.length != 1) {
                throw QueryParsingErrors.invalidEscapeStringError(ctx)
              }
              str.charAt(0)
            }.getOrElse('\\\\')
            val likeExpr = ctx.kind.getType match {
              case SqlBaseParser.ILIKE => new ILike(e, expression(ctx.pattern), escapeChar)
              case _ => Like(e, expression(ctx.pattern), escapeChar)
            }
            invertIfNotDefined(likeExpr)
        }
      case SqlBaseParser.RLIKE =>
        invertIfNotDefined(RLike(e, expression(ctx.pattern)))
      case SqlBaseParser.NULL if ctx.NOT != null =>
        IsNotNull(e)
      case SqlBaseParser.NULL =>
        IsNull(e)
      // IS (NOT) TRUE/FALSE uses null-safe equality against the boolean literal.
      case SqlBaseParser.TRUE => ctx.NOT match {
        case null => EqualNullSafe(e, Literal(true))
        case _ => Not(EqualNullSafe(e, Literal(true)))
      }
      case SqlBaseParser.FALSE => ctx.NOT match {
        case null => EqualNullSafe(e, Literal(false))
        case _ => Not(EqualNullSafe(e, Literal(false)))
      }
      case SqlBaseParser.UNKNOWN => ctx.NOT match {
        case null => IsUnknown(e)
        case _ => IsNotUnknown(e)
      }
      case SqlBaseParser.DISTINCT if ctx.NOT != null =>
        EqualNullSafe(e, expression(ctx.right))
      case SqlBaseParser.DISTINCT =>
        Not(EqualNullSafe(e, expression(ctx.right)))
    }
  }
/**
* Create a binary arithmetic expression. The following arithmetic operators are supported:
* - Multiplication: '*'
* - Division: '/'
* - Hive Long Division: 'DIV'
* - Modulo: '%'
* - Addition: '+'
* - Subtraction: '-'
* - Binary AND: '&'
* - Binary XOR
* - Binary OR: '|'
*/
override def visitArithmeticBinary(ctx: ArithmeticBinaryContext): Expression = withOrigin(ctx) {
val left = expression(ctx.left)
val right = expression(ctx.right)
ctx.operator.getType match {
case SqlBaseParser.ASTERISK =>
Multiply(left, right)
case SqlBaseParser.SLASH =>
Divide(left, right)
case SqlBaseParser.PERCENT =>
Remainder(left, right)
case SqlBaseParser.DIV =>
IntegralDivide(left, right)
case SqlBaseParser.PLUS =>
Add(left, right)
case SqlBaseParser.MINUS =>
Subtract(left, right)
case SqlBaseParser.CONCAT_PIPE =>
Concat(left :: right :: Nil)
case SqlBaseParser.AMPERSAND =>
BitwiseAnd(left, right)
case SqlBaseParser.HAT =>
BitwiseXor(left, right)
case SqlBaseParser.PIPE =>
BitwiseOr(left, right)
}
}
/**
* Create a unary arithmetic expression. The following arithmetic operators are supported:
* - Plus: '+'
* - Minus: '-'
* - Bitwise Not: '~'
*/
override def visitArithmeticUnary(ctx: ArithmeticUnaryContext): Expression = withOrigin(ctx) {
val value = expression(ctx.valueExpression)
ctx.operator.getType match {
case SqlBaseParser.PLUS =>
UnaryPositive(value)
case SqlBaseParser.MINUS =>
UnaryMinus(value)
case SqlBaseParser.TILDE =>
BitwiseNot(value)
}
}
override def visitCurrentLike(ctx: CurrentLikeContext): Expression = withOrigin(ctx) {
if (conf.ansiEnabled) {
ctx.name.getType match {
case SqlBaseParser.CURRENT_DATE =>
CurrentDate()
case SqlBaseParser.CURRENT_TIMESTAMP =>
CurrentTimestamp()
case SqlBaseParser.CURRENT_USER =>
CurrentUser()
}
} else {
// If the parser is not in ansi mode, we should return `UnresolvedAttribute`, in case there
// are columns named `CURRENT_DATE` or `CURRENT_TIMESTAMP`.
UnresolvedAttribute.quoted(ctx.name.getText)
}
}
/**
* Create a [[Cast]] expression.
*/
override def visitCast(ctx: CastContext): Expression = withOrigin(ctx) {
val rawDataType = typedVisit[DataType](ctx.dataType())
val dataType = CharVarcharUtils.replaceCharVarcharWithStringForCast(rawDataType)
val cast = ctx.name.getType match {
case SqlBaseParser.CAST =>
Cast(expression(ctx.expression), dataType)
case SqlBaseParser.TRY_CAST =>
TryCast(expression(ctx.expression), dataType)
}
cast.setTagValue(Cast.USER_SPECIFIED_CAST, true)
cast
}
/**
* Create a [[CreateStruct]] expression.
*/
override def visitStruct(ctx: StructContext): Expression = withOrigin(ctx) {
CreateStruct.create(ctx.argument.asScala.map(expression).toSeq)
}
/**
* Create a [[First]] expression.
*/
override def visitFirst(ctx: FirstContext): Expression = withOrigin(ctx) {
val ignoreNullsExpr = ctx.IGNORE != null
First(expression(ctx.expression), ignoreNullsExpr).toAggregateExpression()
}
/**
* Create a [[Last]] expression.
*/
override def visitLast(ctx: LastContext): Expression = withOrigin(ctx) {
val ignoreNullsExpr = ctx.IGNORE != null
Last(expression(ctx.expression), ignoreNullsExpr).toAggregateExpression()
}
/**
* Create a Position expression.
*/
override def visitPosition(ctx: PositionContext): Expression = withOrigin(ctx) {
new StringLocate(expression(ctx.substr), expression(ctx.str))
}
/**
* Create a Extract expression.
*/
override def visitExtract(ctx: ExtractContext): Expression = withOrigin(ctx) {
val arguments = Seq(Literal(ctx.field.getText), expression(ctx.source))
UnresolvedFunction("extract", arguments, isDistinct = false)
}
/**
* Create a Substring/Substr expression.
*/
override def visitSubstring(ctx: SubstringContext): Expression = withOrigin(ctx) {
if (ctx.len != null) {
Substring(expression(ctx.str), expression(ctx.pos), expression(ctx.len))
} else {
new Substring(expression(ctx.str), expression(ctx.pos))
}
}
/**
* Create a Trim expression.
*/
override def visitTrim(ctx: TrimContext): Expression = withOrigin(ctx) {
val srcStr = expression(ctx.srcStr)
val trimStr = Option(ctx.trimStr).map(expression)
Option(ctx.trimOption).map(_.getType).getOrElse(SqlBaseParser.BOTH) match {
case SqlBaseParser.BOTH =>
StringTrim(srcStr, trimStr)
case SqlBaseParser.LEADING =>
StringTrimLeft(srcStr, trimStr)
case SqlBaseParser.TRAILING =>
StringTrimRight(srcStr, trimStr)
case other =>
throw QueryParsingErrors.trimOptionUnsupportedError(other, ctx)
}
}
/**
* Create a Overlay expression.
*/
override def visitOverlay(ctx: OverlayContext): Expression = withOrigin(ctx) {
val input = expression(ctx.input)
val replace = expression(ctx.replace)
val position = expression(ctx.position)
val lengthOpt = Option(ctx.length).map(expression)
lengthOpt match {
case Some(length) => Overlay(input, replace, position, length)
case None => new Overlay(input, replace, position)
}
}
/**
* Create a (windowed) Function expression.
*/
  override def visitFunctionCall(ctx: FunctionCallContext): Expression = withOrigin(ctx) {
    // Create the function call.
    val name = ctx.functionName.getText
    // DISTINCT is only set when a set quantifier is present and is the DISTINCT token.
    val isDistinct = Option(ctx.setQuantifier()).exists(_.DISTINCT != null)
    // Call `toSeq`, otherwise `ctx.argument.asScala.map(expression)` is `Buffer` in Scala 2.13
    val arguments = ctx.argument.asScala.map(expression).toSeq match {
      case Seq(UnresolvedStar(None))
        if name.toLowerCase(Locale.ROOT) == "count" && !isDistinct =>
        // Transform COUNT(*) into COUNT(1).
        Seq(Literal(1))
      case expressions =>
        expressions
    }
    // Optional FILTER (WHERE ...) clause.
    val filter = Option(ctx.where).map(expression(_))
    // IGNORE NULLS / RESPECT NULLS option; absent means respect nulls.
    val ignoreNulls =
      Option(ctx.nullsOption).map(_.getType == SqlBaseParser.IGNORE).getOrElse(false)
    val function = UnresolvedFunction(
      getFunctionMultiparts(ctx.functionName), arguments, isDistinct, filter, ignoreNulls)
    // Check if the function is evaluated in a windowed context.
    ctx.windowSpec match {
      // OVER <window-name>: reference to a named window definition.
      case spec: WindowRefContext =>
        UnresolvedWindowExpression(function, visitWindowRef(spec))
      // OVER (...): inline window definition.
      case spec: WindowDefContext =>
        WindowExpression(function, visitWindowDef(spec))
      case _ => function
    }
  }
/**
* Create a function database (optional) and name pair.
*/
protected def visitFunctionName(ctx: QualifiedNameContext): FunctionIdentifier = {
visitFunctionName(ctx, ctx.identifier().asScala.map(_.getText).toSeq)
}
/**
* Create a function database (optional) and name pair.
*/
private def visitFunctionName(ctx: ParserRuleContext, texts: Seq[String]): FunctionIdentifier = {
texts match {
case Seq(db, fn) => FunctionIdentifier(fn, Option(db))
case Seq(fn) => FunctionIdentifier(fn, None)
case other =>
throw QueryParsingErrors.functionNameUnsupportedError(texts.mkString("."), ctx)
}
}
/**
* Get a function identifier consist by database (optional) and name.
*/
protected def getFunctionIdentifier(ctx: FunctionNameContext): FunctionIdentifier = {
if (ctx.qualifiedName != null) {
visitFunctionName(ctx.qualifiedName)
} else {
FunctionIdentifier(ctx.getText, None)
}
}
protected def getFunctionMultiparts(ctx: FunctionNameContext): Seq[String] = {
if (ctx.qualifiedName != null) {
ctx.qualifiedName().identifier().asScala.map(_.getText).toSeq
} else {
Seq(ctx.getText)
}
}
/**
* Create an [[LambdaFunction]].
*/
override def visitLambda(ctx: LambdaContext): Expression = withOrigin(ctx) {
val arguments = ctx.identifier().asScala.map { name =>
UnresolvedNamedLambdaVariable(UnresolvedAttribute.quoted(name.getText).nameParts)
}
val function = expression(ctx.expression).transformUp {
case a: UnresolvedAttribute => UnresolvedNamedLambdaVariable(a.nameParts)
}
LambdaFunction(function, arguments.toSeq)
}
/**
* Create a reference to a window frame, i.e. [[WindowSpecReference]].
*/
  override def visitWindowRef(ctx: WindowRefContext): WindowSpecReference = withOrigin(ctx) {
    // Reference to a named window definition, e.g. `OVER w`.
    WindowSpecReference(ctx.name.getText)
  }
/**
* Create a window definition, i.e. [[WindowSpecDefinition]].
*/
override def visitWindowDef(ctx: WindowDefContext): WindowSpecDefinition = withOrigin(ctx) {
// CLUSTER BY ... | PARTITION BY ... ORDER BY ...
val partition = ctx.partition.asScala.map(expression)
val order = ctx.sortItem.asScala.map(visitSortItem)
// RANGE/ROWS BETWEEN ...
val frameSpecOption = Option(ctx.windowFrame).map { frame =>
val frameType = frame.frameType.getType match {
case SqlBaseParser.RANGE => RangeFrame
case SqlBaseParser.ROWS => RowFrame
}
SpecifiedWindowFrame(
frameType,
visitFrameBound(frame.start),
Option(frame.end).map(visitFrameBound).getOrElse(CurrentRow))
}
WindowSpecDefinition(
partition.toSeq,
order.toSeq,
frameSpecOption.getOrElse(UnspecifiedFrame))
}
/**
* Create or resolve a frame boundary expressions.
*/
override def visitFrameBound(ctx: FrameBoundContext): Expression = withOrigin(ctx) {
def value: Expression = {
val e = expression(ctx.expression)
validate(e.resolved && e.foldable, "Frame bound value must be a literal.", ctx)
e
}
ctx.boundType.getType match {
case SqlBaseParser.PRECEDING if ctx.UNBOUNDED != null =>
UnboundedPreceding
case SqlBaseParser.PRECEDING =>
UnaryMinus(value)
case SqlBaseParser.CURRENT =>
CurrentRow
case SqlBaseParser.FOLLOWING if ctx.UNBOUNDED != null =>
UnboundedFollowing
case SqlBaseParser.FOLLOWING =>
value
}
}
/**
* Create a [[CreateStruct]] expression.
*/
override def visitRowConstructor(ctx: RowConstructorContext): Expression = withOrigin(ctx) {
CreateStruct(ctx.namedExpression().asScala.map(expression).toSeq)
}
/**
* Create a [[ScalarSubquery]] expression.
*/
override def visitSubqueryExpression(
ctx: SubqueryExpressionContext): Expression = withOrigin(ctx) {
ScalarSubquery(plan(ctx.query))
}
/**
* Create a value based [[CaseWhen]] expression. This has the following SQL form:
* {{{
* CASE [expression]
* WHEN [value] THEN [expression]
* ...
* ELSE [expression]
* END
* }}}
*/
override def visitSimpleCase(ctx: SimpleCaseContext): Expression = withOrigin(ctx) {
val e = expression(ctx.value)
val branches = ctx.whenClause.asScala.map { wCtx =>
(EqualTo(e, expression(wCtx.condition)), expression(wCtx.result))
}
CaseWhen(branches.toSeq, Option(ctx.elseExpression).map(expression))
}
/**
* Create a condition based [[CaseWhen]] expression. This has the following SQL syntax:
* {{{
* CASE
* WHEN [predicate] THEN [expression]
* ...
* ELSE [expression]
* END
* }}}
*
* @param ctx the parse tree
* */
override def visitSearchedCase(ctx: SearchedCaseContext): Expression = withOrigin(ctx) {
val branches = ctx.whenClause.asScala.map { wCtx =>
(expression(wCtx.condition), expression(wCtx.result))
}
CaseWhen(branches.toSeq, Option(ctx.elseExpression).map(expression))
}
/**
   * Currently, regex column references are supported only in expressions of SELECT
   * statements; in other places, e.g., where `(a)?+.+` = 2, regex are not meaningful.
*/
private def canApplyRegex(ctx: ParserRuleContext): Boolean = withOrigin(ctx) {
var parent = ctx.getParent
while (parent != null) {
if (parent.isInstanceOf[NamedExpressionContext]) return true
parent = parent.getParent
}
return false
}
/**
* Create a dereference expression. The return type depends on the type of the parent.
* If the parent is an [[UnresolvedAttribute]], it can be a [[UnresolvedAttribute]] or
* a [[UnresolvedRegex]] for regex quoted in ``; if the parent is some other expression,
* it can be [[UnresolvedExtractValue]].
*/
  override def visitDereference(ctx: DereferenceContext): Expression = withOrigin(ctx) {
    val attr = ctx.fieldName.getText
    expression(ctx.base) match {
      // Attribute parent: either extend the attribute's name parts, or, when the field
      // name is a backquoted regex and regexes are allowed here, build a regex reference.
      case unresolved_attr @ UnresolvedAttribute(nameParts) =>
        ctx.fieldName.getStart.getText match {
          case escapedIdentifier(columnNameRegex)
            if conf.supportQuotedRegexColumnName && canApplyRegex(ctx) =>
            UnresolvedRegex(columnNameRegex, Some(unresolved_attr.name),
              conf.caseSensitiveAnalysis)
          case _ =>
            UnresolvedAttribute(nameParts :+ attr)
        }
      // Non-attribute parent: generic field extraction by literal field name.
      case e =>
        UnresolvedExtractValue(e, Literal(attr))
    }
  }
/**
* Create an [[UnresolvedAttribute]] expression or a [[UnresolvedRegex]] if it is a regex
* quoted in ``
*/
  override def visitColumnReference(ctx: ColumnReferenceContext): Expression = withOrigin(ctx) {
    ctx.getStart.getText match {
      // Backquoted text matching the escaped-identifier pattern becomes a regex column
      // reference, but only where regexes are allowed (see canApplyRegex).
      case escapedIdentifier(columnNameRegex)
        if conf.supportQuotedRegexColumnName && canApplyRegex(ctx) =>
        UnresolvedRegex(columnNameRegex, None, conf.caseSensitiveAnalysis)
      case _ =>
        UnresolvedAttribute.quoted(ctx.getText)
    }
  }
/**
* Create an [[UnresolvedExtractValue]] expression, this is used for subscript access to an array.
*/
override def visitSubscript(ctx: SubscriptContext): Expression = withOrigin(ctx) {
UnresolvedExtractValue(expression(ctx.value), expression(ctx.index))
}
/**
   * Create an expression for an expression between parentheses. This is needed because the ANTLR
   * visitor cannot automatically convert the nested context into an expression.
*/
  override def visitParenthesizedExpression(
      ctx: ParenthesizedExpressionContext): Expression = withOrigin(ctx) {
    // Parentheses carry no semantics of their own; just unwrap the inner expression.
    expression(ctx.expression)
  }
/**
* Create a [[SortOrder]] expression.
*/
override def visitSortItem(ctx: SortItemContext): SortOrder = withOrigin(ctx) {
val direction = if (ctx.DESC != null) {
Descending
} else {
Ascending
}
val nullOrdering = if (ctx.FIRST != null) {
NullsFirst
} else if (ctx.LAST != null) {
NullsLast
} else {
direction.defaultNullOrdering
}
SortOrder(expression(ctx.expression), direction, nullOrdering, Seq.empty)
}
/**
* Create a typed Literal expression. A typed literal has the following SQL syntax:
* {{{
* [TYPE] '[VALUE]'
* }}}
* Currently Date, Timestamp, Interval and Binary typed literals are supported.
*/
  override def visitTypeConstructor(ctx: TypeConstructorContext): Literal = withOrigin(ctx) {
    val value = string(ctx.STRING)
    val valueType = ctx.identifier.getText.toUpperCase(Locale.ROOT)
    // Parse `value` with `f` into a literal of type `t`, or raise a parse error.
    def toLiteral[T](f: UTF8String => Option[T], t: DataType): Literal = {
      f(UTF8String.fromString(value)).map(Literal(_, t)).getOrElse {
        throw QueryParsingErrors.cannotParseValueTypeError(valueType, value, ctx)
      }
    }
    // Build a timestamp-with-local-time-zone literal, trying special values
    // (via convertSpecialTimestamp) before generic string parsing.
    def constructTimestampLTZLiteral(value: String): Literal = {
      val zoneId = getZoneId(conf.sessionLocalTimeZone)
      val specialTs = convertSpecialTimestamp(value, zoneId).map(Literal(_, TimestampType))
      specialTs.getOrElse(toLiteral(stringToTimestamp(_, zoneId), TimestampType))
    }
    try {
      valueType match {
        case "DATE" =>
          // Special date values are tried first, then generic date parsing.
          val zoneId = getZoneId(conf.sessionLocalTimeZone)
          val specialDate = convertSpecialDate(value, zoneId).map(Literal(_, DateType))
          specialDate.getOrElse(toLiteral(stringToDate, DateType))
        case "TIMESTAMP_NTZ" =>
          convertSpecialTimestampNTZ(value, getZoneId(conf.sessionLocalTimeZone))
            .map(Literal(_, TimestampNTZType))
            .getOrElse(toLiteral(stringToTimestampWithoutTimeZone, TimestampNTZType))
        case "TIMESTAMP_LTZ" =>
          constructTimestampLTZLiteral(value)
        case "TIMESTAMP" =>
          // Plain TIMESTAMP follows the session-configured timestamp type.
          SQLConf.get.timestampType match {
            case TimestampNTZType =>
              convertSpecialTimestampNTZ(value, getZoneId(conf.sessionLocalTimeZone))
                .map(Literal(_, TimestampNTZType))
                .getOrElse {
                  val containsTimeZonePart =
                    DateTimeUtils.parseTimestampString(UTF8String.fromString(value))._2.isDefined
                  // If the input string contains time zone part, return a timestamp with local time
                  // zone literal.
                  if (containsTimeZonePart) {
                    constructTimestampLTZLiteral(value)
                  } else {
                    toLiteral(stringToTimestampWithoutTimeZone, TimestampNTZType)
                  }
                }
            case TimestampType =>
              constructTimestampLTZLiteral(value)
          }
        case "INTERVAL" =>
          val interval = try {
            IntervalUtils.stringToInterval(UTF8String.fromString(value))
          } catch {
            case e: IllegalArgumentException =>
              // Re-raise as a parser error, preserving the original stack trace.
              val ex = QueryParsingErrors.cannotParseIntervalValueError(value, ctx)
              ex.setStackTrace(e.getStackTrace)
              throw ex
          }
          if (!conf.legacyIntervalEnabled) {
            // Extract the unit words from the literal text so an ANSI interval
            // type can be chosen for the value.
            val units = value
              .split("\\\\s")
              .map(_.toLowerCase(Locale.ROOT).stripSuffix("s"))
              .filter(s => s != "interval" && s.matches("[a-z]+"))
            constructMultiUnitsIntervalLiteral(ctx, interval, units)
          } else {
            Literal(interval, CalendarIntervalType)
          }
        case "X" =>
          // Hex binary literal; odd-length input is left-padded with a single '0'.
          val padding = if (value.length % 2 != 0) "0" else ""
          try {
            Literal(Hex.decodeHex(padding + value))
          } catch {
            case _: DecoderException =>
              throw new IllegalArgumentException(
                s"contains illegal character for hexBinary: $padding$value");
          }
        case other =>
          throw QueryParsingErrors.literalValueTypeUnsupportedError(other, ctx)
      }
    } catch {
      case e: IllegalArgumentException =>
        throw QueryParsingErrors.parsingValueTypeError(e, valueType, ctx)
    }
  }
/**
* Create a NULL literal expression.
*/
  override def visitNullLiteral(ctx: NullLiteralContext): Literal = withOrigin(ctx) {
    // The SQL NULL keyword maps to a null literal.
    Literal(null)
  }
/**
* Create a Boolean literal expression.
*/
override def visitBooleanLiteral(ctx: BooleanLiteralContext): Literal = withOrigin(ctx) {
if (ctx.getText.toBoolean) {
Literal.TrueLiteral
} else {
Literal.FalseLiteral
}
}
/**
* Create an integral literal expression. The code selects the most narrow integral type
* possible, either a BigDecimal, a Long or an Integer is returned.
*/
override def visitIntegerLiteral(ctx: IntegerLiteralContext): Literal = withOrigin(ctx) {
BigDecimal(ctx.getText) match {
case v if v.isValidInt =>
Literal(v.intValue)
case v if v.isValidLong =>
Literal(v.longValue)
case v => Literal(v.underlying())
}
}
/**
* Create a decimal literal for a regular decimal number.
*/
override def visitDecimalLiteral(ctx: DecimalLiteralContext): Literal = withOrigin(ctx) {
Literal(BigDecimal(ctx.getText).underlying())
}
/**
* Create a decimal literal for a regular decimal number or a scientific decimal number.
*/
override def visitLegacyDecimalLiteral(
ctx: LegacyDecimalLiteralContext): Literal = withOrigin(ctx) {
Literal(BigDecimal(ctx.getText).underlying())
}
/**
* Create a double literal for number with an exponent, e.g. 1E-30
*/
override def visitExponentLiteral(ctx: ExponentLiteralContext): Literal = {
numericLiteral(ctx, ctx.getText, /* exponent values don't have a suffix */
Double.MinValue, Double.MaxValue, DoubleType.simpleString)(_.toDouble)
}
/** Create a numeric literal expression. */
private def numericLiteral(
ctx: NumberContext,
rawStrippedQualifier: String,
minValue: BigDecimal,
maxValue: BigDecimal,
typeName: String)(converter: String => Any): Literal = withOrigin(ctx) {
try {
val rawBigDecimal = BigDecimal(rawStrippedQualifier)
if (rawBigDecimal < minValue || rawBigDecimal > maxValue) {
throw QueryParsingErrors.invalidNumericLiteralRangeError(
rawStrippedQualifier, minValue, maxValue, typeName, ctx)
}
Literal(converter(rawStrippedQualifier))
} catch {
case e: NumberFormatException =>
throw new ParseException(e.getMessage, ctx)
}
}
/**
* Create a Byte Literal expression.
*/
override def visitTinyIntLiteral(ctx: TinyIntLiteralContext): Literal = {
val rawStrippedQualifier = ctx.getText.substring(0, ctx.getText.length - 1)
numericLiteral(ctx, rawStrippedQualifier,
Byte.MinValue, Byte.MaxValue, ByteType.simpleString)(_.toByte)
}
/**
* Create a Short Literal expression.
*/
override def visitSmallIntLiteral(ctx: SmallIntLiteralContext): Literal = {
val rawStrippedQualifier = ctx.getText.substring(0, ctx.getText.length - 1)
numericLiteral(ctx, rawStrippedQualifier,
Short.MinValue, Short.MaxValue, ShortType.simpleString)(_.toShort)
}
/**
* Create a Long Literal expression.
*/
override def visitBigIntLiteral(ctx: BigIntLiteralContext): Literal = {
val rawStrippedQualifier = ctx.getText.substring(0, ctx.getText.length - 1)
numericLiteral(ctx, rawStrippedQualifier,
Long.MinValue, Long.MaxValue, LongType.simpleString)(_.toLong)
}
/**
* Create a Float Literal expression.
*/
override def visitFloatLiteral(ctx: FloatLiteralContext): Literal = {
val rawStrippedQualifier = ctx.getText.substring(0, ctx.getText.length - 1)
numericLiteral(ctx, rawStrippedQualifier,
Float.MinValue, Float.MaxValue, FloatType.simpleString)(_.toFloat)
}
/**
* Create a Double Literal expression.
*/
override def visitDoubleLiteral(ctx: DoubleLiteralContext): Literal = {
val rawStrippedQualifier = ctx.getText.substring(0, ctx.getText.length - 1)
numericLiteral(ctx, rawStrippedQualifier,
Double.MinValue, Double.MaxValue, DoubleType.simpleString)(_.toDouble)
}
/**
* Create a BigDecimal Literal expression.
*/
override def visitBigDecimalLiteral(ctx: BigDecimalLiteralContext): Literal = {
val raw = ctx.getText.substring(0, ctx.getText.length - 2)
try {
Literal(BigDecimal(raw).underlying())
} catch {
case e: AnalysisException =>
throw new ParseException(e.message, ctx)
}
}
/**
* Create a String literal expression.
*/
override def visitStringLiteral(ctx: StringLiteralContext): Literal = withOrigin(ctx) {
Literal(createString(ctx))
}
/**
* Create a String from a string literal context. This supports multiple consecutive string
* literals, these are concatenated, for example this expression "'hello' 'world'" will be
* converted into "helloworld".
*
* Special characters can be escaped by using Hive/C-style escaping.
*/
private def createString(ctx: StringLiteralContext): String = {
if (conf.escapedStringLiterals) {
ctx.STRING().asScala.map(stringWithoutUnescape).mkString
} else {
ctx.STRING().asScala.map(string).mkString
}
}
/**
* Create an [[UnresolvedRelation]] from a multi-part identifier context.
*/
private def createUnresolvedRelation(
ctx: MultipartIdentifierContext): UnresolvedRelation = withOrigin(ctx) {
UnresolvedRelation(visitMultipartIdentifier(ctx))
}
/**
* Create an [[UnresolvedTable]] from a multi-part identifier context.
*/
private def createUnresolvedTable(
ctx: MultipartIdentifierContext,
commandName: String,
relationTypeMismatchHint: Option[String] = None): UnresolvedTable = withOrigin(ctx) {
UnresolvedTable(visitMultipartIdentifier(ctx), commandName, relationTypeMismatchHint)
}
/**
* Create an [[UnresolvedView]] from a multi-part identifier context.
*/
private def createUnresolvedView(
ctx: MultipartIdentifierContext,
commandName: String,
allowTemp: Boolean = true,
relationTypeMismatchHint: Option[String] = None): UnresolvedView = withOrigin(ctx) {
UnresolvedView(visitMultipartIdentifier(ctx), commandName, allowTemp, relationTypeMismatchHint)
}
/**
* Create an [[UnresolvedTableOrView]] from a multi-part identifier context.
*/
private def createUnresolvedTableOrView(
ctx: MultipartIdentifierContext,
commandName: String,
allowTempView: Boolean = true): UnresolvedTableOrView = withOrigin(ctx) {
UnresolvedTableOrView(visitMultipartIdentifier(ctx), commandName, allowTempView)
}
/**
* Construct an [[Literal]] from [[CalendarInterval]] and
* units represented as a [[Seq]] of [[String]].
*/
  private def constructMultiUnitsIntervalLiteral(
      ctx: ParserRuleContext,
      calendarInterval: CalendarInterval,
      units: Seq[String]): Literal = {
    // NOTE(review): `Set` here must be the mutable Set brought in by a file-level import;
    // `+=` on a `val` would not compile against scala.collection.immutable.Set — confirm.
    val yearMonthFields = Set.empty[Byte]
    val dayTimeFields = Set.empty[Byte]
    // Classify each unit into the year-month or day-time field sets.
    for (unit <- units) {
      if (YearMonthIntervalType.stringToField.contains(unit)) {
        yearMonthFields += YearMonthIntervalType.stringToField(unit)
      } else if (DayTimeIntervalType.stringToField.contains(unit)) {
        dayTimeFields += DayTimeIntervalType.stringToField(unit)
      } else if (unit == "week") {
        // "week" has no dedicated field; it is represented via the DAY field.
        dayTimeFields += DayTimeIntervalType.DAY
      } else {
        // Sub-second units are represented via the SECOND field.
        assert(unit == "millisecond" || unit == "microsecond")
        dayTimeFields += DayTimeIntervalType.SECOND
      }
    }
    if (yearMonthFields.nonEmpty) {
      if (dayTimeFields.nonEmpty) {
        // Year-month and day-time units cannot be combined in one ANSI interval literal.
        val literalStr = source(ctx)
        throw QueryParsingErrors.mixedIntervalUnitsError(literalStr, ctx)
      }
      Literal(
        calendarInterval.months,
        YearMonthIntervalType(yearMonthFields.min, yearMonthFields.max)
      )
    } else {
      // Day-time interval: store the total duration in microseconds.
      Literal(
        IntervalUtils.getDuration(calendarInterval, TimeUnit.MICROSECONDS),
        DayTimeIntervalType(dayTimeFields.min, dayTimeFields.max))
    }
  }
/**
* Create a [[CalendarInterval]] or ANSI interval literal expression.
* Two syntaxes are supported:
* - multiple unit value pairs, for instance: interval 2 months 2 days.
* - from-to unit, for instance: interval '1-2' year to month.
*/
  override def visitInterval(ctx: IntervalContext): Literal = withOrigin(ctx) {
    val calendarInterval = parseIntervalLiteral(ctx)
    if (ctx.errorCapturingUnitToUnitInterval != null && !conf.legacyIntervalEnabled) {
      // Check the `to` unit to distinguish year-month and day-time intervals because
      // `CalendarInterval` doesn't have enough info. For instance, new CalendarInterval(0, 0, 0)
      // can be derived from INTERVAL '0-0' YEAR TO MONTH as well as from
      // INTERVAL '0 00:00:00' DAY TO SECOND.
      val fromUnit =
        ctx.errorCapturingUnitToUnitInterval.body.from.getText.toLowerCase(Locale.ROOT)
      val toUnit = ctx.errorCapturingUnitToUnitInterval.body.to.getText.toLowerCase(Locale.ROOT)
      if (toUnit == "month") {
        // Year-month interval: only the months component may be populated.
        assert(calendarInterval.days == 0 && calendarInterval.microseconds == 0)
        val start = YearMonthIntervalType.stringToField(fromUnit)
        Literal(calendarInterval.months, YearMonthIntervalType(start, YearMonthIntervalType.MONTH))
      } else {
        // Day-time interval: months must be empty; store the duration in microseconds.
        assert(calendarInterval.months == 0)
        val micros = IntervalUtils.getDuration(calendarInterval, TimeUnit.MICROSECONDS)
        val start = DayTimeIntervalType.stringToField(fromUnit)
        val end = DayTimeIntervalType.stringToField(toUnit)
        Literal(micros, DayTimeIntervalType(start, end))
      }
    } else if (ctx.errorCapturingMultiUnitsInterval != null && !conf.legacyIntervalEnabled) {
      // Multi-unit form: normalize unit names (lower-case, singular) before building
      // the ANSI interval literal.
      val units =
        ctx.errorCapturingMultiUnitsInterval.body.unit.asScala.map(
          _.getText.toLowerCase(Locale.ROOT).stripSuffix("s")).toSeq
      constructMultiUnitsIntervalLiteral(ctx, calendarInterval, units)
    } else {
      // Legacy mode: keep the CalendarInterval type.
      Literal(calendarInterval, CalendarIntervalType)
    }
  }
/**
* Create a [[CalendarInterval]] object
*/
  protected def parseIntervalLiteral(ctx: IntervalContext): CalendarInterval = withOrigin(ctx) {
    if (ctx.errorCapturingMultiUnitsInterval != null) {
      // Multi-unit form, e.g. INTERVAL 2 MONTHS 2 DAYS; a trailing from-to part is invalid.
      val innerCtx = ctx.errorCapturingMultiUnitsInterval
      if (innerCtx.unitToUnitInterval != null) {
        throw QueryParsingErrors.moreThanOneFromToUnitInIntervalLiteralError(
          innerCtx.unitToUnitInterval)
      }
      visitMultiUnitsInterval(innerCtx.multiUnitsInterval)
    } else if (ctx.errorCapturingUnitToUnitInterval != null) {
      // From-to form, e.g. INTERVAL '1-2' YEAR TO MONTH; only one from-to part is allowed.
      val innerCtx = ctx.errorCapturingUnitToUnitInterval
      if (innerCtx.error1 != null || innerCtx.error2 != null) {
        val errorCtx = if (innerCtx.error1 != null) innerCtx.error1 else innerCtx.error2
        throw QueryParsingErrors.moreThanOneFromToUnitInIntervalLiteralError(errorCtx)
      }
      visitUnitToUnitInterval(innerCtx.body)
    } else {
      // Neither recognized interval form was present.
      throw QueryParsingErrors.invalidIntervalLiteralError(ctx)
    }
  }
/**
* Creates a [[CalendarInterval]] with multiple unit value pairs, e.g. 1 YEAR 2 DAYS.
*/
  override def visitMultiUnitsInterval(ctx: MultiUnitsIntervalContext): CalendarInterval = {
    withOrigin(ctx) {
      val units = ctx.unit.asScala
      val values = ctx.intervalValue().asScala
      try {
        // The grammar pairs each value with a unit, so the two lists must line up.
        assert(units.length == values.length)
        val kvs = units.indices.map { i =>
          val u = units(i).getText
          val v = if (values(i).STRING() != null) {
            val value = string(values(i).STRING())
            // SPARK-32840: For invalid cases, e.g. INTERVAL '1 day 2' hour,
            // INTERVAL 'interval 1' day, we need to check ahead before they are concatenated with
            // units and become valid ones, e.g. '1 day 2 hour'.
            // Ideally, we only ensure the value parts don't contain any units here.
            if (value.exists(Character.isLetter)) {
              throw QueryParsingErrors.invalidIntervalFormError(value, ctx)
            }
            value
          } else {
            values(i).getText
          }
          UTF8String.fromString(" " + v + " " + u)
        }
        // Concatenate the "<value> <unit>" pairs and parse them as one interval string.
        IntervalUtils.stringToInterval(UTF8String.concat(kvs: _*))
      } catch {
        case i: IllegalArgumentException =>
          // Re-raise as a ParseException, preserving the original stack trace.
          val e = new ParseException(i.getMessage, ctx)
          e.setStackTrace(i.getStackTrace)
          throw e
      }
    }
  }
/**
* Creates a [[CalendarInterval]] with from-to unit, e.g. '2-1' YEAR TO MONTH.
*/
override def visitUnitToUnitInterval(ctx: UnitToUnitIntervalContext): CalendarInterval = {
withOrigin(ctx) {
val value = Option(ctx.intervalValue.STRING).map(string).map { interval =>
if (ctx.intervalValue().MINUS() == null) {
interval
} else {
interval.startsWith("-") match {
case true => interval.replaceFirst("-", "")
case false => s"-$interval"
}
}
}.getOrElse {
throw QueryParsingErrors.invalidFromToUnitValueError(ctx.intervalValue)
}
try {
val from = ctx.from.getText.toLowerCase(Locale.ROOT)
val to = ctx.to.getText.toLowerCase(Locale.ROOT)
(from, to) match {
case ("year", "month") =>
IntervalUtils.fromYearMonthString(value)
case ("day", "hour") | ("day", "minute") | ("day", "second") | ("hour", "minute") |
("hour", "second") | ("minute", "second") =>
IntervalUtils.fromDayTimeString(value,
DayTimeIntervalType.stringToField(from), DayTimeIntervalType.stringToField(to))
case _ =>
throw QueryParsingErrors.fromToIntervalUnsupportedError(from, to, ctx)
}
} catch {
// Handle Exceptions thrown by CalendarInterval
case e: IllegalArgumentException =>
val pe = new ParseException(e.getMessage, ctx)
pe.setStackTrace(e.getStackTrace)
throw pe
}
}
}
/* ********************************************************************************************
* DataType parsing
* ******************************************************************************************** */
  /**
   * Resolve/create a primitive type from its lower-cased name and optional integer
   * parameters (length for char/varchar, precision/scale for decimal).
   */
  override def visitPrimitiveDataType(ctx: PrimitiveDataTypeContext): DataType = withOrigin(ctx) {
    val dataType = ctx.identifier.getText.toLowerCase(Locale.ROOT)
    // Match on (type name, parameter list); anything unmatched falls through to a parse
    // error echoing the original type string.
    (dataType, ctx.INTEGER_VALUE().asScala.toList) match {
      case ("boolean", Nil) => BooleanType
      case ("tinyint" | "byte", Nil) => ByteType
      case ("smallint" | "short", Nil) => ShortType
      case ("int" | "integer", Nil) => IntegerType
      case ("bigint" | "long", Nil) => LongType
      case ("float" | "real", Nil) => FloatType
      case ("double", Nil) => DoubleType
      case ("date", Nil) => DateType
      // Plain "timestamp" resolves via session configuration (see SQLConf.timestampType).
      case ("timestamp", Nil) => SQLConf.get.timestampType
      case ("timestamp_ntz", Nil) => TimestampNTZType
      case ("timestamp_ltz", Nil) => TimestampType
      case ("string", Nil) => StringType
      case ("character" | "char", length :: Nil) => CharType(length.getText.toInt)
      case ("varchar", length :: Nil) => VarcharType(length.getText.toInt)
      case ("binary", Nil) => BinaryType
      case ("decimal" | "dec" | "numeric", Nil) => DecimalType.USER_DEFAULT
      // DECIMAL(p) defaults the scale to 0.
      case ("decimal" | "dec" | "numeric", precision :: Nil) =>
        DecimalType(precision.getText.toInt, 0)
      case ("decimal" | "dec" | "numeric", precision :: scale :: Nil) =>
        DecimalType(precision.getText.toInt, scale.getText.toInt)
      case ("void", Nil) => NullType
      case ("interval", Nil) => CalendarIntervalType
      case (dt, params) =>
        val dtStr = if (params.nonEmpty) s"$dt(${params.mkString(",")})" else dt
        throw QueryParsingErrors.dataTypeUnsupportedError(dtStr, ctx)
    }
  }
override def visitYearMonthIntervalDataType(ctx: YearMonthIntervalDataTypeContext): DataType = {
val startStr = ctx.from.getText.toLowerCase(Locale.ROOT)
val start = YearMonthIntervalType.stringToField(startStr)
if (ctx.to != null) {
val endStr = ctx.to.getText.toLowerCase(Locale.ROOT)
val end = YearMonthIntervalType.stringToField(endStr)
if (end <= start) {
throw QueryParsingErrors.fromToIntervalUnsupportedError(startStr, endStr, ctx)
}
YearMonthIntervalType(start, end)
} else {
YearMonthIntervalType(start)
}
}
override def visitDayTimeIntervalDataType(ctx: DayTimeIntervalDataTypeContext): DataType = {
val startStr = ctx.from.getText.toLowerCase(Locale.ROOT)
val start = DayTimeIntervalType.stringToField(startStr)
if (ctx.to != null ) {
val endStr = ctx.to.getText.toLowerCase(Locale.ROOT)
val end = DayTimeIntervalType.stringToField(endStr)
if (end <= start) {
throw QueryParsingErrors.fromToIntervalUnsupportedError(startStr, endStr, ctx)
}
DayTimeIntervalType(start, end)
} else {
DayTimeIntervalType(start)
}
}
  /**
   * Create a complex DataType. Arrays, Maps and Structures are supported.
   */
  override def visitComplexDataType(ctx: ComplexDataTypeContext): DataType = withOrigin(ctx) {
    // Dispatch on the grammar token that introduced the complex type.
    ctx.complex.getType match {
      case SqlBaseParser.ARRAY =>
        ArrayType(typedVisit(ctx.dataType(0)))
      case SqlBaseParser.MAP =>
        // dataType(0) is the key type, dataType(1) the value type.
        MapType(typedVisit(ctx.dataType(0)), typedVisit(ctx.dataType(1)))
      case SqlBaseParser.STRUCT =>
        // STRUCT<> with no fields yields an empty StructType (complexColTypeList is null).
        StructType(Option(ctx.complexColTypeList).toSeq.flatMap(visitComplexColTypeList))
    }
  }
/**
* Create top level table schema.
*/
protected def createSchema(ctx: ColTypeListContext): StructType = {
StructType(Option(ctx).toSeq.flatMap(visitColTypeList))
}
/**
* Create a [[StructType]] from a number of column definitions.
*/
override def visitColTypeList(ctx: ColTypeListContext): Seq[StructField] = withOrigin(ctx) {
ctx.colType().asScala.map(visitColType).toSeq
}
/**
* Create a top level [[StructField]] from a column definition.
*/
override def visitColType(ctx: ColTypeContext): StructField = withOrigin(ctx) {
import ctx._
val builder = new MetadataBuilder
// Add comment to metadata
Option(commentSpec()).map(visitCommentSpec).foreach {
builder.putString("comment", _)
}
StructField(
name = colName.getText,
dataType = typedVisit[DataType](ctx.dataType),
nullable = NULL == null,
metadata = builder.build())
}
/**
* Create a [[StructType]] from a sequence of [[StructField]]s.
*/
protected def createStructType(ctx: ComplexColTypeListContext): StructType = {
StructType(Option(ctx).toSeq.flatMap(visitComplexColTypeList))
}
/**
* Create a [[StructType]] from a number of column definitions.
*/
override def visitComplexColTypeList(
ctx: ComplexColTypeListContext): Seq[StructField] = withOrigin(ctx) {
ctx.complexColType().asScala.map(visitComplexColType).toSeq
}
/**
* Create a [[StructField]] from a column definition.
*/
override def visitComplexColType(ctx: ComplexColTypeContext): StructField = withOrigin(ctx) {
import ctx._
val structField = StructField(
name = identifier.getText,
dataType = typedVisit(dataType()),
nullable = NULL == null)
Option(commentSpec).map(visitCommentSpec).map(structField.withComment).getOrElse(structField)
}
  /**
   * Create a location string from a LOCATION clause (the unquoted STRING token).
   */
  override def visitLocationSpec(ctx: LocationSpecContext): String = withOrigin(ctx) {
    string(ctx.STRING)
  }
/**
* Create an optional location string.
*/
protected def visitLocationSpecList(ctx: java.util.List[LocationSpecContext]): Option[String] = {
ctx.asScala.headOption.map(visitLocationSpec)
}
  /**
   * Create a comment string from a COMMENT clause (the unquoted STRING token).
   */
  override def visitCommentSpec(ctx: CommentSpecContext): String = withOrigin(ctx) {
    string(ctx.STRING)
  }
/**
* Create an optional comment string.
*/
protected def visitCommentSpecList(ctx: java.util.List[CommentSpecContext]): Option[String] = {
ctx.asScala.headOption.map(visitCommentSpec)
}
  /**
   * Create a [[BucketSpec]] from a CLUSTERED BY ... [SORTED BY ...] INTO n BUCKETS clause.
   * Only ascending sort order is supported; any explicit DESC is rejected.
   */
  override def visitBucketSpec(ctx: BucketSpecContext): BucketSpec = withOrigin(ctx) {
    BucketSpec(
      ctx.INTEGER_VALUE.getText.toInt,
      visitIdentifierList(ctx.identifierList),
      // The SORTED BY list is optional; validate each column's ordering while
      // collecting the column names.
      Option(ctx.orderedIdentifierList)
        .toSeq
        .flatMap(_.orderedIdentifier.asScala)
        .map { orderedIdCtx =>
          Option(orderedIdCtx.ordering).map(_.getText).foreach { dir =>
            if (dir.toLowerCase(Locale.ROOT) != "asc") {
              operationNotAllowed(s"Column ordering must be ASC, was '$dir'", ctx)
            }
          }
          orderedIdCtx.ident.getText
        })
  }
/**
* Convert a table property list into a key-value map.
* This should be called through [[visitPropertyKeyValues]] or [[visitPropertyKeys]].
*/
override def visitTablePropertyList(
ctx: TablePropertyListContext): Map[String, String] = withOrigin(ctx) {
val properties = ctx.tableProperty.asScala.map { property =>
val key = visitTablePropertyKey(property.key)
val value = visitTablePropertyValue(property.value)
key -> value
}
// Check for duplicate property names.
checkDuplicateKeys(properties.toSeq, ctx)
properties.toMap
}
/**
* Parse a key-value map from a [[TablePropertyListContext]], assuming all values are specified.
*/
def visitPropertyKeyValues(ctx: TablePropertyListContext): Map[String, String] = {
val props = visitTablePropertyList(ctx)
val badKeys = props.collect { case (key, null) => key }
if (badKeys.nonEmpty) {
operationNotAllowed(
s"Values must be specified for key(s): ${badKeys.mkString("[", ",", "]")}", ctx)
}
props
}
/**
* Parse a list of keys from a [[TablePropertyListContext]], assuming no values are specified.
*/
def visitPropertyKeys(ctx: TablePropertyListContext): Seq[String] = {
val props = visitTablePropertyList(ctx)
val badKeys = props.filter { case (_, v) => v != null }.keys
if (badKeys.nonEmpty) {
operationNotAllowed(
s"Values should not be specified for key(s): ${badKeys.mkString("[", ",", "]")}", ctx)
}
props.keys.toSeq
}
/**
* A table property key can either be String or a collection of dot separated elements. This
* function extracts the property key based on whether its a string literal or a table property
* identifier.
*/
override def visitTablePropertyKey(key: TablePropertyKeyContext): String = {
if (key.STRING != null) {
string(key.STRING)
} else {
key.getText
}
}
/**
* A table property value can be String, Integer, Boolean or Decimal. This function extracts
* the property value based on whether its a string, integer, boolean or decimal literal.
*/
override def visitTablePropertyValue(value: TablePropertyValueContext): String = {
if (value == null) {
null
} else if (value.STRING != null) {
string(value.STRING)
} else if (value.booleanValue != null) {
value.getText.toLowerCase(Locale.ROOT)
} else {
value.getText
}
}
  /**
   * Type to keep track of a table header: (identifier, isTemporary, ifNotExists, isExternal).
   */
  type TableHeader = (Seq[String], Boolean, Boolean, Boolean)
  /**
   * Type to keep track of table clauses:
   * - partition transforms
   * - partition columns
   * - bucketSpec
   * - properties
   * - options
   * - location
   * - comment
   * - serde
   *
   * Note: Partition transforms are based on existing table schema definition. It can be simple
   * column names, or functions like `year(date_col)`. Partition columns are column names with data
   * types like `i INT`, which should be appended to the existing table schema.
   */
  type TableClauses = (
      Seq[Transform], Seq[StructField], Option[BucketSpec], Map[String, String],
      Map[String, String], Option[String], Option[String], Option[SerdeInfo])
  /**
   * Validate a create table statement and return its [[TableHeader]]:
   * (multi-part table name, isTemporary, ifNotExists, isExternal).
   */
  override def visitCreateTableHeader(
      ctx: CreateTableHeaderContext): TableHeader = withOrigin(ctx) {
    val temporary = ctx.TEMPORARY != null
    val ifNotExists = ctx.EXISTS != null
    // TEMPORARY and IF NOT EXISTS cannot be combined.
    if (temporary && ifNotExists) {
      operationNotAllowed("CREATE TEMPORARY TABLE ... IF NOT EXISTS", ctx)
    }
    val multipartIdentifier = ctx.multipartIdentifier.parts.asScala.map(_.getText).toSeq
    (multipartIdentifier, temporary, ifNotExists, ctx.EXTERNAL != null)
  }
  /**
   * Parse a replace table header and return its [[TableHeader]]. REPLACE TABLE is never
   * temporary, external, or IF NOT EXISTS, so those flags are always false.
   */
  override def visitReplaceTableHeader(
      ctx: ReplaceTableHeaderContext): TableHeader = withOrigin(ctx) {
    val multipartIdentifier = ctx.multipartIdentifier.parts.asScala.map(_.getText).toSeq
    (multipartIdentifier, false, false, false)
  }
  /**
   * Parse a qualified name to a multipart name (one string per dot-separated part).
   */
  override def visitQualifiedName(ctx: QualifiedNameContext): Seq[String] = withOrigin(ctx) {
    ctx.identifier.asScala.map(_.getText).toSeq
  }
  /**
   * Parse a list of partition fields, which may mix transforms (e.g. `year(col)`) and
   * plain typed columns (e.g. `i INT`). Returns the two kinds separately; callers decide
   * whether mixing them is legal.
   */
  override def visitPartitionFieldList(
      ctx: PartitionFieldListContext): (Seq[Transform], Seq[StructField]) = withOrigin(ctx) {
    // Map each field into one of two Option slots, then unzip and flatten to split
    // transforms from column definitions while preserving order within each kind.
    val (transforms, columns) = ctx.fields.asScala.map {
      case transform: PartitionTransformContext =>
        (Some(visitPartitionTransform(transform)), None)
      case field: PartitionColumnContext =>
        (None, Some(visitColType(field.colType)))
    }.unzip
    (transforms.flatten.toSeq, columns.flatten.toSeq)
  }
  /**
   * Create a [[Transform]] for one partition field: either an identity transform for a
   * bare column reference, or a named transform applied to arguments (bucket/years/
   * months/days/hours are recognized; anything else becomes a generic ApplyTransform).
   */
  override def visitPartitionTransform(
      ctx: PartitionTransformContext): Transform = withOrigin(ctx) {
    // Require a transform argument to be a field reference; anything else is a parse error.
    def getFieldReference(
        ctx: ApplyTransformContext,
        arg: V2Expression): FieldReference = {
      lazy val name: String = ctx.identifier.getText
      arg match {
        case ref: FieldReference =>
          ref
        case nonRef =>
          throw QueryParsingErrors.partitionTransformNotExpectedError(name, nonRef.describe, ctx)
      }
    }
    // Require exactly one argument, and that it is a field reference.
    def getSingleFieldReference(
        ctx: ApplyTransformContext,
        arguments: Seq[V2Expression]): FieldReference = {
      lazy val name: String = ctx.identifier.getText
      if (arguments.size > 1) {
        throw QueryParsingErrors.tooManyArgumentsForTransformError(name, ctx)
      } else if (arguments.isEmpty) {
        throw QueryParsingErrors.notEnoughArgumentsForTransformError(name, ctx)
      } else {
        getFieldReference(ctx, arguments.head)
      }
    }
    ctx.transform match {
      case identityCtx: IdentityTransformContext =>
        IdentityTransform(FieldReference(typedVisit[Seq[String]](identityCtx.qualifiedName)))
      case applyCtx: ApplyTransformContext =>
        val arguments = applyCtx.argument.asScala.map(visitTransformArgument).toSeq
        applyCtx.identifier.getText match {
          case "bucket" =>
            // First argument is the bucket count; short/int/long literals are accepted
            // and normalized to Int.
            val numBuckets: Int = arguments.head match {
              case LiteralValue(shortValue, ShortType) =>
                shortValue.asInstanceOf[Short].toInt
              case LiteralValue(intValue, IntegerType) =>
                intValue.asInstanceOf[Int]
              case LiteralValue(longValue, LongType) =>
                longValue.asInstanceOf[Long].toInt
              case lit =>
                throw QueryParsingErrors.invalidBucketsNumberError(lit.describe, applyCtx)
            }
            // Remaining arguments are the bucketing columns.
            val fields = arguments.tail.map(arg => getFieldReference(applyCtx, arg))
            BucketTransform(LiteralValue(numBuckets, IntegerType), fields)
          case "years" =>
            YearsTransform(getSingleFieldReference(applyCtx, arguments))
          case "months" =>
            MonthsTransform(getSingleFieldReference(applyCtx, arguments))
          case "days" =>
            DaysTransform(getSingleFieldReference(applyCtx, arguments))
          case "hours" =>
            HoursTransform(getSingleFieldReference(applyCtx, arguments))
          case name =>
            ApplyTransform(name, arguments)
        }
    }
  }
/**
* Parse an argument to a transform. An argument may be a field reference (qualified name) or
* a value literal.
*/
override def visitTransformArgument(ctx: TransformArgumentContext): V2Expression = {
withOrigin(ctx) {
val reference = Option(ctx.qualifiedName)
.map(typedVisit[Seq[String]])
.map(FieldReference(_))
val literal = Option(ctx.constant)
.map(typedVisit[Literal])
.map(lit => LiteralValue(lit.value, lit.dataType))
reference.orElse(literal)
.getOrElse(throw QueryParsingErrors.invalidTransformArgumentError(ctx))
}
}
  /**
   * Remove reserved namespace properties (location, owner) from a property map.
   * Unless the legacy conf allows reserved keys, specifying them is a parse error;
   * in legacy mode they are silently dropped instead.
   */
  private def cleanNamespaceProperties(
      properties: Map[String, String],
      ctx: ParserRuleContext): Map[String, String] = withOrigin(ctx) {
    import SupportsNamespaces._
    val legacyOn = conf.getConf(SQLConf.LEGACY_PROPERTY_NON_RESERVED)
    properties.filter {
      case (PROP_LOCATION, _) if !legacyOn =>
        throw QueryParsingErrors.cannotCleanReservedNamespacePropertyError(
          PROP_LOCATION, ctx, "please use the LOCATION clause to specify it")
      case (PROP_LOCATION, _) => false
      case (PROP_OWNER, _) if !legacyOn =>
        throw QueryParsingErrors.cannotCleanReservedNamespacePropertyError(
          PROP_OWNER, ctx, "it will be set to the current user")
      case (PROP_OWNER, _) => false
      case _ => true
    }
  }
  /**
   * Create a [[CreateNamespace]] command.
   *
   * For example:
   * {{{
   *   CREATE NAMESPACE [IF NOT EXISTS] ns1.ns2.ns3
   *     create_namespace_clauses;
   *
   *   create_namespace_clauses (order insensitive):
   *     [COMMENT namespace_comment]
   *     [LOCATION path]
   *     [WITH PROPERTIES (key1=val1, key2=val2, ...)]
   * }}}
   */
  override def visitCreateNamespace(ctx: CreateNamespaceContext): LogicalPlan = withOrigin(ctx) {
    import SupportsNamespaces._
    checkDuplicateClauses(ctx.commentSpec(), "COMMENT", ctx)
    checkDuplicateClauses(ctx.locationSpec, "LOCATION", ctx)
    checkDuplicateClauses(ctx.PROPERTIES, "WITH PROPERTIES", ctx)
    checkDuplicateClauses(ctx.DBPROPERTIES, "WITH DBPROPERTIES", ctx)
    // PROPERTIES and DBPROPERTIES are aliases; at most one may appear.
    if (!ctx.PROPERTIES.isEmpty && !ctx.DBPROPERTIES.isEmpty) {
      throw QueryParsingErrors.propertiesAndDbPropertiesBothSpecifiedError(ctx)
    }
    var properties = ctx.tablePropertyList.asScala.headOption
      .map(visitPropertyKeyValues)
      .getOrElse(Map.empty)
    // Reserved keys (location/owner) are rejected or dropped before the explicit
    // COMMENT/LOCATION clauses below are folded into the property map.
    properties = cleanNamespaceProperties(properties, ctx)
    visitCommentSpecList(ctx.commentSpec()).foreach {
      properties += PROP_COMMENT -> _
    }
    visitLocationSpecList(ctx.locationSpec()).foreach {
      properties += PROP_LOCATION -> _
    }
    CreateNamespace(
      UnresolvedDBObjectName(
        visitMultipartIdentifier(ctx.multipartIdentifier),
        isNamespace = true),
      ctx.EXISTS != null,
      properties)
  }
/**
* Create a [[DropNamespace]] command.
*
* For example:
* {{{
* DROP (DATABASE|SCHEMA|NAMESPACE) [IF EXISTS] ns1.ns2 [RESTRICT|CASCADE];
* }}}
*/
override def visitDropNamespace(ctx: DropNamespaceContext): LogicalPlan = withOrigin(ctx) {
DropNamespace(
UnresolvedNamespace(visitMultipartIdentifier(ctx.multipartIdentifier)),
ctx.EXISTS != null,
ctx.CASCADE != null)
}
  /**
   * Create a [[SetNamespaceProperties]] logical plan.
   *
   * For example:
   * {{{
   *   ALTER (DATABASE|SCHEMA|NAMESPACE) database
   *   SET (DBPROPERTIES|PROPERTIES) (property_name=property_value, ...);
   * }}}
   */
  override def visitSetNamespaceProperties(ctx: SetNamespacePropertiesContext): LogicalPlan = {
    withOrigin(ctx) {
      // Reserved keys (location/owner) are rejected or dropped, same as in CREATE NAMESPACE.
      val properties = cleanNamespaceProperties(visitPropertyKeyValues(ctx.tablePropertyList), ctx)
      SetNamespaceProperties(
        UnresolvedNamespace(visitMultipartIdentifier(ctx.multipartIdentifier)),
        properties)
    }
  }
  /**
   * Create a [[SetNamespaceLocation]] logical plan.
   *
   * For example:
   * {{{
   *   ALTER (DATABASE|SCHEMA|NAMESPACE) namespace SET LOCATION path;
   * }}}
   */
  override def visitSetNamespaceLocation(ctx: SetNamespaceLocationContext): LogicalPlan = {
    withOrigin(ctx) {
      SetNamespaceLocation(
        UnresolvedNamespace(visitMultipartIdentifier(ctx.multipartIdentifier)),
        visitLocationSpec(ctx.locationSpec))
    }
  }
/**
* Create a [[ShowNamespaces]] command.
*/
override def visitShowNamespaces(ctx: ShowNamespacesContext): LogicalPlan = withOrigin(ctx) {
if (ctx.DATABASES != null && ctx.multipartIdentifier != null) {
throw QueryParsingErrors.fromOrInNotAllowedInShowDatabasesError(ctx)
}
val multiPart = Option(ctx.multipartIdentifier).map(visitMultipartIdentifier)
ShowNamespaces(
UnresolvedNamespace(multiPart.getOrElse(Seq.empty[String])),
Option(ctx.pattern).map(string))
}
  /**
   * Create a [[DescribeNamespace]].
   *
   * For example:
   * {{{
   *   DESCRIBE (DATABASE|SCHEMA|NAMESPACE) [EXTENDED] database;
   * }}}
   */
  override def visitDescribeNamespace(ctx: DescribeNamespaceContext): LogicalPlan =
    withOrigin(ctx) {
      DescribeNamespace(
        UnresolvedNamespace(visitMultipartIdentifier(ctx.multipartIdentifier())),
        ctx.EXTENDED != null)
    }
  /**
   * Remove reserved table properties (provider, location, owner) from a property map.
   * Mirrors [[cleanNamespaceProperties]]: unless the legacy conf allows reserved keys,
   * specifying them is a parse error; in legacy mode they are silently dropped.
   */
  def cleanTableProperties(
      ctx: ParserRuleContext, properties: Map[String, String]): Map[String, String] = {
    import TableCatalog._
    val legacyOn = conf.getConf(SQLConf.LEGACY_PROPERTY_NON_RESERVED)
    properties.filter {
      case (PROP_PROVIDER, _) if !legacyOn =>
        throw QueryParsingErrors.cannotCleanReservedTablePropertyError(
          PROP_PROVIDER, ctx, "please use the USING clause to specify it")
      case (PROP_PROVIDER, _) => false
      case (PROP_LOCATION, _) if !legacyOn =>
        throw QueryParsingErrors.cannotCleanReservedTablePropertyError(
          PROP_LOCATION, ctx, "please use the LOCATION clause to specify it")
      case (PROP_LOCATION, _) => false
      case (PROP_OWNER, _) if !legacyOn =>
        throw QueryParsingErrors.cannotCleanReservedTablePropertyError(
          PROP_OWNER, ctx, "it will be set to the current user")
      case (PROP_OWNER, _) => false
      case _ => true
    }
  }
def cleanTableOptions(
ctx: ParserRuleContext,
options: Map[String, String],
location: Option[String]): (Map[String, String], Option[String]) = {
var path = location
val filtered = cleanTableProperties(ctx, options).filter {
case (k, v) if k.equalsIgnoreCase("path") && path.nonEmpty =>
throw QueryParsingErrors.duplicatedTablePathsFoundError(path.get, v, ctx)
case (k, v) if k.equalsIgnoreCase("path") =>
path = Some(v)
false
case _ => true
}
(filtered, path)
}
/**
* Create a [[SerdeInfo]] for creating tables.
*
* Format: STORED AS (name | INPUTFORMAT input_format OUTPUTFORMAT output_format)
*/
override def visitCreateFileFormat(ctx: CreateFileFormatContext): SerdeInfo = withOrigin(ctx) {
(ctx.fileFormat, ctx.storageHandler) match {
// Expected format: INPUTFORMAT input_format OUTPUTFORMAT output_format
case (c: TableFileFormatContext, null) =>
SerdeInfo(formatClasses = Some(FormatClasses(string(c.inFmt), string(c.outFmt))))
// Expected format: SEQUENCEFILE | TEXTFILE | RCFILE | ORC | PARQUET | AVRO
case (c: GenericFileFormatContext, null) =>
SerdeInfo(storedAs = Some(c.identifier.getText))
case (null, storageHandler) =>
operationNotAllowed("STORED BY", ctx)
case _ =>
throw QueryParsingErrors.storedAsAndStoredByBothSpecifiedError(ctx)
}
}
  /**
   * Create a [[SerdeInfo]] used for creating tables.
   *
   * Example format:
   * {{{
   *   SERDE serde_name [WITH SERDEPROPERTIES (k1=v1, k2=v2, ...)]
   * }}}
   *
   * OR
   *
   * {{{
   *   DELIMITED [FIELDS TERMINATED BY char [ESCAPED BY char]]
   *   [COLLECTION ITEMS TERMINATED BY char]
   *   [MAP KEYS TERMINATED BY char]
   *   [LINES TERMINATED BY char]
   *   [NULL DEFINED AS char]
   * }}}
   */
  def visitRowFormat(ctx: RowFormatContext): SerdeInfo = withOrigin(ctx) {
    // The grammar produces exactly these two ROW FORMAT variants.
    ctx match {
      case serde: RowFormatSerdeContext => visitRowFormatSerde(serde)
      case delimited: RowFormatDelimitedContext => visitRowFormatDelimited(delimited)
    }
  }
/**
* Create SERDE row format name and properties pair.
*/
override def visitRowFormatSerde(ctx: RowFormatSerdeContext): SerdeInfo = withOrigin(ctx) {
import ctx._
SerdeInfo(
serde = Some(string(name)),
serdeProperties = Option(tablePropertyList).map(visitPropertyKeyValues).getOrElse(Map.empty))
}
  /**
   * Create a delimited row format properties object. Each optional DELIMITED sub-clause
   * maps to a Hive serde property key; absent clauses contribute nothing.
   */
  override def visitRowFormatDelimited(
      ctx: RowFormatDelimitedContext): SerdeInfo = withOrigin(ctx) {
    // Collect the entries if any.
    def entry(key: String, value: Token): Seq[(String, String)] = {
      Option(value).toSeq.map(x => key -> string(x))
    }
    // TODO we need proper support for the NULL format.
    // Note: "serialization.format" is deliberately set to the same FIELDS TERMINATED BY
    // delimiter as "field.delim".
    val entries =
      entry("field.delim", ctx.fieldsTerminatedBy) ++
      entry("serialization.format", ctx.fieldsTerminatedBy) ++
      entry("escape.delim", ctx.escapedBy) ++
      // The following typo is inherited from Hive...
      entry("colelction.delim", ctx.collectionItemsTerminatedBy) ++
      entry("mapkey.delim", ctx.keysTerminatedBy) ++
      Option(ctx.linesSeparatedBy).toSeq.map { token =>
        val value = string(token)
        // Only the newline line terminator is supported.
        validate(
          value == "\\n",
          s"LINES TERMINATED BY only supports newline '\\\\n' right now: $value",
          ctx)
        "line.delim" -> value
      }
    SerdeInfo(serdeProperties = entries.toMap)
  }
/**
* Throw a [[ParseException]] if the user specified incompatible SerDes through ROW FORMAT
* and STORED AS.
*
* The following are allowed. Anything else is not:
* ROW FORMAT SERDE ... STORED AS [SEQUENCEFILE | RCFILE | TEXTFILE]
* ROW FORMAT DELIMITED ... STORED AS TEXTFILE
* ROW FORMAT ... STORED AS INPUTFORMAT ... OUTPUTFORMAT ...
*/
protected def validateRowFormatFileFormat(
rowFormatCtx: RowFormatContext,
createFileFormatCtx: CreateFileFormatContext,
parentCtx: ParserRuleContext): Unit = {
if (rowFormatCtx == null || createFileFormatCtx == null) {
return
}
(rowFormatCtx, createFileFormatCtx.fileFormat) match {
case (_, ffTable: TableFileFormatContext) => // OK
case (rfSerde: RowFormatSerdeContext, ffGeneric: GenericFileFormatContext) =>
ffGeneric.identifier.getText.toLowerCase(Locale.ROOT) match {
case ("sequencefile" | "textfile" | "rcfile") => // OK
case fmt =>
operationNotAllowed(
s"ROW FORMAT SERDE is incompatible with format '$fmt', which also specifies a serde",
parentCtx)
}
case (rfDelimited: RowFormatDelimitedContext, ffGeneric: GenericFileFormatContext) =>
ffGeneric.identifier.getText.toLowerCase(Locale.ROOT) match {
case "textfile" => // OK
case fmt => operationNotAllowed(
s"ROW FORMAT DELIMITED is only compatible with 'textfile', not '$fmt'", parentCtx)
}
case _ =>
// should never happen
def str(ctx: ParserRuleContext): String = {
(0 until ctx.getChildCount).map { i => ctx.getChild(i).getText }.mkString(" ")
}
operationNotAllowed(
s"Unexpected combination of ${str(rowFormatCtx)} and ${str(createFileFormatCtx)}",
parentCtx)
}
}
protected def validateRowFormatFileFormat(
rowFormatCtx: Seq[RowFormatContext],
createFileFormatCtx: Seq[CreateFileFormatContext],
parentCtx: ParserRuleContext): Unit = {
if (rowFormatCtx.size == 1 && createFileFormatCtx.size == 1) {
validateRowFormatFileFormat(rowFormatCtx.head, createFileFormatCtx.head, parentCtx)
}
}
  /**
   * Parse the order-insensitive clauses of CREATE/REPLACE TABLE into a [[TableClauses]]
   * tuple. Each clause may appear at most once; SKEWED BY is not supported.
   */
  override def visitCreateTableClauses(ctx: CreateTableClausesContext): TableClauses = {
    checkDuplicateClauses(ctx.TBLPROPERTIES, "TBLPROPERTIES", ctx)
    checkDuplicateClauses(ctx.OPTIONS, "OPTIONS", ctx)
    checkDuplicateClauses(ctx.PARTITIONED, "PARTITIONED BY", ctx)
    checkDuplicateClauses(ctx.createFileFormat, "STORED AS/BY", ctx)
    checkDuplicateClauses(ctx.rowFormat, "ROW FORMAT", ctx)
    checkDuplicateClauses(ctx.commentSpec(), "COMMENT", ctx)
    checkDuplicateClauses(ctx.bucketSpec(), "CLUSTERED BY", ctx)
    checkDuplicateClauses(ctx.locationSpec, "LOCATION", ctx)
    if (ctx.skewSpec.size > 0) {
      operationNotAllowed("CREATE TABLE ... SKEWED BY", ctx)
    }
    val (partTransforms, partCols) =
      Option(ctx.partitioning).map(visitPartitionFieldList).getOrElse((Nil, Nil))
    val bucketSpec = ctx.bucketSpec().asScala.headOption.map(visitBucketSpec)
    // Reserved keys are stripped from TBLPROPERTIES; a "path" option is folded into
    // the location (rejecting a duplicate with an explicit LOCATION clause).
    val properties = Option(ctx.tableProps).map(visitPropertyKeyValues).getOrElse(Map.empty)
    val cleanedProperties = cleanTableProperties(ctx, properties)
    val options = Option(ctx.options).map(visitPropertyKeyValues).getOrElse(Map.empty)
    val location = visitLocationSpecList(ctx.locationSpec())
    val (cleanedOptions, newLocation) = cleanTableOptions(ctx, options, location)
    val comment = visitCommentSpecList(ctx.commentSpec())
    val serdeInfo =
      getSerdeInfo(ctx.rowFormat.asScala.toSeq, ctx.createFileFormat.asScala.toSeq, ctx)
    (partTransforms, partCols, bucketSpec, cleanedProperties, cleanedOptions, newLocation, comment,
      serdeInfo)
  }
  /**
   * Combine ROW FORMAT and STORED AS/BY clauses into a single optional [[SerdeInfo]],
   * after cross-validating their compatibility. Returns None when neither clause is
   * present.
   */
  protected def getSerdeInfo(
      rowFormatCtx: Seq[RowFormatContext],
      createFileFormatCtx: Seq[CreateFileFormatContext],
      ctx: ParserRuleContext): Option[SerdeInfo] = {
    validateRowFormatFileFormat(rowFormatCtx, createFileFormatCtx, ctx)
    val rowFormatSerdeInfo = rowFormatCtx.map(visitRowFormat)
    val fileFormatSerdeInfo = createFileFormatCtx.map(visitCreateFileFormat)
    // File-format info comes first so row-format info is merged into it.
    (fileFormatSerdeInfo ++ rowFormatSerdeInfo).reduceLeftOption((l, r) => l.merge(r))
  }
  /**
   * Normalize parsed partition fields to a single list of [[Transform]]s.
   * Mixing transforms (e.g. `year(col)`) and typed partition columns (e.g. `i INT`) in
   * one PARTITIONED BY clause is not allowed; plain columns become identity transforms.
   */
  private def partitionExpressions(
      partTransforms: Seq[Transform],
      partCols: Seq[StructField],
      ctx: ParserRuleContext): Seq[Transform] = {
    if (partTransforms.nonEmpty) {
      if (partCols.nonEmpty) {
        val references = partTransforms.map(_.describe()).mkString(", ")
        val columns = partCols
          .map(field => s"${field.name} ${field.dataType.simpleString}")
          .mkString(", ")
        operationNotAllowed(
          s"""PARTITION BY: Cannot mix partition expressions and partition columns:
             |Expressions: $references
             |Columns: $columns""".stripMargin, ctx)
      }
      partTransforms
    } else {
      // columns were added to create the schema. convert to column references
      partCols.map { column =>
        IdentityTransform(FieldReference(Seq(column.name)))
      }
    }
  }
  /**
   * Create a table, returning a [[CreateTableStatement]] logical plan.
   *
   * Expected format:
   * {{{
   *   CREATE [TEMPORARY] TABLE [IF NOT EXISTS] [db_name.]table_name
   *   [USING table_provider]
   *   create_table_clauses
   *   [[AS] select_statement];
   *
   *   create_table_clauses (order insensitive):
   *     [PARTITIONED BY (partition_fields)]
   *     [OPTIONS table_property_list]
   *     [ROW FORMAT row_format]
   *     [STORED AS file_format]
   *     [CLUSTERED BY (col_name, col_name, ...)
   *       [SORTED BY (col_name [ASC|DESC], ...)]
   *       INTO num_buckets BUCKETS
   *     ]
   *     [LOCATION path]
   *     [COMMENT table_comment]
   *     [TBLPROPERTIES (property_name=property_value, ...)]
   *
   *   partition_fields:
   *     col_name, transform(col_name), transform(constant, col_name), ... |
   *     col_name data_type [NOT NULL] [COMMENT col_comment], ...
   * }}}
   */
  override def visitCreateTable(ctx: CreateTableContext): LogicalPlan = withOrigin(ctx) {
    val (table, temp, ifNotExists, external) = visitCreateTableHeader(ctx.createTableHeader)
    val columns = Option(ctx.colTypeList()).map(visitColTypeList).getOrElse(Nil)
    val provider = Option(ctx.tableProvider).map(_.multipartIdentifier.getText)
    val (partTransforms, partCols, bucketSpec, properties, options, location, comment, serdeInfo) =
      visitCreateTableClauses(ctx.createTableClauses())
    // USING (a DataSource provider) and Hive serde clauses are mutually exclusive.
    if (provider.isDefined && serdeInfo.isDefined) {
      operationNotAllowed(s"CREATE TABLE ... USING ... ${serdeInfo.get.describe}", ctx)
    }
    if (temp) {
      val asSelect = if (ctx.query == null) "" else " AS ..."
      operationNotAllowed(
        s"CREATE TEMPORARY TABLE ...$asSelect, use CREATE TEMPORARY VIEW instead", ctx)
    }
    val partitioning = partitionExpressions(partTransforms, partCols, ctx)
    // A trailing query makes this CTAS, which cannot carry an explicit schema.
    Option(ctx.query).map(plan) match {
      case Some(_) if columns.nonEmpty =>
        operationNotAllowed(
          "Schema may not be specified in a Create Table As Select (CTAS) statement",
          ctx)
      case Some(_) if partCols.nonEmpty =>
        // non-reference partition columns are not allowed because schema can't be specified
        operationNotAllowed(
          "Partition column types may not be specified in Create Table As Select (CTAS)",
          ctx)
      case Some(query) =>
        CreateTableAsSelectStatement(
          table, query, partitioning, bucketSpec, properties, provider, options, location, comment,
          writeOptions = Map.empty, serdeInfo, external = external, ifNotExists = ifNotExists)
      case _ =>
        // Note: table schema includes both the table columns list and the partition columns
        // with data type.
        val schema = StructType(columns ++ partCols)
        CreateTableStatement(table, schema, partitioning, bucketSpec, properties, provider,
          options, location, comment, serdeInfo, external = external, ifNotExists = ifNotExists)
    }
  }
/**
* Replace a table, returning a [[ReplaceTableStatement]] logical plan.
*
* Expected format:
* {{{
* [CREATE OR] REPLACE TABLE [db_name.]table_name
* [USING table_provider]
* replace_table_clauses
* [[AS] select_statement];
*
* replace_table_clauses (order insensitive):
* [OPTIONS table_property_list]
* [PARTITIONED BY (partition_fields)]
* [CLUSTERED BY (col_name, col_name, ...)
* [SORTED BY (col_name [ASC|DESC], ...)]
* INTO num_buckets BUCKETS
* ]
* [LOCATION path]
* [COMMENT table_comment]
* [TBLPROPERTIES (property_name=property_value, ...)]
*
* partition_fields:
* col_name, transform(col_name), transform(constant, col_name), ... |
* col_name data_type [NOT NULL] [COMMENT col_comment], ...
* }}}
*/
override def visitReplaceTable(ctx: ReplaceTableContext): LogicalPlan = withOrigin(ctx) {
val (table, temp, ifNotExists, external) = visitReplaceTableHeader(ctx.replaceTableHeader)
val orCreate = ctx.replaceTableHeader().CREATE() != null
if (temp) {
val action = if (orCreate) "CREATE OR REPLACE" else "REPLACE"
operationNotAllowed(s"$action TEMPORARY TABLE ..., use $action TEMPORARY VIEW instead.", ctx)
}
if (external) {
operationNotAllowed("REPLACE EXTERNAL TABLE ...", ctx)
}
if (ifNotExists) {
operationNotAllowed("REPLACE ... IF NOT EXISTS, use CREATE IF NOT EXISTS instead", ctx)
}
val (partTransforms, partCols, bucketSpec, properties, options, location, comment, serdeInfo) =
visitCreateTableClauses(ctx.createTableClauses())
val columns = Option(ctx.colTypeList()).map(visitColTypeList).getOrElse(Nil)
val provider = Option(ctx.tableProvider).map(_.multipartIdentifier.getText)
if (provider.isDefined && serdeInfo.isDefined) {
operationNotAllowed(s"CREATE TABLE ... USING ... ${serdeInfo.get.describe}", ctx)
}
val partitioning = partitionExpressions(partTransforms, partCols, ctx)
Option(ctx.query).map(plan) match {
case Some(_) if columns.nonEmpty =>
operationNotAllowed(
"Schema may not be specified in a Replace Table As Select (RTAS) statement",
ctx)
case Some(_) if partCols.nonEmpty =>
// non-reference partition columns are not allowed because schema can't be specified
operationNotAllowed(
"Partition column types may not be specified in Replace Table As Select (RTAS)",
ctx)
case Some(query) =>
ReplaceTableAsSelectStatement(table, query, partitioning, bucketSpec, properties,
provider, options, location, comment, writeOptions = Map.empty, serdeInfo,
orCreate = orCreate)
case _ =>
// Note: table schema includes both the table columns list and the partition columns
// with data type.
val schema = StructType(columns ++ partCols)
ReplaceTableStatement(table, schema, partitioning, bucketSpec, properties, provider,
options, location, comment, serdeInfo, orCreate = orCreate)
}
}
  /**
   * Create a [[DropTable]] command, honoring IF EXISTS and PURGE.
   */
  override def visitDropTable(ctx: DropTableContext): LogicalPlan = withOrigin(ctx) {
    // DROP TABLE works with either a table or a temporary view.
    DropTable(
      createUnresolvedTableOrView(ctx.multipartIdentifier(), "DROP TABLE"),
      ctx.EXISTS != null,
      ctx.PURGE != null)
  }
/**
* Create a [[DropView]] command.
*/
override def visitDropView(ctx: DropViewContext): AnyRef = withOrigin(ctx) {
DropView(
createUnresolvedView(
ctx.multipartIdentifier(),
commandName = "DROP VIEW",
allowTemp = true,
relationTypeMismatchHint = Some("Please use DROP TABLE instead.")),
ctx.EXISTS != null)
}
/**
* Create a [[UseStatement]] logical plan.
*/
override def visitUse(ctx: UseContext): LogicalPlan = withOrigin(ctx) {
val nameParts = visitMultipartIdentifier(ctx.multipartIdentifier)
UseStatement(ctx.NAMESPACE != null, nameParts)
}
/**
* Create a [[ShowCurrentNamespaceStatement]].
*/
  override def visitShowCurrentNamespace(
      ctx: ShowCurrentNamespaceContext) : LogicalPlan = withOrigin(ctx) {
    // SHOW CURRENT NAMESPACE takes no arguments, so nothing is read from the context.
    ShowCurrentNamespaceStatement()
  }
/**
* Create a [[ShowTables]] command.
*/
override def visitShowTables(ctx: ShowTablesContext): LogicalPlan = withOrigin(ctx) {
val multiPart = Option(ctx.multipartIdentifier).map(visitMultipartIdentifier)
ShowTables(
UnresolvedNamespace(multiPart.getOrElse(Seq.empty[String])),
Option(ctx.pattern).map(string))
}
/**
* Create a [[ShowTableExtended]] command.
*/
override def visitShowTableExtended(
ctx: ShowTableExtendedContext): LogicalPlan = withOrigin(ctx) {
val multiPart = Option(ctx.multipartIdentifier).map(visitMultipartIdentifier)
val partitionKeys = Option(ctx.partitionSpec).map { specCtx =>
UnresolvedPartitionSpec(visitNonOptionalPartitionSpec(specCtx), None)
}
ShowTableExtended(
UnresolvedNamespace(multiPart.getOrElse(Seq.empty[String])),
string(ctx.pattern),
partitionKeys)
}
/**
* Create a [[ShowViews]] command.
*/
override def visitShowViews(ctx: ShowViewsContext): LogicalPlan = withOrigin(ctx) {
val multiPart = Option(ctx.multipartIdentifier).map(visitMultipartIdentifier)
ShowViews(
UnresolvedNamespace(multiPart.getOrElse(Seq.empty[String])),
Option(ctx.pattern).map(string))
}
  /**
   * Create a [[ColumnPosition]] from a FIRST or AFTER column_name clause.
   */
  override def visitColPosition(ctx: ColPositionContext): ColumnPosition = {
    // The grammar only produces FIRST or AFTER here, hence no default case.
    ctx.position.getType match {
      case SqlBaseParser.FIRST => ColumnPosition.first()
      case SqlBaseParser.AFTER => ColumnPosition.after(ctx.afterCol.getText)
    }
  }
/**
* Parse new column info from ADD COLUMN into a QualifiedColType.
*/
  override def visitQualifiedColTypeWithPosition(
      ctx: QualifiedColTypeWithPositionContext): QualifiedColType = withOrigin(ctx) {
    val name = typedVisit[Seq[String]](ctx.name)
    QualifiedColType(
      // A multi-part name means a nested column; `init` is the parent field path.
      path = if (name.length > 1) Some(UnresolvedFieldName(name.init)) else None,
      colName = name.last,
      dataType = typedVisit[DataType](ctx.dataType),
      // No NOT NULL token in the DDL means the new column is nullable.
      nullable = ctx.NULL == null,
      comment = Option(ctx.commentSpec()).map(visitCommentSpec),
      position = Option(ctx.colPosition).map( pos =>
        UnresolvedFieldPosition(typedVisit[ColumnPosition](pos))))
  }
/**
* Parse a [[AlterTableAddColumns]] command.
*
* For example:
* {{{
* ALTER TABLE table1
* ADD COLUMNS (col_name data_type [COMMENT col_comment], ...);
* }}}
*/
override def visitAddTableColumns(ctx: AddTableColumnsContext): LogicalPlan = withOrigin(ctx) {
val colToken = if (ctx.COLUMN() != null) "COLUMN" else "COLUMNS"
AddColumns(
createUnresolvedTable(ctx.multipartIdentifier, s"ALTER TABLE ... ADD $colToken"),
ctx.columns.qualifiedColTypeWithPosition.asScala.map(typedVisit[QualifiedColType]).toSeq
)
}
/**
* Parse a [[AlterTableRenameColumn]] command.
*
* For example:
* {{{
* ALTER TABLE table1 RENAME COLUMN a.b.c TO x
* }}}
*/
override def visitRenameTableColumn(
ctx: RenameTableColumnContext): LogicalPlan = withOrigin(ctx) {
RenameColumn(
createUnresolvedTable(ctx.table, "ALTER TABLE ... RENAME COLUMN"),
UnresolvedFieldName(typedVisit[Seq[String]](ctx.from)),
ctx.to.getText)
}
/**
* Parse a [[AlterTableAlterColumn]] command to alter a column's property.
*
* For example:
* {{{
* ALTER TABLE table1 ALTER COLUMN a.b.c TYPE bigint
* ALTER TABLE table1 ALTER COLUMN a.b.c SET NOT NULL
* ALTER TABLE table1 ALTER COLUMN a.b.c DROP NOT NULL
* ALTER TABLE table1 ALTER COLUMN a.b.c COMMENT 'new comment'
* ALTER TABLE table1 ALTER COLUMN a.b.c FIRST
* ALTER TABLE table1 ALTER COLUMN a.b.c AFTER x
* }}}
*/
  override def visitAlterTableAlterColumn(
      ctx: AlterTableAlterColumnContext): LogicalPlan = withOrigin(ctx) {
    val action = ctx.alterColumnAction
    // CHANGE and ALTER share this rule; remember which keyword was used for error messages.
    val verb = if (ctx.CHANGE != null) "CHANGE" else "ALTER"
    if (action == null) {
      operationNotAllowed(
        s"ALTER TABLE table $verb COLUMN requires a TYPE, a SET/DROP, a COMMENT, or a FIRST/AFTER",
        ctx)
    }
    val dataType = if (action.dataType != null) {
      Some(typedVisit[DataType](action.dataType))
    } else {
      None
    }
    // SET NOT NULL => column becomes non-nullable; DROP NOT NULL => nullable.
    val nullable = if (action.setOrDrop != null) {
      action.setOrDrop.getType match {
        case SqlBaseParser.SET => Some(false)
        case SqlBaseParser.DROP => Some(true)
      }
    } else {
      None
    }
    val comment = if (action.commentSpec != null) {
      Some(visitCommentSpec(action.commentSpec()))
    } else {
      None
    }
    val position = if (action.colPosition != null) {
      Some(UnresolvedFieldPosition(typedVisit[ColumnPosition](action.colPosition)))
    } else {
      None
    }
    // Exactly one kind of alteration is expected per statement.
    assert(Seq(dataType, nullable, comment, position).count(_.nonEmpty) == 1)
    AlterColumn(
      createUnresolvedTable(ctx.table, s"ALTER TABLE ... $verb COLUMN"),
      UnresolvedFieldName(typedVisit[Seq[String]](ctx.column)),
      dataType = dataType,
      nullable = nullable,
      comment = comment,
      position = position)
  }
/**
* Parse a [[AlterTableAlterColumn]] command. This is Hive SQL syntax.
*
* For example:
* {{{
* ALTER TABLE table [PARTITION partition_spec]
* CHANGE [COLUMN] column_old_name column_new_name column_dataType [COMMENT column_comment]
* [FIRST | AFTER column_name];
* }}}
*/
override def visitHiveChangeColumn(ctx: HiveChangeColumnContext): LogicalPlan = withOrigin(ctx) {
if (ctx.partitionSpec != null) {
operationNotAllowed("ALTER TABLE table PARTITION partition_spec CHANGE COLUMN", ctx)
}
val columnNameParts = typedVisit[Seq[String]](ctx.colName)
if (!conf.resolver(columnNameParts.last, ctx.colType().colName.getText)) {
throw QueryParsingErrors.operationInHiveStyleCommandUnsupportedError("Renaming column",
"ALTER COLUMN", ctx, Some("please run RENAME COLUMN instead"))
}
if (ctx.colType.NULL != null) {
throw QueryParsingErrors.operationInHiveStyleCommandUnsupportedError(
"NOT NULL", "ALTER COLUMN", ctx,
Some("please run ALTER COLUMN ... SET/DROP NOT NULL instead"))
}
AlterColumn(
createUnresolvedTable(ctx.table, s"ALTER TABLE ... CHANGE COLUMN"),
UnresolvedFieldName(columnNameParts),
dataType = Option(ctx.colType().dataType()).map(typedVisit[DataType]),
nullable = None,
comment = Option(ctx.colType().commentSpec()).map(visitCommentSpec),
position = Option(ctx.colPosition).map(
pos => UnresolvedFieldPosition(typedVisit[ColumnPosition](pos))))
}
  /**
   * Parse a [[ReplaceColumns]] command. This is Hive SQL syntax.
   *
   * For example:
   * {{{
   *   ALTER TABLE table1 [PARTITION partition_spec]
   *   REPLACE COLUMNS (col_name data_type [COMMENT col_comment], ...)
   * }}}
   * The partition spec, NOT NULL, column positions and nested columns are rejected.
   */
  override def visitHiveReplaceColumns(
      ctx: HiveReplaceColumnsContext): LogicalPlan = withOrigin(ctx) {
    if (ctx.partitionSpec != null) {
      operationNotAllowed("ALTER TABLE table PARTITION partition_spec REPLACE COLUMNS", ctx)
    }
    ReplaceColumns(
      createUnresolvedTable(ctx.multipartIdentifier, "ALTER TABLE ... REPLACE COLUMNS"),
      ctx.columns.qualifiedColTypeWithPosition.asScala.map { colType =>
        if (colType.NULL != null) {
          throw QueryParsingErrors.operationInHiveStyleCommandUnsupportedError(
            "NOT NULL", "REPLACE COLUMNS", ctx)
        }
        if (colType.colPosition != null) {
          throw QueryParsingErrors.operationInHiveStyleCommandUnsupportedError(
            "Column position", "REPLACE COLUMNS", ctx)
        }
        val col = typedVisit[QualifiedColType](colType)
        // A non-empty path means the column name was multi-part, i.e. nested.
        if (col.path.isDefined) {
          throw QueryParsingErrors.operationInHiveStyleCommandUnsupportedError(
            "Replacing with a nested column", "REPLACE COLUMNS", ctx)
        }
        col
      }.toSeq
    )
  }
/**
* Parse a [[AlterTableDropColumns]] command.
*
* For example:
* {{{
* ALTER TABLE table1 DROP COLUMN a.b.c
* ALTER TABLE table1 DROP COLUMNS a.b.c, x, y
* }}}
*/
override def visitDropTableColumns(
ctx: DropTableColumnsContext): LogicalPlan = withOrigin(ctx) {
val columnsToDrop = ctx.columns.multipartIdentifier.asScala.map(typedVisit[Seq[String]])
DropColumns(
createUnresolvedTable(
ctx.multipartIdentifier,
"ALTER TABLE ... DROP COLUMNS"),
columnsToDrop.map(UnresolvedFieldName(_)).toSeq)
}
/**
* Parse [[SetViewProperties]] or [[SetTableProperties]] commands.
*
* For example:
* {{{
* ALTER TABLE table SET TBLPROPERTIES ('table_property' = 'property_value');
* ALTER VIEW view SET TBLPROPERTIES ('table_property' = 'property_value');
* }}}
*/
  override def visitSetTableProperties(
      ctx: SetTablePropertiesContext): LogicalPlan = withOrigin(ctx) {
    val properties = visitPropertyKeyValues(ctx.tablePropertyList)
    // Shared validation/normalization of the property map before use.
    val cleanedTableProperties = cleanTableProperties(ctx, properties)
    // The VIEW keyword chooses between the view and table variants of the command.
    if (ctx.VIEW != null) {
      SetViewProperties(
        createUnresolvedView(
          ctx.multipartIdentifier,
          commandName = "ALTER VIEW ... SET TBLPROPERTIES",
          allowTemp = false,
          relationTypeMismatchHint = alterViewTypeMismatchHint),
        cleanedTableProperties)
    } else {
      SetTableProperties(
        createUnresolvedTable(
          ctx.multipartIdentifier,
          "ALTER TABLE ... SET TBLPROPERTIES",
          alterTableTypeMismatchHint),
        cleanedTableProperties)
    }
  }
/**
* Parse [[UnsetViewProperties]] or [[UnsetTableProperties]] commands.
*
* For example:
* {{{
* ALTER TABLE table UNSET TBLPROPERTIES [IF EXISTS] ('comment', 'key');
* ALTER VIEW view UNSET TBLPROPERTIES [IF EXISTS] ('comment', 'key');
* }}}
*/
  override def visitUnsetTableProperties(
      ctx: UnsetTablePropertiesContext): LogicalPlan = withOrigin(ctx) {
    val properties = visitPropertyKeys(ctx.tablePropertyList)
    // Pair each key with a dummy empty value so the shared validation in
    // cleanTableProperties can run; only the keys are kept afterwards.
    val cleanedProperties = cleanTableProperties(ctx, properties.map(_ -> "").toMap).keys.toSeq
    val ifExists = ctx.EXISTS != null
    // The VIEW keyword chooses between the view and table variants of the command.
    if (ctx.VIEW != null) {
      UnsetViewProperties(
        createUnresolvedView(
          ctx.multipartIdentifier,
          commandName = "ALTER VIEW ... UNSET TBLPROPERTIES",
          allowTemp = false,
          relationTypeMismatchHint = alterViewTypeMismatchHint),
        cleanedProperties,
        ifExists)
    } else {
      UnsetTableProperties(
        createUnresolvedTable(
          ctx.multipartIdentifier,
          "ALTER TABLE ... UNSET TBLPROPERTIES",
          alterTableTypeMismatchHint),
        cleanedProperties,
        ifExists)
    }
  }
/**
* Create an [[SetTableLocation]] command.
*
* For example:
* {{{
* ALTER TABLE table_name [PARTITION partition_spec] SET LOCATION "loc";
* }}}
*/
  override def visitSetTableLocation(ctx: SetTableLocationContext): LogicalPlan = withOrigin(ctx) {
    SetTableLocation(
      createUnresolvedTable(
        ctx.multipartIdentifier,
        "ALTER TABLE ... SET LOCATION ...",
        alterTableTypeMismatchHint),
      // With a partition spec, only that partition's location is changed.
      Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec),
      visitLocationSpec(ctx.locationSpec))
  }
/**
* Create a [[DescribeColumn]] or [[DescribeRelation]] commands.
*/
  override def visitDescribeRelation(ctx: DescribeRelationContext): LogicalPlan = withOrigin(ctx) {
    // EXTENDED and FORMATTED are treated identically here.
    val isExtended = ctx.EXTENDED != null || ctx.FORMATTED != null
    val relation = createUnresolvedTableOrView(
      ctx.multipartIdentifier(),
      "DESCRIBE TABLE")
    if (ctx.describeColName != null) {
      // Describing a single column; a partition spec cannot be combined with it.
      if (ctx.partitionSpec != null) {
        throw QueryParsingErrors.descColumnForPartitionUnsupportedError(ctx)
      } else {
        DescribeColumn(
          relation,
          UnresolvedAttribute(ctx.describeColName.nameParts.asScala.map(_.getText).toSeq),
          isExtended)
      }
    } else {
      val partitionSpec = if (ctx.partitionSpec != null) {
        // According to the syntax, visitPartitionSpec returns `Map[String, Option[String]]`.
        // DESCRIBE requires every partition column to have a value.
        visitPartitionSpec(ctx.partitionSpec).map {
          case (key, Some(value)) => key -> value
          case (key, _) =>
            throw QueryParsingErrors.incompletePartitionSpecificationError(key, ctx)
        }
      } else {
        Map.empty[String, String]
      }
      DescribeRelation(relation, partitionSpec, isExtended)
    }
  }
/**
* Create an [[AnalyzeTable]], or an [[AnalyzeColumn]].
* Example SQL for analyzing a table or a set of partitions :
* {{{
* ANALYZE TABLE multi_part_name [PARTITION (partcol1[=val1], partcol2[=val2], ...)]
* COMPUTE STATISTICS [NOSCAN];
* }}}
*
* Example SQL for analyzing columns :
* {{{
* ANALYZE TABLE multi_part_name COMPUTE STATISTICS FOR COLUMNS column1, column2;
* }}}
*
* Example SQL for analyzing all columns of a table:
* {{{
* ANALYZE TABLE multi_part_name COMPUTE STATISTICS FOR ALL COLUMNS;
* }}}
*/
  override def visitAnalyze(ctx: AnalyzeContext): LogicalPlan = withOrigin(ctx) {
    // Column statistics ignore any partition spec; warn instead of failing.
    def checkPartitionSpec(): Unit = {
      if (ctx.partitionSpec != null) {
        logWarning("Partition specification is ignored when collecting column statistics: " +
          ctx.partitionSpec.getText)
      }
    }
    // The only identifier accepted after COMPUTE STATISTICS is NOSCAN.
    if (ctx.identifier != null &&
        ctx.identifier.getText.toLowerCase(Locale.ROOT) != "noscan") {
      throw QueryParsingErrors.computeStatisticsNotExpectedError(ctx.identifier())
    }
    if (ctx.ALL() != null) {
      // ... FOR ALL COLUMNS
      checkPartitionSpec()
      AnalyzeColumn(
        createUnresolvedTableOrView(
          ctx.multipartIdentifier(),
          "ANALYZE TABLE ... FOR ALL COLUMNS"),
        None,
        allColumns = true)
    } else if (ctx.identifierSeq() == null) {
      // Plain ANALYZE TABLE (optionally restricted to a partition spec).
      val partitionSpec = if (ctx.partitionSpec != null) {
        visitPartitionSpec(ctx.partitionSpec)
      } else {
        Map.empty[String, Option[String]]
      }
      AnalyzeTable(
        createUnresolvedTableOrView(
          ctx.multipartIdentifier(),
          "ANALYZE TABLE",
          allowTempView = false),
        partitionSpec,
        noScan = ctx.identifier != null)
    } else {
      // ... FOR COLUMNS col1, col2, ...
      checkPartitionSpec()
      AnalyzeColumn(
        createUnresolvedTableOrView(
          ctx.multipartIdentifier(),
          "ANALYZE TABLE ... FOR COLUMNS ..."),
        Option(visitIdentifierSeq(ctx.identifierSeq())),
        allColumns = false)
    }
  }
/**
* Create an [[AnalyzeTables]].
* Example SQL for analyzing all tables in default database:
* {{{
* ANALYZE TABLES IN default COMPUTE STATISTICS;
* }}}
*/
override def visitAnalyzeTables(ctx: AnalyzeTablesContext): LogicalPlan = withOrigin(ctx) {
if (ctx.identifier != null &&
ctx.identifier.getText.toLowerCase(Locale.ROOT) != "noscan") {
throw QueryParsingErrors.computeStatisticsNotExpectedError(ctx.identifier())
}
val multiPart = Option(ctx.multipartIdentifier).map(visitMultipartIdentifier)
AnalyzeTables(
UnresolvedNamespace(multiPart.getOrElse(Seq.empty[String])),
noScan = ctx.identifier != null)
}
/**
* Create a [[RepairTable]].
*
* For example:
* {{{
* MSCK REPAIR TABLE multi_part_name [{ADD|DROP|SYNC} PARTITIONS]
* }}}
*/
override def visitRepairTable(ctx: RepairTableContext): LogicalPlan = withOrigin(ctx) {
val (enableAddPartitions, enableDropPartitions, option) =
if (ctx.SYNC() != null) {
(true, true, " ... SYNC PARTITIONS")
} else if (ctx.DROP() != null) {
(false, true, " ... DROP PARTITIONS")
} else if (ctx.ADD() != null) {
(true, false, " ... ADD PARTITIONS")
} else {
(true, false, "")
}
RepairTable(
createUnresolvedTable(ctx.multipartIdentifier, s"MSCK REPAIR TABLE$option"),
enableAddPartitions,
enableDropPartitions)
}
/**
* Create a [[LoadData]].
*
* For example:
* {{{
* LOAD DATA [LOCAL] INPATH 'filepath' [OVERWRITE] INTO TABLE multi_part_name
* [PARTITION (partcol1=val1, partcol2=val2 ...)]
* }}}
*/
override def visitLoadData(ctx: LoadDataContext): LogicalPlan = withOrigin(ctx) {
LoadData(
child = createUnresolvedTable(ctx.multipartIdentifier, "LOAD DATA"),
path = string(ctx.path),
isLocal = ctx.LOCAL != null,
isOverwrite = ctx.OVERWRITE != null,
partition = Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec)
)
}
/**
* Creates a [[ShowCreateTable]]
*/
  override def visitShowCreateTable(ctx: ShowCreateTableContext): LogicalPlan = withOrigin(ctx) {
    ShowCreateTable(
      createUnresolvedTableOrView(
        ctx.multipartIdentifier(),
        "SHOW CREATE TABLE",
        allowTempView = false),
      // Whether the optional AS SERDE clause was given.
      ctx.SERDE != null)
  }
/**
* Create a [[CacheTable]] or [[CacheTableAsSelect]].
*
* For example:
* {{{
* CACHE [LAZY] TABLE multi_part_name
* [OPTIONS tablePropertyList] [[AS] query]
* }}}
*/
  override def visitCacheTable(ctx: CacheTableContext): LogicalPlan = withOrigin(ctx) {
    import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
    val query = Option(ctx.query).map(plan)
    val relation = createUnresolvedRelation(ctx.multipartIdentifier)
    val tableName = relation.multipartIdentifier
    // CACHE TABLE ... AS SELECT only accepts a single-part name; an explicit
    // catalog/namespace prefix is rejected here.
    if (query.isDefined && tableName.length > 1) {
      val catalogAndNamespace = tableName.init
      throw QueryParsingErrors.addCatalogInCacheTableAsSelectNotAllowedError(
        catalogAndNamespace.quoted, ctx)
    }
    val options = Option(ctx.options).map(visitPropertyKeyValues).getOrElse(Map.empty)
    val isLazy = ctx.LAZY != null
    if (query.isDefined) {
      // source(...) captures the raw SQL text of the query alongside the parsed plan.
      CacheTableAsSelect(tableName.head, query.get, source(ctx.query()), isLazy, options)
    } else {
      CacheTable(relation, tableName, isLazy, options)
    }
  }
/**
* Create an [[UncacheTable]] logical plan.
*/
override def visitUncacheTable(ctx: UncacheTableContext): LogicalPlan = withOrigin(ctx) {
UncacheTable(
createUnresolvedRelation(ctx.multipartIdentifier),
ctx.EXISTS != null)
}
/**
* Create a [[TruncateTable]] command.
*
* For example:
* {{{
* TRUNCATE TABLE multi_part_name [PARTITION (partcol1=val1, partcol2=val2 ...)]
* }}}
*/
override def visitTruncateTable(ctx: TruncateTableContext): LogicalPlan = withOrigin(ctx) {
val table = createUnresolvedTable(ctx.multipartIdentifier, "TRUNCATE TABLE")
Option(ctx.partitionSpec).map { spec =>
TruncatePartition(table, UnresolvedPartitionSpec(visitNonOptionalPartitionSpec(spec)))
}.getOrElse(TruncateTable(table))
}
/**
* A command for users to list the partition names of a table. If partition spec is specified,
* partitions that match the spec are returned. Otherwise an empty result set is returned.
*
* This function creates a [[ShowPartitionsStatement]] logical plan
*
* The syntax of using this command in SQL is:
* {{{
* SHOW PARTITIONS multi_part_name [partition_spec];
* }}}
*/
override def visitShowPartitions(ctx: ShowPartitionsContext): LogicalPlan = withOrigin(ctx) {
val partitionKeys = Option(ctx.partitionSpec).map { specCtx =>
UnresolvedPartitionSpec(visitNonOptionalPartitionSpec(specCtx), None)
}
ShowPartitions(
createUnresolvedTable(ctx.multipartIdentifier(), "SHOW PARTITIONS"),
partitionKeys)
}
/**
* Create a [[RefreshTable]].
*
* For example:
* {{{
* REFRESH TABLE multi_part_name
* }}}
*/
  override def visitRefreshTable(ctx: RefreshTableContext): LogicalPlan = withOrigin(ctx) {
    // REFRESH TABLE accepts either a table or a view name.
    RefreshTable(
      createUnresolvedTableOrView(
        ctx.multipartIdentifier(),
        "REFRESH TABLE"))
  }
/**
* A command for users to list the column names for a table.
* This function creates a [[ShowColumns]] logical plan.
*
* The syntax of using this command in SQL is:
* {{{
* SHOW COLUMNS (FROM | IN) tableName=multipartIdentifier
* ((FROM | IN) namespace=multipartIdentifier)?
* }}}
*/
  override def visitShowColumns(ctx: ShowColumnsContext): LogicalPlan = withOrigin(ctx) {
    val table = createUnresolvedTableOrView(ctx.table, "SHOW COLUMNS")
    // Optional second identifier: SHOW COLUMNS IN t FROM ns.
    val namespace = Option(ctx.ns).map(visitMultipartIdentifier)
    // Use namespace only if table name doesn't specify it. If namespace is already specified
    // in the table name, it's checked against the given namespace after table/view is resolved.
    val tableWithNamespace = if (namespace.isDefined && table.multipartIdentifier.length == 1) {
      // Preserve the parse-tree origin so error positions still point at the table name.
      CurrentOrigin.withOrigin(table.origin) {
        table.copy(multipartIdentifier = namespace.get ++ table.multipartIdentifier)
      }
    } else {
      table
    }
    ShowColumns(tableWithNamespace, namespace)
  }
/**
* Create an [[RecoverPartitions]]
*
* For example:
* {{{
* ALTER TABLE multi_part_name RECOVER PARTITIONS;
* }}}
*/
  override def visitRecoverPartitions(
      ctx: RecoverPartitionsContext): LogicalPlan = withOrigin(ctx) {
    // The mismatch hint points users at ALTER VIEW if the name resolves to a view.
    RecoverPartitions(
      createUnresolvedTable(
        ctx.multipartIdentifier,
        "ALTER TABLE ... RECOVER PARTITIONS",
        alterTableTypeMismatchHint))
  }
/**
* Create an [[AddPartitions]].
*
* For example:
* {{{
* ALTER TABLE multi_part_name ADD [IF NOT EXISTS] PARTITION spec [LOCATION 'loc1']
* ALTER VIEW multi_part_name ADD [IF NOT EXISTS] PARTITION spec
* }}}
*
* ALTER VIEW ... ADD PARTITION ... is not supported because the concept of partitioning
* is associated with physical tables
*/
  override def visitAddTablePartition(
      ctx: AddTablePartitionContext): LogicalPlan = withOrigin(ctx) {
    if (ctx.VIEW != null) {
      operationNotAllowed("ALTER VIEW ... ADD PARTITION", ctx)
    }
    // Create partition spec to location mapping.
    // Each partition may carry its own optional LOCATION clause.
    val specsAndLocs = ctx.partitionSpecLocation.asScala.map { splCtx =>
      val spec = visitNonOptionalPartitionSpec(splCtx.partitionSpec)
      val location = Option(splCtx.locationSpec).map(visitLocationSpec)
      UnresolvedPartitionSpec(spec, location)
    }
    AddPartitions(
      createUnresolvedTable(
        ctx.multipartIdentifier,
        "ALTER TABLE ... ADD PARTITION ...",
        alterTableTypeMismatchHint),
      specsAndLocs.toSeq,
      // IF NOT EXISTS flag.
      ctx.EXISTS != null)
  }
/**
* Create an [[RenamePartitions]]
*
* For example:
* {{{
* ALTER TABLE multi_part_name PARTITION spec1 RENAME TO PARTITION spec2;
* }}}
*/
  override def visitRenameTablePartition(
      ctx: RenameTablePartitionContext): LogicalPlan = withOrigin(ctx) {
    RenamePartitions(
      createUnresolvedTable(
        ctx.multipartIdentifier,
        "ALTER TABLE ... RENAME TO PARTITION",
        alterTableTypeMismatchHint),
      // Both specs must be fully specified (non-optional values).
      UnresolvedPartitionSpec(visitNonOptionalPartitionSpec(ctx.from)),
      UnresolvedPartitionSpec(visitNonOptionalPartitionSpec(ctx.to)))
  }
/**
* Create an [[DropPartitions]]
*
* For example:
* {{{
* ALTER TABLE multi_part_name DROP [IF EXISTS] PARTITION spec1[, PARTITION spec2, ...]
* [PURGE];
* ALTER VIEW view DROP [IF EXISTS] PARTITION spec1[, PARTITION spec2, ...];
* }}}
*
* ALTER VIEW ... DROP PARTITION ... is not supported because the concept of partitioning
* is associated with physical tables
*/
  override def visitDropTablePartitions(
      ctx: DropTablePartitionsContext): LogicalPlan = withOrigin(ctx) {
    if (ctx.VIEW != null) {
      operationNotAllowed("ALTER VIEW ... DROP PARTITION", ctx)
    }
    // Several comma-separated PARTITION specs may be dropped in one statement.
    val partSpecs = ctx.partitionSpec.asScala.map(visitNonOptionalPartitionSpec)
      .map(spec => UnresolvedPartitionSpec(spec))
    DropPartitions(
      createUnresolvedTable(
        ctx.multipartIdentifier,
        "ALTER TABLE ... DROP PARTITION ...",
        alterTableTypeMismatchHint),
      partSpecs.toSeq,
      ifExists = ctx.EXISTS != null,
      purge = ctx.PURGE != null)
  }
/**
* Create an [[SetTableSerDeProperties]]
*
* For example:
* {{{
* ALTER TABLE multi_part_name [PARTITION spec] SET SERDE serde_name
* [WITH SERDEPROPERTIES props];
* ALTER TABLE multi_part_name [PARTITION spec] SET SERDEPROPERTIES serde_properties;
* }}}
*/
  override def visitSetTableSerDe(ctx: SetTableSerDeContext): LogicalPlan = withOrigin(ctx) {
    SetTableSerDeProperties(
      createUnresolvedTable(
        ctx.multipartIdentifier,
        "ALTER TABLE ... SET [SERDE|SERDEPROPERTIES]",
        alterTableTypeMismatchHint),
      // Optional STRING token: the serde name for the SET SERDE form.
      Option(ctx.STRING).map(string),
      Option(ctx.tablePropertyList).map(visitPropertyKeyValues),
      // TODO a partition spec is allowed to have optional values. This is currently violated.
      Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec))
  }
/**
* Create or replace a view. This creates a [[CreateViewStatement]]
*
* For example:
* {{{
* CREATE [OR REPLACE] [[GLOBAL] TEMPORARY] VIEW [IF NOT EXISTS] multi_part_name
* [(column_name [COMMENT column_comment], ...) ]
* create_view_clauses
*
* AS SELECT ...;
*
* create_view_clauses (order insensitive):
* [COMMENT view_comment]
* [TBLPROPERTIES (property_name = property_value, ...)]
* }}}
*/
  override def visitCreateView(ctx: CreateViewContext): LogicalPlan = withOrigin(ctx) {
    // PARTITIONED ON is parsed by the grammar but not supported for views.
    if (!ctx.identifierList.isEmpty) {
      operationNotAllowed("CREATE VIEW ... PARTITIONED ON", ctx)
    }
    checkDuplicateClauses(ctx.commentSpec(), "COMMENT", ctx)
    checkDuplicateClauses(ctx.PARTITIONED, "PARTITIONED ON", ctx)
    checkDuplicateClauses(ctx.TBLPROPERTIES, "TBLPROPERTIES", ctx)
    // Optional explicit column list: each name may carry its own comment.
    val userSpecifiedColumns = Option(ctx.identifierCommentList).toSeq.flatMap { icl =>
      icl.identifierComment.asScala.map { ic =>
        ic.identifier.getText -> Option(ic.commentSpec()).map(visitCommentSpec)
      }
    }
    val properties = ctx.tablePropertyList.asScala.headOption.map(visitPropertyKeyValues)
      .getOrElse(Map.empty)
    if (ctx.TEMPORARY != null && !properties.isEmpty) {
      operationNotAllowed("TBLPROPERTIES can't coexist with CREATE TEMPORARY VIEW", ctx)
    }
    // TEMPORARY yields a local temp view, GLOBAL TEMPORARY a global one,
    // otherwise the view is persisted in the catalog.
    val viewType = if (ctx.TEMPORARY == null) {
      PersistedView
    } else if (ctx.GLOBAL != null) {
      GlobalTempView
    } else {
      LocalTempView
    }
    CreateViewStatement(
      visitMultipartIdentifier(ctx.multipartIdentifier),
      userSpecifiedColumns,
      visitCommentSpecList(ctx.commentSpec()),
      properties,
      // Keep the raw SQL text of the defining query as well as the parsed plan.
      Option(source(ctx.query)),
      plan(ctx.query),
      ctx.EXISTS != null,
      ctx.REPLACE != null,
      viewType)
  }
/**
* Alter the query of a view. This creates a [[AlterViewAs]]
*
* For example:
* {{{
* ALTER VIEW multi_part_name AS SELECT ...;
* }}}
*/
override def visitAlterViewQuery(ctx: AlterViewQueryContext): LogicalPlan = withOrigin(ctx) {
AlterViewAs(
createUnresolvedView(ctx.multipartIdentifier, "ALTER VIEW ... AS"),
originalText = source(ctx.query),
query = plan(ctx.query))
}
/**
* Create a [[RenameTable]] command.
*
* For example:
* {{{
* ALTER TABLE multi_part_name1 RENAME TO multi_part_name2;
* ALTER VIEW multi_part_name1 RENAME TO multi_part_name2;
* }}}
*/
override def visitRenameTable(ctx: RenameTableContext): LogicalPlan = withOrigin(ctx) {
val isView = ctx.VIEW != null
val relationStr = if (isView) "VIEW" else "TABLE"
RenameTable(
createUnresolvedTableOrView(ctx.from, s"ALTER $relationStr ... RENAME TO"),
visitMultipartIdentifier(ctx.to),
isView)
}
/**
* A command for users to list the properties for a table. If propertyKey is specified, the value
* for the propertyKey is returned. If propertyKey is not specified, all the keys and their
* corresponding values are returned.
* The syntax of using this command in SQL is:
* {{{
* SHOW TBLPROPERTIES multi_part_name[('propertyKey')];
* }}}
*/
  override def visitShowTblProperties(
      ctx: ShowTblPropertiesContext): LogicalPlan = withOrigin(ctx) {
    ShowTableProperties(
      createUnresolvedTableOrView(ctx.table, "SHOW TBLPROPERTIES"),
      // With a key only that property is returned; otherwise all key/value pairs.
      Option(ctx.key).map(visitTablePropertyKey))
  }
/**
* Create a plan for a DESCRIBE FUNCTION statement.
*/
override def visitDescribeFunction(ctx: DescribeFunctionContext): LogicalPlan = withOrigin(ctx) {
import ctx._
val functionName =
if (describeFuncName.STRING() != null) {
Seq(string(describeFuncName.STRING()))
} else if (describeFuncName.qualifiedName() != null) {
visitQualifiedName(describeFuncName.qualifiedName)
} else {
Seq(describeFuncName.getText)
}
DescribeFunction(UnresolvedFunc(functionName), EXTENDED != null)
}
/**
* Create a plan for a SHOW FUNCTIONS command.
*/
  override def visitShowFunctions(ctx: ShowFunctionsContext): LogicalPlan = withOrigin(ctx) {
    // The optional identifier restricts the scope: ALL (default), SYSTEM, or USER.
    val (userScope, systemScope) = Option(ctx.identifier)
      .map(_.getText.toLowerCase(Locale.ROOT)) match {
      case None | Some("all") => (true, true)
      case Some("system") => (false, true)
      case Some("user") => (true, false)
      case Some(x) => throw QueryParsingErrors.showFunctionsUnsupportedError(x, ctx.identifier())
    }
    val pattern = Option(ctx.pattern).map(string(_))
    // An optional qualified function name narrows the listing further.
    val unresolvedFuncOpt = Option(ctx.multipartIdentifier)
      .map(visitMultipartIdentifier)
      .map(UnresolvedFunc(_))
    ShowFunctions(unresolvedFuncOpt, userScope, systemScope, pattern)
  }
/**
* Create a DROP FUNCTION statement.
*
* For example:
* {{{
* DROP [TEMPORARY] FUNCTION [IF EXISTS] function;
* }}}
*/
override def visitDropFunction(ctx: DropFunctionContext): LogicalPlan = withOrigin(ctx) {
val functionName = visitMultipartIdentifier(ctx.multipartIdentifier)
DropFunction(
UnresolvedFunc(functionName),
ctx.EXISTS != null,
ctx.TEMPORARY != null)
}
/**
* Create a CREATE FUNCTION statement.
*
* For example:
* {{{
* CREATE [OR REPLACE] [TEMPORARY] FUNCTION [IF NOT EXISTS] [db_name.]function_name
* AS class_name [USING JAR|FILE|ARCHIVE 'file_uri' [, JAR|FILE|ARCHIVE 'file_uri']];
* }}}
*/
override def visitCreateFunction(ctx: CreateFunctionContext): LogicalPlan = withOrigin(ctx) {
val resources = ctx.resource.asScala.map { resource =>
val resourceType = resource.identifier.getText.toLowerCase(Locale.ROOT)
resourceType match {
case "jar" | "file" | "archive" =>
FunctionResource(FunctionResourceType.fromString(resourceType), string(resource.STRING))
case other =>
operationNotAllowed(s"CREATE FUNCTION with resource type '$resourceType'", ctx)
}
}
val functionIdentifier = visitMultipartIdentifier(ctx.multipartIdentifier)
CreateFunctionStatement(
functionIdentifier,
string(ctx.className),
resources.toSeq,
ctx.TEMPORARY != null,
ctx.EXISTS != null,
ctx.REPLACE != null)
}
  /**
   * Create a [[RefreshFunction]] command for REFRESH FUNCTION multi_part_name.
   */
  override def visitRefreshFunction(ctx: RefreshFunctionContext): LogicalPlan = withOrigin(ctx) {
    val functionIdentifier = visitMultipartIdentifier(ctx.multipartIdentifier)
    RefreshFunction(UnresolvedFunc(functionIdentifier))
  }
  /**
   * Create a [[CommentOnNamespace]] command for COMMENT ON NAMESPACE ... IS ...
   */
  override def visitCommentNamespace(ctx: CommentNamespaceContext): LogicalPlan = withOrigin(ctx) {
    // COMMENT ... IS NULL clears the comment; it is represented as an empty string here.
    val comment = ctx.comment.getType match {
      case SqlBaseParser.NULL => ""
      case _ => string(ctx.STRING)
    }
    val nameParts = visitMultipartIdentifier(ctx.multipartIdentifier)
    CommentOnNamespace(UnresolvedNamespace(nameParts), comment)
  }
  /**
   * Create a [[CommentOnTable]] command for COMMENT ON TABLE ... IS ...
   */
  override def visitCommentTable(ctx: CommentTableContext): LogicalPlan = withOrigin(ctx) {
    // COMMENT ... IS NULL clears the comment; it is represented as an empty string here.
    val comment = ctx.comment.getType match {
      case SqlBaseParser.NULL => ""
      case _ => string(ctx.STRING)
    }
    CommentOnTable(createUnresolvedTable(ctx.multipartIdentifier, "COMMENT ON TABLE"), comment)
  }
  // Used by ALTER VIEW commands: if the target resolves to a table, suggest ALTER TABLE.
  private def alterViewTypeMismatchHint: Option[String] = Some("Please use ALTER TABLE instead.")
  // Used by ALTER TABLE commands: if the target resolves to a view, suggest ALTER VIEW.
  private def alterTableTypeMismatchHint: Option[String] = Some("Please use ALTER VIEW instead.")
}
| chuckchen/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala | Scala | apache-2.0 | 164,640 |
package edu.cmu.lti.nlp.amr
import scala.util.matching.Regex
import scala.collection.mutable.Map
import scala.collection.mutable.Set
import scala.collection.mutable.ArrayBuffer
//import org.scalatest.Suite
object Corpus {
    // Groups consecutive non-empty lines into blocks. NOTE: the inner takeWhile
    // advances the same iterator that drives the comprehension, so the grouping
    // relies on that side effect and the input can only be traversed once.
    def splitOnNewline(iterator: Iterator[String]) : Iterator[String] = { // This treats more than one newline in a row as a single newline
        for {
            x <- iterator if x != ""
            p = (x :: iterator.takeWhile(_ != "").toList).mkString("\\n")
        } yield p
    }

    /**
     * Takes an iterator of lines, splits on empty lines, and yields only
     * blocks of lines that contain some AMR content
     */
    def getAMRBlocks(iterator: Iterator[String]) : Iterator[String] = for (
        block <- splitOnNewline(iterator)
        if block.split("\\n").exists(_.startsWith("(")) // needs to contain some AMR
    ) yield block

/*
    def getUlfString(string: String) : Map[String,String] = {
        // returns a map representation of Ulf's weird string representation
        assert(string.matches("^# ::(.|\\n)*"), "This is not a valid properties string")
        val split = string.replaceAll("\\n","").replaceAll("""#""","").split(" ::")
        val map = Map[String,String]()
        for (x <- split if x != "") {
            val line = x.split(" ")
            map += (("::"+line(0)) -> line.tail.mkString(" "))
        }
        return map
    }

    def toAMRTriple(input: String) : AMRTriple = {
        val lines = input.split("\\n")

        val amrstr = lines.filterNot(_.matches("^#.*")).mkString(" ")
        val tokenized = lines.filter(_.matches("^# ::tok .*"))
        assert(tokenized.size == 1, "Incorrect number of tokenized ::tok ")
        val spanlines = lines.filter(_.matches("^# ::alignments .*"))
        assert(spanlines.size > 0, "Missing alignments")

        val graph = Graph.parse(amrstr)

        val sentence = getUlfString(tokenized(0))("::tok").split(" ")
        val extras = lines.filter(_.matches("^#.*")).filterNot(_.matches("^# ::alignments .*")).mkString("\\n")

//        logger(2,graph.toString)
//        logger(2,sentence.toList.toString)

        var spans = ArrayBuffer[String]()
        var annotators = ArrayBuffer[String]()
        var annotation_dates = ArrayBuffer[String]()
        for (spanline <- spanlines) {
            val ulfstr : Map[String, String] = getUlfString(spanline)
//            logger(2,spanline)
            spans += ulfstr("::alignments")
            annotators += ulfstr("::annotator")
            annotation_dates += ulfstr("::date")
        }
        return AMRTriple(sentence, graph, spans, annotators, annotation_dates, lines.filterNot(_.matches("^#.*")).mkString("\\n"), extras)
    } */
}
class CorpusTest /* extends Suite*/ {
    def testSplitOnNewline() {
        // Two blank-separated groups should collapse into two newline-joined blocks.
        val blocks = Corpus.splitOnNewline(Iterator("a", "b", "c", "", "a", "c", "b")).toList
        assert(blocks == List("a\\nb\\nc", "a\\nc\\nb"))
    }
}
| jflanigan/jamr | src/Corpus.scala | Scala | bsd-2-clause | 2,934 |
package com.leeavital
import com.leeavital.util.ChannelBufferHelper
/**
* Created by lee on 10/11/14.
*/
class ChannelBufferHelperSpec extends UnfangledSpec {

  // Round-tripping a String through create/extract should return the same value.
  "ChannelBufferHelper operations" should "be inverse for String" in {
    val s: String = "FOOBAR12"
    val encoded = ChannelBufferHelper.create(s)
    val decoded = ChannelBufferHelper.extract[String](encoded)
    decoded should be(s)
  }

  it should "be inverse for Array[Byte]" in {
    val s: Array[Byte] = "abc".getBytes
    val encoded = ChannelBufferHelper.create(s)
    // NOTE(review): the bytes are extracted back as String (not Array[Byte]), so the
    // comparison below is List[Char] vs List[Byte] and relies on Scala's cooperative
    // numeric equality between elements — confirm this is intentional.
    val decoded = ChannelBufferHelper.extract[String](encoded)
    // we need to use .toList because array comparisons are compare-by-reference
    decoded.toList should be(s.toList)
  }
}
| leeavital/unfangled | src/test/scala/com/leeavital/unfangled/ChannelBufferHelperSpec.scala | Scala | mit | 735 |
/*
* Created on 2010/04/01
* Copyright (c) 2010-2011, Wei-ju Wu.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Wei-ju Wu nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package org.zmpp.glulx
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.BeforeAndAfterEach
import java.io._
import org.zmpp.base._
@RunWith(classOf[JUnitRunner])
class StackSpec extends FlatSpec with ShouldMatchers with BeforeAndAfterEach {

  // Minimal 36-byte Glulx image: magic "Glul", version word, zeroed layout
  // fields, a 0xff STACKSIZE, and zeroed start-func/decoding-table/checksum.
  val DummyMem = Array[Byte](
    0x47, 0x6c, 0x75, 0x6c,        // magic "Glul"
    0x00, 0x03, 0x01, 0x01,        // version
    0x00, 0x00, 0x00, 0x00,        // RAMSTART
    0x00, 0x00, 0x00, 0x00,        // EXTSTART
    0x00, 0x00, 0x00, 0x00,        // ENDMEM
    0x00, 0x00, 0x00, 0xff.toByte, // STACKSIZE
    0x00, 0x00, 0x00, 0x00,        // STARTFUNC
    0x00, 0x00, 0x00, 0x00,        // Decoding table
    0x00, 0x00, 0x00, 0x00         // Checksum
  )
  var vm = new GlulxVM

  // Reinitialize the VM from the dummy image before every example.
  override def beforeEach {
    vm.initState(DummyMem)
  }

  "GlulxVM stack" should "be initialized" in {
    vm.stackEmpty should be (true)
  }

  it should "push and pop a byte" in {
    vm.pushInt(0) // artificial frame len
    vm.pushByte(1)
    vm.sp should be (5)
    vm.topByte should be (1)
    vm.popByte should be (1)
    vm.sp should be (4)
    // 255 exercises the unsigned upper bound of a byte
    vm.pushByte(255)
    vm.topByte should be (255)
    vm.popByte should be (255)
  }

  it should "push and pop short" in {
    vm.pushInt(0) // artificial frame len
    vm.pushShort(32767)
    vm.topShort should be (32767)
    vm.pushShort(65535)
    vm.topShort should be (65535)
    // LIFO: the value pushed last comes off first
    vm.popShort should be (65535)
    vm.popShort should be (32767)
    vm.sp should be (4)
  }

  it should "push and pop int" in {
    vm.pushInt(0) // artificial frame len
    vm.pushInt(32767)
    vm.topInt should equal (32767)
    vm.pushInt(-42)
    vm.topInt should equal (-42)
    vm.popInt should equal (-42)
    vm.popInt should equal (32767)
    vm.sp should be (4)
  }

  it should "set and get a byte" in {
    vm.setByteInStack(3, 0xba)
    vm.getByteInStack(3) should be (0xba)
    // direct writes must not move the stack pointer
    vm.sp should be (0)
  }

  it should "set and get a short" in {
    vm.setShortInStack(4, 0xcafe)
    vm.getShortInStack(4) should be (0xcafe)
    vm.sp should be (0)
  }

  it should "set and get a int" in {
    vm.setIntInStack(4, 0xdeadbeef)
    vm.getIntInStack(4) should be (0xdeadbeef)
    vm.sp should be (0)
  }
}
| logicmoo/zmpp2 | zmpp-glulx/src/test/scala/org/zmpp/glulx/StackTest.scala | Scala | bsd-3-clause | 3,998 |
package scommons.client.ui
import scommons.react._
import scommons.react.test.TestSpec
import scommons.react.test.util.ShallowRendererUtils
/** Tests for [[ImageLabelWrapper]]: image-only, image+aligned text, image+unaligned text. */
class ImageLabelWrapperSpec extends TestSpec with ShallowRendererUtils {

  /**
   * Shallow-renders an ImageLabelWrapper with the given arguments inside a
   * throwaway functional component (deduplicates the boilerplate that was
   * previously repeated in every test).
   */
  private def renderWrapper(image: String, text: Option[String], alignText: Boolean = true) = {
    val wrapper = new FunctionComponent[Unit] {
      protected def render(props: Props): ReactElement = {
        <.>()(
          ImageLabelWrapper(image, text, alignText = alignText)
        )
      }
    }
    shallowRender(<(wrapper()).empty)
  }

  it should "render only image" in {
    //given
    val image = ButtonImagesCss.folder

    //when
    val result = renderWrapper(image, None)

    //then
    assertNativeComponent(result, <.>()(
      <.img(^.className := image, ^.src := "")()
    ))
  }

  it should "render image and text aligned" in {
    //given
    val image = ButtonImagesCss.folder
    val text = "some text"

    //when
    val result = renderWrapper(image, Some(text))

    //then
    assertNativeComponent(result, <.>()(
      <.img(^.className := image, ^.src := "")(),
      <.span(^.style := Map(
        "paddingLeft" -> "3px",
        "verticalAlign" -> "middle"
      ))(text)
    ))
  }

  it should "render image and text not aligned" in {
    //given
    val image = ButtonImagesCss.folder
    val text = "some text"

    //when
    val result = renderWrapper(image, Some(text), alignText = false)

    //then
    assertNativeComponent(result, <.>()(
      <.img(^.className := image, ^.src := "")(),
      <.span(^.style := Map(
        "paddingLeft" -> "3px"
      ))(text)
    ))
  }
}
| viktor-podzigun/scommons | ui/src/test/scala/scommons/client/ui/ImageLabelWrapperSpec.scala | Scala | apache-2.0 | 1,891 |
package edu.rice.habanero.benchmarks.uct
import java.util.Random
import edu.rice.habanero.actors.{GparsActor, GparsActorState, GparsPool}
import edu.rice.habanero.benchmarks.uct.UctConfig._
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
/**
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
*/
object UctGparsActorBenchmark {
  /** JVM entry point: hands control to the shared benchmark runner harness. */
  def main(args: Array[String]) {
    BenchmarkRunner.runBenchmark(args, new UctGparsActorBenchmark)
  }
  /** Benchmark lifecycle wiring: configure, run one tree build per iteration, clean up. */
  private final class UctGparsActorBenchmark extends Benchmark {
    // Parse benchmark parameters from the command line into UctConfig.
    def initialize(args: Array[String]) {
      UctConfig.parseArgs(args)
    }
    def printArgInfo() {
      UctConfig.printArgs()
    }
    // One measured iteration: start a fresh root actor, kick off tree
    // generation, and block until every actor has terminated.
    def runIteration() {
      val rootActor = new RootActor()
      rootActor.start()
      rootActor.send(GenerateTreeMessage.ONLY)
      GparsActorState.awaitTermination()
    }
    // Shut the GPars pool down only once, after the final iteration.
    def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double) {
      if (lastIteration) {
        GparsPool.shutdown()
      }
    }
  }
/**
* @author xinghuizhao
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
*/
  /**
   * Root of the UCT tree. Seeds the first BINOMIAL_PARAM children, arbitrates
   * every "may I spawn children?" request so the global budget MAX_NODES is
   * respected, and — once the budget is exhausted — prints the final tree
   * statistics, runs the traverse phase, and starts termination.
   */
  protected class RootActor extends GparsActor[AnyRef] {
    // Fixed seed keeps the generated tree shape reproducible across runs.
    private final val ran: Random = new Random(2)
    private var height: Int = 1
    private var size: Int = 1
    private final val children = new Array[GparsActor[AnyRef]](UctConfig.BINOMIAL_PARAM)
    private final val hasGrantChildren = new Array[Boolean](UctConfig.BINOMIAL_PARAM)
    private var traversed: Boolean = false
    private var finalSizePrinted: Boolean = false
    // Message dispatch; unrecognized messages are silently ignored (final case).
    override def process(theMsg: AnyRef) {
      theMsg match {
        case _: UctConfig.GenerateTreeMessage =>
          generateTree()
        case grantMessage: UctConfig.UpdateGrantMessage =>
          updateGrant(grantMessage.childId)
        case booleanMessage: UctConfig.ShouldGenerateChildrenMessage =>
          val sender: GparsActor[AnyRef] = booleanMessage.sender.asInstanceOf[GparsActor[AnyRef]]
          checkGenerateChildrenRequest(sender, booleanMessage.childHeight)
        case _: UctConfig.PrintInfoMessage =>
          printInfo()
        case _: UctConfig.TerminateMessage =>
          terminateMe()
        case _ =>
      }
    }
    /**
     * This message is called externally to create the BINOMIAL_PARAM tree
     */
    def generateTree() {
      height += 1
      val computationSize: Int = getNextNormal(UctConfig.AVG_COMP_SIZE, UctConfig.STDEV_COMP_SIZE)
      // Create and start all direct children before kicking any of them off,
      // so every slot of `children` is populated first.
      var i: Int = 0
      while (i < UctConfig.BINOMIAL_PARAM) {
        hasGrantChildren(i) = false
        children(i) = NodeActor.createNodeActor(this, this, height, size + i, computationSize, urgent = false)
        i += 1
      }
      size += UctConfig.BINOMIAL_PARAM
      var j: Int = 0
      while (j < UctConfig.BINOMIAL_PARAM) {
        children(j).send(TryGenerateChildrenMessage.ONLY)
        j += 1
      }
    }
    /**
     * This message is called by a child node before generating children;
     * the child may generate children only if this message returns true
     *
     * @param childName The child name
     * @param childHeight The height of the child in the tree
     */
    def checkGenerateChildrenRequest(childName: GparsActor[AnyRef], childHeight: Int) {
      // Grant only while the whole sibling batch still fits in the node budget.
      if (size + UctConfig.BINOMIAL_PARAM <= UctConfig.MAX_NODES) {
        val moreChildren: Boolean = ran.nextBoolean
        if (moreChildren) {
          val childComp: Int = getNextNormal(UctConfig.AVG_COMP_SIZE, UctConfig.STDEV_COMP_SIZE)
          // A URGENT_NODE_PERCENT share of grants marks one child as "urgent".
          val randomInt: Int = ran.nextInt(100)
          if (randomInt > UctConfig.URGENT_NODE_PERCENT) {
            childName.send(new UctConfig.GenerateChildrenMessage(size, childComp))
          } else {
            childName.send(new UctConfig.UrgentGenerateChildrenMessage(ran.nextInt(UctConfig.BINOMIAL_PARAM), size, childComp))
          }
          size += UctConfig.BINOMIAL_PARAM
          if (childHeight + 1 > height) {
            height = childHeight + 1
          }
        }
        else {
          if (childHeight > height) {
            height = childHeight
          }
        }
      }
      else {
        // Budget exhausted: report stats once, traverse once, then terminate.
        if (!finalSizePrinted) {
          System.out.println("final size= " + size)
          System.out.println("final height= " + height)
          finalSizePrinted = true
        }
        if (!traversed) {
          traversed = true
          traverse()
        }
        terminateMe()
      }
    }
    /**
     * This method is called by getBoolean in order to generate computation times for actors, which
     * follows a normal distribution with mean value and a std value
     */
    def getNextNormal(pMean: Int, pDev: Int): Int = {
      // Resample until the rounded draw is strictly positive.
      var result: Int = 0
      while (result <= 0) {
        val tempDouble: Double = ran.nextGaussian * pDev + pMean
        result = Math.round(tempDouble).asInstanceOf[Int]
      }
      result
    }
    /**
     * This message is called by a child node to indicate that it has children
     */
    def updateGrant(childId: Int) {
      hasGrantChildren(childId) = true
    }
    /**
     * This is the method for traversing the tree
     */
    def traverse() {
      var i: Int = 0
      while (i < UctConfig.BINOMIAL_PARAM) {
        children(i).send(TraverseMessage.ONLY)
        i += 1
      }
    }
    def printInfo() {
      System.out.println("0 0 children starts 1")
      var i: Int = 0
      while (i < UctConfig.BINOMIAL_PARAM) {
        children(i).send(PrintInfoMessage.ONLY)
        i += 1
      }
    }
    def terminateMe() {
      // Guard against double-termination, then fan the terminate out to children.
      if (hasExited()) {
        return
      }
      var i: Int = 0
      while (i < UctConfig.BINOMIAL_PARAM) {
        children(i).send(TerminateMessage.ONLY)
        i += 1
      }
      exit()
    }
  }
/**
* @author xinghuizhao
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
*/
protected object NodeActor {
def createNodeActor(parent: GparsActor[AnyRef], root: GparsActor[AnyRef], height: Int, id: Int, comp: Int, urgent: Boolean): NodeActor = {
val nodeActor: NodeActor = new NodeActor(parent, root, height, id, comp, urgent)
nodeActor.start()
nodeActor
}
private final val dummy: Int = 40000
}
  /**
   * Interior tree node: asks the root for permission to expand, spawns its own
   * BINOMIAL_PARAM children when granted (optionally marking one child urgent),
   * and participates in the traverse / print / terminate phases. Urgent nodes
   * log their traversal with a distinct message in urgentTraverse().
   */
  protected class NodeActor(myParent: GparsActor[AnyRef], myRoot: GparsActor[AnyRef], myHeight: Int, myId: Int, myCompSize: Int, isUrgent: Boolean) extends GparsActor[AnyRef] {
    private var urgentChild: Int = 0
    private var hasChildren: Boolean = false
    private final val children = new Array[GparsActor[AnyRef]](UctConfig.BINOMIAL_PARAM)
    private final val hasGrantChildren = new Array[Boolean](UctConfig.BINOMIAL_PARAM)
    // Message dispatch; unrecognized messages are silently ignored (final case).
    override def process(theMsg: AnyRef) {
      theMsg match {
        case _: UctConfig.TryGenerateChildrenMessage =>
          tryGenerateChildren()
        case childrenMessage: UctConfig.GenerateChildrenMessage =>
          generateChildren(childrenMessage.currentId, childrenMessage.compSize)
        case childrenMessage: UctConfig.UrgentGenerateChildrenMessage =>
          generateUrgentChildren(childrenMessage.urgentChildId, childrenMessage.currentId, childrenMessage.compSize)
        case grantMessage: UctConfig.UpdateGrantMessage =>
          updateGrant(grantMessage.childId)
        case _: UctConfig.TraverseMessage =>
          traverse()
        case _: UctConfig.UrgentTraverseMessage =>
          urgentTraverse()
        case _: UctConfig.PrintInfoMessage =>
          printInfo()
        case _: UctConfig.GetIdMessage =>
          // NOTE(review): getId's result is discarded here and no reply is sent
          // to the requester — confirm this is intended.
          getId
        case _: UctConfig.TerminateMessage =>
          terminateMe()
        case _ =>
      }
    }
    /**
     * This message is called by parent node, try to generate children of this node.
     * If the "getBoolean" message returns true, the node is allowed to generate BINOMIAL_PARAM children
     */
    def tryGenerateChildren() {
      // Burn a fixed amount of work (UctConfig.loop) before asking the root.
      UctConfig.loop(100, NodeActor.dummy)
      myRoot.send(new UctConfig.ShouldGenerateChildrenMessage(this, myHeight))
    }
    // Spawn BINOMIAL_PARAM ordinary children, record the grant with the parent,
    // then ask each child in turn to try expanding.
    def generateChildren(currentId: Int, compSize: Int) {
      val myArrayId: Int = myId % UctConfig.BINOMIAL_PARAM
      myParent.send(new UctConfig.UpdateGrantMessage(myArrayId))
      val childrenHeight: Int = myHeight + 1
      val idValue: Int = currentId
      var i: Int = 0
      while (i < UctConfig.BINOMIAL_PARAM) {
        children(i) = NodeActor.createNodeActor(this, myRoot, childrenHeight, idValue + i, compSize, urgent = false)
        i += 1
      }
      hasChildren = true
      var j: Int = 0
      while (j < UctConfig.BINOMIAL_PARAM) {
        children(j).send(TryGenerateChildrenMessage.ONLY)
        j += 1
      }
    }
    // Same as generateChildren, but the child at index urgentChildId is created
    // with the urgent flag set.
    def generateUrgentChildren(urgentChildId: Int, currentId: Int, compSize: Int) {
      val myArrayId: Int = myId % UctConfig.BINOMIAL_PARAM
      myParent.send(new UctConfig.UpdateGrantMessage(myArrayId))
      val childrenHeight: Int = myHeight + 1
      val idValue: Int = currentId
      urgentChild = urgentChildId
      var i: Int = 0
      while (i < UctConfig.BINOMIAL_PARAM) {
        children(i) = NodeActor.createNodeActor(this, myRoot, childrenHeight, idValue + i, compSize, i == urgentChild)
        i += 1
      }
      hasChildren = true
      var j: Int = 0
      while (j < UctConfig.BINOMIAL_PARAM) {
        children(j).send(TryGenerateChildrenMessage.ONLY)
        j += 1
      }
    }
    /**
     * This message is called by a child node to indicate that it has children
     */
    def updateGrant(childId: Int) {
      hasGrantChildren(childId) = true
    }
    /**
     * This message is called by parent while doing a traverse
     */
    def traverse() {
      UctConfig.loop(myCompSize, NodeActor.dummy)
      if (hasChildren) {
        var i: Int = 0
        while (i < UctConfig.BINOMIAL_PARAM) {
          children(i).send(TraverseMessage.ONLY)
          i += 1
        }
      }
    }
    /**
     * This message is called by parent while doing traverse, if this node is an urgent node
     */
    def urgentTraverse() {
      UctConfig.loop(myCompSize, NodeActor.dummy)
      if (hasChildren) {
        // NOTE(review): urgentChild is initialized to 0 and never assigned -1 in
        // this class, so the != -1 guard looks defensive/dead — confirm.
        if (urgentChild != -1) {
          var i: Int = 0
          while (i < UctConfig.BINOMIAL_PARAM) {
            if (i != urgentChild) {
              children(i).send(TraverseMessage.ONLY)
            } else {
              children(urgentChild).send(UrgentTraverseMessage.ONLY)
            }
            i += 1
          }
        } else {
          var i: Int = 0
          while (i < UctConfig.BINOMIAL_PARAM) {
            children(i).send(TraverseMessage.ONLY)
            i += 1
          }
        }
      }
      if (isUrgent) {
        System.out.println("urgent traverse node " + myId + " " + System.currentTimeMillis)
      } else {
        System.out.println(myId + " " + System.currentTimeMillis)
      }
    }
    def printInfo() {
      if (isUrgent) {
        System.out.print("Urgent......")
      }
      if (hasChildren) {
        System.out.println(myId + " " + myCompSize + " children starts ")
        var i: Int = 0
        while (i < UctConfig.BINOMIAL_PARAM) {
          children(i).send(PrintInfoMessage.ONLY)
          i += 1
        }
      } else {
        System.out.println(myId + " " + myCompSize)
      }
    }
    def getId: Int = {
      myId
    }
    def terminateMe() {
      // Guard against double-termination, then fan the terminate out to children.
      if (hasExited()) {
        return
      }
      if (hasChildren) {
        var i: Int = 0
        while (i < UctConfig.BINOMIAL_PARAM) {
          children(i).send(TerminateMessage.ONLY)
          i += 1
        }
      }
      exit()
    }
  }
}
| shamsmahmood/savina | src/main/scala/edu/rice/habanero/benchmarks/uct/UctGparsActorBenchmark.scala | Scala | gpl-2.0 | 11,552 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.frs102.calculations.IntangibleAssetsCalculator
import uk.gov.hmrc.ct.accounts.frs102.retriever.{Frs102AccountsBoxRetriever, FullAccountsBoxRetriever}
import uk.gov.hmrc.ct.box._
/**
 * AC120 "Amortisation on disposals" — an optional monetary input box used by
 * the FRS 102 intangible assets calculations.
 */
case class AC120(value: Option[Int]) extends CtBoxIdentifier(name = "Amortisation on disposals")
  with CtOptionalInteger
  with Input
  with ValidatableBox[Frs102AccountsBoxRetriever]
  with Validators
  with Debit {

  // Value is optional, but when present it must be a non-negative money amount.
  override def validate(boxRetriever: Frs102AccountsBoxRetriever): Set[CtValidation] = {
    collectErrors(
      validateMoney(value, min = 0)
    )
  }
}
/** For full accounts, AC120 is calculated from its sub-boxes AC120A and AC120B. */
object AC120 extends Calculated[AC120, FullAccountsBoxRetriever]
  with IntangibleAssetsCalculator {

  override def calculate(boxRetriever: FullAccountsBoxRetriever): AC120 = {
    calculateAC120(boxRetriever.ac120A(), boxRetriever.ac120B())
  }
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC120.scala | Scala | apache-2.0 | 1,489 |
/*
* Copyright (c) 2012-15 Crown Copyright
* Animal Health and Veterinary Laboratories Agency
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sampler.abc.actor.children.flushing
import sampler.io.Logging
import sampler.abc.ABCConfig
import sampler.abc.Weighted
import sampler._
import sampler.empirical.EmpiricalImplicits
trait ToleranceCalculator extends Logging {

  /**
   * Proposes the tolerance for the next ABC generation: the configured
   * percentile of the particles' mean scores. Falls back to the current
   * tolerance (with a warning) when the proposal is zero or would loosen
   * the tolerance instead of tightening it.
   */
  def apply[P](weighted: Seq[Weighted[P]], config: ABCConfig, currentTolerance: Double): Double = {
    val meanScores = weighted.map(_.meanScore)
    val proposed = meanScores.toEmpirical.percentile(config.toleranceDescentPercentile)

    if (proposed == 0) {
      warn("New tolerance evaluated to 0. Will use old tolerance again.")
      currentTolerance
    } else if (proposed > currentTolerance) {
      warn("New tolerance is greater than old tolerance. Will use old tolerance again.")
      currentTolerance
    } else {
      proposed
    }
  }
}
object ToleranceCalculator
extends ToleranceCalculator
with EmpiricalImplicits | tearne/Sampler | sampler-abc/src/main/scala/sampler/abc/actor/children/flushing/ToleranceCalculator.scala | Scala | apache-2.0 | 1,529 |
package com.sksamuel.elastic4s.analyzers
/** A reference to an Elasticsearch analyzer by its registered name. */
class Analyzer(val name: String)
// Built-in (non-language) analyzers.
case object WhitespaceAnalyzer extends Analyzer("whitespace")
case object StandardAnalyzer extends Analyzer("standard")
case object SimpleAnalyzer extends Analyzer("simple")
case object StopAnalyzer extends Analyzer("stop")
case object KeywordAnalyzer extends Analyzer("keyword")
case object PatternAnalyzer extends Analyzer("pattern")
@deprecated("Use the language-specific analyzer in modules/analysis instead", "5.0.0")
case object SnowballAnalyzer extends Analyzer("snowball")
/** References an analyzer by a caller-supplied name, e.g. one defined in the index settings. */
case class CustomAnalyzer(override val name: String) extends Analyzer(name)
/** Base type for the built-in language analyzers listed below. */
class LanguageAnalyzer(name: String) extends Analyzer(name: String)
case object ArabicLanguageAnalyzer extends LanguageAnalyzer("arabic")
case object ArmenianLanguageAnalyzer extends LanguageAnalyzer("armenian")
case object BasqueLanguageAnalyzer extends LanguageAnalyzer("basque")
case object BrazilianLanguageAnalyzer extends LanguageAnalyzer("brazilian")
case object BulgarianLanguageAnalyzer extends LanguageAnalyzer("bulgarian")
case object CatalanLanguageAnalyzer extends LanguageAnalyzer("catalan")
case object ChineseLanguageAnalyzer extends LanguageAnalyzer("chinese")
case object CjkLanguageAnalyzer extends LanguageAnalyzer("cjk")
case object CzechLanguageAnalyzer extends LanguageAnalyzer("czech")
case object DanishLanguageAnalyzer extends LanguageAnalyzer("danish")
case object DutchLanguageAnalyzer extends LanguageAnalyzer("dutch")
case object EnglishLanguageAnalyzer extends LanguageAnalyzer("english")
case object FinnishLanguageAnalyzer extends LanguageAnalyzer("finnish")
case object FrenchLanguageAnalyzer extends LanguageAnalyzer("french")
case object GalicianLanguageAnalyzer extends LanguageAnalyzer("galician")
case object GermanLanguageAnalyzer extends LanguageAnalyzer("german")
case object GreekLanguageAnalyzer extends LanguageAnalyzer("greek")
case object HindiLanguageAnalyzer extends LanguageAnalyzer("hindi")
case object HungarianLanguageAnalyzer extends LanguageAnalyzer("hungarian")
case object IndonesianLanguageAnalyzer extends LanguageAnalyzer("indonesian")
case object IrishLanguageAnalyzer extends LanguageAnalyzer("irish")
case object ItalianLanguageAnalyzer extends LanguageAnalyzer("italian")
case object LatvianLanguageAnalyzer extends LanguageAnalyzer("latvian")
case object LithuanianLanguageAnalyzer extends LanguageAnalyzer("lithuanian")
case object NorwegianLanguageAnalyzer extends LanguageAnalyzer("norwegian")
case object PersianLanguageAnalyzer extends LanguageAnalyzer("persian")
case object PortugueseLanguageAnalyzer extends LanguageAnalyzer("portuguese")
case object RomanianLanguageAnalyzer extends LanguageAnalyzer("romanian")
case object RussianLanguageAnalyzer extends LanguageAnalyzer("russian")
case object SoraniLanguageAnalyzer extends LanguageAnalyzer("sorani")
case object SpanishLanguageAnalyzer extends LanguageAnalyzer("spanish")
case object SwedishLanguageAnalyzer extends LanguageAnalyzer("swedish")
case object TurkishLanguageAnalyzer extends LanguageAnalyzer("turkish")
case object ThaiLanguageAnalyzer extends LanguageAnalyzer("thai")
| Tecsisa/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/analyzers/Analyzer.scala | Scala | apache-2.0 | 3,270 |
/**
* This file is part of the TA Buddy project.
* Copyright (c) 2013-2015 Alexey Aksenov ezh@ezh.msk.ru
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Affero General Global License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED
* BY Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS»,
* Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS» DISCLAIMS
* THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Global License for more details.
* You should have received a copy of the GNU Affero General Global License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://www.gnu.org/licenses/agpl.html
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Global License.
*
* In accordance with Section 7(b) of the GNU Affero General Global License,
* you must retain the producer line in every report, form or document
* that is created or manipulated using TA Buddy.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the TA Buddy software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers,
* serving files in a web or/and network application,
* shipping TA Buddy with a closed source product.
*
* For more information, please contact Digimead Team at this
* address: ezh@ezh.msk.ru
*/
package org.digimead.tabuddy.desktop.element.editor.operation
import java.util.concurrent.{ CancellationException, Exchanger }
import org.digimead.digi.lib.log.api.XLoggable
import org.digimead.tabuddy.desktop.core.definition.Operation
import org.digimead.tabuddy.desktop.core.support.App
import org.digimead.tabuddy.desktop.logic
import org.digimead.tabuddy.desktop.logic.payload.marker.GraphMarker
import org.digimead.tabuddy.model.element.Element
import org.eclipse.core.runtime.{ IAdaptable, IProgressMonitor }
/**
* 'Delete the element' operation.
*/
class OperationDeleteElement extends logic.operation.OperationDeleteElement with XLoggable {
  /**
   * Delete the element by detaching its node from the graph.
   *
   * @param element element for delete
   * @param interactive whether the deletion was user-initiated
   * @return Unit — the trailing Operation.Result.OK() value is discarded
   *         because the declared return type is Unit.
   */
  def apply(element: Element, interactive: Boolean): Unit = {
    log.info(s"Delete element ${element}")
    element.eNode.detach()
    Operation.Result.OK()
  }
  /**
   * Create 'Delete the element' operation.
   *
   * @param element element for delete
   * @return 'Delete the element' operation
   */
  def operation(element: Element, interactive: Boolean) =
    new Implemetation(element, interactive)

  // NOTE(review): "Implemetation" is a typo for "Implementation", but the class
  // is public and may be referenced elsewhere, so it is not renamed here.
  class Implemetation(element: Element, interactive: Boolean)
    extends logic.operation.OperationDeleteElement.Abstract(element, interactive) with XLoggable {
    @volatile protected var allowExecute = true

    override def canExecute() = allowExecute
    // This operation cannot be redone or undone; redo/undo below throw.
    override def canRedo() = false
    override def canUndo() = false

    // Runs the deletion; a CancellationException maps to a Cancel result.
    protected def execute(monitor: IProgressMonitor, info: IAdaptable): Operation.Result[Unit] = {
      try {
        Operation.Result.OK(Option(OperationDeleteElement.this(element, interactive)))
      } catch {
        case e: CancellationException ⇒
          Operation.Result.Cancel()
      }
    }
    protected def redo(monitor: IProgressMonitor, info: IAdaptable): Operation.Result[Unit] =
      throw new UnsupportedOperationException
    protected def undo(monitor: IProgressMonitor, info: IAdaptable): Operation.Result[Unit] =
      throw new UnsupportedOperationException
  }
}
| digimead/digi-TABuddy-desktop | part-element-editor/src/main/scala/org/digimead/tabuddy/desktop/element/editor/operation/OperationDeleteElement.scala | Scala | agpl-3.0 | 4,338 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.parquet
import org.scalatest.BeforeAndAfterAll
import parquet.filter2.predicate.Operators._
import parquet.filter2.predicate.{FilterPredicate, Operators}
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions.{Attribute, Cast, Literal, Predicate, Row}
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.sources.LogicalRelation
import org.apache.spark.sql.test.TestSQLContext
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Column, DataFrame, QueryTest, SQLConf}
/**
* A test suite that tests Parquet filter2 API based filter pushdown optimization.
*
* NOTE:
*
* 1. `!(a cmp b)` is always transformed to its negated form `a cmp' b` by the
* `BooleanSimplification` optimization rule whenever possible. As a result, predicate `!(a < 1)`
* results in a `GtEq` filter predicate rather than a `Not`.
*
* 2. `Tuple1(Option(x))` is used together with `AnyVal` types like `Int` to ensure the inferred
* data type is nullable.
*/
class ParquetFilterSuiteBase extends QueryTest with ParquetTest {
  // SQLContext backing this suite — presumably required by the ParquetTest trait; confirm.
  val sqlContext = TestSQLContext
  // Runs `predicate` against `df` with Parquet filter push-down enabled, asserts
  // that planning actually produced a push-down filter of `filterClass`, then
  // verifies the query result via `checker`.
  private def checkFilterPredicate(
      df: DataFrame,
      predicate: Predicate,
      filterClass: Class[_ <: FilterPredicate],
      checker: (DataFrame, Seq[Row]) => Unit,
      expected: Seq[Row]): Unit = {
    // Select every attribute the predicate references so the filter survives pruning.
    val output = predicate.collect { case a: Attribute => a }.distinct
    withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED -> "true") {
      val query = df
        .select(output.map(e => Column(e)): _*)
        .where(Column(predicate))
      // The pushed predicate can appear either on the old ParquetTableScan operator
      // or on the data-source ParquetRelation2 path; look in both places.
      val maybeAnalyzedPredicate = {
        val forParquetTableScan = query.queryExecution.executedPlan.collect {
          case plan: ParquetTableScan => plan.columnPruningPred
        }.flatten.reduceOption(_ && _)
        val forParquetDataSource = query.queryExecution.optimizedPlan.collect {
          case PhysicalOperation(_, filters, LogicalRelation(_: ParquetRelation2)) => filters
        }.flatten.reduceOption(_ && _)
        forParquetTableScan.orElse(forParquetDataSource)
      }
      assert(maybeAnalyzedPredicate.isDefined)
      maybeAnalyzedPredicate.foreach { pred =>
        val maybeFilter = ParquetFilters.createFilter(pred)
        assert(maybeFilter.isDefined, s"Couldn't generate filter predicate for $pred")
        maybeFilter.foreach { f =>
          // Doesn't bother checking type parameters here (e.g. `Eq[Integer]`)
          assert(f.getClass === filterClass)
        }
      }
      checker(query, expected)
    }
  }
  // Convenience overload: checks rows with the standard `checkAnswer` comparator.
  private def checkFilterPredicate
      (predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: Seq[Row])
      (implicit df: DataFrame): Unit = {
    checkFilterPredicate(df, predicate, filterClass, checkAnswer(_, _: Seq[Row]), expected)
  }
  // Convenience overload for a single expected scalar value.
  private def checkFilterPredicate[T]
      (predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: T)
      (implicit df: DataFrame): Unit = {
    checkFilterPredicate(predicate, filterClass, Seq(Row(expected)))(df)
  }
  // Binary columns need a custom comparator: Array[Byte] equality is by reference,
  // so both sides are rendered to comma-joined strings and compared sorted.
  private def checkBinaryFilterPredicate
      (predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: Seq[Row])
      (implicit df: DataFrame): Unit = {
    def checkBinaryAnswer(df: DataFrame, expected: Seq[Row]) = {
      assertResult(expected.map(_.getAs[Array[Byte]](0).mkString(",")).toSeq.sorted) {
        df.map(_.getAs[Array[Byte]](0).mkString(",")).collect().toSeq.sorted
      }
    }
    checkFilterPredicate(df, predicate, filterClass, checkBinaryAnswer _, expected)
  }
  // Convenience overload for a single expected byte-array value.
  private def checkBinaryFilterPredicate
      (predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: Array[Byte])
      (implicit df: DataFrame): Unit = {
    checkBinaryFilterPredicate(predicate, filterClass, Seq(Row(expected)))(df)
  }
  // Each pushdown test below materializes a tiny Parquet-backed DataFrame and runs each
  // predicate through checkFilterPredicate, which asserts both the query answer and the
  // Parquet FilterPredicate class the predicate compiles to (via ParquetFilters.createFilter).
  test("filter pushdown - boolean") {
    withParquetDataFrame((true :: false :: Nil).map(b => Tuple1.apply(Option(b)))) { implicit df =>
      checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], Seq(Row(true), Row(false)))
      checkFilterPredicate('_1 === true, classOf[Eq[_]], true)
      checkFilterPredicate('_1 !== true, classOf[NotEq[_]], false)
    }
  }
  // Short columns: every predicate is written against Cast('_1, IntegerType) rather than
  // the raw short column; the test asserts the cast still allows filter pushdown.
  test("filter pushdown - short") {
    withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toShort)))) { implicit df =>
      checkFilterPredicate(Cast('_1, IntegerType) === 1, classOf[Eq[_]], 1)
      checkFilterPredicate(
        Cast('_1, IntegerType) !== 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
      checkFilterPredicate(Cast('_1, IntegerType) < 2, classOf[Lt[_]], 1)
      checkFilterPredicate(Cast('_1, IntegerType) > 3, classOf[Gt[_]], 4)
      checkFilterPredicate(Cast('_1, IntegerType) <= 1, classOf[LtEq[_]], 1)
      checkFilterPredicate(Cast('_1, IntegerType) >= 4, classOf[GtEq[_]], 4)
      // Literal-on-the-left forms: Lt/Gt classes are those of the equivalent
      // column-on-the-left comparison after normalization.
      checkFilterPredicate(Literal(1) === Cast('_1, IntegerType), classOf[Eq[_]], 1)
      checkFilterPredicate(Literal(2) > Cast('_1, IntegerType), classOf[Lt[_]], 1)
      checkFilterPredicate(Literal(3) < Cast('_1, IntegerType), classOf[Gt[_]], 4)
      checkFilterPredicate(Literal(1) >= Cast('_1, IntegerType), classOf[LtEq[_]], 1)
      checkFilterPredicate(Literal(4) <= Cast('_1, IntegerType), classOf[GtEq[_]], 4)
      // Logical combinators: negation, conjunction, disjunction.
      checkFilterPredicate(!(Cast('_1, IntegerType) < 4), classOf[GtEq[_]], 4)
      checkFilterPredicate(
        Cast('_1, IntegerType) > 2 && Cast('_1, IntegerType) < 4, classOf[Operators.And], 3)
      checkFilterPredicate(
        Cast('_1, IntegerType) < 2 || Cast('_1, IntegerType) > 3,
        classOf[Operators.Or],
        Seq(Row(1), Row(4)))
    }
  }
  // The integer/long/float/double tests share the same comparison matrix over the
  // values 1..4: null checks, all six comparison operators (column-left and
  // literal-left), negation, conjunction, and disjunction.
  test("filter pushdown - integer") {
    withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i)))) { implicit df =>
      checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
      checkFilterPredicate('_1 !== 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
      checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
      checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
      checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
      checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
      checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
      checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
      checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
      checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
      checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
      checkFilterPredicate('_1 > 2 && '_1 < 4, classOf[Operators.And], 3)
      checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
    }
  }
  test("filter pushdown - long") {
    withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toLong)))) { implicit df =>
      checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
      checkFilterPredicate('_1 !== 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
      checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
      checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
      checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
      checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
      checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
      checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
      checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
      checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
      checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
      checkFilterPredicate('_1 > 2 && '_1 < 4, classOf[Operators.And], 3)
      checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
    }
  }
  test("filter pushdown - float") {
    withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toFloat)))) { implicit df =>
      checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
      checkFilterPredicate('_1 !== 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
      checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
      checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
      checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
      checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
      checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
      checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
      checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
      checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
      checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
      checkFilterPredicate('_1 > 2 && '_1 < 4, classOf[Operators.And], 3)
      checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
    }
  }
  test("filter pushdown - double") {
    withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toDouble)))) { implicit df =>
      checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
      checkFilterPredicate('_1 !== 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
      checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
      checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
      checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
      checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
      checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
      checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
      checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
      checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
      checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
      checkFilterPredicate('_1 > 2 && '_1 < 4, classOf[Operators.And], 3)
      checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
    }
  }
  // Same comparison matrix as the numeric tests, but over the strings "1".."4"
  // (note: values are NOT wrapped in Option here, unlike the numeric tests).
  test("filter pushdown - string") {
    withParquetDataFrame((1 to 4).map(i => Tuple1(i.toString))) { implicit df =>
      checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkFilterPredicate(
        '_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(i => Row.apply(i.toString)))
      checkFilterPredicate('_1 === "1", classOf[Eq[_]], "1")
      checkFilterPredicate(
        '_1 !== "1", classOf[NotEq[_]], (2 to 4).map(i => Row.apply(i.toString)))
      checkFilterPredicate('_1 < "2", classOf[Lt[_]], "1")
      checkFilterPredicate('_1 > "3", classOf[Gt[_]], "4")
      checkFilterPredicate('_1 <= "1", classOf[LtEq[_]], "1")
      checkFilterPredicate('_1 >= "4", classOf[GtEq[_]], "4")
      checkFilterPredicate(Literal("1") === '_1, classOf[Eq[_]], "1")
      checkFilterPredicate(Literal("2") > '_1, classOf[Lt[_]], "1")
      checkFilterPredicate(Literal("3") < '_1, classOf[Gt[_]], "4")
      checkFilterPredicate(Literal("1") >= '_1, classOf[LtEq[_]], "1")
      checkFilterPredicate(Literal("4") <= '_1, classOf[GtEq[_]], "4")
      checkFilterPredicate(!('_1 < "4"), classOf[GtEq[_]], "4")
      checkFilterPredicate('_1 > "2" && '_1 < "4", classOf[Operators.And], "3")
      checkFilterPredicate('_1 < "2" || '_1 > "3", classOf[Operators.Or], Seq(Row("1"), Row("4")))
    }
  }
  // Binary columns use checkBinaryFilterPredicate, which compares byte-array contents
  // instead of relying on reference equality of Array[Byte] rows.
  test("filter pushdown - binary") {
    // Local sugar: `i.b` is the UTF-8 bytes of i's decimal string.
    implicit class IntToBinary(int: Int) {
      def b: Array[Byte] = int.toString.getBytes("UTF-8")
    }
    withParquetDataFrame((1 to 4).map(i => Tuple1(i.b))) { implicit df =>
      checkBinaryFilterPredicate('_1 === 1.b, classOf[Eq[_]], 1.b)
      checkBinaryFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkBinaryFilterPredicate(
        '_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(i => Row.apply(i.b)).toSeq)
      checkBinaryFilterPredicate(
        '_1 !== 1.b, classOf[NotEq[_]], (2 to 4).map(i => Row.apply(i.b)).toSeq)
      checkBinaryFilterPredicate('_1 < 2.b, classOf[Lt[_]], 1.b)
      checkBinaryFilterPredicate('_1 > 3.b, classOf[Gt[_]], 4.b)
      checkBinaryFilterPredicate('_1 <= 1.b, classOf[LtEq[_]], 1.b)
      checkBinaryFilterPredicate('_1 >= 4.b, classOf[GtEq[_]], 4.b)
      checkBinaryFilterPredicate(Literal(1.b) === '_1, classOf[Eq[_]], 1.b)
      checkBinaryFilterPredicate(Literal(2.b) > '_1, classOf[Lt[_]], 1.b)
      checkBinaryFilterPredicate(Literal(3.b) < '_1, classOf[Gt[_]], 4.b)
      checkBinaryFilterPredicate(Literal(1.b) >= '_1, classOf[LtEq[_]], 1.b)
      checkBinaryFilterPredicate(Literal(4.b) <= '_1, classOf[GtEq[_]], 4.b)
      checkBinaryFilterPredicate(!('_1 < 4.b), classOf[GtEq[_]], 4.b)
      checkBinaryFilterPredicate('_1 > 2.b && '_1 < 4.b, classOf[Operators.And], 3.b)
      checkBinaryFilterPredicate(
        '_1 < 2.b || '_1 > 3.b, classOf[Operators.Or], Seq(Row(1.b), Row(4.b)))
    }
  }
}
// Runs the shared ParquetFilterSuiteBase tests with PARQUET_USE_DATA_SOURCE_API forced
// to "true" for the whole suite.
class ParquetDataSourceOnFilterSuite extends ParquetFilterSuiteBase with BeforeAndAfterAll {
  // Snapshot of the flag, taken at construction time so afterAll can restore it.
  val originalConf = sqlContext.conf.parquetUseDataSourceApi
  override protected def beforeAll(): Unit = {
    sqlContext.conf.setConf(SQLConf.PARQUET_USE_DATA_SOURCE_API, "true")
  }
  override protected def afterAll(): Unit = {
    // NOTE(review): beforeAll writes via `sqlContext.conf.setConf` but this restores via
    // `sqlContext.setConf` -- confirm both hit the same underlying setting.
    sqlContext.setConf(SQLConf.PARQUET_USE_DATA_SOURCE_API, originalConf.toString)
  }
}
// Runs the shared ParquetFilterSuiteBase tests with PARQUET_USE_DATA_SOURCE_API forced
// to "false" for the whole suite.
class ParquetDataSourceOffFilterSuite extends ParquetFilterSuiteBase with BeforeAndAfterAll {
  // Snapshot of the flag, taken at construction time so afterAll can restore it.
  val originalConf = sqlContext.conf.parquetUseDataSourceApi
  override protected def beforeAll(): Unit = {
    sqlContext.conf.setConf(SQLConf.PARQUET_USE_DATA_SOURCE_API, "false")
  }
  override protected def afterAll(): Unit = {
    // NOTE(review): beforeAll writes via `sqlContext.conf.setConf` but this restores via
    // `sqlContext.setConf` -- confirm both hit the same underlying setting.
    sqlContext.setConf(SQLConf.PARQUET_USE_DATA_SOURCE_API, originalConf.toString)
  }
}
| hengyicai/OnlineAggregationUCAS | sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetFilterSuite.scala | Scala | apache-2.0 | 15,074 |
/*
* Copyright (c) 2014-2015 by its authors. Some rights reserved.
* See the project homepage at: http://www.monifu.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monifu.concurrent
import scala.concurrent.TimeoutException
/**
* A small toolkit of classes that support compare-and-swap semantics for safe mutation of variables.
*
* On top of the JVM, this means dealing with lock-free thread-safe programming. Also works on top of Javascript,
* with Scala.js (for good reasons, as Atomic references are still useful in non-multi-threaded environments).
*
* The backbone of Atomic references is this method:
* {{{
* def compareAndSet(expect: T, update: T): Boolean
* }}}
*
* This method atomically sets a variable to the `update` value if it currently holds
* the `expect` value, reporting `true` on success or `false` on failure. The classes in this package
* also contain methods to get and unconditionally set values. They also support weak operations,
 * defined in `WeakAtomic[T]`, such as `weakCompareAndSet` or `lazySet`, or operations that
* block the current thread through ''spin-locking'', until a condition happens (e.g. `waitForCompareAndSet`),
* methods exposed by `BlockingAtomic[T]`.
*
* Building a reference is easy with the provided constructor, which will automatically return the
* most specific type needed (in the following sample, that's an `AtomicDouble`, inheriting from `AtomicNumber[T]`):
* {{{
* val atomicNumber = Atomic(12.2)
*
* atomicNumber.incrementAndGet()
* // => 13.2
* }}}
*
* In comparison with `java.util.concurrent.AtomicReference`, these references implement common interfaces
* that you can use generically (i.e. `Atomic[T]`, `AtomicNumber[T]`, `BlockableAtomic[T]`, `WeakAtomic[T]`).
* And also provide useful helpers for atomically mutating of values
* (i.e. `transform`, `transformAndGet`, `getAndTransform`, etc...) or of numbers of any kind
* (`incrementAndGet`, `getAndAdd`, etc...).
*
* A high-level documentation describing the rationale for these can be found here:
* [[https://github.com/alexandru/monifu/blob/master/docs/atomic.md Atomic Reference]]
*/
package object atomic {

  /** For private use only by the `monifu` package.
   *
   *  Clears and tests the current thread's interrupt flag
   *  (`Thread.interrupted()` both reads and resets it), raising an
   *  `InterruptedException` when the flag was set.
   */
  @inline private[atomic] def interruptedCheck(): Unit =
    if (Thread.interrupted()) throw new InterruptedException

  /** For private use only by the `monifu` package.
   *
   *  Compares the current monotonic clock against the given deadline, raising a
   *  `TimeoutException` once `endsAtNanos` has been reached or passed.
   */
  @inline private[atomic] def timeoutCheck(endsAtNanos: Long): Unit =
    if (System.nanoTime() >= endsAtNanos) throw new TimeoutException
}
| sergius/monifu | core/jvm/src/main/scala/monifu/concurrent/atomic/package.scala | Scala | apache-2.0 | 3,318 |
package com.tutuur.ducksoup.service
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import com.tutuur.ducksoup.database.Tables
import com.tutuur.ducksoup.http.StatusCode._
import com.tutuur.ducksoup.json.PostJsonProtocol
import com.tutuur.ducksoup.meta.Post
/**
* User posts service. CRUD user posts.
* @author Zale
*/
object PostService extends Service[Post] with PostJsonProtocol {
  private[this] val dao = Tables.postDao

  // Content types a post is allowed to declare.
  private[this] val ContentTypes = List("json", "md", "html", "plain")

  override def route: Route = path(context) {
    get {
      // List posts: `limit` is capped at 20 (default 20), paging starts after `fromId`.
      parameter('limit.as[Int].?, 'fromId.as[Long].?) { (limit, fromId) =>
        complete(dao.list(limit.getOrElse(20).min(20), fromId.getOrElse(0L)))
      }
    } ~
    post {
      // `newPost` avoids shadowing the `post` directive above.
      entity(as[Post]) { newPost =>
        newPost.contentType match {
          case Some(t) if ContentTypes.contains(t) =>
            // Client-supplied id is discarded; authorId is currently hard-wired to 0.
            dao.store(newPost.copy(id = None, authorId = Some(0))) match {
              case Some(id) => ok(id.toString)
              case None => internalError()
            }
          case Some(t) =>
            badRequest(s"content type `$t' is not available.")
          case None =>
            badRequest("content type not defined.")
        }
      }
    }
  } ~
  path(context / LongNumber) { postId =>
    get {
      // Fetch a single post by id.
      dao.retrieve(postId) match {
        case Some(found) => complete(found)
        case None => notFound(s"post id $postId not found")
      }
    }
  }
}
| Thiross/ducksoup | web/src/main/scala/com/tutuur/ducksoup/service/PostService.scala | Scala | gpl-3.0 | 1,610 |
/*
* Copyright 2001-2011 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.concurrent
import org.scalatest._
import matchers.ShouldMatchers
import Thread.State._
import java.util.concurrent.atomic.AtomicBoolean
import org.scalatest.exceptions.NotAllowedException
class ConductorDeprecatedSuite extends FunSuite with ShouldMatchers with SharedHelpers with SeveredStackTraces { // WARNING: several tests assert exact source line numbers; all comments in this class are trailing so the line count never changes -- never add or delete lines here
  val baseLineNumber = 26 // anchor for the "baseLineNumber + N" line-number assertions below
  test("if conduct is called twice, the second time it throws an NotAllowedException") {
    val conductor = new Conductor
    conductor.conduct()
    val caught = intercept[NotAllowedException] { conductor.conduct() } // this call site's line number is asserted below
    caught.getMessage should be ("A Conductor's conduct method can only be invoked once.")
    caught.failedCodeFileNameAndLineNumberString match {
      case Some(s) => s should equal ("ConductorDeprecatedSuite.scala:" + (baseLineNumber + 5))
      case None => fail("Didn't produce a file name and line number string: ", caught)
    }
  }
  test("if conduct has not been called, conductingHasBegun should return false"){
    val conductor = new Conductor
    conductor.conductingHasBegun should be (false)
  }
  test("if conduct has been called, conductingHasBegun should return true") {
    val conductor = new Conductor
    conductor.conduct
    conductor.conductingHasBegun should be (true)
  }
  test("if thread {} is called after the test has been conducted, it throws an NotAllowedException" +
          "with a detail message that explains the problem") {
    val conductor = new Conductor
    conductor.conduct
    val caught =
      intercept[NotAllowedException] {
        conductor.thread("name") { 1 should be (1) }
      }
    caught.getMessage should be ("Cannot invoke the thread method on Conductor after its multi-threaded test has completed.")
    caught.failedCodeFileNameAndLineNumberString match {
      case Some(s) => s should equal ("ConductorDeprecatedSuite.scala:" + (baseLineNumber + 30))
      case None => fail("Didn't produce a file name and line number string: ", caught)
    }
  }
  test("if thread(String) {} is called after the test has been conducted, it throws NotAllowedException" +
          "with a detail message that explains the problem"){
    val conductor = new Conductor
    conductor.conduct
    val caught =
      intercept[NotAllowedException] {
        conductor.thread("name") { 1 should be (1) }
      }
    caught.getMessage should be ("Cannot invoke the thread method on Conductor after its multi-threaded test has completed.")
    caught.failedCodeFileNameAndLineNumberString match {
      case Some(s) => s should equal ("ConductorDeprecatedSuite.scala:" + (baseLineNumber + 45))
      case None => fail("Didn't produce a file name and line number string: ", caught)
    }
  }
  test("if whenFinished is called twice on the same conductor, a NotAllowedException is thrown that explains it " +
          "can only be called once") {
    val conductor = new Conductor
    conductor.whenFinished { 1 should be (1) } // whenFinished itself invokes conduct, hence the message below
    val caught =
      intercept[NotAllowedException] {
        conductor.whenFinished { 1 should be (1) }
      }
    caught.getMessage should be ("Cannot invoke whenFinished after conduct (which is called by whenFinished) has been invoked.")
    caught.failedCodeFileNameAndLineNumberString match {
      case Some(s) => s should equal ("ConductorDeprecatedSuite.scala:" + (baseLineNumber + 60))
      case None => fail("Didn't produce a file name and line number string: ", caught)
    }
  }
  test("if thread(String) is called twice with the same String name, the second invocation results " +
          "in an IllegalArgumentException that explains each thread in a multi-threaded test " +
          "must have a unique name") {
    val conductor = new Conductor
    conductor.thread("Fiesta del Mar") { 1 should be (1) }
    val caught =
      intercept[NotAllowedException] {
        conductor.thread("Fiesta del Mar") { 2 should be (2) }
      }
    caught.getMessage should be ("Cannot register two threads with the same name. Duplicate name: Fiesta del Mar.")
    caught.failedCodeFileNameAndLineNumberString match {
      case Some(s) => s should equal ("ConductorDeprecatedSuite.scala:" + (baseLineNumber + 77))
      case None => fail("Didn't produce a file name and line number string: ", caught)
    }
  }
  test("waitForBeat throws NotAllowedException if is called with zero or a negative number") {
    val conductor = new Conductor
    val caught =
      intercept[NotAllowedException] {
        conductor.waitForBeat(0)
      }
    caught.failedCodeFileNameAndLineNumberString match {
      case Some(s) => s should equal ("ConductorDeprecatedSuite.scala:" + (baseLineNumber + 90))
      case None => fail("Didn't produce a file name and line number string: ", caught)
    }
    caught.getMessage should be ("A Conductor starts at beat zero, so you can't wait for beat zero.")
    val caught2 =
      intercept[NotAllowedException] {
        conductor.waitForBeat(-1)
      }
    caught2.getMessage should be ("A Conductor starts at beat zero, so you can only wait for a beat greater than zero.")
    caught2.failedCodeFileNameAndLineNumberString match {
      case Some(s) => s should equal ("ConductorDeprecatedSuite.scala:" + (baseLineNumber + 99))
      case None => fail("Didn't produce a file name and line number string: ", caught)
    }
  }
  test("If a non-positive number is passed to conduct for clockPeriod, it will throw NotAllowedException") {
    val conductor = new Conductor
    val caught =
      intercept[NotAllowedException] {
        conductor.conduct(0, 100)
      }
    caught.getMessage should be ("The clockPeriod passed to conduct must be greater than zero. Value passed was: 0.")
    caught.failedCodeFileNameAndLineNumberString match {
      case Some(s) => s should equal ("ConductorDeprecatedSuite.scala:" + (baseLineNumber + 112))
      case None => fail("Didn't produce a file name and line number string: ", caught)
    }
    val caught2 =
      intercept[NotAllowedException] {
        conductor.conduct(-1, 100)
      }
    caught2.failedCodeFileNameAndLineNumberString match {
      case Some(s) => s should equal ("ConductorDeprecatedSuite.scala:" + (baseLineNumber + 121))
      case None => fail("Didn't produce a file name and line number string: ", caught)
    }
    caught2.getMessage should be ("The clockPeriod passed to conduct must be greater than zero. Value passed was: -1.")
  }
  test("If a non-positive number is passed to conduct for runLimit, it will throw NotAllowedException") {
    val conductor = new Conductor
    val caught =
      intercept[NotAllowedException] {
        conductor.conduct(100, 0)
      }
    caught.getMessage should be ("The timeout passed to conduct must be greater than zero. Value passed was: 0.")
    caught.failedCodeFileNameAndLineNumberString match {
      case Some(s) => s should equal ("ConductorDeprecatedSuite.scala:" + (baseLineNumber + 134))
      case None => fail("Didn't produce a file name and line number string: ", caught)
    }
    val caught2 =
      intercept[NotAllowedException] {
        conductor.conduct(100, -1)
      }
    caught2.failedCodeFileNameAndLineNumberString match {
      case Some(s) => s should equal ("ConductorDeprecatedSuite.scala:" + (baseLineNumber + 143))
      case None => fail("Didn't produce a file name and line number string: ", caught)
    }
    caught2.getMessage should be ("The timeout passed to conduct must be greater than zero. Value passed was: -1.")
  }
  test("withConductorFrozen executes the passed function once") {
    val conductor = new Conductor
    var functionExecutionCount = 0
    conductor.withConductorFrozen { // Function will be executed by the calling thread
      functionExecutionCount += 1
    }
    functionExecutionCount should be (1)
  }
  test("first exception thrown is reported") {
    val e = new RuntimeException("howdy")
    class MySuite extends FunSuite {
      test("this will fail") {
        val conductor = new Conductor
        import conductor._
        thread {
          waitForBeat(1)
        }
        thread {
          throw e
          () // unreachable; keeps the thread body's result type Unit
        }
        conductor.conduct()
      }
    }
    val a = new MySuite
    val rep = new EventRecordingReporter
    a.run(None, Args(rep))
    val tf = rep.testFailedEventsReceived
    tf.size should be === 1
    tf.head.throwable should be ('defined)
    tf.head.throwable.get should be theSameInstanceAs e // the original exception, not a wrapper
  }
  test("whenFinished can only be called by thread that created Conductor.") {
    val conductor = new Conductor
    import conductor._
    thread {
      intercept[NotAllowedException] {
        whenFinished { 1 should be (1) }
      }.getMessage should be ("whenFinished can only be called by the thread that created Conductor.")
    }
    whenFinished { 1 should be (1) }
  }
  test("isConductorFrozen returns true if the conductor is frozen, false otherwise") {
    val conductor = new Conductor
    import conductor._
    conductor.isConductorFrozen should be (false)
    withConductorFrozen {
      conductor.isConductorFrozen should be (true)
    }
  }
  test("the beat method returns the correct value") {
    val conductor = new Conductor
    import conductor._
    beat should equal (0)
    thread {
      beat should equal (0)
      waitForBeat(1)
      beat should equal (1)
      waitForBeat(2)
      beat should equal (2)
    }
    whenFinished {
      beat should equal (2)
    }
  }
  test("if I wait for a beat that's lower than the current beat, I just keep going") {
    val conductor = new Conductor
    import conductor._
    beat should equal (0)
    thread {
      beat should equal (0)
      waitForBeat(1)
      beat should equal (1)
      waitForBeat(1) // This should also work
      beat should equal (1)
      waitForBeat(2)
      beat should equal (2)
      waitForBeat(1) // This should also work
      beat should equal (2)
    }
    whenFinished {
      beat should equal (2)
    }
  }
  class Forevermore { // helper: callers block on the instance monitor; no test ever notifies it
    def waitForever() {
      synchronized {
        wait()
      }
    }
  }
  test("deadlock is detected") {
    val conductor = new Conductor
    import conductor._
    val monitor = new Forevermore
    thread {
      monitor.waitForever()
    }
    thread {
      monitor.waitForever()
    }
    val caught =
      intercept[RuntimeException] {
        conduct()
      }
    caught.getMessage should be ("Test aborted because of suspected deadlock. No progress has been made (the beat did not advance) for 50 clock periods (500 ms).")
  }
  test("other threads are killed when one thread throws an exception") {
    val conductor = new Conductor
    import conductor._
    val monitor = new Forevermore
    val threadWasKilled = new AtomicBoolean()
    thread {
      try {
        monitor.waitForever()
      }
      catch {
        case t: ThreadDeath => // records that the conductor killed this thread, then lets death proceed
          threadWasKilled.set(true)
          throw t
      }
    }
    thread {
      waitForBeat(1)
      fail()
      () // unreachable; keeps the thread body's result type Unit
    }
    intercept[RuntimeException] {
      conduct()
    }
    threadWasKilled.get should be (true)
  }
  test("runaway threads will cause a test to be timed out") {
    val conductor = new Conductor
    import conductor._
    class Counter {
      @volatile var count = 0
    }
    val counter = new Counter
    thread {
      while (true)
        counter.count += 1
    }
    thread {
      while (true)
        counter.count -= 1
    }
    val caught =
      intercept[RuntimeException] {
        conduct(10, 1) // short clockPeriod and a 1-second run limit so the runaways time out fast
      }
    caught.getMessage should be ("Test timed out because threads existed that were runnable while no progress was made (the beat did not advance) for 1 seconds.")
  }
  test("ConductorFixture is a stackable trait that delegates test function execution to withFixture(NoArgTest)") {
    var calledSuperWithFixtureNoArgTest = false
    class MySpec extends fixture.FunSuite with ConductorFixture {
      override def withFixture(test: NoArgTest): Outcome = {
        calledSuperWithFixtureNoArgTest = true
        super.withFixture(test)
      }
      test("one") { c => }
    }
    val a = new MySpec
    a.run(None, Args(SilentReporter))
    calledSuperWithFixtureNoArgTest should be (true)
  }
  test("ConductorMethods is a stackable trait that delegates test function execution to withFixture(NoArgTest)") {
    var calledSuperWithFixtureNoArgTest = false
    trait SuperTrait extends SuiteMixin { this: Suite =>
      abstract override def withFixture(test: NoArgTest): Outcome = {
        calledSuperWithFixtureNoArgTest = true
        super.withFixture(test)
      }
    }
    class MySpec extends FunSuite with SuperTrait with ConductorMethods {
      test("one") {}
    }
    val a = new MySpec
    a.run(None, Args(SilentReporter))
    calledSuperWithFixtureNoArgTest should be (true)
  }
}
| svn2github/scalatest | src/test/scala/org/scalatest/concurrent/ConductorDeprecatedSuite.scala | Scala | apache-2.0 | 13,389 |
/*
* Copyright (c) 2015-2017 EpiData, Inc.
*/
package service
import models.User
import play.api.{ Logger, Application }
import securesocial.core._
import securesocial.core.providers.Token
/** A user service in Scala using a Cassandra backend. */
class CassandraUserService(application: Application) extends UserServicePlugin(application) {
  val logger = Logger("application.controllers.CassandraUserService")

  def find(id: IdentityId): Option[Identity] = User.find(id)

  // No UsernamePassword provider in use, so email lookup never matches.
  def findByEmailAndProvider(email: String, providerId: String): Option[Identity] = None

  def save(user: Identity): Identity = {
    // Account creation is manual during the restricted invite period, so refuse to
    // persist identities that were not provisioned beforehand.
    if (find(user.identityId).isEmpty) throw new AuthenticationException
    val updated = SocialUser(
      user.identityId,
      user.firstName,
      user.lastName,
      user.fullName,
      user.email,
      user.avatarUrl,
      user.authMethod,
      user.oAuth1Info,
      user.oAuth2Info,
      user.passwordInfo
    )
    User.save(updated)
    updated
  }

  // The remaining operations are no-ops: they only matter for the
  // UsernamePassword provider, which is not enabled.

  def link(current: Identity, to: Identity) {}

  def save(token: Token) {}

  def findToken(token: String): Option[Token] = None

  def deleteToken(uuid: String) {}

  def deleteTokens() {}

  def deleteExpiredTokens() {}
}
| epidataio/epidata-community | play/app/service/CassandraUserService.scala | Scala | apache-2.0 | 1,696 |
/*
* bytefrog: a tracing framework for the JVM. For more information
* see http://code-pulse.com/bytefrog
*
* Copyright (C) 2014 Applied Visions - http://securedecisions.avi.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.secdec.bytefrog.fileapi.data
import net.liftweb.json._
import Serialization.{ read, write }
/** A single named marker recorded during a trace.
 *
 *  @param key       marker identifier
 *  @param value     associated payload, carried as a string
 *  @param timestamp marker time (units not evident here -- presumably trace-relative; confirm with the producer)
 */
case class TraceMarker(key: String, value: String, timestamp: Int)
object TraceMarkerJson {
  implicit val jsonFormats = DefaultFormats

  // Wire format: the marker list travels inside a single-field envelope object.
  private case class TraceMarkers(markers: List[TraceMarker])

  /** Renders the given markers as a JSON envelope string. */
  def serialize(markers: List[TraceMarker]): String =
    write(TraceMarkers(markers))

  /** Parses a JSON envelope back into markers; malformed input yields an empty list. */
  def deserialize(input: String): List[TraceMarker] =
    try {
      read[TraceMarkers](input).markers
    } catch {
      // Best effort: any json parsing/mapping failure degrades to "no markers".
      case _: Exception => Nil
    }
} | secdec/bytefrog-clients | file-api/src/main/scala/com/secdec/bytefrog/fileapi/data/TraceMarker.scala | Scala | apache-2.0 | 1,358 |
package org.jetbrains.plugins.scala
package codeInspection.imports
import com.intellij.openapi.project.Project
import org.jetbrains.plugins.scala.codeInspection.AbstractFixOnPsiElement
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScImportExpr
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createImportExprFromText
/**
* @author Ksenia.Sautina
* @since 4/11/12
*/
class RemoveBracesForSingleImportQuickFix(importExpr: ScImportExpr)
        extends AbstractFixOnPsiElement(ScalaBundle.message("remove.braces.from.import"), importExpr) {

  /** Rewrites `import a.{b}` as `import a.b` (or `import a._` for a single wildcard). */
  def doApplyFix(project: Project) {
    val expr = getElement
    if (expr.isValid) {
      val selected =
        if (expr.isSingleWildcard) "_"
        else expr.importedNames.headOption.getOrElse("")
      val braceless = s"${expr.qualifier.getText}.$selected"
      inWriteAction {
        expr.replace(createImportExprFromText(braceless)(expr.getManager))
      }
    }
  }
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/imports/RemoveBracesForSingleImportQuickFix.scala | Scala | apache-2.0 | 982 |
/* NSC -- new Scala compiler
* Copyright 2007-2013 LAMP/EPFL
* @author Chris James
*/
package scala.tools.nsc
package doc
package model
import scala.collection._
/** A fragment of code, i.e. an abstract syntax tree together with links from positions
  * in its printed form to the entities those names reference. */
abstract class TreeEntity {
  /** The human-readable representation of this abstract syntax tree. */
  def expression: String
  /** Maps which parts of this syntax tree's name reference entities. The map is indexed by the position of the
    * first character that references some entity, and contains that entity and the position of the last referenced
    * character. The referenced character ranges do not overlap or nest. The map is sorted by position. */
  def refEntity: SortedMap[Int, (Entity, Int)]
  /** Equivalent to [[expression]]. */
  override def toString = expression
}
| felixmulder/scala | src/scaladoc/scala/tools/nsc/doc/model/TreeEntity.scala | Scala | bsd-3-clause | 822 |
package domala.jdbc.entity
import domala.Column
/** Bundles the inputs needed to construct an entity property description.
 *
 *  @param entityClass         class of the entity that owns the property
 *  @param entityPropertyClass declared class of the property itself
 *  @param typeDesc            basic/holder type description of the property value
 *  @param name                property name
 *  @param column              column mapping settings for the property
 *  @param namingType          naming convention -- presumably used to derive the column
 *                             name when `column` does not specify one; confirm at the call site
 */
case class EntityPropertyDescParam[ENTITY, BASIC, HOLDER] (
  entityClass: Class[ENTITY],
  entityPropertyClass: Class[_],
  typeDesc: SingleTypeDesc[BASIC, HOLDER],
  name: String,
  column: Column,
  namingType: NamingType,
)
| bakenezumi/domala | core/src/main/scala/domala/jdbc/entity/EntityPropertyDescParam.scala | Scala | apache-2.0 | 278 |
package com.hungrylearner.pso.swarm
import akka.actor.ActorRef
import com.hungrylearner.pso.particle.EvaluatedPosition
import com.hungrylearner.pso.swarm.Report._
object RegionalSupervisor {
  import CompletedType._

  /**
   * Creates one empty (iteration -> count) mutable map for each completion type
   * whose progress we track.
   */
  def makeProgressCounts: collection.immutable.Map[CompletedType, collection.mutable.Map[Int,Int]] =
    collection.immutable.Map[CompletedType, collection.mutable.Map[Int,Int]](
      SwarmOneIterationCompleted -> collection.mutable.Map[Int,Int](),
      SwarmAroundCompleted -> collection.mutable.Map[Int,Int](),
      SwarmingCompleted -> collection.mutable.Map[Int,Int]()
    )

  /**
   * Keep track of progress counts for each CompletedType for each reported iteration.
   *
   * @tparam F Fitness
   * @tparam P Particle backing store
   */
  class ProgressCounters[F,P] {

    // Map of CompletedType to (map of Iteration to descendant progress counts)
    val counters: collection.immutable.Map[CompletedType, collection.mutable.Map[Int,Int]] = makeProgressCounts

    /**
     * Return the current progress count for the report's CompletedType/iteration.
     *
     * @param progressReport The progress report from one of our children.
     * @return The current count (0 if nothing has been recorded for the iteration)
     */
    def progressCount( progressReport: ProgressReport[F,P]): Int = {
      // apply() instead of the original get(...).get: both throw
      // NoSuchElementException for an unknown CompletedType (which would be a
      // programming error), but apply() states the intent directly.
      val countsByIteration = counters(progressReport.completedType)
      countsByIteration.getOrElse( progressReport.iteration, 0)
    }

    /**
     * Increment the progress count for the report's CompletedType/iteration.
     *
     * @param progressReport The progress report from one of our children.
     * @return The incremented count
     */
    def incrementProgressCount( progressReport: ProgressReport[F,P]): Int = {
      val countsByIteration = counters(progressReport.completedType)
      val completedCount = countsByIteration.getOrElse( progressReport.iteration, 0) + 1
      countsByIteration( progressReport.iteration) = completedCount
      completedCount
    }
  }
}
/**
 * The RegionalSupervisor manages reports going up the swarm hierarchy and manages
 * how/when knowledge is shared among its children.
 *
 * Mixed in together with RegionalId and RegionalTerminateCriteria (see the
 * self-type), which supply childIndex, config, reportingStrategy, bestPosition
 * and the terminateCriteriaMet test.
 */
trait RegionalSupervisor[F,P] extends Supervisor[F,P] {
  this: RegionalId[F,P] with RegionalTerminateCriteria[F,P] =>
  import RegionalSupervisor._
  import TerminateCriteriaStatus._
  import CompletedType._
  // Keep track of progress counts for each CommandType for each reported iteration.
  // The count is the count of descendants. We'll receive progress reports from children, but that doesn't mean
  // the child's children have completed that command/iteration.
  //
  protected val childProgressCounters = new ProgressCounters[F,P]
  protected val descendantProgressCounters = new ProgressCounters[F,P]
  /**
   * We received a ProgressReport from a child. Update our best position, report to our parent, and update our
   * children.
   */
  override def onProgressReport( childReport: ProgressReport[F,P], originator: ActorRef): Unit = {
    val regionalProgress = calculateRegionalProgress( childReport)
    val evaluatedPosition = evaluatePosition( childReport)
    updateBestPosition( evaluatedPosition)
    val terminateCriteriaStatus = terminateCriteriaMet( childReport, regionalProgress)
    if( terminateCriteriaStatus == TerminateCriteriaMetNow) {
      // We're not going to stop or child actors now. If some of the children are not completed,
      // send a CancelSwarming. Our parent needs to deal with stopping the whole actor tree later.
      //
      if( childrenHaveNotCompleted( childReport.completedType, regionalProgress))
        sendToChildren( CancelSwarming, originator)
    }
    // Report the position our child gave us. evaluatedPosition specifies whether it is our best or not.
    reportingStrategy.reportForRegion( childReport, childIndex, evaluatedPosition, regionalProgress, terminateCriteriaStatus)
    // If terminate criteria is not met, tell the children (except for the originator child who sent the position).
    // The implementer of tellChildren decides when and whether the children should be told. This decision can be based on
    // evaluatedPosition.isBest or other criteria.
    //
    if( terminateCriteriaStatus.isNotMet)
      tellChildren( evaluatedPosition, childReport.iteration, regionalProgress, originator)
  }
  // True unless swarming is fully completed for all of our children.
  protected def childrenHaveNotCompleted( completedType: CompletedType, regionalProgress: Progress) =
    ! (completedType == SwarmingCompleted && regionalProgress.completed)
  /**
   * A child has terminated.
   * NOTE(review): currently this only logs; the comment below suggests parent
   * notification was planned but it is not implemented here.
   * @param child The child that terminated.
   */
  override def onTerminated(child: ActorRef) = {
    // If terminating children, let our parent know when they have all terminated.
    Logger.info( s"RegionalSwarmActor Terminated( child='${child.path.name}')")
  }
  // Adopt the reported position when it is flagged as best, or when we have no
  // best position yet (bestPosition == null on first report).
  def updateBestPosition( evaluatedPosition: EvaluatedPosition[F,P]) =
    if( evaluatedPosition.isBest || bestPosition == null)
      bestPosition = evaluatedPosition.position
  /**
   * Decide whether the progress report's position is better than our bestPosition.
   *
   * @param progressReport Progress report from child
   * @return Our evaluation of the reported position compared to our bestPosition
   */
  def evaluatePosition( progressReport: ProgressReport[F,P]): EvaluatedPosition[F,P] = {
    val isRegionalBest = isBetterPosition( progressReport.evaluatedPosition)
    EvaluatedPosition( progressReport.evaluatedPosition.position, isRegionalBest)
  }
  // Folds a child's report into our child- and descendant-level progress counts
  // and derives this region's overall Progress. Descendant counts increment on
  // every report; child counts increment only when the child itself reports
  // completion.
  protected def calculateRegionalProgress( progressReport: ProgressReport[F,P]): Progress = {
    val descendantCompletedCount = descendantProgressCounters.incrementProgressCount( progressReport)
    val descendantProgress = ProgressFraction( descendantCompletedCount, config.descendantSwarmCount)
    val childCompletedCount = if( progressReport.progress.completed)
      childProgressCounters.incrementProgressCount( progressReport)
    else
      childProgressCounters.progressCount( progressReport)
    val childProgress = ProgressFraction( childCompletedCount, config.childCount)
    val completed = childCompletedCount >= config.childCount
    if( descendantCompletedCount > config.descendantSwarmCount)
      Logger.error( s"RegionalSupervisor.onProgressReport: childIndex:${progressReport.childIndex}, type:${progressReport.completedType}, iteration:${progressReport.iteration} descendantCompletedCount:${descendantCompletedCount} is greater than descendantSwarmCount:${config.descendantSwarmCount} for this iteration")
    Progress( childProgress, descendantProgress, completed)
  }
  // '<' on positions is assumed to mean "strictly better"; also true when no
  // best position has been recorded yet.
  protected def isBetterPosition( evaluatedPosition: EvaluatedPosition[F,P]): Boolean = {
    bestPosition == null || evaluatedPosition.position < bestPosition
  }
}
| flintobrien/akka-multiswarm | src/main/scala/com/hungrylearner/pso/swarm/RegionalSupervisor.scala | Scala | apache-2.0 | 6,748 |
package slaq.test
import scala.language.reflectiveCalls
import org.junit.Test
import org.junit.Assert._
import slaq.ql._
import slaq.ql.TypeMapper._
import slaq.ql.driver.H2Driver.Implicit.{given, *}
import slaq.ql.Table
object TablesInObjectTest {
  // Entry point so the comparison can be run outside of JUnit.
  def main(args: Array[String]): Unit = { new TablesInObjectTest().test1() }
  // Minimal table with a single Int column, used as the join target.
  object Categories extends Table[Int]("categories") {
    def id = column[Int]("id")
    def * = id
  }
  /* This needs to be a val instead of an object with a singleton type
   * because scalac assumes that the object is a singleton and pulls the
   * wrong "this" reference into the closure -- where "category" is
   * referenced -- when it is used in a clone()d Posts instance.
   * NOTE(review): the definition below is nevertheless an `object`, so this
   * comment appears stale — confirm against project history before relying on it.
   */
  object Posts extends Table[Int]("posts") {
    def category = column[Int]("category")
    def * = category
    // Join expression defined as a method on the table itself; compared with a
    // locally-defined function in the test class.
    def categoryJoin = Categories.filter(_.id =~ category)
  }
}
class TablesInObjectTest {
  import TablesInObjectTest._
  // Verifies that a join written as a local function and the same join written
  // as a method on the Posts table generate identical SQL.
  @Test def test1() = {
    def categoryJoin(p: Posts.type) = Categories.filter(_.id =~ p.category)
    val q1 = for {
      p <- Posts
      c <- categoryJoin(p.asInstanceOf[Posts.type])
    } yield p.category ~ c.id
    //q1.dump("Local function")
    val sel1 = q1.selectStatement
    println("Local function: " + sel1)
    val q2 = for {
      p <- Posts
      c <- p.categoryJoin
    } yield p.category ~ c.id
    //q2.dump("Method on table")
    val sel2 = q2.selectStatement
    println("Method on table: " + sel2)
    assertEquals(sel1, sel2)
  }
}
| godenji/slaq | src/test/scala/slaq/scalaquery/test/TablesInObjectTest.scala | Scala | bsd-2-clause | 1,513 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.cli
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.util.ADAMFunSuite
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.formats.avro.AlignmentRecord
/**
 * Tests for the view command's SAM-flag filtering options (-f/-F/-g/-G),
 * exercised against the flag-values.sam fixture.
 * NOTE(review): the fixture is assumed to contain 700 reads (one per valid
 * combination of the 12 flag bits) per the per-test comments below — confirm
 * against the fixture if it ever changes.
 */
class ViewSuite extends ADAMFunSuite {

  val inputSamPath = testFile("flag-values.sam")

  // Populated once by the sparkBefore hook; null until then because the
  // SparkContext is not available at construction time.
  var reads: Array[AlignmentRecord] = null
  var readsCount = 0

  sparkBefore("initialize 'reads' Array from flag-values.sam") {
    val transform =
      new Transform(
        Args4j[TransformArgs](
          Array(
            inputSamPath,
            "unused_output_path"
          )
        )
      )
    reads = transform.apply(sc.loadAlignments(inputSamPath)).collect()
    // reads.length is already an Int; the original `reads.size.toInt` was a no-op.
    readsCount = reads.length
  }

  /**
   * Runs the view filters with the given flag bit-masks and asserts the number
   * of reads that survive. A negative value means "option not given".
   */
  def runView(matchAllBits: Int = -1,
              mismatchAllBits: Int = -1,
              matchSomeBits: Int = -1,
              mismatchSomeBits: Int = -1)(expected: Int): Unit =
    runView(
      if (matchAllBits >= 0) Some(matchAllBits) else None,
      if (mismatchAllBits >= 0) Some(mismatchAllBits) else None,
      if (matchSomeBits >= 0) Some(matchSomeBits) else None,
      if (mismatchSomeBits >= 0) Some(mismatchSomeBits) else None,
      expected
    )

  /**
   * Builds the equivalent command-line argument list (-f/-F/-g/-G) from the
   * given options, applies View's filters to the cached reads, and asserts the
   * surviving read count.
   */
  def runView(matchAllBitsOpt: Option[Int],
              mismatchAllBitsOpt: Option[Int],
              matchSomeBitsOpt: Option[Int],
              mismatchSomeBitsOpt: Option[Int],
              expected: Int): Unit = {

    val args: Array[String] =
      (
        matchAllBitsOpt.toList.flatMap("-f %d".format(_).split(" ")).toList ++
          mismatchAllBitsOpt.toList.flatMap("-F %d".format(_).split(" ")).toList ++
          matchSomeBitsOpt.toList.flatMap("-g %d".format(_).split(" ")).toList ++
          mismatchSomeBitsOpt.toList.flatMap("-G %d".format(_).split(" ")).toList :+
          "unused_input_path"
        ).toArray

    assert(
      new View(
        Args4j[ViewArgs](
          args
        )
      ).applyFilters(sc.parallelize(reads))
        .count() == expected
    )
  }

  sparkTest("-f 0 -F 0 is a no-op") {
    runView(0, 0)(readsCount)
  }

  sparkTest("no -f or -F args is a no-op") {
    runView()(readsCount)
  }

  sparkTest("-f 4: only unmapped reads") {
    /**
     * Of the 4096 (2^12) possible values of the 12 flag-field bits:
     *
     *   - half (2048) have the 0x4 (unmapped read) flag set, which we are filtering around in this test case.
     *   - only 1/8 of those (256) have the "reverse strand" (0x10), "secondary alignment" (0x100), and "supplementary
     *     alignment" (0x800) flags all unset (HTSJDK doesn't allow them to be set if 0x4 is set, because that wouldn't
     *     make sense).
     *   - half (128) have the "paired" flag (0x1) set and half (128) don't:
     *      1. of the 128 that do, 3/4ths (96) of them have at least one of {"first in template" (0x40), "second in
     *      template" (0x80)} set, and are therefore valid.
     *      2. of those that don't, 1/32nd (4) of them (those with none of {"proper pair" (0x2), "mate unmapped" (0x8),
     *      "mate reversed" (0x20), "first in template" (0x40), "last in template" (0x80)} set) are valid.
     *   - 96 and 4 from 1. and 2. above make for 100 total.
     */
    runView(4)(100)
  }

  sparkTest("-F 4: only mapped reads") {
    // 600 here is the rest of the 700-read input file that was not counted in the 100 above.
    runView(mismatchAllBits = 4)(600)
  }

  /**
   * - 1/4 (1024) have 0x4 set and 0x8 *not* set.
   * - 1/8 of those (128) have none of {0x10, 0x100, 0x800}, which is necessary on unmapped reads
   *    1. of 64 "paired" reads, 3/4ths (48) have 0x40 or 0x80
   *    2. of 64 "unpaired" reads, 0x8 has already been excluded, but so must be {0x2, 0x20, 0x40, 0x80}, leaving only
   *    1/16th the reads, or 4.
   * - total: 52 (48 reads from 1., 4 from 2.).
   */
  sparkTest("-f 4 -F 8: unmapped reads with mapped mates") {
    runView(4, 8)(52)
  }

  // 48 reads is the complement of the last case (52) from among the 100 from the "unmapped" case.
  sparkTest("-f 12: unmapped reads with unmapped mates") {
    runView(12)(48)
  }

  /**
   * - 2048 have "proper pair" set
   *    - 1/4 of these are no good because they don't have 0x40 or 0x80 set (one of which is required if 0x1 is set),
   *      so only 1536 remain.
   *    - 1/2 of those, or 768, have 0x4 set
   *        - only 1/8 (96) of these are good, because 0x10, 0x100, and 0x800 can't be set with 0x4
   *    - another 384 have 0x8 and not 0x4.
   *        - leaving out 1/4 (96) that have 0x800 and not 0x100 set, we have 288.
   *    - total: 384
   * - 2048 possible reads don't have "proper pair" set.
   *    - none of them can have 0x2, 0x8, 0x20, 0x40, 0x80 set, so really only 1/32 of 2048, or 64, are possible.
   *    - 32 of those have 0x4 set.
   *    - none of {0x10, 0x100, 0x800} can be set, so only 1/8 of 32, or 4, remain.
   *
   * - 384 + 4 = 388
   */
  sparkTest("-g 12: reads that are unmapped or whose mate is unmapped") {
    runView(matchSomeBits = 12)(388)
  }

  // Complement of the last test case.
  sparkTest("-F 12: mapped reads with mapped mates") {
    runView(mismatchAllBits = 12)(312)
  }

  /**
   * - 2048 have "proper pair" set
   *    - 3/4ths (1536) have 0x40 or 0x80 set.
   *        - 1/2 (768) have 0x4 set.
   *            - 1/8 (96) don't have {0x10, 0x100, 0x800}
   *        - 1/2 (768) don't have 0x4 set.
   *            - 3/4 (576) satisfy 0x800 => 0x100
   *                - 1/2 (288) have 0x20 set.
   *    - total: 96 + 288 = 384.
   * - 2048 don't have "proper pair" set
   *    - 1/32nd (64) don't have {0x2, 0x8, 0x20, 0x40, 0x80}.
   *        - 1/2 (32) have 0x4 set.
   *            - 1/8 (4) don't have {0x10, 0x100, 0x800}
   * - 384 + 4 = 388
   *
   * Or:
   *
   * - 2048 have 0x4 set.
   *    - 1/8th (256) have none of {0x10, 0x100, 0x800}
   *        - half (128) have 0x1.
   *            - 3/4ths (96) have 0x40 or 0x80.
   *        - half (128) don't have 0x1.
   *            - 1/32nd (4) don't have {0x2, 0x8, 0x20, 0x40, 0x80}
   * - 2048 don't have 0x4 set.
   *    - 3/4ths (1536) satisfy 0x800 => 0x100
   *        - 1/2 (768) have 0x1.
   *            - 3/4ths (576) have 0x40 or 0x80.
   *                - 1/2 (288) have 0x20.
   *        - 1/2 (768) don't have 0x1.
   *            - none have 0x20, so none are valid.
   * - 96 + 4 + 288 = 388.
   */
  sparkTest("-g 36: unmapped reads or reads with mate on negative strand") {
    runView(matchSomeBits = 36)(388)
  }

  sparkTest("-F 36: unmapped reads or reads with mate on negative strand") {
    // Complement to the last test case (700 - 388 == 312)
    // NOTE(review): the test name above was copied from the -g 36 case; -F 36
    // actually keeps mapped reads whose mate is NOT on the negative strand.
    // The name is left unchanged to avoid breaking test-name based filtering.
    runView(mismatchAllBits = 36)(312)
  }
}
| tomwhite/adam | adam-cli/src/test/scala/org/bdgenomics/adam/cli/ViewSuite.scala | Scala | apache-2.0 | 7,446 |
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.flaminem.flamy.parsing.hive
import com.flaminem.flamy.conf.FlamyContext
import com.flaminem.flamy.model.Variables
import com.flaminem.flamy.model.exceptions.FlamyException
import com.flaminem.flamy.utils.logging.Logging
import scala.util.control.NonFatal
/**
 * Base trait for parsers that turn Hive query text into a sequence of parsed
 * results of type T.
 *
 * Created by fpin on 11/19/16.
 */
trait Parser[T] extends Logging {

  /** Parses a single, already-cleaned query. Implementations may throw on invalid input. */
  protected def unsafeParseQuery(query: String)(implicit context: FlamyContext): Seq[T]

  /**
   * Parses a single query, wrapping any non-fatal failure in a
   * FlamyParsingException that carries the offending query text.
   */
  def parseQuery(query: String)(implicit context: FlamyContext): Seq[T] = {
    try {
      unsafeParseQuery(query)
    } catch {
      case e: FlamyParsingException if e.query.isDefined => throw e
      case NonFatal(e) => throw FlamyParsingException(query, e, verbose = true)
    }
  }

  /**
   * Returns true if the query is a Hive command that should be skipped rather
   * than parsed.
   */
  protected def ignoreQuery(query: String): Boolean = {
    if(QueryUtils.isCommand(query)){
      // Bug fix: the s-interpolator was missing, so the literal text
      // "Ignore query: $query" was logged instead of the actual query.
      logger.debug(s"Ignore query: $query")
      true
    }
    else {
      false
    }
  }

  /**
   * Splits the given text into queries, substitutes variables in each one, and
   * parses each resulting query.
   *
   * @param text   raw text possibly containing several queries
   * @param vars   variables substituted in each query before parsing
   * @param isView when true, at most one query is allowed in the text
   * @throws FlamyException if parsing fails or the view constraint is violated
   */
  @throws(classOf[FlamyException])
  def parseText(text: String, vars: Variables, isView: Boolean)(implicit context: FlamyContext): Seq[T] = {
    val cleanedQueries = QueryUtils.cleanAndSplitQuery(text)
    if(isView && cleanedQueries.size > 1){
      throw new FlamyParsingException("Only one query is allowed inside a view definition.")
    }
    cleanedQueries.filterNot{ignoreQuery}.flatMap{
      case query =>
        try {
          val replacedQuery = vars.replaceInText(query)
          parseQuery(replacedQuery)
        } catch {
          // Re-wrap with the *pre-substitution* query text so the user sees the
          // statement as originally written.
          case e: FlamyParsingException if e.query.isDefined => throw e
          case NonFatal(e) => throw FlamyParsingException(query, e, verbose = true)
        }
    }
  }
}
| flaminem/flamy | src/main/scala/com/flaminem/flamy/parsing/hive/Parser.scala | Scala | apache-2.0 | 2,202 |
package ingraph.ire.nodes.unary.aggregation
import ingraph.ire.datatypes.Tuple
import ingraph.ire.math.GenericMath
// Incrementally-maintained average over tuple position `sumKey`,
// implemented as a running sum divided by a running count.
class StatefulAverage(val sumKey: Int) extends StatefulAggregate {
  val summer = new StatefulSum(sumKey)
  val counter = new StatefulCount()
  // Current average. Falls back to 0 when GenericMath.divide raises
  // ArithmeticException — presumably division by zero when no tuples have been
  // seen yet (TODO confirm this also covers floating-point sums).
  override def value(): Any = try {
    GenericMath.divide(summer.value(), counter.value())
  } catch {
    case _: ArithmeticException => 0
  }
  // Insertions: forward to both the sum and the count.
  override def maintainPositive(values: Iterable[Tuple]): Unit = {
    summer.maintainPositive(values)
    counter.maintainPositive(values)
  }
  // Deletions: forward to both the sum and the count.
  override def maintainNegative(values: Iterable[Tuple]): Unit = {
    summer.maintainNegative(values)
    counter.maintainNegative(values)
  }
}
| FTSRG/ingraph | ire/src/main/scala/ingraph/ire/nodes/unary/aggregation/StatefulAverage.scala | Scala | epl-1.0 | 697 |
/* PlayListTracker.scala
*
* Jim McBeath, June 10, 2008
*/
package net.jimmc.mimprint
import net.jimmc.util.ActorPublisher
import net.jimmc.util.AsyncUi
import net.jimmc.util.FileUtilS
import net.jimmc.util.PFCatch
import net.jimmc.util.SomeOrNone
import net.jimmc.util.StdLogger
import java.io.File;
import java.io.PrintWriter;
import scala.actors.Actor
import scala.actors.Actor.loop
import scala.collection.mutable.Map
/** A playlist of images.
 * Actor that owns the current (immutable) PlayList and serializes all
 * mutations to it; interested parties subscribe (via ActorPublisher) and
 * receive PlayList* messages describing each change.
 */
class PlayListTracker(val ui:AsyncUi) extends Actor
        with ActorPublisher[PlayListMessage]
        with StdLogger{
    //Our current playlist
    private var playList:PlayList = PlayList(ui)
    //Index of the currently selected item; -1 means nothing selected yet
    private var currentIndex:Int = -1
    //True once the playlist has unsaved changes
    private var isModified = false
    //Path of the last file loaded (or that we would save to by default)
    private var lastLoadFileName:String = _
    //When true, prompt the user to save unsaved changes before switching lists
    var askSaveOnChanges = false
    /* We always start our actor right away so that clients can send
     * us subscribe requests as soon as we are created.
     */
    this.start()
    //Actor main loop: handle subscriptions plus our playlist request messages.
    def act() {
        loop {
            react (PFCatch(handleSubscribe orElse handleOther,
                    "PlayListTracker",ui))
        }
    }
    //Dispatch for all playlist request messages; each mutating request is
    //only honored if it refers to our current playlist (see listMatches).
    private val handleOther : PartialFunction[Any,Unit] = {
        case m:PlayListRequestInit =>
            m.sub ! PlayListInit(this,playList)
        case m:PlayListRequestAdd =>
            if (listMatches(m.list))
                addItem(m.item)
        case m:PlayListRequestInsert =>
            if (listMatches(m.list))
                insertItem(m.index,m.item)
        case m:PlayListRequestRemove =>
            if (listMatches(m.list))
                removeItem(m.index)
        case m:PlayListRequestChange =>
            if (listMatches(m.list))
                changeItem(m.index,m.item)
        case m:PlayListRequestUpdate =>
            if (listMatches(m.list))
                updateItem(m.index)
        case m:PlayListRequestSetItem =>
            if (listMatches(m.list))
                setItem(m.index,m.item)
        case m:PlayListRequestRotate =>
            if (listMatches(m.list))
                rotateItem(m.index, m.rot)
        case m:PlayListRequestSelect =>
            if (listMatches(m.list))
                selectItem(m.index)
        case m:PlayListRequestUp =>
            if (listMatches(m.list))
                selectUp()
        case m:PlayListRequestDown =>
            if (listMatches(m.list))
                selectDown()
        case m:PlayListRequestLeft =>
            if (listMatches(m.list))
                selectLeft()
        case m:PlayListRequestRight =>
            if (listMatches(m.list))
                selectRight()
        case _ => println("Unrecognized message to PlayList")
    }
    //Guards against requests built from a stale PlayList reference.
    private def listMatches(list:PlayList):Boolean = {
        if (list!=playList) {
            println("Unknown or stale PlayList in tracker request")
            //Could happen, but should be rare, so we basically ignore it
        }
        (list==playList) //OK to proceed if we have the right list
    }
    /** Add an item to our current PlayList to produce a new current PlayList,
     * publish notices about the change.
     */
    private def addItem(item:PlayItem) {
        val oldPlayList = playList
        val newPlayList = playList.addItem(item)
        val newIndex = playList.size - 1
        playList = newPlayList
        isModified = true
        publish(PlayListAddItem(this,oldPlayList,newPlayList,newIndex))
    }
    //Insert an item at the given index, shifting later items down.
    private def insertItem(itemIndex:Int, item:PlayItem) {
        val oldPlayList = playList
        val newPlayList = playList.insertItem(itemIndex, item)
        playList = newPlayList
        isModified = true
        publish(PlayListAddItem(this,oldPlayList,newPlayList,itemIndex))
    }
    //Remove the item at the given index.
    private def removeItem(index:Int) {
        logger.debug("enter PlayListTracker.removeItem")
        val oldPlayList = playList
        val newPlayList = playList.removeItem(index)
        playList = newPlayList
        isModified = true
        publish(PlayListRemoveItem(this,oldPlayList,newPlayList,index))
        logger.debug("leave PlayListTracker.removeItem")
    }
    //Replace the item at the given index with a new item.
    private def changeItem(itemIndex:Int, item:PlayItem) {
        val oldPlayList = playList
        val newPlayList = playList.replaceItem(itemIndex,item).
                asInstanceOf[PlayList]
        playList = newPlayList
        isModified = true
        publish(PlayListChangeItem(this,oldPlayList,newPlayList,itemIndex))
    }
    //No data change; just tells subscribers to refresh their view of the item.
    private def updateItem(itemIndex:Int) {
        publish(PlayListUpdateItem(this,playList,itemIndex))
    }
    //Like changeItem, but grows the playlist first if index is out of range.
    private def setItem(itemIndex:Int, item:PlayItem) {
        val oldPlayList = playList
        val biggerPlayList = playList.ensureSize(itemIndex+1)
        val newPlayList = biggerPlayList.replaceItem(itemIndex,item).
                asInstanceOf[PlayList]
        playList = newPlayList
        isModified = true
        publish(PlayListChangeItem(this,oldPlayList,newPlayList,itemIndex))
    }
    //Rotate the image at the given index by the given amount.
    private def rotateItem(itemIndex:Int, rot:Int) {
        val oldPlayList = playList
        val newPlayList = playList.rotateItem(itemIndex, rot).
                asInstanceOf[PlayList]
        playList = newPlayList
        isModified = true
        publish(PlayListChangeItem(this,oldPlayList,newPlayList,itemIndex))
    }
    private def selectItem(itemIndex:Int) {
        //no change to the playlist, we just publish a message
        currentIndex = itemIndex
        val pre = PlayListPreSelectItem(this,playList,itemIndex)
        logger.debug("PlayListTracker.selectItem publishing PreSelect")
        //First we publish a pre-select event so that everyone knows
        //we are about to select.  This should be handled quickly.
        publish(pre)
        //register at least one selector
        registerSelector(pre)
        logger.debug("PlayListTracker.selectItem publishing Select")
        //We publish the select event, which may take a while to process
        //(such as by the image viewer that has to load the image)
        publish(PlayListSelectItem(this,playList,itemIndex))
        //unregister; if nobody else registered, this will cause
        //the PostSelect to be sent; if sombody else registered,
        //the PostSelect will be sent only after they unregister.
        unregisterSelector(pre)
        logger.debug("PlayListTracker.selectItem done")
    }
    //Reference counts of in-flight selections, keyed by PreSelectItem event.
    lazy val selectorMap = Map[PlayListPreSelectItem,Int]()
    //If a subscriber will take a long time to process the SelectItem message,
    //it can call this method when it gets a PreSelectItem, so that the
    //PostSelectItem will not be sent out until after it has called
    //unregisterSelector.
    def registerSelector(ev:PlayListPreSelectItem) {
        selectorMap.synchronized {
            val n = selectorMap.getOrElse(ev,0)
            logger.debug("PlayListTracker.registerSelector("+ev+")="+n)
            selectorMap.put(ev,n+1)
        }
    }
    def unregisterSelector(ev:PlayListPreSelectItem) {
        selectorMap.synchronized {
            val n = selectorMap.getOrElse(ev,0) - 1
            logger.debug("PlayListTracker.unregisterSelector("+ev+")="+n)
            if (n>0) {
                selectorMap.put(ev,n)
            } else {
                logger.debug("PlayListTracker.unregisterSelector publishing PostSelect")
                //Last we publish post-select event so that everyone knows
                //that the selection is done.  This should be handled quickly.
                //NOTE(review): the put below is immediately undone by remove,
                //so it appears to be dead code — confirm before cleaning up.
                selectorMap.put(ev,n - 1)
                selectorMap.remove(ev)
                if (n==0)
                    publish(PlayListPostSelectItem(this,ev.list,ev.index))
            }
        }
    }
    //Move the selection up one item, or offer to move to the previous
    //directory when already at the top of the list.
    private def selectUp() {
        if (currentIndex>0)
            selectItem(currentIndex - 1)
        else {
            val prompt = "At beginning of "+playList.baseDir+";\\n"
            val newDir:File = FileUtilS.getPreviousDirectory(playList.baseDir)
            if (newDir==null) {
                val eMsg = prompt + "No previous directory"
                ui.invokeUi(ui.errorDialog(eMsg))
            } else {
                val msg = prompt + "move to previous directory "+newDir+"?"
                val leftMsg = PlayListRequestLeft(playList)
                ui.invokeUi {
                    if (ui.confirmDialog(msg))
                        this ! leftMsg
                }
            }
        }
    }
    //Move the selection down one item, or offer to move to the next
    //directory when already at the bottom of the list.
    private def selectDown() {
        if (currentIndex< playList.size - 1)
            selectItem(currentIndex + 1)
        else {
            val prompt = "At end of "+playList.baseDir+";\\n"
            val newDir:File = FileUtilS.getNextDirectory(playList.baseDir)
            if (newDir==null) {
                val eMsg = prompt + "No next directory"
                ui.invokeUi(ui.errorDialog(eMsg))
            } else {
                val msg = prompt + "move to next directory "+newDir+"?"
                val rightMsg = PlayListRequestRight(playList)
                ui.invokeUi {
                    if (ui.confirmDialog(msg))
                        this ! rightMsg
                }
            }
        }
    }
    //Load the previous sibling directory (selecting its last image on load).
    private def selectLeft() {
        if (!saveChangesAndContinue())
            return //canceled
        val newDir:File = FileUtilS.getPreviousDirectory(playList.baseDir)
        if (newDir==null) {
            val eMsg = "No previous directory"
            ui.invokeUi(ui.errorDialog(eMsg))
        } else {
            load(newDir.getPath,true)
        }
    }
    //Load the next sibling directory (selecting its first image on load).
    private def selectRight() {
        if (!saveChangesAndContinue())
            return //canceled
        val newDir:File = FileUtilS.getNextDirectory(playList.baseDir)
        if (newDir==null) {
            val eMsg = "No next directory"
            ui.invokeUi(ui.errorDialog(eMsg))
        } else {
            load(newDir.getPath,false)
        }
    }
    ///So we can see what file we are dealing with
    def fileName = SomeOrNone(lastLoadFileName)
    ///Save our playlist to a file.
    def save(filename:String):Boolean = {
        val b =playList.save(filename)
        if (b) isModified = false
        b
    }
    //Save to the last loaded file name.
    def save(absolute:Boolean):Boolean = {
        save(lastLoadFileName,absolute)
    }
    //Prompt the user for a destination file, then save to it.
    def saveAs(defaultName:String, absolute:Boolean):Boolean = {
        val prompt = ui.getResourceString("dialog.PlayList.SaveAs.prompt")
        ui.fileSaveDialog(prompt,defaultName) match {
            case None => false
            case Some(f) => save(f,absolute)
        }
    }
    def save(filename:String,absolute:Boolean):Boolean = {
        val b = playList.save(filename,absolute)
        if (b) isModified = false
        b
    }
    def save(f:File):Boolean = save(f,false)
    def save(f:File, absolute:Boolean):Boolean = {
        val b = playList.save(f,absolute)
        if (b) isModified = false
        b
    }
    def save(out:PrintWriter, baseDir:File):Boolean = {
        val b = playList.save(out, baseDir)
        if (b) isModified = false
        b
    }
    //Load a playlist from a file or directory, selecting the first item.
    def load(fileName:String):Unit = {
        if (!saveChangesAndContinue())
            return //canceled
        load(fileName,false)
    }
    //Load a playlist; selectLast picks the last item instead of the first.
    def load(fileName:String, selectLast:Boolean) {
        if (!saveChangesAndContinue())
            return //canceled
        val oldPlayList = playList
        val newPlayList = PlayList.load(ui,fileName).asInstanceOf[PlayList]
        //A directory is loaded via its index.mpr playlist file
        lastLoadFileName =
            if ((new File(fileName)).isDirectory) {
                if (fileName.endsWith(File.separator))
                    fileName+"index.mpr" //don't double up the separator
                else
                    fileName+File.separator+"index.mpr"
            } else
                fileName
        playList = newPlayList
        isModified = false
        publish(PlayListChangeList(this,oldPlayList,newPlayList))
        val idx = if (selectLast) newPlayList.size - 1 else 0
        //Auto select the first/last item in the list if it is an image file
        if (newPlayList.size>0 &&
                FileInfo.isImageFileName(newPlayList.getItem(idx).fileName))
            selectItem(idx)
    }
    //If our playlist has changed AND the askSaveOnChanges flag is true,
    //we ask the user if he wants to save the playlist.
    //Return true if the user saved successfully or declined to save;
    //return false if the user canceled.
    def saveChangesAndContinue():Boolean = {
        if (!askSaveOnChanges)
            return true         //ignore changes at this point
        if (!isModified)
            return true         //no changes, no need to save
        val prefix = "dialog.PlayList.SaveChanges."
        val prompt = ui.getResourceString(prefix+"prompt")
            //TODO - put default filename into prompt
        val title = ui.getResourceString(prefix+"title")
        //val labels = ui.getResourceString(prefix+"buttons").split("\\\\|")
        //Labels are: Save, Save As, Discard, Cancel
        val buttonKeys = ui.getResourceString(prefix+"buttonKeys").split("\\\\|")
        ui.multiButtonDialogR(prompt, title, prefix, buttonKeys) match {
            case 0 =>   //Save to default location
                if (lastLoadFileName!=null)
                    save(lastLoadFileName)
                else
                    saveAs(null,false)
            case 1 =>   //Save As
                saveAs(lastLoadFileName,false)
            case 2 =>   //Ignore changes
                isModified = false      //we don't care about the changes
                true
            case 3 =>   //Cancel button
                false           //caller should not continue
            case -1 =>  //window close button, treat as cancel
                false
        }
    }
}
| jimmc/mimprint | src/net/jimmc/mimprint/PlayListTracker.scala | Scala | gpl-2.0 | 13,712 |
package ildl
package benchmark
package hamming
package step1
import collection.mutable.Queue
//
// You can read about this benchmark on the following wiki page:
// https://github.com/miniboxing/ildl-plugin/wiki/Sample-%7E-Efficient-Collections
//
/**
 * A transformation object that transforms the Queue[BigInt] to a [[FunnyQueue]].
 * The methods follow the ildl naming protocol (toRepr/toHigh, ctor_*,
 * extension_*, implicit_*) and are matched by the compiler plugin.
 * @see the comment in [[ildl.benchmark.hamming.HammingNumbers]] for more information
 */
object QueueOfLongAsFunnyQueue extends TransformationDescription {
  // coercions (never expected to execute at runtime — the transformation
  // should have rewritten all Queue[BigInt] uses to FunnyQueue):
  def toRepr(in: Queue[BigInt]): FunnyQueue @high =
    throw new Exception("We shouldn't need this!")
  def toHigh(q: FunnyQueue @high): Queue[BigInt] =
    throw new Exception("We shouldn't need this!")
  // constructor:
  def ctor_Queue(): FunnyQueue @high =
    new FunnyQueue()
  // extension methods and implicits:
  def implicit_QueueWithEnqueue1_enqueue1(q: FunnyQueue @high)(bi: BigInt): Unit = {
    q.enqueue(bi)
  }
  def extension_enqueue(q: FunnyQueue @high, bis: BigInt*): Unit = {
    // we don't support more than one element :)
    assert(bis.size == 1)
    val bi = bis.apply(0)
    // the backing store holds Longs, so the BigInt must fit in 64 bits
    assert(bi.isValidLong)
    q.enqueue(bi.longValue())
  }
  def extension_dequeue(q: FunnyQueue @high): BigInt = q.dequeue()
  def extension_head(q: FunnyQueue @high): BigInt = q.head()
}
package com.adendamedia.cornucopia.graph
import java.util
import java.util.concurrent.atomic.AtomicInteger
import akka.NotUsed
import akka.actor._
import akka.stream.{FlowShape, ThrottleMode}
import com.adendamedia.cornucopia.redis.Connection.{CodecType, Salad, getConnection, newSaladAPI}
import org.slf4j.LoggerFactory
import com.adendamedia.cornucopia.redis._
import com.adendamedia.salad.SaladClusterAPI
import com.adendamedia.cornucopia.Config.ReshardTableConfig._
import com.adendamedia.cornucopia.redis.ReshardTable._
import com.lambdaworks.redis.{RedisException, RedisURI}
import com.lambdaworks.redis.cluster.models.partitions.RedisClusterNode
import com.lambdaworks.redis.models.role.RedisInstance.Role
import collection.JavaConverters._
import scala.collection.mutable
import scala.language.implicitConversions
import scala.concurrent.{ExecutionContext, Future}
import akka.stream.scaladsl.{Flow, GraphDSL, Merge, MergePreferred, Partition, Sink}
import com.adendamedia.cornucopia.Config
import com.adendamedia.cornucopia.actors.{RedisCommandRouter, SharedActorSystem}
trait CornucopiaGraph {
import scala.concurrent.ExecutionContext.Implicits.global
import com.adendamedia.cornucopia.CornucopiaException._
protected val logger = LoggerFactory.getLogger(this.getClass)
protected def getNewSaladApi: Salad = newSaladAPI
  /**
   * Maps an operation key to the ordinal used by the downstream Partition
   * stage; unknown keys map to UNSUPPORTED. The incoming key is normalized
   * with trim + toLowerCase before matching against the stable identifiers
   * ADD_MASTER.key etc. (defined in the redis package).
   */
  def partitionEvents(key: String) = key.trim.toLowerCase match {
    case ADD_MASTER.key => ADD_MASTER.ordinal
    case ADD_SLAVE.key => ADD_SLAVE.ordinal
    case REMOVE_NODE.key => REMOVE_NODE.ordinal
    case RESHARD.key => RESHARD.ordinal
    case _ => UNSUPPORTED.ordinal
  }
  /**
   * Maps a node-removal key to the ordinal of the matching removal sub-stream.
   * Unlike partitionEvents there is no catch-all case, so any key other than
   * the three listed will throw a MatchError.
   */
  def partitionNodeRemoval(key: String) = key.trim.toLowerCase match {
    case REMOVE_MASTER.key => REMOVE_MASTER.ordinal
    case REMOVE_SLAVE.key => REMOVE_SLAVE.ordinal
    case UNSUPPORTED.key => UNSUPPORTED.ordinal
  }
/**
* Stream definitions for the graph.
*/
// Extract a tuple of the key and value from a Kafka record.
case class KeyValue(key: String, value: String, senderRef: Option[ActorRef] = None, newMasterURI: Option[RedisURI] = None)
  // Allows to create Redis URI from the following forms:
  //    host OR host:port
  //    e.g., redis://127.0.0.1 OR redis://127.0.0.1:7006
  // When the string splits into three ':'-separated parts it is treated as
  // scheme://host:port — the '/' characters are stripped from the host part
  // (note: ALL '/' characters, not just the leading "//") and the last part is
  // parsed as the port. Any other shape is passed to RedisURI.create verbatim.
  protected def createRedisUri(uri: String): RedisURI = {
    val parts = uri.split(":")
    if (parts.size == 3) {
      // parts(1) is "//host" for a well-formed URI; drop the slashes
      val host = parts(1).foldLeft("")((acc, ch) => if (ch != '/') acc + ch else acc)
      RedisURI.create(host, parts(2).toInt)
    }
    else RedisURI.create(uri)
  }
protected def streamAddMaster(implicit executionContext: ExecutionContext): Flow[KeyValue, KeyValue, NotUsed]
// Add a slave node to the cluster, replicating the master that has the fewest slaves.
protected def streamAddSlave(implicit executionContext: ExecutionContext): Flow[KeyValue, KeyValue, NotUsed]
// Emit a key-value pair indicating the node type and URI.
protected def streamRemoveNode(implicit executionContext: ExecutionContext) = Flow[KeyValue]
.map(_.value)
.map(createRedisUri)
.map(getNewSaladApi.canonicalizeURI)
.mapAsync(1)(emitNodeType)
// Remove a slave node from the cluster.
protected def streamRemoveSlave(implicit executionContext: ExecutionContext) = Flow[KeyValue]
.map(_.value)
.groupedWithin(100, Config.Cornucopia.batchPeriod)
.mapAsync(1)(forgetNodes)
.mapAsync(1)(waitForTopologyRefresh[Unit])
.mapAsync(1)(_ => logTopology)
.map(_ => KeyValue("", ""))
// Throw for keys indicating unsupported operations.
protected def unsupportedOperation = Flow[KeyValue]
.map(record => throw new IllegalArgumentException(s"Unsupported operation ${record.key} for ${record.value}"))
/**
* Wait for the new cluster topology view to propagate to all nodes in the cluster. May not be strictly necessary
* since this microservice immediately attempts to notify all nodes of topology updates.
*
* @param passthrough The value that will be passed through to the next map stage.
* @param executionContext The thread dispatcher context.
* @tparam T
* @return The unmodified input value.
*/
protected def waitForTopologyRefresh[T](passthrough: T)(implicit executionContext: ExecutionContext): Future[T] = Future {
scala.concurrent.blocking(Thread.sleep(Config.Cornucopia.refreshTimeout))
passthrough
}
/**
* Wait for the new cluster topology view to propagate to all nodes in the cluster. Same version as above, but this
* time takes two passthroughs and returns tuple of them as future.
*
* @param passthrough1 The first value that will be passed through to the next map stage.
* @param passthrough2 The second value that will be passed through to the next map stage.
* @param executionContext The thread dispatcher context.
* @tparam T
* @tparam U
* @return The unmodified input value.
*/
protected def waitForTopologyRefresh2[T, U](passthrough1: T, passthrough2: U)(implicit executionContext: ExecutionContext): Future[(T, U)] = Future {
scala.concurrent.blocking(Thread.sleep(Config.Cornucopia.refreshTimeout))
(passthrough1, passthrough2)
}
/**
* Log the current view of the cluster topology.
*
* @param executionContext The thread dispatcher context.
* @return
*/
protected def logTopology(implicit executionContext: ExecutionContext): Future[Unit] = {
implicit val saladAPI = getNewSaladApi
saladAPI.clusterNodes.map { allNodes =>
val masterNodes = allNodes.filter(Role.MASTER == _.getRole)
val slaveNodes = allNodes.filter(Role.SLAVE == _.getRole)
logger.info(s"Master nodes: $masterNodes")
logger.info(s"Slave nodes: $slaveNodes")
}
}
/**
* The entire cluster will meet the new nodes at the given URIs. If the connection to a node fails, then retry
* until it succeeds.
*
* @param redisURIList The list of URI of the new nodes.
* @param executionContext The thread dispatcher context.
* @return The list of URI if the nodes were met. TODO: emit only the nodes that were successfully added.
*/
protected def addNodesToCluster(redisURIList: Seq[RedisURI], retries: Int = 0)(implicit executionContext: ExecutionContext): Future[Seq[RedisURI]] = {
addNodesToClusterPrime(redisURIList).recoverWith {
case e: CornucopiaRedisConnectionException =>
logger.error(s"${e.message}: retrying for number ${retries + 1}", e)
addNodesToCluster(redisURIList, retries + 1)
}
}
protected def addNodesToClusterPrime(redisURIList: Seq[RedisURI])(implicit executionContext: ExecutionContext): Future[Seq[RedisURI]] = {
implicit val saladAPI = getNewSaladApi
def getRedisConnection(nodeId: String): Future[Salad] = {
getConnection(nodeId).recoverWith {
case e: RedisException => throw CornucopiaRedisConnectionException(s"Add nodes to cluster failed to get connection to node", e)
}
}
saladAPI.clusterNodes.flatMap { allNodes =>
val getConnectionsToLiveNodes = allNodes.filter(_.isConnected).map(node => getRedisConnection(node.getNodeId))
Future.sequence(getConnectionsToLiveNodes).flatMap { connections =>
// Meet every new node from every old node.
val metResults = for {
conn <- connections
uri <- redisURIList
} yield {
conn.clusterMeet(uri)
}
Future.sequence(metResults).map(_ => redisURIList)
}
}
}
/**
* Set the n new slave nodes to replicate the poorest (fewest slaves) n masters.
*
* @param redisURIList The list of ip addresses of the slaves that will be added to the cluster. Hostnames are not acceptable.
* @param executionContext The thread dispatcher context.
* @return Indicate that the n new slaves are replicating the poorest n masters.
*/
protected def findMasters(redisURIList: Seq[RedisURI])(implicit executionContext: ExecutionContext): Future[Unit] = {
implicit val saladAPI = getNewSaladApi
saladAPI.clusterNodes.flatMap { allNodes =>
// Node ids for nodes that are currently master nodes but will become slave nodes.
val newSlaveIds = allNodes.filter(node => redisURIList.contains(node.getUri)).map(_.getNodeId)
// The master nodes (the nodes that will become slaves are still master nodes at this point and must be filtered out).
val masterNodes = saladAPI.masterNodes(allNodes)
.filterNot(node => newSlaveIds.contains(node.getNodeId))
// HashMap of master node ids to the number of slaves for that master.
val masterSlaveCount = new util.HashMap[String, AtomicInteger](masterNodes.length + 1, 1)
// Populate the hash map.
masterNodes.map(_.getNodeId).foreach(nodeId => masterSlaveCount.put(nodeId, new AtomicInteger(0)))
allNodes.map { node =>
Option.apply(node.getSlaveOf)
.map(master => masterSlaveCount.get(master).incrementAndGet())
}
// Find the poorest n masters for n slaves.
val poorestMasters = new MaxNHeapMasterSlaveCount(redisURIList.length)
masterSlaveCount.asScala.foreach(poorestMasters.offer)
assert(redisURIList.length >= poorestMasters.underlying.length)
// Create a list so that we can circle back to the first element if the new slaves outnumber the existing masters.
val poorMasterList = poorestMasters.underlying.toList
val poorMasterIndex = new AtomicInteger(0)
// Choose a master for every slave.
val listFuturesResults = redisURIList.map { slaveURI =>
getConnection(slaveURI).map(_.clusterReplicate(
poorMasterList(poorMasterIndex.getAndIncrement() % poorMasterList.length)._1))
}
Future.sequence(listFuturesResults).map(x => x)
}
}
/**
* Emit a key-value representing the node-type and the node-id.
* @param redisURI
* @param executionContext
* @return the node type and id.
*/
def emitNodeType(redisURI:RedisURI)(implicit executionContext: ExecutionContext): Future[KeyValue] = {
implicit val saladAPI = getNewSaladApi
saladAPI.clusterNodes.map { allNodes =>
val removalNodeOpt = allNodes.find(node => node.getUri.equals(redisURI))
if (removalNodeOpt.isEmpty) throw new Exception(s"Node not in cluster: $redisURI")
val kv = removalNodeOpt.map { node =>
node.getRole match {
case Role.MASTER => KeyValue(RESHARD.key, node.getNodeId)
case Role.SLAVE => KeyValue(REMOVE_SLAVE.key, node.getNodeId)
case _ => KeyValue(UNSUPPORTED.key, node.getNodeId)
}
}
kv.get
}
}
/**
* Notify all nodes in the cluster to forget this node.
*
* @param withoutNodes The list of ids of nodes to be forgotten by the cluster.
* @param executionContext The thread dispatcher context.
* @return A future indicating that the node was forgotten by all nodes in the cluster.
*/
def forgetNodes(withoutNodes: Seq[String])(implicit executionContext: ExecutionContext): Future[Unit] =
if (!withoutNodes.exists(_.nonEmpty))
Future(Unit)
else {
implicit val saladAPI = getNewSaladApi
saladAPI.clusterNodes.flatMap { allNodes =>
logger.info(s"Forgetting nodes: $withoutNodes")
// Reset the nodes to be removed.
val validWithoutNodes = withoutNodes.filter(_.nonEmpty)
validWithoutNodes.map(getConnection).map(_.map(_.clusterReset(true)))
// The nodes that will remain in the cluster should forget the nodes that will be removed.
val withNodes = allNodes
.filterNot(node => validWithoutNodes.contains(node.getNodeId)) // Node cannot forget itself.
// For the cross product of `withNodes` and `withoutNodes`; to remove the nodes in `withoutNodes`.
val forgetResults = for {
operatorNode <- withNodes
operandNodeId <- validWithoutNodes
} yield {
if (operatorNode.getSlaveOf == operandNodeId)
Future(Unit) // Node cannot forget its master.
else
getConnection(operatorNode.getNodeId).flatMap(_.clusterForget(operandNodeId))
}
Future.sequence(forgetResults).map(x => x)
}
}
/**
* Migrate all keys in a slot from the source node to the destination node and update the slot assignment on the
* affected nodes.
*
* @param slot The slot to migrate.
* @param sourceNodeId The current location of the slot data.
* @param destinationNodeId The target location of the slot data.
* @param masters The list of nodes in the cluster that will be assigned hash slots.
* @param clusterConnections The list of connections to nodes in the cluster.
* @param executionContext The thread dispatcher context.
* @return Future indicating success.
*/
protected def migrateSlot(slot: Int, sourceNodeId: String, destinationNodeId: String, destinationURI: RedisURI,
masters: List[RedisClusterNode],
clusterConnections: util.HashMap[String,Future[SaladClusterAPI[CodecType,CodecType]]])
(implicit saladAPI: Salad, executionContext: ExecutionContext): Future[Unit] = {
logger.debug(s"Migrate slot for slot $slot from source node $sourceNodeId to target node $destinationNodeId")
// Follows redis-trib.rb
def migrateSlotKeys(sourceConn: SaladClusterAPI[CodecType, CodecType],
destinationConn: SaladClusterAPI[CodecType, CodecType], attempts: Int = 1): Future[Unit] = {
import com.adendamedia.salad.serde.ByteArraySerdes._
// get all the keys in the given slot
val keyList = for {
keyCount <- sourceConn.clusterCountKeysInSlot(slot)
keyList <- sourceConn.clusterGetKeysInSlot[CodecType](slot, keyCount.toInt)
} yield keyList
// migrate over all the keys in the slot from source to destination node
val migrate = for {
keys <- keyList
result <- sourceConn.migrate[CodecType](destinationURI, keys.toList)
} yield result
def handleFailedMigration(error: Throwable): Future[Unit] = {
val errorString = error.toString
def findError(e: String, identifier: String): Boolean = {
identifier.r.findFirstIn(e) match {
case Some(_) => true
case _ => false
}
}
if (findError(errorString, "BUSYKEY")) {
logger.warn(s"Problem migrating slot $slot from $sourceNodeId to $destinationNodeId at ${destinationURI.getHost} (BUSYKEY): Target key exists. Replacing it for FIX.")
def migrateReplace: Future[Unit] = for {
keys <- keyList
result <- sourceConn.migrate[CodecType](destinationURI, keys.toList, replace = true)
} yield result
migrateReplace
} else if (findError(errorString, "CLUSTERDOWN")) {
logger.error(s"Failed to migrate slot $slot from $sourceNodeId to $destinationNodeId at ${destinationURI.getHost} (CLUSTERDOWN): Retrying for attempt $attempts")
for {
src <- clusterConnections.get(sourceNodeId)
dst <- clusterConnections.get(destinationNodeId)
msk <- migrateSlotKeys(src, dst, attempts + 1)
} yield msk
} else if (findError(errorString, "MOVED")) {
logger.error(s"Failed to migrate slot $slot from $sourceNodeId to $destinationNodeId at ${destinationURI.getHost} (MOVED): Ignoring on attempt $attempts")
Future(Unit)
} else {
logger.error(s"Failed to migrate slot $slot from $sourceNodeId to $destinationNodeId at ${destinationURI.getHost}", error)
Future(Unit)
}
}
migrate map { _ =>
logger.info(s"Successfully migrated slot $slot from $sourceNodeId to $destinationNodeId at ${destinationURI.getHost} on attempt $attempts")
} recoverWith { case e => handleFailedMigration(e) }
}
def setSlotAssignment(sourceConn: SaladClusterAPI[CodecType, CodecType],
destinationConn: SaladClusterAPI[CodecType, CodecType],
attempts: Int = 1): Future[Unit] = {
val ssa = for {
_ <- destinationConn.clusterSetSlotImporting(slot, sourceNodeId)
_ <- sourceConn.clusterSetSlotMigrating(slot, destinationNodeId)
} yield { }
ssa map { _ =>
logger.info(s"Successfully set slot assignment for slot $slot on attempt $attempts")
} recover { case e =>
logger.error(s"There was a problem setting slot assignment for slot $slot, retrying for attempt $attempts: ${e.toString}")
setSlotAssignment(sourceConn, destinationConn, attempts + 1)
}
}
destinationNodeId match {
case `sourceNodeId` =>
// Don't migrate if the source and destination are the same.
logger.warn(s"Ignoring attempt to migrate slot $slot because source and destination node are the same")
Future(Unit)
case _ =>
for {
src <- clusterConnections.get(sourceNodeId)
dst <- clusterConnections.get(destinationNodeId)
_ <- setSlotAssignment(src, dst)
_ <- migrateSlotKeys(src, dst)
_ <- notifySlotAssignment(slot, destinationNodeId, masters)
} yield {
logger.info(s"Migrate slot successful for slot $slot from source node $sourceNodeId to target node $destinationNodeId, notifying masters of new slot assignment")
}
}
}
/**
* Notify all master nodes of a slot assignment so that they will immediately be able to redirect clients.
*
* @param masters The list of nodes in the cluster that will be assigned hash slots.
* @param assignedNodeId The node that should be assigned the slot
* @param executionContext The thread dispatcher context.
* @return Future indicating success.
*/
protected def notifySlotAssignment(slot: Int, assignedNodeId: String, masters: List[RedisClusterNode])
(implicit saladAPI: Salad, executionContext: ExecutionContext)
: Future[Unit] = {
val getMasterConnections = masters.map(master => getConnection(master.getNodeId))
Future.sequence(getMasterConnections).flatMap { masterConnections =>
val notifyResults = masterConnections.map(_.clusterSetSlotNode(slot, assignedNodeId))
Future.sequence(notifyResults).map(x => x)
}
}
/**
* Store the n poorest masters.
* Implemented on scala.mutable.PriorityQueue.
*
* @param n
*/
sealed case class MaxNHeapMasterSlaveCount(n: Int) {
private type MSTuple = (String, AtomicInteger)
private object MSOrdering extends Ordering[MSTuple] {
def compare(a: MSTuple, b: MSTuple) = a._2.intValue compare b._2.intValue
}
implicit private val ordering = MSOrdering
val underlying = new mutable.PriorityQueue[MSTuple]
/**
* O(1) if the entry is not a candidate for the being one of the poorest n masters.
* O(log(n)) if the entry is a candidate.
*
* @param entry The candidate master-slavecount tuple.
*/
def offer(entry: MSTuple) =
if (n > underlying.length) {
underlying.enqueue(entry)
}
else if (entry._2.intValue < underlying.head._2.intValue) {
underlying.dequeue()
underlying.enqueue(entry)
}
}
}
/**
 * Concrete graph whose task source is an actor (`cornucopiaSource`); wires
 * the add/remove/reshard flows together with the Akka streams GraphDSL.
 */
class CornucopiaActorSource extends CornucopiaGraph {
  import Config.materializer
  import com.adendamedia.cornucopia.actors.CornucopiaSource.Task
  import scala.concurrent.ExecutionContext.Implicits.global

  protected type ActorRecord = Task

  val actorSystem = SharedActorSystem.sharedActorSystem

  // Router actor that distributes slot-migration commands during resharding.
  protected val redisCommandRouter = actorSystem.actorOf(RedisCommandRouter.props, "redisCommandRouter")

  // Add a master node to the cluster.
  // Emits a RESHARD record so the new master subsequently receives hash slots.
  override protected def streamAddMaster(implicit executionContext: ExecutionContext): Flow[KeyValue, KeyValue, NotUsed] = Flow[KeyValue]
    .map(kv => (kv.value, kv.senderRef))
    .map(t => (createRedisUri(t._1), t._2) )
    .map(t => (getNewSaladApi.canonicalizeURI(t._1), t._2))
    .groupedWithin(1, Config.Cornucopia.batchPeriod)
    .mapAsync(1)(t => {
      val t1 = t.unzip
      val redisURIs = t1._1
      val actorRefs = t1._2
      addNodesToCluster(redisURIs) flatMap { uris =>
        waitForTopologyRefresh2[Seq[RedisURI], Seq[Option[ActorRef]]](uris, actorRefs)
      }
    })
    .map{ case (redisURIs, actorRef) =>
      // Batch size is 1 (groupedWithin(1, ...)), so head is the only element.
      val ref = actorRef.head
      val uri = redisURIs.head
      KeyValue(RESHARD.key, "", ref, Some(uri))
    }

  // Add a slave node to the cluster, replicating the master that has the fewest slaves.
  override protected def streamAddSlave(implicit executionContext: ExecutionContext): Flow[KeyValue, KeyValue, NotUsed] = Flow[KeyValue]
    .map(kv => (kv.value, kv.senderRef))
    .map(t => (createRedisUri(t._1), t._2) )
    .map(t => (getNewSaladApi.canonicalizeURI(t._1), t._2))
    .groupedWithin(1, Config.Cornucopia.batchPeriod)
    .mapAsync(1)(t => {
      val t1 = t.unzip
      val redisURIs = t1._1
      val actorRefs = t1._2
      addNodesToCluster(redisURIs) flatMap { uris =>
        waitForTopologyRefresh2[Seq[RedisURI], Seq[Option[ActorRef]]](uris, actorRefs)
      }
    })
    .mapAsync(1)(t => {
      val redisURIs = t._1
      val actorRefs = t._2
      findMasters(redisURIs) map { _ =>
        (redisURIs, actorRefs)
      }
    })
    .mapAsync(1)(t => {
      val redisURIs = t._1
      val actorRefs = t._2
      waitForTopologyRefresh2[Seq[RedisURI], Seq[Option[ActorRef]]](redisURIs, actorRefs)
    })
    .mapAsync(1)(t => signalSlavesAdded(t._1, t._2))
    .map(_ => KeyValue("", ""))

  // Tell each waiting sender that its slave was added: Right(("slave", host)).
  private def signalSlavesAdded(uris: Seq[RedisURI], senders: Seq[Option[ActorRef]]): Future[Unit] = {
    def signal(uri: RedisURI, ref: Option[ActorRef]): Future[Unit] = {
      Future {
        ref match {
          case Some(sender) => sender ! Right(("slave", uri.getHost))
          case None => Unit
        }
      }
    }
    val flattened = senders.flatten
    if (flattened.isEmpty) Future(Unit)
    else {
      val zipped = uris zip senders
      Future.reduce(
        zipped map {
          case (uri: RedisURI, sender: Option[ActorRef]) => signal(uri, sender)
        }
      )((_, _) => Unit)
    }
  }

  // Reshard the cluster so the new master receives hash slots; throttled so
  // at most one reshard starts per minReshardWait window.
  protected def streamReshard(implicit executionContext: ExecutionContext): Flow[KeyValue, KeyValue, NotUsed] = Flow[KeyValue]
    .map(kv => (kv.senderRef, kv.newMasterURI))
    .throttle(1, Config.Cornucopia.minReshardWait, 1, ThrottleMode.Shaping)
    .mapAsync(1)(t => {
      val senderRef = t._1
      val newMasterURI = t._2
      reshardClusterPrime(senderRef, newMasterURI)
    })
    .mapAsync(1)(waitForTopologyRefresh[Unit])
    .mapAsync(1)(_ => logTopology)
    .map(_ => KeyValue("", ""))

  // Runs the reshard, retrying (without bound) on ReshardTableException and
  // replying Left(error) to the sender on any other failure.
  protected def reshardClusterPrime(sender: Option[ActorRef], newMasterURI: Option[RedisURI], retries: Int = 0): Future[Unit] = {
    def reshard(ref: ActorRef, uri: RedisURI): Future[Unit] = {
      reshardClusterWithNewMaster(uri) map { _: Unit =>
        logger.info(s"Successfully resharded cluster ($retries retries), informing Kubernetes controller")
        ref ! Right(("master", uri.getHost))
      } recover {
        case e: ReshardTableException =>
          // NOTE(review): `recover` discards the retry future; the failed
          // attempt resolves successfully while the retry runs detached —
          // confirm intended.
          logger.error(s"There was a problem computing the reshard table, retrying for retry number ${retries + 1}:", e)
          reshardClusterPrime(sender, newMasterURI, retries + 1)
        case ex: Throwable =>
          logger.error("Failed to reshard cluster, informing Kubernetes controller", ex)
          ref ! Left(s"${ex.toString}")
      }
    }
    val result = for {
      ref <- sender
      uri <- newMasterURI
    } yield reshard(ref, uri)
    result match {
      case Some(f) => f
      case None =>
        // this should never happen though
        logger.error("There was a problem resharding the cluster: sender actor or new redis master URI missing")
        Future(Unit)
    }
  }

  // Log one line per source node with the slots to be migrated away from it.
  private def printReshardTable(reshardTable: Map[String, List[Int]]) = {
    logger.info(s"Reshard Table:")
    reshardTable foreach { case (nodeId, slots) =>
      logger.info(s"Migrating slots from node '$nodeId': ${slots.mkString(", ")}")
    }
  }

  /**
   * Compute a reshard table and migrate slots from the existing masters to the
   * new master; waits until the new node reports cluster_state ok first.
   */
  protected def reshardClusterWithNewMaster(newMasterURI: RedisURI)
  : Future[Unit] = {
    // Execute futures using a thread pool so we don't run out of memory due to futures.
    implicit val executionContext = Config.actorSystem.dispatchers.lookup("akka.actor.resharding-dispatcher")
    implicit val saladAPI = getNewSaladApi
    saladAPI.masterNodes.flatMap { mn =>
      val masterNodes = mn.toList
      logger.debug(s"Reshard table with new master nodes: ${masterNodes.map(_.getNodeId)}")
      val liveMasters = masterNodes.filter(_.isConnected)
      logger.debug(s"Reshard cluster with new master live masters: ${liveMasters.map(_.getNodeId)}")
      lazy val idToURI = new util.HashMap[String,RedisURI](liveMasters.length + 1, 1)
      // Re-use cluster connections so we don't exceed file-handle limit or waste resources.
      lazy val clusterConnections = new util.HashMap[String,Future[SaladClusterAPI[CodecType,CodecType]]](liveMasters.length + 1, 1)
      // NOTE(review): `.head` throws if the new master URI is not found among
      // the current masters — confirm that callers guarantee membership.
      val targetNode = masterNodes.filter(_.getUri == newMasterURI).head
      logger.debug(s"Reshard cluster with new master target node: ${targetNode.getNodeId}")
      liveMasters.map { master =>
        idToURI.put(master.getNodeId, master.getUri)
        val connection = getConnection(master.getNodeId)
        clusterConnections.put(master.getNodeId, connection)
      }
      logger.debug(s"Reshard cluster with new master cluster connections for nodes: ${clusterConnections.keySet().toString}")
      val sourceNodes = masterNodes.filterNot(_ == targetNode)
      logger.debug(s"Reshard cluster with new master source nodes: ${sourceNodes.map(_.getNodeId)}")
      val reshardTable = computeReshardTable(sourceNodes)
      printReshardTable(reshardTable)

      def doReshard: Future[Unit] = {
        // Since migrating many slots causes many requests to redis cluster nodes, we should have a way to throttle
        // the number of parallel futures executing at any given time so that we don't flood redis nodes with too many
        // simultaneous requests.
        import akka.pattern.ask
        import akka.util.Timeout
        import scala.concurrent.duration._
        import RedisCommandRouter._
        implicit val timeout = Timeout(Config.reshardTimeout seconds)
        // Partially apply migrateSlot: the router supplies slot/source/destination.
        val migrateSlotFn = migrateSlot(_: Int, _: String, _: String, newMasterURI, liveMasters, clusterConnections)
        val future = redisCommandRouter ? ReshardCluster(targetNode.getNodeId, reshardTable, migrateSlotFn)
        future.mapTo[String].map { msg: String =>
          logger.info(s"Reshard cluster was a success: $msg")
          Unit
        }
      }

      // Poll CLUSTER INFO until the new node reports cluster_state ok.
      // NOTE(review): Thread.sleep here blocks a dispatcher thread without
      // `blocking(...)` — confirm acceptable on the resharding dispatcher.
      def waitForNewNodeToBeOk(conn: SaladClusterAPI[Connection.CodecType, Connection.CodecType]): Future[Unit] = {
        def isOk(info: Map[String,String]): Boolean = info("cluster_state") == "ok"
        conn.clusterInfo flatMap { info: Map[String,String] =>
          if (isOk(info)) {
            logger.info(s"New node is ready for resharding")
            doReshard
          }
          else {
            logger.warn(s"New node is not yet ready for resharding, keep waiting")
            Thread.sleep(100)
            waitForNewNodeToBeOk(conn)
          }
        }
      }

      clusterConnections.get(targetNode.getNodeId) flatMap waitForNewNodeToBeOk
    }
  }

  // Convert an actor Task into the graph's internal KeyValue record.
  protected def extractKeyValue = Flow[ActorRecord]
    .map[KeyValue](record => KeyValue(record.operation, record.redisNodeIp, record.ref))

  // The complete processing graph: partition tasks by operation, feed master
  // additions (and master removals) back into the reshard branch via the
  // preferred merge, then merge every branch into a single outlet.
  protected val processTask = Flow.fromGraph(GraphDSL.create() { implicit builder =>
    import GraphDSL.Implicits._

    val taskSource = builder.add(Flow[Task])
    val mergeFeedback = builder.add(MergePreferred[KeyValue](2))
    val partition = builder.add(Partition[KeyValue](
      5, kv => partitionEvents(kv.key)))
    val kv = builder.add(extractKeyValue)
    val partitionRm = builder.add(Partition[KeyValue](
      3, kv => partitionNodeRemoval(kv.key)
    ))
    val fanIn = builder.add(Merge[KeyValue](5))

    taskSource.out ~> kv
    kv ~> mergeFeedback.preferred
    mergeFeedback.out ~> partition
    partition.out(ADD_MASTER.ordinal) ~> streamAddMaster ~> mergeFeedback.in(0)
    partition.out(ADD_SLAVE.ordinal) ~> streamAddSlave ~> fanIn
    partition.out(REMOVE_NODE.ordinal) ~> streamRemoveNode ~> partitionRm
    partitionRm.out(REMOVE_MASTER.ordinal) ~> mergeFeedback.in(1)
    partitionRm.out(REMOVE_SLAVE.ordinal) ~> streamRemoveSlave ~> fanIn
    partitionRm.out(UNSUPPORTED.ordinal) ~> unsupportedOperation ~> fanIn
    partition.out(RESHARD.ordinal) ~> streamReshard ~> fanIn
    partition.out(UNSUPPORTED.ordinal) ~> unsupportedOperation ~> fanIn

    FlowShape(taskSource.in, fanIn.out)
  })

  protected val cornucopiaSource = Config.cornucopiaActorSource

  // Materializes the graph and returns the source actor's ref for submitting tasks.
  def ref: ActorRef = processTask
    .to(Sink.ignore)
    .runWith(cornucopiaSource)
}
| sjking/cornucopia | src/main/scala/com/adendamedia/cornucopia/graph/Graph.scala | Scala | lgpl-3.0 | 29,245 |
/*
* Copyright 2010 LinkedIn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.nio.channels._
import org.apache.log4j.Logger
import kafka.producer._
import kafka.consumer._
import kafka.log._
import kafka.network._
import kafka.message._
import kafka.server._
import kafka.api._
import kafka.common.{WrongPartitionException, ErrorMapping}
import kafka.utils.SystemTime
/**
 * Logic to handle the various Kafka requests
 */
class KafkaRequestHandlers(val logManager: LogManager) {

  private val logger = Logger.getLogger(classOf[KafkaRequestHandlers])

  /** Look up the handler function for the given request type id. */
  def handlerFor(requestTypeId: Short, request: Receive): Handler.Handler = {
    requestTypeId match {
      case RequestKeys.Produce => handleProducerRequest _
      case RequestKeys.Fetch => handleFetchRequest _
      case RequestKeys.MultiFetch => handleMultiFetchRequest _
      case RequestKeys.MultiProduce => handleMultiProducerRequest _
      case RequestKeys.Offsets => handleOffsetRequest _
      case _ => throw new IllegalStateException("No mapping found for handler id " + requestTypeId)
    }
  }

  /**
   * Append the produced messages to the topic's log.
   * Produce requests get no response, hence the None return.
   */
  def handleProducerRequest(receive: Receive): Option[Send] = {
    val sTime = SystemTime.milliseconds
    if(logger.isTraceEnabled)
      logger.trace("Handling producer request")
    val request = ProducerRequest.readFrom(receive.buffer)
    val partition = request.getTranslatedPartition(logManager.chooseRandomPartition)
    try {
      logManager.getOrCreateLog(request.topic, partition).append(request.messages)
      if(logger.isTraceEnabled)
        logger.trace(request.messages.sizeInBytes + " bytes written to logs.")
    }
    catch {
      case e: WrongPartitionException => // let it go for now
    }
    if (logger.isDebugEnabled)
      logger.debug("kafka produce time " + (SystemTime.milliseconds - sTime) + " ms")
    None
  }

  /** Append every produce in the batch; like single produce, returns no response. */
  def handleMultiProducerRequest(receive: Receive): Option[Send] = {
    if(logger.isTraceEnabled)
      logger.trace("Handling multiproducer request")
    val request = MultiProducerRequest.readFrom(receive.buffer)
    try {
      for (produce <- request.produces) {
        val partition = produce.getTranslatedPartition(logManager.chooseRandomPartition)
        logManager.getOrCreateLog(produce.topic, partition).append(produce.messages)
        if(logger.isTraceEnabled)
          logger.trace(produce.messages.sizeInBytes + " bytes written to logs.")
      }
    }
    catch {
      case e: WrongPartitionException => // let it go for now
    }
    None
  }

  /** Read the requested message set and return it to the client. */
  def handleFetchRequest(request: Receive): Option[Send] = {
    if(logger.isTraceEnabled)
      logger.trace("Handling fetch request")
    val fetchRequest = FetchRequest.readFrom(request.buffer)
    Some(readMessageSet(fetchRequest))
  }

  /** Serve several fetch requests in a single round trip. */
  def handleMultiFetchRequest(request: Receive): Option[Send] = {
    if(logger.isTraceEnabled)
      logger.trace("Handling multifetch request")
    val multiFetchRequest = MultiFetchRequest.readFrom(request.buffer)
    var responses = multiFetchRequest.fetches.map(fetch =>
      readMessageSet(fetch)).toList
    Some(new MultiMessageSetSend(responses))
  }

  /**
   * Read a message set from the log for one fetch request.
   * RuntimeExceptions are translated into an error code carried by an empty
   * response; any other Throwable propagates to the caller.
   */
  private def readMessageSet(fetchRequest: FetchRequest): MessageSetSend = {
    var response: MessageSetSend = null
    try {
      val log = logManager.getOrCreateLog(fetchRequest.topic, fetchRequest.partition)
      response = new MessageSetSend(log.read(fetchRequest.offset, fetchRequest.maxSize))
    }
    catch {
      case e: RuntimeException =>
        response=new MessageSetSend(MessageSet.Empty, ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Exception]]))
      case e2 => throw e2
    }
    response
  }

  /** Return the log offsets preceding the time given in the request. */
  def handleOffsetRequest(request: Receive): Option[Send] = {
    if(logger.isTraceEnabled)
      logger.trace("Handling offset request")
    val offsetRequest = OffsetRequest.readFrom(request.buffer)
    val log = logManager.getOrCreateLog(offsetRequest.topic, offsetRequest.partition)
    val offsets = log.getOffsetsBefore(offsetRequest)
    val response = new OffsetArraySend(offsets)
    Some(response)
  }
}
| jinfei21/kafka | src/kafka/server/KafkaRequestHandlers.scala | Scala | apache-2.0 | 4,586 |
/*
* -╥⌐⌐⌐⌐ -⌐⌐⌐⌐-
* ≡╢░░░░⌐\\░░░φ ╓╝░░░░⌐░░░░╪╕
* ╣╬░░` `░░░╢┘ φ▒╣╬╝╜ ░░╢╣Q
* ║╣╬░⌐ ` ╤▒▒▒Å` ║╢╬╣
* ╚╣╬░⌐ ╔▒▒▒▒`«╕ ╢╢╣▒
* ╫╬░░╖ .░ ╙╨╨ ╣╣╬░φ ╓φ░╢╢Å
* ╙╢░░░░⌐"░░░╜ ╙Å░░░░⌐░░░░╝`
* ``˚¬ ⌐ ˚˚⌐´
*
* Copyright © 2016 Flipkart.com
*/
package com.flipkart.connekt.busybees.tests.streams.topologies
import akka.stream.scaladsl.{Sink, Source}
import com.flipkart.connekt.busybees.streams.flows.RenderFlow
import com.flipkart.connekt.busybees.streams.flows.dispatchers.{SMTPDispatcher, SMTPDispatcherPrepare}
import com.flipkart.connekt.busybees.streams.flows.formaters.EmailChannelFormatter
import com.flipkart.connekt.busybees.tests.streams.TopologyUTSpec
import com.flipkart.connekt.commons.iomodels.ConnektRequest
import com.flipkart.connekt.commons.services.KeyChainManager
import com.flipkart.connekt.commons.utils.StringUtils._
import scala.concurrent.Await
import scala.concurrent.duration._
class SMTPEmailTopologyTest extends TopologyUTSpec {

  "SMTPEmailTopology Test" should "run" in {

    // SMTP credential from the key chain; `.get` fails fast when it is absent.
    val credentials = KeyChainManager.getSimpleCredential("Flipkart-SMTP").get
    println("Credentials = " + credentials)

    // A raw EMAIL ConnektRequest, parsed from its JSON representation.
    val emailRequest =
      s"""
         |{ "id" : "123456789",
         | "channel": "EMAIL",
         | "sla": "H",
         | "channelData": {
         | "type": "EMAIL",
         | "subject": "Hello Kinshuk. GoodLuck!",
         | "text": "Text",
         | "html" : "<b>html</b>"
         |
         | },
         | "channelInfo" : {
         | "type" : "EMAIL",
         | "appName" : "FKProd",
         | "to" : [{ "name": "Kinshuk", "address": "kinshuk1989@gmail.com" }]
         | },
         | "clientId" : "123456",
         | "meta": {}
         |}
      """.stripMargin.getObj[ConnektRequest]

    // Render -> format for the EMAIL channel -> prepare SMTP payload -> dispatch.
    val pipeline = Source.single(emailRequest)
      .via(new RenderFlow().flow)
      .via(new EmailChannelFormatter(64)(system.dispatchers.lookup("akka.actor.io-dispatcher")).flow)
      .via(new SMTPDispatcherPrepare().flow)
      .via(new SMTPDispatcher("10.33.102.104", credentials, 10).flow)

    val done = pipeline.runWith(Sink.foreach(println))

    val response = Await.result(done, 80.seconds)
    println(response)
    assert(response != null)
  }
}
| Flipkart/connekt | busybees/src/test/scala/com/flipkart/connekt/busybees/tests/streams/topologies/SMTPEmailTopologyTest.scala | Scala | mit | 2,806 |
package com.twitter.finagle.stats
import com.twitter.finagle.util.LoadService
/**
 * A global StatsReceiver for generic finagle metrics.
 */
private[finagle] object FinagleStatsReceiver extends StatsReceiverProxy {
  // All stats recorded through this receiver live under the "finagle" scope.
  val self: StatsReceiver = LoadedStatsReceiver.scope("finagle")
  override def repr: FinagleStatsReceiver.type = this

  // Accessor for callers that need a method rather than the singleton itself.
  def get: StatsReceiver = this
}
/**
 * A [[com.twitter.finagle.stats.HostStatsReceiver]] that loads
 * all service-loadable receivers and broadcasts stats to them.
 */
// The early-initializer block assigns `self` before the HostStatsReceiver
// trait's initialization runs. NOTE(review): early initializers are dropped
// in newer Scala versions — confirm before upgrading.
object LoadedHostStatsReceiver extends {
  // Broadcasts to every HostStatsReceiver discovered via LoadService.
  @volatile var self: StatsReceiver = BroadcastStatsReceiver(LoadService[HostStatsReceiver]())
} with HostStatsReceiver
/**
 * A client-specific StatsReceiver. All stats recorded using this receiver
 * are prefixed with the string "clnt" by default.
 */
object ClientStatsReceiver extends StatsReceiverProxy {
  // volatile: may be swapped at runtime via setRootScope.
  @volatile protected var self: StatsReceiver =
    RoleConfiguredStatsReceiver(LoadedStatsReceiver.scope("clnt"), Client)

  /** Replace the default "clnt" root scope with `rootScope`. */
  def setRootScope(rootScope: String): Unit = {
    self = RoleConfiguredStatsReceiver(LoadedStatsReceiver.scope(rootScope), Client)
  }

  override def repr: ClientStatsReceiver.type = this

  // Accessor for callers that need a method rather than the singleton itself.
  def get: StatsReceiver = this
}
/**
 * A server-specific StatsReceiver. All stats recorded using this receiver
 * are prefixed with the string "srv" by default.
 */
object ServerStatsReceiver extends StatsReceiverProxy {
  // volatile: may be swapped at runtime via setRootScope.
  @volatile protected var self: StatsReceiver =
    RoleConfiguredStatsReceiver(LoadedStatsReceiver.scope("srv"), Server)

  /** Replace the default "srv" root scope with `rootScope`. */
  def setRootScope(rootScope: String): Unit = {
    self = RoleConfiguredStatsReceiver(LoadedStatsReceiver.scope(rootScope), Server)
  }

  override def repr: ServerStatsReceiver.type = this

  // Accessor for callers that need a method rather than the singleton itself.
  def get: StatsReceiver = this
}
| twitter/finagle | finagle-core/src/main/scala/com/twitter/finagle/stats/FinagleStatsReceiver.scala | Scala | apache-2.0 | 1,749 |
package tests.rescala.testtools
import org.scalatest.Assertions
import org.scalatest.matchers.should.Matchers
import rescala.interface.RescalaInterface
class ReevaluationBundle[T <: RescalaInterface](val api: T) {
  import api._

  /**
   * Test helper that observes a Signal or Event and records every value it
   * produces, most recent first, so tests can assert the reevaluation history.
   */
  class ReevaluationTracker[A] private () extends Matchers {

    /** Observed values, newest at the head. */
    var results: List[A] = Nil

    /* should be private but is unused */
    // Holds the derived reactive so it stays reachable and is not gc'd.
    var strongRef: AnyRef = _

    /** Track a signal: every mapped reevaluation is recorded via [[reev]]. */
    def this(signal: Signal[A])(implicit turnSource: CreationTicket) = {
      this()
      strongRef = signal.map(reev)(turnSource)
    }

    /** Track an event: every mapped occurrence is recorded via [[reev]]. */
    def this(event: Event[A])(implicit turnSource: CreationTicket) = {
      this()
      strongRef = event.map(reev)(turnSource)
    }

    /** Record one observed value and pass it through unchanged. */
    def reev(v1: A): A = {
      results = v1 :: results
      v1
    }

    /** Assert the recorded history equals `elements` (newest first). */
    def assert(elements: A*)(implicit pos: org.scalactic.source.Position): Unit =
      Assertions.assert(results === elements.toList)

    /** Like [[assert]], then reset the recorded history. */
    def assertClear(elements: A*)(implicit pos: org.scalactic.source.Position): Unit = {
      assert(elements: _*)
      results = Nil
    }
  }
}
| guidosalva/REScala | Code/Main/shared/src/test/scala-2/tests/rescala/testtools/ReevaluationTracker.scala | Scala | apache-2.0 | 1,083 |
package almond.display
import java.awt.image.RenderedImage
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, IOException}
import java.net.{HttpURLConnection, URL, URLConnection}
import java.util.Base64
import javax.imageio.ImageIO
import scala.util.Try
/**
 * A displayable image, backed either by in-memory bytes or by a URL.
 *
 * Immutable: every `with*` method returns a modified copy sharing the same
 * `displayId`, so Jupyter updates replace the previously displayed output.
 */
final class Image private (
  val width: Option[String],
  val height: Option[String],
  val format: Option[Image.Format],
  byteArrayOrUrl: Either[URL, Array[Byte]],
  val embed: Boolean,
  val displayId: String
) extends UpdatableDisplay {
  /** The raw image bytes, when this image wraps in-memory content. */
  def byteArrayOpt: Option[Array[Byte]] =
    byteArrayOrUrl.toOption
  /** The source URL, when this image refers to external content. */
  def urlOpt: Option[URL] =
    byteArrayOrUrl.left.toOption
  // Private copy helper backing the fluent `with*` methods below; keeps displayId.
  private def copy(
    width: Option[String] = width,
    height: Option[String] = height,
    format: Option[Image.Format] = format,
    byteArrayOrUrl: Either[URL, Array[Byte]] = byteArrayOrUrl,
    embed: Boolean = embed
  ): Image =
    new Image(
      width,
      height,
      format,
      byteArrayOrUrl,
      embed,
      displayId
    )
  def withByteArray(data: Array[Byte]): Image =
    copy(byteArrayOrUrl = Right(data))
  def withUrl(url: URL): Image =
    copy(byteArrayOrUrl = Left(url))
  def withUrl(url: String): Image =
    copy(byteArrayOrUrl = Left(new URL(url)))
  def withHeight(height: Int): Image =
    copy(height = Some(height.toString))
  def withHeight(heightOpt: Option[String]): Image =
    copy(height = heightOpt)
  def withWidth(width: Int): Image =
    copy(width = Some(width.toString))
  def withWidth(widthOpt: Option[String]): Image =
    copy(width = widthOpt)
  def withFormat(format: Image.Format): Image =
    copy(format = Some(format))
  def withFormat(formatOpt: Option[Image.Format]): Image =
    copy(format = formatOpt)
  def withEmbed(): Image =
    copy(embed = true)
  def withEmbed(embed: Boolean): Image =
    copy(embed = embed)
  /** Display metadata: just the dimensions, when set. */
  override def metadata(): Map[String, String] =
    Map() ++
      width.map("width" -> _) ++
      height.map("height" -> _)
  /**
   * Resolves the image content type (explicit format first, then the caller's
   * detected type, then content sniffing), validates it is a supported image
   * type, and base64-encodes the bytes into the display payload.
   *
   * Shared by both branches of [[data]]; previously this logic was duplicated.
   *
   * @param errorMessage thrown (lazily built) when no content type can be determined
   */
  private def encodedData(
    b: Array[Byte],
    detectedType: Option[String],
    errorMessage: => String
  ): Map[String, String] = {
    val contentType = format
      .map(_.contentType)
      .orElse(detectedType)
      .orElse(Option(URLConnection.guessContentTypeFromStream(new ByteArrayInputStream(b))))
      .getOrElse {
        throw new Exception(errorMessage)
      }
    if (!Image.imageTypes.contains(contentType))
      throw new IOException("Unknown or unsupported content type: " + contentType)
    val b0 = Base64.getEncoder.encodeToString(b)
    Map(contentType -> b0)
  }
  /** Builds the MIME-type -> content map sent to the front-end. */
  def data(): Map[String, String] =
    byteArrayOrUrl match {
      case Left(url) if embed =>
        // Download and inline the content in the display message.
        val (contentTypeOpt, b) = Image.urlContent(url)
        encodedData(b, contentTypeOpt, s"Cannot detect format or unrecognizable format for image at $url")
      case Left(url) =>
        // Not embedded: emit an HTML <img> tag referencing the URL.
        val attrs = metadata().map { case (k, v) => s"$k=$v" }.mkString(" ")
        Map(Html.mimeType -> s"<img src='${url.toExternalForm}' $attrs />")
      case Right(b) =>
        encodedData(b, None, "Cannot detect image format or unrecognizable image format")
    }
}
object Image extends Display.Builder[Array[Byte], Image] {
  /** Encodes `image` as JPEG and wraps it in an embedded [[Image]]. */
  def fromRenderedImage(image: RenderedImage): Image =
    fromRenderedImage(image, format = JPG)
  /** Encodes `image` in `format` and wraps it in an embedded [[Image]] carrying its dimensions. */
  def fromRenderedImage(image: RenderedImage, format: Format): Image = {
    val output: ByteArrayOutputStream = new ByteArrayOutputStream()
    ImageIO.write(image, format.toString, output)
    new Image(
      width = Some(image.getWidth.toString),
      height = Some(image.getHeight.toString),
      format = Some(format),
      byteArrayOrUrl = Right(output.toByteArray),
      embed = true,
      displayId = UpdatableDisplay.generateId()
    )
  }
  // Local (file://) content is embedded by default; remote URLs are referenced
  // by an <img> tag instead.
  protected def build(contentOrUrl: Either[URL, Array[Byte]]): Image =
    new Image(
      width = None,
      height = None,
      format = None,
      byteArrayOrUrl = contentOrUrl,
      embed = contentOrUrl.left.exists(_.getProtocol == "file"),
      displayId = UpdatableDisplay.generateId()
    )
  /** Supported image formats with their MIME content types. */
  sealed abstract class Format(val contentType: String) extends Product with Serializable
  case object JPG extends Format("image/jpeg")
  case object PNG extends Format("image/png")
  case object GIF extends Format("image/gif")
  private val imageTypes = Set(JPG, PNG, GIF).map(_.contentType)
  /**
   * Downloads the content at `url`.
   *
   * @return the content type reported by the server (HTTP only), if any, and the raw bytes
   */
  private def urlContent(url: URL): (Option[String], Array[Byte]) = {
    var conn: URLConnection = null
    val (rawContent, contentTypeOpt) = try {
      conn = url.openConnection()
      conn.setConnectTimeout(5000) // allow users to tweak that?
      conn.setReadTimeout(5000) // also bound reads, so a stalled server cannot hang us forever
      val b = TextDisplay.readFully(conn.getInputStream)
      val contentTypeOpt0 = conn match {
        case conn0: HttpURLConnection =>
          Option(conn0.getContentType)
        case _ =>
          None
      }
      (b, contentTypeOpt0)
    } finally {
      if (conn != null) {
        Try(conn.getInputStream.close())
        conn match {
          case conn0: HttpURLConnection =>
            // getErrorStream is null when no error occurred, so guard before closing
            // (previously the NPE was thrown and swallowed by Try).
            Option(conn0.getErrorStream).foreach(s => Try(s.close()))
            Try(conn0.disconnect())
          case _ =>
        }
      }
    }
    (contentTypeOpt, rawContent)
  }
}
| alexarchambault/jupyter-scala | modules/scala/jupyter-api/src/main/scala/almond/display/Image.scala | Scala | apache-2.0 | 5,465 |
/*
* Copyright 2012-2014 Kieron Wilkinson.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package viper.source.log.regular
import java.io.{BufferedReader, Reader}
import collection.mutable
import viper.domain._
import viper.source.log.jul.{JULLogParser, JULConsumer}
import java.text.SimpleDateFormat
/**
 * Incremental consumer for plain-text java.util.logging output.
 *
 * A record spans one or more lines: a date/time header line, a LEVEL: message
 * line, and optionally further message lines (e.g. stack traces). A record is
 * only known to be complete when the next record's header (or end of stream)
 * is seen, hence the buffering in `map`.
 */
class JULSimpleConsumer(reader: => Reader) extends JULConsumer {
  /*
  Apr 02, 2013 9:58:34 AM viper.util.LogFileGenerator$ main
  WARNING: normal message
  Apr 02, 2013 9:58:34 AM viper.util.LogFileGenerator$ main
  SEVERE: my message
  java.lang.IllegalStateException: my exception
   at viper.util.LogFileGenerator$.main(LogFileGenerator.scala:17)
   at viper.util.LogFileGenerator.main(LogFileGenerator.scala)
   at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
   at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
   at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
   at java.lang.reflect.Method.invoke(Method.java:601)
   at com.intellij.rt.execution.application.AppMain.main(AppMain.java:120)
  Caused by: java.lang.Exception: my cause
   ... 7 more
   */
  /** E.g. Apr 02, 2013 9:58:34 AM */
  private val DateTimePatternJava7 = """([A-Z][a-z]{2} \\d\\d, \\d{4} \\d{1,2}:\\d\\d:\\d\\d [AP]M) .+""".r
  /** E.g. 04-Apr-2013 09:31:43 */
  private val DateTimePatternJava6 = """(\\d\\d-[A-Z][a-z]{2}-\\d{4} \\d\\d:\\d\\d:\\d\\d) .+""".r
  /** E.g. INFO: message */
  private val LevelMessagePattern = """(FINEST|FINER|FINE|CONFIG|INFO|WARNING|SEVERE): (.+)""".r
  // Locale.ENGLISH is pinned: the regexes above only ever match English month
  // names and AM/PM markers, so parsing must not depend on the JVM's default
  // locale (it previously failed on non-English JVMs).
  /** E.g. Apr 02, 2013 9:58:34 AM */
  private[log] val dateFormatJava7 =
    new SimpleDateFormat("MMM dd, yyyy hh:mm:ss aa", java.util.Locale.ENGLISH)
  /** E.g. 04-Apr-2013 09:31:43 — a 24-hour clock, hence HH (not hh, which is 1-12). */
  private[log] val dateFormatJava6 =
    new SimpleDateFormat("dd-MMM-yyyy HH:mm:ss", java.util.Locale.ENGLISH)
  private lazy val buffer = new BufferedReader(reader)
  /** Temporary storage for node values, needs to be cleared after each record. */
  private val map = new mutable.HashMap[String, String]()
  /** Implicit sequence value, used to order events with the same time. */
  private var sequence = 0
  /**
   * Blocking variant of [[next]]: retries until a record is available.
   * NOTE(review): recurses forever if the stream is exhausted with no complete
   * record pending — confirm callers only use this on live streams.
   */
  def nextExpected(): Record = next() match {
    case Some(x) => x
    case None => nextExpected()
  }
  /**
   * Read the next available record.
   * @return the next record, or none if no records are available or ready
   */
  def next(): Option[Record] = {
    val line = buffer.readLine()
    // Indicate when we have a completed record at the end of the stream
    if (line == null && isPopulated) {
      val record = Some(JULLogParser.parse(map))
      map.clear() // Indicate record consumed
      return record
    }
    // We've read something, but we don't quite know if it is the complete record yet
    if (line != null) {
      val read = pull(line)
      // Indicate we have a completed record at the start of the next
      if (isStartOfRecord(read) && isPopulated) {
        val previous = JULLogParser.parse(map)
        consume(read)
        return Some(previous)
      }
      // Consume the current line information
      else {
        consume(read)
        // This might be the end, but we can't tell
      }
    }
    None
  }
  // A record always starts with a date/time header line.
  private def isStartOfRecord(read: Read) = read.isInstanceOf[TimeHeader]
  // A record is complete once both its timestamp and its message are known.
  private def isPopulated = map.contains("millis") && map.contains("message")
  /** Classifies a raw line; anything unrecognized is a message continuation. */
  private def pull(line: String): Read = {
    line match {
      case DateTimePatternJava7(dateStr) => TimeHeader(millisJava7(dateStr))
      case DateTimePatternJava6(dateStr) => TimeHeader(millisJava6(dateStr))
      case LevelMessagePattern(level, message) => LevelMessage(level, message)
      case exceptionLine: String => Message(exceptionLine)
    }
  }
  /** Folds a classified line into the pending record's field map. */
  private def consume(read: Read) {
    read match {
      case TimeHeader(millis) => {
        map.clear()
        map.put("millis", millis)
        map.put("sequence", nextSequence())
      }
      case LevelMessage(level, message) => {
        map.put("level", level)
        map.put("message", message)
      }
      case Message(message) => {
        // Continuation lines (e.g. stack traces) are appended to the message.
        map.put("message", map.get("message").map(_ + " \\n").getOrElse("") + message)
      }
    }
  }
  private def millisJava7(dateStr: String): String = {
    dateFormatJava7.parse(dateStr).getTime.toString
  }
  private def millisJava6(dateStr: String): String = {
    dateFormatJava6.parse(dateStr).getTime.toString
  }
  private def nextSequence(): String = {
    sequence += 1
    sequence.toString
  }
  sealed trait Read
  /** When we know we just read the first line of a record. */
  case class TimeHeader(millis: String) extends Read
  /** When we don't know for sure if there is more of the record or not. */
  case class LevelMessage(level: String, message: String) extends Read
  /** When we don't know for sure if there is more of the record or not. */
  case class Message(message: String) extends Read
}
| vyadh/viper | source-log/src/main/scala/viper/source/log/jul/plain/JULSimpleConsumer.scala | Scala | apache-2.0 | 5,361 |
package com.github.gigurra.glasciia
import com.badlogic.gdx.{ApplicationListener, Gdx}
import com.github.gigurra.glasciia.GameEvent._
import com.github.gigurra.glasciia.Glasciia._
import com.github.gigurra.math.Vec2
import scala.language.implicitConversions
/**
  * Created by johan on 2016-10-30.
  * Helper that defers creation of OpenGL/GDX resources and objects until libGDX
  * has initialized, so callers do not have to wait for an explicit init event.
  */
class GameLauncher[R <: Resources, C <: Canvas](gameFactory: GameFactory[R, C]) extends ApplicationListener with Logging {
  // The currently active game: the loading screen until resources finish loading.
  private var stage: Game = _
  private var loadingScreen: Game = _
  private var resources: R = _
  private var canvas: Canvas = _
  private var firstFrame: Boolean = true
  /** libGDX entry point: builds canvas, loading screen and resource set, and hooks input. */
  override def create(): Unit = {
    log.info("Loading..")
    canvas = gameFactory.canvas()
    loadingScreen = gameFactory.loadingScreen(canvas)
    stage = loadingScreen
    resources = gameFactory.resources(canvas)
    Gdx.input.setListener(consumeEvent)
  }
  /** Per-frame callback: loads resources incrementally, then forwards a Render event. */
  override def render(): Unit = {
    if (!firstFrame) {
      checkFinishedLoading()
    }
    canvas.setDrawTime()
    consumeEvent(Render(canvas.time, canvas))
    firstFrame = false
  }
  override def resize(width: Int, height: Int): Unit = consumeEvent(Resize(canvas.time, canvas, Vec2(width, height)))
  override def dispose(): Unit = consumeEvent(Exit(canvas.time, canvas))
  override def pause(): Unit = consumeEvent(Pause(canvas.time, canvas))
  override def resume(): Unit = consumeEvent(Resume(canvas.time, canvas))
  // All lifecycle and input events are routed to whichever game is active.
  private def consumeEvent(event: GameEvent): Boolean = {
    stage.consume(event)
  }
  // Loads a slice of resources each frame; once loading completes, sends the
  // loading screen an Exit event and swaps in the real game.
  private def checkFinishedLoading(): Unit = {
    if (!resources.finished) {
      resources.load(gameFactory.loadingScreenFrameTime())
    }
    if (resources.finished && (stage eq loadingScreen)) {
      log.info("Loading done - launching game!")
      consumeEvent(Exit(canvas.time, canvas))
      stage = gameFactory.launch(resources, canvas)
    }
  }
}
object GameLauncher {
  /** Factory method mirroring the class constructor (convenient from Java). */
  def apply[R <: Resources, C <: Canvas](javaIfc: GameFactory[R, C]): GameLauncher[R, C] = {
    new GameLauncher(javaIfc)
  }
}
/**
 * Supplies the pieces [[GameLauncher]] needs: a canvas, a resource set, a
 * loading screen shown while resources load, and the actual game.
 */
abstract class GameFactory[R <: Resources, C <: Canvas] {
  /** Creates the canvas; called once during application creation. */
  def canvas(): C
  /** Creates the (initially unloaded) resource set for `canvas`. */
  def resources(canvas: Canvas): R
  /** The game shown while resources are loading. */
  def loadingScreen(canvas: Canvas): Game
  /** Time budget (passed to `resources.load`) per loading-screen frame. */
  def loadingScreenFrameTime(): Long = 100
  /** Creates the real game once all resources have loaded. */
  def launch(resources: R, canvas: Canvas): Game
}
| GiGurra/glasciia | glasciia-core/src/main/scala/com/github/gigurra/glasciia/GameLauncher.scala | Scala | mit | 2,337 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.createTable
import java.util
import org.apache.spark.sql.CarbonEnv
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema
/**
 * Tests for `CREATE TABLE ... LIKE ...`: the target table must copy the source
 * table's columns and table properties while getting a fresh identity
 * (table id, table name and table path).
 */
class TestCreateTableLike extends QueryTest with BeforeAndAfterEach with BeforeAndAfterAll {
  override protected def beforeAll(): Unit = {
    sql("drop table if exists pt_tbl")
    sql("drop table if exists hive_pt")
    sql("drop table if exists bkt_tbl")
    sql("drop table if exists stream_tbl")
    sql("drop table if exists sourceTable")
    sql(
      """create table sourceTable
        |(a int, b string)
        |STORED AS carbondata
        |TBLPROPERTIES(
        | 'SORT_COLUMNS'='b',
        | 'SORT_SCOPE'='GLOBAL_SORT',
        | 'LOCAL_DICTIONARY_ENABLE'='false',
        | 'carbon.column.compress'='zstd',
        | 'CACHE_LEVEL'='blocklet',
        | 'COLUMN_META_CACHE'='a',
        | 'TABLE_BLOCKSIZE'='256',
        | 'TABLE_BLOCKLET_SIZE'='16')
        | """.stripMargin)
    sql("insert into sourceTable values(5,'bb'),(6,'cc')")
  }
  override protected def afterAll(): Unit = {
    sql("drop table if exists pt_tbl")
    sql("drop table if exists hive_pt")
    sql("drop table if exists bkt_tbl")
    sql("drop table if exists stream_tbl")
    sql("drop table if exists sourceTable")
  }
  override protected def beforeEach(): Unit = {
    sql("drop table if exists targetTable")
  }
  override protected def afterEach(): Unit = {
    sql("drop table if exists targetTable")
  }
  /**
   * Element-wise equality of two column schema lists.
   *
   * Implemented with `forall` instead of an early `return` inside a
   * for-comprehension: in Scala, `return` inside the loop-body closure is a
   * non-local return implemented by throwing NonLocalReturnControl.
   */
  def checkColumns(left: util.List[ColumnSchema], right: util.List[ColumnSchema]): Boolean =
    left.size == right.size &&
      (0 until left.size).forall(i => left.get(i).equals(right.get(i)))
  /**
   * Asserts `dst` was created LIKE `src`: same columns, table properties,
   * transactional flag, bucketing and partitioning — but a distinct table
   * id, name and path.
   */
  def checkTableProperties(src: TableIdentifier, dst: TableIdentifier): Unit = {
    val info_src = CarbonEnv.getCarbonTable(src)(sqlContext.sparkSession).getTableInfo
    val info_dst = CarbonEnv.getCarbonTable(dst)(sqlContext.sparkSession).getTableInfo
    // The source stores its own table id as a property key; it will differ, so drop it.
    info_src.getFactTable.getTableProperties.remove(info_src.getFactTable.getTableId)
    val fact_src = info_src.getFactTable
    val fact_dst = info_dst.getFactTable
    // check column schemas same
    assert(checkColumns(fact_src.getListOfColumns, fact_dst.getListOfColumns))
    // check table properties same
    assert(fact_src.getTableProperties.equals(fact_dst.getTableProperties))
    // check transaction same
    assert(!(info_src.isTransactionalTable ^ info_dst.isTransactionalTable))
    // check bucket info same
    if (null == fact_src.getBucketingInfo) {
      assert(null == fact_dst.getBucketingInfo)
    } else {
      assert(null != fact_dst.getBucketingInfo)
      assert(fact_src.getBucketingInfo.getNumOfRanges == fact_dst.getBucketingInfo.getNumOfRanges)
      assert(checkColumns(fact_src.getBucketingInfo.getListOfColumns,
        fact_dst.getBucketingInfo.getListOfColumns))
    }
    // check partition info same
    if (null == fact_src.getPartitionInfo) {
      assert(null == fact_dst.getPartitionInfo)
    } else {
      assert(null != fact_dst.getPartitionInfo)
      assert(
        fact_src.getPartitionInfo.getPartitionType == fact_dst.getPartitionInfo.getPartitionType)
      assert(checkColumns(fact_src.getPartitionInfo.getColumnSchemaList,
        fact_dst.getPartitionInfo.getColumnSchemaList))
    }
    // check different id
    assert(!info_src.getTableUniqueName.equals(info_dst.getTableUniqueName))
    assert(!info_src.getOrCreateAbsoluteTableIdentifier()
      .getTablePath
      .equals(info_dst.getOrCreateAbsoluteTableIdentifier.getTablePath))
    assert(!info_src.getFactTable.getTableId.equals(info_dst.getFactTable.getTableId))
    assert(!info_src.getFactTable.getTableName.equals(info_dst.getFactTable.getTableName))
  }
  test("create table like simple table") {
    sql("create table targetTable like sourceTable")
    checkTableProperties(TableIdentifier("sourceTable"), TableIdentifier("targetTable"))
  }
  test("test same table name") {
    val exception = intercept[TableAlreadyExistsException] {
      sql("create table sourceTable like sourceTable")
    }
    assert(exception.getMessage.contains("already exists in database"))
  }
  // ignore this test case since Spark 2.1 does not support specify location
  // and also current implementation in carbon does not use this parameter.
  ignore("command with location") {
    sql(s"create table targetTable like sourceTable location '$warehouse/tbl_with_loc' ")
    checkTableProperties(TableIdentifier("sourceTable"), TableIdentifier("targetTable"))
  }
  test("table with index") {
    // indexSchema relation does not store in parent table
    sql(
      s"""
         | CREATE INDEX dm1
         | ON TABLE sourceTable (B)
         | AS 'bloomfilter'
         | Properties('BLOOM_SIZE'='32000')
      """.stripMargin)
    sql("create table targetTable like sourceTable")
    checkTableProperties(TableIdentifier("sourceTable"), TableIdentifier("targetTable"))
  }
  test("table with hive partition") {
    sql(
      """
        | CREATE TABLE hive_pt (
        | a int, b string)
        | PARTITIONED BY (id int)
        | STORED AS carbondata
      """.stripMargin)
    sql("create table targetTable like hive_pt")
    checkTableProperties(TableIdentifier("hive_pt"), TableIdentifier("targetTable"))
  }
  test("table with bucket") {
    sql("""
          | CREATE TABLE IF NOT EXISTS bkt_tbl (
          | a int, b string
          | ) STORED AS carbondata
          | TBLPROPERTIES ('BUCKET_NUMBER'='4', 'BUCKET_COLUMNS'='b')
          | """.stripMargin)
    sql("create table targetTable like bkt_tbl")
    checkTableProperties(TableIdentifier("bkt_tbl"), TableIdentifier("targetTable"))
  }
  test("table with streaming") {
    sql("""
          | CREATE TABLE IF NOT EXISTS stream_tbl (
          | a int, b string
          | ) STORED AS carbondata
          | TBLPROPERTIES ('streaming' = 'true')
          | """.stripMargin)
    sql("create table targetTable like stream_tbl")
    checkTableProperties(TableIdentifier("stream_tbl"), TableIdentifier("targetTable"))
  }
  test("table with schema changed") {
    sql("ALTER TABLE sourceTable ADD COLUMNS(charField STRING) " +
        "TBLPROPERTIES ('DEFAULT.VALUE.charfield'='def')")
    sql("create table targetTable like sourceTable")
    checkTableProperties(TableIdentifier("sourceTable"), TableIdentifier("targetTable"))
  }
}
| zzcclp/carbondata | integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableLike.scala | Scala | apache-2.0 | 7,536 |
package jigg.nlp.ccg
import lexicon._
import tagger.{LF => Feature, _}
import jigg.ml._
import breeze.config.{CommandLineParser, Help}
import scala.collection.mutable.{HashMap, ArrayBuffer}
import java.io.File
/**
 * Re-serializes a text-dumped super-tagger model: reads a dumped
 * feature-map / weight-vector file, plugs the values into a freshly
 * constructed model, and saves the result with [[SuperTaggerModel.saveTo]].
 */
object LoadDumpedTaggerModel {
  // Command-line parameters (parsed by breeze's CommandLineParser).
  case class Params(
    @Help(text="Load model path") model: File = new File(""),
    @Help(text="Save model path") output: File = new File(""),
    @Help(text="Feature extractor") feat: FeatureExtractor = new DefaultExtractor(),
    @Help(text="Additional extractors") more: Seq[FeatureExtractor] = List(),
    bank: Opts.BankInfo, // these settings should be consistent with
    dict: Opts.DictParams // the dumped model on the (previous) version.
  )
  def main(args: Array[String]) = {
    val params = CommandLineParser.readIn[Params](args)
    val model = defaultModel(params)
    val (featureMap, weights) = readModel(params.model)
    // Keep everything from the default model except the learned parameters.
    val newModel = model.copy(featureMap = featureMap, weights = weights)
    SuperTaggerModel.saveTo(params.output.getPath, newModel)
  }
  // obtain the default model (with no learned params) using SuperTaggerTrainer
  def defaultModel(params: Params) = {
    val trainParams = new SuperTaggerTrainer.Params(
      new File(""), 0.1, 0.2, 0.000000005, "adaGradL1", 20, params.feat, params.more,
      params.bank, params.dict
    )
    val trainer = new JapaneseSuperTaggerTrainer(trainParams)
    val trainSentences = trainer.ccgbank.trainSentences
    trainer.setCategoryDictionary(trainSentences)
    trainer.newModel()
  }
  /**
   * Reads a dumped model file. The file has two sections separated by an
   * empty line: first "feature index" pairs, then one weight per line.
   */
  def readModel(input: File): (HashMap[Feature, Int], WeightVector[Float]) = {
    val in = jigg.util.IOUtil.openIterator(input.getPath)
    val featureMap = new HashMap[Feature, Int]
    val weights = new ArrayBuffer[Float]
    // Each line looks like:
    // SuperTaggingFeature(BigramPoSFeature(58,116,pPrev2_pPrev1),187) 578695
    def addFeat(line: String): Unit = {
      val items = line.split(" ")
      val featStr = items(0)
      val idx = items(1).toInt
      val feat = toFeature(featStr)
      featureMap += feat -> idx
    }
    def addWeight(line: String): Unit = {
      weights += line.toFloat
    }
    // Start in the feature section; switch to weights at the first empty line.
    var addFun: String=>Unit = addFeat
    for (line <- in) {
      if (line.isEmpty) addFun = addWeight
      else addFun(line)
    }
    (featureMap, new FixedWeightVector(weights.toArray))
  }
  // featStr looks like:
  // SuperTaggingFeature(BigramPoSFeature(58,116,pPrev2_pPrev1),187)
  // Parsed by locating the two opening parens (outer wrapper + inner feature),
  // the inner closing paren, and the trailing label after the last comma.
  def toFeature(featStr: String) = {
    val firstP = featStr.indexOf('(', 0)
    val secondP = featStr.indexOf('(', firstP+1)
    val firstE = featStr.indexOf(')', secondP)
    val basename = featStr.substring(0, firstP)
    assert(basename == "SuperTaggingFeature")
    val name = featStr.substring(firstP+1, secondP)
    val argsStr = featStr.substring(secondP+1, firstE)
    val args: Array[String] = argsStr.split(",")
    val label = featStr.substring(firstE+2, featStr.size-1)
    // Dispatch on the inner feature class name; throws MatchError on unknown names.
    val unlabeled = name match {
      case "RawFeature" =>
        RawFeature(argsStr)
      case "BiasFeature" =>
        BiasFeature(convT(args(0)))
      case "UnigramWordFeature" =>
        UnigramWordFeature(args(0).toInt, convT(args(1)))
      case "BigramWordFeature" =>
        BigramWordFeature(args(0).toInt, args(1).toInt, convT(args(2)))
      case "TrigramWordFeature" =>
        TrigramWordFeature(args(0).toInt, args(1).toInt, args(2).toInt, convT(args(3)))
      case "UnigramPoSFeature" =>
        UnigramPoSFeature(args(0).toInt, convT(args(1)))
      case "BigramPoSFeature" =>
        BigramPoSFeature(args(0).toInt, args(1).toInt, convT(args(2)))
      case "TrigramPoSFeature" =>
        TrigramPoSFeature(args(0).toInt, args(1).toInt, args(2).toInt, convT(args(3)))
    }
    unlabeled.assignLabel(label.toInt)
  }
  // Maps a dumped template name back to the Template enum value.
  // Throws MatchError on an unknown name.
  def convT(tmpl: String) = {
    import Template.Template
    tmpl match {
      case "bias" => Template.bias
      case "w" => Template.w
      case "wPrev1" => Template.wPrev1
      case "wPrev2" => Template.wPrev2
      case "wNext1" => Template.wNext1
      case "wNext2" => Template.wNext2
      case "wPrev2_wPrev1" => Template.wPrev2_wPrev1
      case "wPrev1_w" => Template.wPrev1_w
      case "w_wNext1" => Template.w_wNext1
      case "wNext1_wNext2" => Template.wNext1_wNext2
      case "p" => Template.p
      case "pPrev1" => Template.pPrev1
      case "pPrev2" => Template.pPrev2
      case "pNext1" => Template.pNext1
      case "pNext2" => Template.pNext2
      case "pPrev2_pPrev1" => Template.pPrev2_pPrev1
      case "pPrev1_p" => Template.pPrev1_p
      case "p_pNext1" => Template.p_pNext1
      case "pNext1_pNext2" => Template.pNext1_pNext2
      case "pPrev2_pPrev1_p" => Template.pPrev2_pPrev1_p
      case "pPrev1_p_pNext1" => Template.pPrev1_p_pNext1
      case "p_pNext1_pNext2" => Template.p_pNext1_pNext2
    }
  }
}
| mynlp/jigg | src/main/scala/jigg/nlp/ccg/LoadDumpedTaggerModel.scala | Scala | apache-2.0 | 4,814 |
package edu.rice.habanero.benchmarks.concsll
import java.util.Random
import akka.actor.{ActorRef, Props}
import edu.rice.habanero.actors.{AkkaActor, AkkaActorState}
import edu.rice.habanero.benchmarks.concsll.SortedListConfig.{DoWorkMessage, EndWorkMessage}
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
/**
*
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
*/
/**
 * Savina concurrent-sorted-linked-list benchmark on Akka actors: a master
 * spawns workers that fire a random mix of write / contains / size requests
 * at a single list-owning actor.
 */
object SortedListAkkaActorBenchmark {
  def main(args: Array[String]) {
    BenchmarkRunner.runBenchmark(args, new SortedListAkkaActorBenchmark)
  }
  private final class SortedListAkkaActorBenchmark extends Benchmark {
    def initialize(args: Array[String]) {
      SortedListConfig.parseArgs(args)
    }
    def printArgInfo() {
      SortedListConfig.printArgs()
    }
    def runIteration() {
      val numWorkers: Int = SortedListConfig.NUM_ENTITIES
      val numMessagesPerWorker: Int = SortedListConfig.NUM_MSGS_PER_WORKER
      val system = AkkaActorState.newActorSystem("SortedList")
      val master = system.actorOf(Props(new Master(numWorkers, numMessagesPerWorker)))
      AkkaActorState.startActor(master)
      // Blocks until the actor system terminates (master shuts everything down).
      AkkaActorState.awaitTermination(system)
    }
    def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double) {
    }
  }
  /**
   * Spawns the list actor and the workers, then waits for every worker to
   * report completion before stopping the list actor and itself.
   */
  private class Master(numWorkers: Int, numMessagesPerWorker: Int) extends AkkaActor[AnyRef] {
    private final val workers = new Array[ActorRef](numWorkers)
    private final val sortedList = context.system.actorOf(Props(new SortedList()))
    private var numWorkersTerminated: Int = 0
    override def onPostStart() {
      AkkaActorState.startActor(sortedList)
      var i: Int = 0
      while (i < numWorkers) {
        workers(i) = context.system.actorOf(Props(new Worker(self, sortedList, i, numMessagesPerWorker)))
        AkkaActorState.startActor(workers(i))
        // Kick off each worker's request loop.
        workers(i) ! DoWorkMessage.ONLY
        i += 1
      }
    }
    override def process(msg: AnyRef) {
      if (msg.isInstanceOf[SortedListConfig.EndWorkMessage]) {
        numWorkersTerminated += 1
        if (numWorkersTerminated == numWorkers) {
          sortedList ! EndWorkMessage.ONLY
          exit()
        }
      }
    }
  }
  /**
   * Sends `numMessagesPerWorker` randomly chosen requests to the list actor,
   * one per received reply, then notifies the master and exits.
   */
  private class Worker(master: ActorRef, sortedList: ActorRef, id: Int, numMessagesPerWorker: Int) extends AkkaActor[AnyRef] {
    private final val writePercent = SortedListConfig.WRITE_PERCENTAGE
    private final val sizePercent = SortedListConfig.SIZE_PERCENTAGE
    private var messageCount: Int = 0
    // Seed derived from worker id/config so each worker's stream differs deterministically.
    private final val random = new Random(id + numMessagesPerWorker + writePercent + sizePercent)
    override def process(msg: AnyRef) {
      messageCount += 1
      if (messageCount <= numMessagesPerWorker) {
        // Pick the request type according to the configured size/write/contains mix.
        val anInt: Int = random.nextInt(100)
        if (anInt < sizePercent) {
          sortedList ! new SortedListConfig.SizeMessage(self)
        } else if (anInt < (sizePercent + writePercent)) {
          sortedList ! new SortedListConfig.WriteMessage(self, random.nextInt)
        } else {
          sortedList ! new SortedListConfig.ContainsMessage(self, random.nextInt)
        }
      } else {
        master ! EndWorkMessage.ONLY
        exit()
      }
    }
  }
  /** Owns the sorted linked list; serializes all access through its mailbox. */
  private class SortedList extends AkkaActor[AnyRef] {
    private[concsll] final val dataList = new SortedLinkedList[Integer]
    override def process(msg: AnyRef) {
      msg match {
        case writeMessage: SortedListConfig.WriteMessage =>
          val value: Int = writeMessage.value
          dataList.add(value)
          val sender = writeMessage.sender.asInstanceOf[ActorRef]
          sender ! new SortedListConfig.ResultMessage(self, value)
        case containsMessage: SortedListConfig.ContainsMessage =>
          val value: Int = containsMessage.value
          val result: Int = if (dataList.contains(value)) 1 else 0
          val sender = containsMessage.sender.asInstanceOf[ActorRef]
          sender ! new SortedListConfig.ResultMessage(self, result)
        case readMessage: SortedListConfig.SizeMessage =>
          val value: Int = dataList.size
          val sender = readMessage.sender.asInstanceOf[ActorRef]
          sender ! new SortedListConfig.ResultMessage(self, value)
        case _: SortedListConfig.EndWorkMessage =>
          printf(BenchmarkRunner.argOutputFormat, "List Size", dataList.size)
          exit()
        case _ =>
          System.err.println("Unsupported message: " + msg)
      }
    }
  }
}
| smarr/savina | src/main/scala/edu/rice/habanero/benchmarks/concsll/SortedListAkkaActorBenchmark.scala | Scala | gpl-2.0 | 4,446 |
package com.besuikerd.autologistics.common.lib.dsl
import com.besuikerd.autologistics.common.lib.dsl.parser._
/** The DSL parser for AutoLogistics: the generic [[DSLParser]] with the
  * AutoLogistics-specific productions mixed in. */
object AutoLogisticsParser extends DSLParser
with AutoLogisticsParserExtensions
/**
 * AutoLogistics-specific grammar rules: item references, item names,
 * coordinates, transfer chains and item filters. Designed to be mixed into
 * a [[DSLParser]] (self-type), plugging into its expression/operand lists.
 */
trait AutoLogisticsParserExtensions extends PluggableParsers
with ParserImplicits
{ this: DSLParser =>
  // <mod:name> or <mod:name:meta> — parsed into an object expression with
  // type "item" (plus optional "meta").
  lazy val itemRef:Parser[Expression] = ("<" ~> ident <~ ":") ~ ident ~ ((":" ~> naturalNumber).? <~ ">") ^^ {
    case mod ~ name ~ meta => {
      val mapping = Map[String, Expression](
        "type" -> StringLiteral("item"),
        "mod" -> StringLiteral(mod),
        "name" -> StringLiteral(name)
      )
      ObjectExpression(meta.map(x => mapping + ("meta" -> x)).getOrElse(mapping))
    }
  }
// lazy val filtered:Parser[Expression] = referrable ~ ("@" ~> (listExp | referrable)) ^^ {
// case e ~ filter => {
// Application("_filter", List(e, filter))
// }
// }
  // Characters allowed inside <...>: anything but angle brackets/control chars,
  // plus the usual escape sequences.
  lazy val stringCharacters:Parser[String] = """([^<>\\p{Cntrl}\\\\]|\\\\[\\\\'<>bfnrt]|\\\\u[a-fA-F0-9]{4})*+""".r
  // <display name> — an item referenced by display name rather than mod:name.
  lazy val itemName:Parser[Expression] = "<" ~> stringCharacters <~ ">"^^ {
    case name => ObjectExpression(Map(
      "type" -> StringLiteral("name"),
      "name" -> StringLiteral(name)
    ))
  }
  // (x, y, z) triple shared by both coordinate forms below.
  lazy val coordinate = "(" ~> expression ~ ("," ~> expression) ~ ("," ~> expression) <~ ")"
  // ~(x, y, z) — coordinate relative to the current position.
  lazy val relativeCoordinate = "~" ~> coordinate ^^ {
    case x ~ y ~ z => new ObjectExpression(Map(
      "type" -> StringLiteral("relative"),
      "x" -> x,
      "y" -> y,
      "z" -> z
    ))
  }
  // (x, y, z) — absolute world coordinate.
  lazy val absCoordinate:Parser[Expression] = coordinate ^^ {
    case x ~ y ~ z => new ObjectExpression(Map(
      "type" -> StringLiteral("absolute"),
      "x" -> x,
      "y" -> y,
      "z" -> z
    ))
  }
  lazy val itemType: Parser[Expression] = itemRef | itemName | referrable
  // a >> b >> c — left-folds into chained "_transfer" applications.
  lazy val transferTo: Parser[Expression] = repNsep(2, itemFilter | itemType | listExp, ">>") ^^ {
    case items => items.tail.foldLeft(items.head){(acc, cur) => Application(acc, "_transfer", cur)
    }
  }
  // item @ filter — a single filter is wrapped into a one-element list.
  lazy val itemFilter: Parser[Expression] = (itemType <~ "@") ~ (listExp | operand | beforeExpressions) ^^ {
    case item ~ (filter@ListExpression(_)) => Application(item, "_filter", filter)
    case item ~ filter => Application(item, "_filter", ListExpression(List(filter)))
  }
  override def expressions: Seq[Parser[Expression]] = Seq(transferTo | itemFilter | itemRef | itemName) ++ super.expressions
  override def operands:Seq[Parser[Expression]] = Seq(relativeCoordinate, absCoordinate) ++ super.operands
// override def binaryOperators:Map[Int, Seq[(String, (Expression, String, Expression) => Expression)]] = super.binaryOperators ++ Map(
// )
}
| besuikerd/AutoLogistics | src/main/scala/com/besuikerd/autologistics/common/lib/dsl/AutoLogisticsParser.scala | Scala | gpl-2.0 | 2,655 |
package is.hail.types.physical.stypes.interfaces
import is.hail.annotations.Region
import is.hail.asm4s._
import is.hail.expr.ir.EmitCodeBuilder
import is.hail.types.physical.stypes.primitives.SInt32Value
import is.hail.types.physical.stypes.{SCode, SType, SValue}
import is.hail.types.{RPrimitive, TypeWithRequiredness}
/** Physical SType for string values. */
trait SString extends SType {
  /** Constructs a string SValue from a code-level JVM String, using region `r`. */
  def constructFromString(cb: EmitCodeBuilder, r: Value[Region], s: Code[String]): SStringValue
  // Strings are treated as a primitive by the requiredness analysis.
  override def _typeWithRequiredness: TypeWithRequiredness = RPrimitive()
}
/** A staged string value. */
trait SStringValue extends SValue {
  /** Hashing delegates to the JVM `String.hashCode` of the loaded string. */
  override def hash(cb: EmitCodeBuilder): SInt32Value = {
    val loaded = loadString(cb)
    val hashCode = loaded.invoke[Int]("hashCode")
    new SInt32Value(cb.memoize(hashCode))
  }
  /** Length of the string, in code. */
  def loadLength(cb: EmitCodeBuilder): Value[Int]
  /** The string as a code-level JVM String. */
  def loadString(cb: EmitCodeBuilder): Value[String]
  /** The string's bytes as a binary value. */
  def toBytes(cb: EmitCodeBuilder): SBinaryValue
}
| hail-is/hail | hail/src/main/scala/is/hail/types/physical/stypes/interfaces/SString.scala | Scala | mit | 847 |
package edu.rice.habanero.benchmarks.logmap
import java.util
import edu.rice.habanero.actors.{ScalazActor, ScalazActorState}
import edu.rice.habanero.benchmarks.logmap.LogisticMapConfig._
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
/**
*
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
*/
object LogisticMapScalazManualStashActorBenchmark {
def main(args: Array[String]) {
BenchmarkRunner.runBenchmark(args, new LogisticMapScalazManualStashActorBenchmark)
}
  /** Benchmark harness: wires config parsing and one iteration of the actor run. */
  private final class LogisticMapScalazManualStashActorBenchmark extends Benchmark {
    def initialize(args: Array[String]) {
      LogisticMapConfig.parseArgs(args)
    }
    def printArgInfo() {
      LogisticMapConfig.printArgs()
    }
    def runIteration() {
      val master = new Master()
      master.start()
      master.send(StartMessage.ONLY)
      // Blocks until all scalaz actors have terminated.
      ScalazActorState.awaitTermination()
    }
    def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double) {
    }
  }
private class Master extends ScalazActor[AnyRef] {
private final val self = this
private final val numComputers: Int = LogisticMapConfig.numSeries
private final val computers = Array.tabulate[RateComputer](numComputers)(i => {
val rate = LogisticMapConfig.startRate + (i * LogisticMapConfig.increment)
new RateComputer(rate)
})
private final val numWorkers: Int = LogisticMapConfig.numSeries
private final val workers = Array.tabulate[SeriesWorker](numWorkers)(i => {
val rateComputer = computers(i % numComputers)
val startTerm = i * LogisticMapConfig.increment
new SeriesWorker(i, self, rateComputer, startTerm)
})
private var numWorkRequested: Int = 0
private var numWorkReceived: Int = 0
private var termsSum: Double = 0
protected override def onPostStart() {
computers.foreach(loopComputer => {
loopComputer.start()
})
workers.foreach(loopWorker => {
loopWorker.start()
})
}
override def process(theMsg: AnyRef) {
theMsg match {
case _: StartMessage =>
var i: Int = 0
while (i < LogisticMapConfig.numTerms) {
// request each worker to compute the next term
workers.foreach(loopWorker => {
loopWorker.send(NextTermMessage.ONLY)
})
i += 1
}
// workers should stop after all items have been computed
workers.foreach(loopWorker => {
loopWorker.send(GetTermMessage.ONLY)
numWorkRequested += 1
})
case rm: ResultMessage =>
termsSum += rm.term
numWorkReceived += 1
if (numWorkRequested == numWorkReceived) {
println("Terms sum: " + termsSum)
computers.foreach(loopComputer => {
loopComputer.send(StopMessage.ONLY)
})
workers.foreach(loopWorker => {
loopWorker.send(StopMessage.ONLY)
})
exit()
}
case message =>
val ex = new IllegalArgumentException("Unsupported message: " + message)
ex.printStackTrace(System.err)
}
}
}
private class SeriesWorker(id: Int, master: Master, computer: RateComputer, startTerm: Double) extends ScalazActor[AnyRef] {
private final val self = this
private final val curTerm = Array.tabulate[Double](1)(i => startTerm)
private var inReplyMode = false
private val stashedMessages = new util.LinkedList[AnyRef]()
override def process(theMsg: AnyRef) {
if (inReplyMode) {
theMsg match {
case resultMessage: ResultMessage =>
inReplyMode = false
curTerm(0) = resultMessage.term
case message =>
stashedMessages.add(message)
}
} else {
// process the message
theMsg match {
case computeMessage: NextTermMessage =>
val sender = self
val newMessage = new ComputeMessage(sender, curTerm(0))
computer.send(newMessage)
inReplyMode = true
case message: GetTermMessage =>
// do not reply to master if stash is not empty
if (stashedMessages.isEmpty) {
master.send(new ResultMessage(curTerm(0)))
} else {
stashedMessages.add(message)
}
case _: StopMessage =>
exit()
case message =>
val ex = new IllegalArgumentException("Unsupported message: " + message)
ex.printStackTrace(System.err)
}
}
// recycle stashed messages
if (!inReplyMode && !stashedMessages.isEmpty) {
val newMsg = stashedMessages.remove(0)
self.send(newMsg)
}
}
}
private class RateComputer(rate: Double) extends ScalazActor[AnyRef] {
override def process(theMsg: AnyRef) {
theMsg match {
case computeMessage: ComputeMessage =>
val result = computeNextTerm(computeMessage.term, rate)
val resultMessage = new ResultMessage(result)
val sender = computeMessage.sender.asInstanceOf[ScalazActor[AnyRef]]
sender.send(resultMessage)
case _: StopMessage =>
exit()
case message =>
val ex = new IllegalArgumentException("Unsupported message: " + message)
ex.printStackTrace(System.err)
}
}
}
}
| shamsmahmood/savina | src/main/scala/edu/rice/habanero/benchmarks/logmap/LogisticMapScalazManualStashActorBenchmark.scala | Scala | gpl-2.0 | 5,487 |
package inloopio.util
/**
*
* @author Caoyuan Deng
* @version 1.0, November 24, 2006, 5:06 PM
* @since 1.0.4
*/
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
trait ChangeSubject {
  // Registry mapping each observer to the owner object that registered it.
  // All mutation happens under `synchronized` on this subject; reads take a
  // snapshot under the same lock so iteration never races with writers.
  @transient
  private val observerToOwner = mutable.Map[ChangeObserver, AnyRef]()
  /** Registers `observer` under `owner`; re-registration replaces the owner. */
  def addObserver(owner: AnyRef, observer: ChangeObserver) {
    synchronized { observerToOwner(observer) = owner }
  }
  /**
   * Unregisters `observer`; `null` and unknown observers are ignored.
   * The removal is unconditional under the lock: `-=` is a no-op for absent
   * keys, so the previous unsynchronized check-then-act was both redundant
   * and racy.
   */
  def removeObserver(observer: ChangeObserver) {
    if (observer != null) {
      synchronized { observerToOwner -= observer }
    }
  }
  /** Unregisters every observer that was registered under `owner`. */
  def removeObserversOf(owner: AnyRef) {
    synchronized {
      val toRemove = observerToOwner.filter(_._2 == owner).keys.toList
      observerToOwner --= toRemove
    }
  }
  /** Unregisters all observers. */
  def removeObservers {
    synchronized { observerToOwner.clear() }
  }
  // Consistent copy of the registered observers, taken under the lock, so
  // notification callbacks may (un)register observers without corrupting
  // the iteration.
  private def observerSnapshot: List[ChangeObserver] =
    synchronized { observerToOwner.keysIterator.toList }
  /**
   * Notifies only the observers of the given runtime type.
   *
   * @note since Class[T] is not co-variant, we have to explicitly use
   *       [T <: ChangeObserver]
   */
  def notifyChanged[T <: ChangeObserver](observerType: Class[T]) {
    for (observer <- observerSnapshot if observerType.isInstance(observer)) {
      if (observer.updater isDefinedAt this) observer.updater(this)
    }
  }
  /** Notifies every observer whose updater is defined at this subject. */
  def notifyChanged {
    for (observer <- observerSnapshot) {
      if (observer.updater isDefinedAt this) observer.updater(this)
    }
  }
  /** Snapshot of all currently registered observers. */
  def observers: Iterable[ChangeObserver] = observerSnapshot
  /** Snapshot of the registered observers of the given runtime type. */
  def observersOf[T <: ChangeObserver](observerType: Class[T]): Iterable[T] =
    observerSnapshot.collect {
      case observer if observerType.isInstance(observer) => observer.asInstanceOf[T]
    }
  /**
   * Returns the total number of observers.
   */
  def observerCount: Int = synchronized { observerToOwner.size }
  override def toString = {
    val snapshot = observerSnapshot
    val sb = new StringBuilder("ChangeObserverList: ")
    sb.append(snapshot.size).append(" observers: ")
    for (observer <- snapshot) {
      sb.append(" type ").append(observer.getClass.getName)
      sb.append(" observer ").append(observer)
    }
    sb.toString
  }
}
| dcaoyuan/inloopio-libs | inloopio-util/src/main/scala/inloopio/util/ChangeSubject.scala | Scala | bsd-3-clause | 3,059 |
package org.apache.spark.ml.uav
import com.tribbloids.spookystuff.uav.actions.UAVNavigation
import com.tribbloids.spookystuff.uav.spatial.point.NED
object TwoLines {
  /** Convenience factory: extracts the NED coordinate of each navigation
    * waypoint and delegates to the primary constructor. */
  def apply(
      A1: UAVNavigation#WSchema,
      B1: UAVNavigation#WSchema,
      A2: UAVNavigation#WSchema,
      B2: UAVNavigation#WSchema
  ): TwoLines = {
    val a1 = A1.coordinate
    val b1 = B1.coordinate
    val a2 = A2.coordinate
    val b2 = B2.coordinate
    TwoLines(a1, b1, a2, b2)
  }
}
/** Closest approach between segment A1->B1 and segment A2->B2, expressed in
  * NED coordinates. `t1`/`t2` are the clamped segment parameters of the two
  * closest points, `P` the vector between them, `D` their distance. */
case class TwoLines(
    A1: NED.Coordinate,
    B1: NED.Coordinate,
    A2: NED.Coordinate,
    B2: NED.Coordinate
) {
  // Offset between the segment origins and the two direction vectors.
  // Hoisted to class level so they are computed once: previously they were
  // duplicated inside the (t1, t2) initializer and again as members.
  val M = A1.vector - A2.vector
  val C1 = B1.vector - A1.vector
  val C2 = B2.vector - A2.vector
  // Segment parameters (clamped to [0, 1]) of the closest points on
  // A1->B1 (t1) and A2->B2 (t2).
  val (t1, t2) = {
    // Squared lengths of the direction vectors; zero means the segment
    // degenerates to a point.
    val CC1 = C1.t * C1
    val CC2 = C2.t * C2
    // Restrict both parameters to the segments themselves.
    def clamp(_t1: Double, _t2: Double) = {
      val t1 = Math.max(Math.min(1.0, _t1), 0.0)
      val t2 = Math.max(Math.min(1.0, _t2), 0.0)
      t1 -> t2
    }
    (CC1, CC2) match {
      case (0.0, 0.0) =>
        // Both segments are points.
        (0.0, 0.0)
      case (0.0, _) =>
        // First segment is a point: project it onto the second segment.
        val t1 = 0
        val t2 = C2.t * M / CC2
        clamp(t1, t2)
      case (_, 0.0) =>
        // Second segment is a point: project it onto the first segment.
        val t2 = 0
        val t1 = -C1.t * M / CC1
        clamp(t1, t2)
      case _ =>
        val C21 = C2 * C1.t
        val G = C21 - C21.t
        val C1TGC2 = C1.t * G * C2
        if (C1TGC2 == 0) {
          // Degenerate case (e.g. parallel directions): fall back to the
          // segment origins.
          (0.0, 0.0)
        } else {
          def t1 = -(M.t * G * C2) / C1TGC2
          def t2 = -(M.t * G * C1) / C1TGC2
          clamp(t1, t2)
        }
    }
  }
  // Vector between the two closest points, its squared norm, and the distance.
  val P: Vec = M + t1 * C1 - t2 * C2
  val DSquare = P dot P
  val D = Math.sqrt(DSquare)
}
| tribbloid/spookystuff | uav/src/main/scala/org/apache/spark/ml/uav/TwoLines.scala | Scala | apache-2.0 | 1,621 |
package org.http4s
package testing
import cats.implicits._
import cats.effect._
import cats.effect.laws.util.TestContext
import org.http4s.headers.{`Content-Length`, `Transfer-Encoding`}
import org.scalacheck.{Arbitrary, Prop, Shrink}
import org.typelevel.discipline.Laws
/** Laws that any [[EntityEncoder]] instance must satisfy. */
trait EntityEncoderLaws[F[_], A] extends ToIOSyntax {
  implicit def effect: Effect[F]
  implicit def encoder: EntityEncoder[F, A]
  // If the encoder declares a length for the encoded entity, it must equal
  // the actual number of body chunks produced; an undefined length is
  // vacuously accurate (fold default is true).
  def accurateContentLengthIfDefined(a: A) =
    (for {
      entity <- encoder.toEntity(a)
      body <- entity.body.compile.toVector
      bodyLength = body.size.toLong
      contentLength = entity.length
    } yield contentLength.fold(true)(_ === bodyLength)).toIO
  // The encoder must not preset a Content-Length in its static headers.
  def noContentLengthInStaticHeaders =
    encoder.headers.get(`Content-Length`).isEmpty
  // The encoder must not preset a Transfer-Encoding in its static headers.
  def noTransferEncodingInStaticHeaders =
    encoder.headers.get(`Transfer-Encoding`).isEmpty
}
object EntityEncoderLaws {
  /** Summons the implicit `Effect[F]` and `EntityEncoder[F, A]` instances
    * and bundles them into an [[EntityEncoderLaws]]. */
  def apply[F[_], A](
      implicit effectF: Effect[F],
      entityEncoderFA: EntityEncoder[F, A]
  ): EntityEncoderLaws[F, A] =
    new EntityEncoderLaws[F, A] {
      override val effect: Effect[F] = effectF
      override val encoder: EntityEncoder[F, A] = entityEncoderFA
    }
}
/** Discipline rule set wrapping [[EntityEncoderLaws]] for property testing. */
trait EntityEncoderTests[F[_], A] extends Laws {
  def laws: EntityEncoderLaws[F, A]
  // The content-length law is checked for arbitrary values of A; the two
  // static-header laws are value-independent boolean properties.
  def entityEncoder(
      implicit
      arbitraryA: Arbitrary[A],
      shrinkA: Shrink[A],
      testContext: TestContext): RuleSet = new DefaultRuleSet(
    name = "EntityEncoder",
    parent = None,
    "accurateContentLength" -> Prop.forAll { (a: A) =>
      ioBooleanToProp(laws.accurateContentLengthIfDefined(a))
    },
    "noContentLengthInStaticHeaders" -> laws.noContentLengthInStaticHeaders,
    "noTransferEncodingInStaticHeaders" -> laws.noTransferEncodingInStaticHeaders
  )
}
object EntityEncoderTests {
  /** Builds an [[EntityEncoderTests]] rule set from the implicitly available
    * `Effect[F]` and `EntityEncoder[F, A]` instances. */
  def apply[F[_], A](
      implicit effectF: Effect[F],
      entityEncoderFA: EntityEncoder[F, A]
  ): EntityEncoderTests[F, A] =
    new EntityEncoderTests[F, A] {
      override val laws: EntityEncoderLaws[F, A] = EntityEncoderLaws[F, A]
    }
}
| reactormonk/http4s | testing/src/main/scala/org/http4s/testing/EntityEncoderTests.scala | Scala | apache-2.0 | 1,964 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2015 Matthias Langer (t3l@threelights.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze.modules
import edu.latrobe._
import edu.latrobe.blaze._
import edu.latrobe.blaze.modules.jvm._
import edu.latrobe.blaze.TensorDependency._
/**
* f(x_a) = log(x_a)
*
* -1
* f(x_a) = exp(x_a)
*
* d f(x_a) 1
* -------- = ---
* d x_a x_a
*
* d f(x_a)
* ----------- = 0
* d x_b, a!=b
*
*
* ---
* D f(x_a) \ d f(x_a) 1
* -------- = / -------- = ---
* D x_a --- d x_i x_a
* i
*
*/
/** Element-wise natural logarithm layer; the concrete math lives in the
  * platform variants resolved via [[LogBuilder]]. */
abstract class Log
  extends NonTrainableMapLayer[LogBuilder]
  with NonPenalizing {

  // ---------------------------------------------------------------------------
  //    Forward propagation related.
  // ---------------------------------------------------------------------------
  // No state needs to survive until backprop, hence EmptyContext.
  final override protected def doPredict(mode:           Mode,
                                         inPlaceAllowed: Boolean,
                                         input:          Tensor,
                                         reference:      Tensor)
  : (Tensor, PredictContext) = {
    val out = doPredict(input)
    (out, EmptyContext)
  }

  protected def doPredict(input: Tensor)
  : Tensor

  final override protected def doPredictInv(output:  Tensor,
                                            context: PredictContext)
  : Tensor = doPredictInv(output)

  // Inverse of the forward pass (exp), see the class-level derivation above.
  protected def doPredictInv(output: Tensor)
  : Tensor


  // ---------------------------------------------------------------------------
  //    Back propagation related.
  // ---------------------------------------------------------------------------
  // d/dx log(x) = 1/x, so backprop needs the original input but not the output.
  final override val backpropagationRequirementsForInput
  : TensorDependency = TensorDependency.Required

  final override val backpropagationRequirementsForOutput
  : TensorDependency = TensorDependency.NotRequired

  // error := error / input (element-wise, reusing the error tensor), which is
  // the chain rule applied to d log(x)/dx = 1/x.
  final override protected def doDeriveInputError(input:     Tensor,
                                                  reference: Tensor,
                                                  output:    Tensor,
                                                  context:   PredictContext,
                                                  error:     Tensor)
  : Tensor = {
    error :/= input
    error
  }

}
/** Builder for [[Log]] layers; carries no configuration of its own. */
final class LogBuilder
  extends NonTrainableMapLayerBuilder[LogBuilder] {

  override def repr: LogBuilder = this

  // Pattern match instead of isInstanceOf for readability; behavior identical.
  override def canEqual(that: Any): Boolean = that match {
    case _: LogBuilder => true
    case _             => false
  }

  override protected def doCopy(): LogBuilder = LogBuilder()

  override def outputPlatformFor(hints: BuildHints): Platform =
    LogBuilder.outputPlatformFor(this, hints)

  override def build(hints:          BuildHints,
                     seed:           InstanceSeed,
                     weightsBuilder: ValueTensorBufferBuilder): Module =
    LogBuilder.lookupAndBuild(this, hints, seed, weightsBuilder)

}
object LogBuilder extends ModuleVariantTable[LogBuilder] {
  // Registered implementation variants with their scores.
  // NOTE(review): 2 and 4 appear to be variant preference ranks — confirm
  // against ModuleVariantTable.register's contract.
  register(2, Log_JVM_ApacheCommons_Description)
  register(4, Log_JVM_Baseline_Description)
  final def apply()
  : LogBuilder = new LogBuilder
}
| bashimao/ltudl | blaze/src/main/scala/edu/latrobe/blaze/modules/Log.scala | Scala | apache-2.0 | 3,744 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.harness
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.DataStream
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord
import org.apache.flink.streaming.util.KeyedOneInputStreamOperatorTestHarness
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.api.config.OptimizerConfigOptions
import org.apache.flink.table.data.{RowData, TimestampData}
import org.apache.flink.table.planner.factories.TestValuesTableFactory
import org.apache.flink.table.planner.runtime.utils.StreamingWithStateTestBase.{HEAP_BACKEND, ROCKSDB_BACKEND, StateBackendMode}
import org.apache.flink.table.planner.runtime.utils.TestData
import org.apache.flink.table.runtime.typeutils.RowDataSerializer
import org.apache.flink.table.runtime.util.RowDataHarnessAssertor
import org.apache.flink.table.runtime.util.StreamRecordUtils.binaryRecord
import org.apache.flink.table.runtime.util.TimeWindowUtil.toUtcTimestampMills
import org.apache.flink.table.types.logical.LogicalType
import org.apache.flink.types.Row
import org.apache.flink.types.RowKind.INSERT
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.{Before, Test}
import java.time.{LocalDateTime, ZoneId}
import java.util.concurrent.ConcurrentLinkedQueue
import java.util.{Collection => JCollection}
import scala.collection.JavaConversions._
/**
* Harness tests for processing-time window aggregate. We can't test them in
* [[WindowAggregateITCase]] because the result is non-deterministic, therefore
* we use harness to test them.
*/
@RunWith(classOf[Parameterized])
class WindowAggregateHarnessTest(backend: StateBackendMode, shiftTimeZone: ZoneId)
  extends HarnessTestBase(backend) {

  // Expected window boundaries below are written as UTC wall-clock times and
  // converted to the session time zone through localMills.
  private val UTC_ZONE_ID = ZoneId.of("UTC")

  @Before
  override def before(): Unit = {
    super.before()
    // Register the shared input rows as table T1 with a processing-time
    // attribute; the parameterized session time zone is applied to tEnv.
    val dataId = TestValuesTableFactory.registerData(TestData.windowDataWithTimestamp)
    tEnv.getConfig.setLocalTimeZone(shiftTimeZone)
    tEnv.executeSql(
      s"""
         |CREATE TABLE T1 (
         | `ts` STRING,
         | `int` INT,
         | `double` DOUBLE,
         | `float` FLOAT,
         | `bigdec` DECIMAL(10, 2),
         | `string` STRING,
         | `name` STRING,
         | proctime AS PROCTIME()
         |) WITH (
         | 'connector' = 'values',
         | 'data-id' = '$dataId'
         |)
         |""".stripMargin)
  }

  /**
   * The produced result should be the same with
   * [[WindowAggregateITCase.testEventTimeTumbleWindow()]].
   */
  @Test
  def testProcessingTimeTumbleWindow(): Unit = {
    val (testHarness, outputTypes) = createProcessingTimeTumbleWindowOperator
    val assertor = new RowDataHarnessAssertor(outputTypes)

    testHarness.open()
    ingestData(testHarness)

    val expected = new ConcurrentLinkedQueue[Object]()
    expected.add(record("a", 4L, 5.0D, 2L,
      localMills("1970-01-01T00:00:00"), localMills("1970-01-01T00:00:05")))
    expected.add(record("a", 1L, null, 1L,
      localMills("1970-01-01T00:00:05"), localMills("1970-01-01T00:00:10")))
    expected.add(record("b", 2L, 6.0D, 2L,
      localMills("1970-01-01T00:00:05"), localMills("1970-01-01T00:00:10")))
    expected.add(record("b", 1L, 4.0D, 1L,
      localMills("1970-01-01T00:00:15"), localMills("1970-01-01T00:00:20")))
    expected.add(record("b", 1L, 3.0D, 1L,
      localMills("1970-01-01T00:00:30"), localMills("1970-01-01T00:00:35")))
    expected.add(record(null, 1L, 7.0D, 0L,
      localMills("1970-01-01T00:00:30"), localMills("1970-01-01T00:00:35")))
    assertor.assertOutputEqualsSorted("result mismatch", expected, testHarness.getOutput)

    testHarness.close()
  }

  // Builds a harness around the WindowAggregate operator of a processing-time
  // tumble window query and returns it together with the operator's output
  // field types (aggregates first, then window start/end).
  private def createProcessingTimeTumbleWindowOperator()
    : (KeyedOneInputStreamOperatorTestHarness[RowData, RowData, RowData], Array[LogicalType]) = {
    val sql =
      """
        |SELECT
        |  `name`,
        |  window_start,
        |  window_end,
        |  COUNT(*),
        |  MAX(`double`),
        |  COUNT(DISTINCT `string`)
        |FROM TABLE(
        |   TUMBLE(TABLE T1, DESCRIPTOR(proctime), INTERVAL '5' SECOND))
        |GROUP BY `name`, window_start, window_end
      """.stripMargin
    val t1 = tEnv.sqlQuery(sql)
    val testHarness = createHarnessTester(t1.toAppendStream[Row], "WindowAggregate")
    // window aggregate put window properties at the end of aggs
    val outputTypes =
      Array(
        DataTypes.STRING().getLogicalType,
        DataTypes.BIGINT().getLogicalType,
        DataTypes.DOUBLE().getLogicalType,
        DataTypes.BIGINT().getLogicalType,
        DataTypes.TIMESTAMP_LTZ(3).getLogicalType,
        DataTypes.TIMESTAMP_LTZ(3).getLogicalType)
    (testHarness, outputTypes)
  }

  /**
   * The produced result should be the same with
   * [[WindowAggregateITCase.testEventTimeHopWindow()]].
   */
  @Test
  def testProcessingTimeHopWindow(): Unit = {
    val sql =
      """
        |SELECT
        |  `name`,
        |  window_start,
        |  window_end,
        |  COUNT(*),
        |  MAX(`double`),
        |  COUNT(DISTINCT `string`)
        |FROM TABLE(
        |   HOP(TABLE T1, DESCRIPTOR(proctime), INTERVAL '5' SECOND, INTERVAL '10' SECOND))
        |GROUP BY `name`, window_start, window_end
      """.stripMargin
    val t1 = tEnv.sqlQuery(sql)
    val testHarness = createHarnessTester(t1.toAppendStream[Row], "WindowAggregate")
    // window aggregate put window properties at the end of aggs
    val assertor = new RowDataHarnessAssertor(
      Array(
        DataTypes.STRING().getLogicalType,
        DataTypes.BIGINT().getLogicalType,
        DataTypes.DOUBLE().getLogicalType,
        DataTypes.BIGINT().getLogicalType,
        DataTypes.TIMESTAMP_LTZ(3).getLogicalType,
        DataTypes.TIMESTAMP_LTZ(3).getLogicalType))

    testHarness.open()
    ingestData(testHarness)

    val expected = new ConcurrentLinkedQueue[Object]()
    expected.add(record("a", 4L, 5.0D, 2L,
      localMills("1969-12-31T23:59:55"), localMills("1970-01-01T00:00:05")))
    expected.add(record("a", 5L, 5.0D, 3L,
      localMills("1970-01-01T00:00:00"), localMills("1970-01-01T00:00:10")))
    expected.add(record("a", 1L, null, 1L,
      localMills("1970-01-01T00:00:05"), localMills("1970-01-01T00:00:15")))
    expected.add(record("b", 2L, 6.0D, 2L,
      localMills("1970-01-01T00:00:00"), localMills("1970-01-01T00:00:10")))
    expected.add(record("b", 2L, 6.0D, 2L,
      localMills("1970-01-01T00:00:05"), localMills("1970-01-01T00:00:15")))
    expected.add(record("b", 1L, 4.0D, 1L,
      localMills("1970-01-01T00:00:10"), localMills("1970-01-01T00:00:20")))
    expected.add(record("b", 1L, 4.0D, 1L,
      localMills("1970-01-01T00:00:15"), localMills("1970-01-01T00:00:25")))
    expected.add(record("b", 1L, 3.0D, 1L,
      localMills("1970-01-01T00:00:25"), localMills("1970-01-01T00:00:35")))
    expected.add(record("b", 1L, 3.0D, 1L,
      localMills("1970-01-01T00:00:30"), localMills("1970-01-01T00:00:40")))
    expected.add(record(null, 1L, 7.0D, 0L,
      localMills("1970-01-01T00:00:25"), localMills("1970-01-01T00:00:35")))
    expected.add(record(null, 1L, 7.0D, 0L,
      localMills("1970-01-01T00:00:30"), localMills("1970-01-01T00:00:40")))
    assertor.assertOutputEqualsSorted("result mismatch", expected, testHarness.getOutput)

    testHarness.close()
  }

  /**
   * The produced result should be the same with
   * [[WindowAggregateITCase.testEventTimeCumulateWindow()]].
   */
  @Test
  def testProcessingTimeCumulateWindow(): Unit = {
    val sql =
      """
        |SELECT
        |  `name`,
        |  window_start,
        |  window_end,
        |  COUNT(*),
        |  MAX(`double`),
        |  COUNT(DISTINCT `string`)
        |FROM TABLE(
        |   CUMULATE(
        |     TABLE T1,
        |     DESCRIPTOR(proctime),
        |     INTERVAL '5' SECOND,
        |     INTERVAL '15' SECOND))
        |GROUP BY `name`, window_start, window_end
      """.stripMargin
    val t1 = tEnv.sqlQuery(sql)
    val testHarness = createHarnessTester(t1.toAppendStream[Row], "WindowAggregate")
    // window aggregate put window properties at the end of aggs
    val assertor = new RowDataHarnessAssertor(
      Array(
        DataTypes.STRING().getLogicalType,
        DataTypes.BIGINT().getLogicalType,
        DataTypes.DOUBLE().getLogicalType,
        DataTypes.BIGINT().getLogicalType,
        DataTypes.TIMESTAMP_LTZ(3).getLogicalType,
        DataTypes.TIMESTAMP_LTZ(3).getLogicalType))

    testHarness.open()
    ingestData(testHarness)

    val expected = new ConcurrentLinkedQueue[Object]()
    expected.add(record("a", 4L, 5.0D, 2L,
      localMills("1970-01-01T00:00:00"), localMills("1970-01-01T00:00:05")))
    expected.add(record("a", 5L, 5.0D, 3L,
      localMills("1970-01-01T00:00:00"), localMills("1970-01-01T00:00:10")))
    expected.add(record("a", 5L, 5.0D, 3L,
      localMills("1970-01-01T00:00:00"), localMills("1970-01-01T00:00:15")))
    expected.add(record("b", 2L, 6.0D, 2L,
      localMills("1970-01-01T00:00:00"), localMills("1970-01-01T00:00:10")))
    expected.add(record("b", 2L, 6.0D, 2L,
      localMills("1970-01-01T00:00:00"), localMills("1970-01-01T00:00:15")))
    expected.add(record("b", 1L, 4.0D, 1L,
      localMills("1970-01-01T00:00:15"), localMills("1970-01-01T00:00:20")))
    expected.add(record("b", 1L, 4.0D, 1L,
      localMills("1970-01-01T00:00:15"), localMills("1970-01-01T00:00:25")))
    expected.add(record("b", 1L, 4.0D, 1L,
      localMills("1970-01-01T00:00:15"), localMills("1970-01-01T00:00:30")))
    expected.add(record("b", 1L, 3.0D, 1L,
      localMills("1970-01-01T00:00:30"), localMills("1970-01-01T00:00:35")))
    expected.add(record("b", 1L, 3.0D, 1L,
      localMills("1970-01-01T00:00:30"), localMills("1970-01-01T00:00:40")))
    expected.add(record("b", 1L, 3.0D, 1L,
      localMills("1970-01-01T00:00:30"), localMills("1970-01-01T00:00:45")))
    expected.add(record(null, 1L, 7.0D, 0L,
      localMills("1970-01-01T00:00:30"), localMills("1970-01-01T00:00:35")))
    expected.add(record(null, 1L, 7.0D, 0L,
      localMills("1970-01-01T00:00:30"), localMills("1970-01-01T00:00:40")))
    expected.add(record(null, 1L, 7.0D, 0L,
      localMills("1970-01-01T00:00:30"), localMills("1970-01-01T00:00:45")))
    assertor.assertOutputEqualsSorted("result mismatch", expected, testHarness.getOutput)

    testHarness.close()
  }

  @Test
  def testCloseWithoutOpen(): Unit = {
    val (testHarness, outputTypes) = createProcessingTimeTumbleWindowOperator
    testHarness.setup(new RowDataSerializer(outputTypes: _*))
    // simulate a failover after a failed task open, expect no exception happens
    testHarness.close()
  }

  /**
   * Processing time window doesn't support two-phase, so add a single two-phase test.
   */
  @Test
  def testTwoPhaseWindowAggregateCloseWithoutOpen(): Unit = {
    val timestampDataId = TestValuesTableFactory.registerData(TestData.windowDataWithTimestamp)
    tEnv.executeSql(
      s"""
         |CREATE TABLE T2 (
         | `ts` STRING,
         | `int` INT,
         | `double` DOUBLE,
         | `float` FLOAT,
         | `bigdec` DECIMAL(10, 2),
         | `string` STRING,
         | `name` STRING,
         | `rowtime` AS
         | TO_TIMESTAMP(`ts`),
         | WATERMARK for `rowtime` AS `rowtime` - INTERVAL '1' SECOND
         |) WITH (
         | 'connector' = 'values',
         | 'data-id' = '${timestampDataId}',
         | 'failing-source' = 'false'
         |)
         |""".stripMargin)
    tEnv.getConfig.set(OptimizerConfigOptions.TABLE_OPTIMIZER_AGG_PHASE_STRATEGY, "TWO_PHASE")
    val sql =
      """
        |SELECT
        |  `name`,
        |  window_start,
        |  window_end,
        |  COUNT(*),
        |  MAX(`double`),
        |  COUNT(DISTINCT `string`)
        |FROM TABLE(
        |   TUMBLE(TABLE T2, DESCRIPTOR(rowtime), INTERVAL '5' SECOND))
        |GROUP BY `name`, window_start, window_end
      """.stripMargin
    val t1 = tEnv.sqlQuery(sql)
    val stream: DataStream[Row] = t1.toAppendStream[Row]
    val testHarness = createHarnessTesterForNoState(stream, "LocalWindowAggregate")
    // window aggregate put window properties at the end of aggs
    val outputTypes = Array(
      DataTypes.STRING().getLogicalType,
      DataTypes.BIGINT().getLogicalType,
      DataTypes.DOUBLE().getLogicalType,
      DataTypes.BIGINT().getLogicalType,
      DataTypes.TIMESTAMP_LTZ(3).getLogicalType,
      DataTypes.TIMESTAMP_LTZ(3).getLogicalType)
    testHarness.setup(new RowDataSerializer(outputTypes: _*))
    // simulate a failover after a failed task open, expect no exception happens
    testHarness.close()

    val testHarness1 = createHarnessTester(stream, "GlobalWindowAggregate")
    testHarness1.setup(new RowDataSerializer(outputTypes: _*))
    // simulate a failover after a failed task open, expect no exception happens
    testHarness1.close()
  }

  /**
   * Ingests testing data, the input schema is [name, double, string, proctime].
   * We follow the test data in [[TestData.windowDataWithTimestamp]] to have the same produced
   * result. The only difference is we don't ingest the late data in this test, so they should
   * produce same result.
   */
  private def ingestData(
      testHarness: KeyedOneInputStreamOperatorTestHarness[RowData, RowData, RowData]): Unit = {
    // input schema: [name, double, string, proctime]
    testHarness.setProcessingTime(1000L)
    testHarness.processElement(record("a", 1d, "Hi", null))
    testHarness.setProcessingTime(2000L)
    testHarness.processElement(record("a", 2d, "Comment#1", null))
    testHarness.setProcessingTime(3000L)
    testHarness.processElement(record("a", 2d, "Comment#1", null))
    testHarness.setProcessingTime(4000L)
    testHarness.processElement(record("a", 5d, null, null))
    testHarness.setProcessingTime(6000L)
    testHarness.processElement(record("b", 6d, "Hi", null))
    testHarness.setProcessingTime(7000L)
    testHarness.processElement(record("b", 3d, "Hello", null))
    testHarness.setProcessingTime(8000L)
    testHarness.processElement(record("a", null, "Comment#2", null))
    testHarness.setProcessingTime(16000L)
    testHarness.processElement(record("b", 4d, "Hi", null))
    testHarness.setProcessingTime(32000L)
    testHarness.processElement(record(null, 7d, null, null))
    testHarness.setProcessingTime(34000L)
    testHarness.processElement(record("b", 3d, "Comment#3", null))
    testHarness.setProcessingTime(50000L)
  }

  // Boxes Scala primitives and wraps the given values into an INSERT binary
  // row record.
  private def record(args: Any*): StreamRecord[RowData] = {
    val objs = args.map {
      case l: Long => Long.box(l)
      case d: Double => Double.box(d)
      case arg@_ => arg.asInstanceOf[Object]
    }.toArray
    binaryRecord(INSERT, objs: _*)
  }

  // Interprets `dateTime` as a UTC wall-clock time and converts it to epoch
  // millis in the configured session time zone.
  private def localMills(dateTime: String): TimestampData = {
    val windowDateTime = LocalDateTime.parse(dateTime).atZone(UTC_ZONE_ID)
    TimestampData.fromEpochMillis(
      toUtcTimestampMills(windowDateTime.toInstant.toEpochMilli, shiftTimeZone))
  }
}
object WindowAggregateHarnessTest {
  /** Enumerates the cross product of state backends and session time zones
    * (same four combinations, same order, as the previous explicit listing). */
  @Parameterized.Parameters(name = "StateBackend={0}, TimeZone={1}")
  def parameters(): JCollection[Array[java.lang.Object]] = {
    val backends = Seq[AnyRef](HEAP_BACKEND, ROCKSDB_BACKEND)
    val timeZones = Seq[AnyRef](ZoneId.of("UTC"), ZoneId.of("Asia/Shanghai"))
    for {
      backend <- backends
      timeZone <- timeZones
    } yield Array(backend, timeZone)
  }
}
| xccui/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/runtime/harness/WindowAggregateHarnessTest.scala | Scala | apache-2.0 | 16,276 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.aggregate
import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import scala.collection.mutable.ArrayBuffer
/**
* The base class of [[SortBasedAggregationIterator]].
* It mainly contains two parts:
* 1. It initializes aggregate functions.
* 2. It creates two functions, `processRow` and `generateOutput` based on [[AggregateMode]] of
* its aggregate functions. `processRow` is the function to handle an input. `generateOutput`
* is used to generate result.
*/
abstract class AggregationIterator(
groupingKeyAttributes: Seq[Attribute],
valueAttributes: Seq[Attribute],
nonCompleteAggregateExpressions: Seq[AggregateExpression],
nonCompleteAggregateAttributes: Seq[Attribute],
completeAggregateExpressions: Seq[AggregateExpression],
completeAggregateAttributes: Seq[Attribute],
initialInputBufferOffset: Int,
resultExpressions: Seq[NamedExpression],
newMutableProjection: (Seq[Expression], Seq[Attribute]) => (() => MutableProjection),
outputsUnsafeRows: Boolean)
extends Iterator[InternalRow] with Logging {
///////////////////////////////////////////////////////////////////////////
// Initializing functions.
///////////////////////////////////////////////////////////////////////////
// An Seq of all AggregateExpressions.
// It is important that all AggregateExpressions with the mode Partial, PartialMerge or Final
// are at the beginning of the allAggregateExpressions.
protected val allAggregateExpressions =
nonCompleteAggregateExpressions ++ completeAggregateExpressions
require(
allAggregateExpressions.map(_.mode).distinct.length <= 2,
s"$allAggregateExpressions are not supported becuase they have more than 2 distinct modes.")
/**
* The distinct modes of AggregateExpressions. Right now, we can handle the following mode:
* - Partial-only: all AggregateExpressions have the mode of Partial;
* - PartialMerge-only: all AggregateExpressions have the mode of PartialMerge);
* - Final-only: all AggregateExpressions have the mode of Final;
* - Final-Complete: some AggregateExpressions have the mode of Final and
* others have the mode of Complete;
* - Complete-only: nonCompleteAggregateExpressions is empty and we have AggregateExpressions
* with mode Complete in completeAggregateExpressions; and
* - Grouping-only: there is no AggregateExpression.
*/
protected val aggregationMode: (Option[AggregateMode], Option[AggregateMode]) =
nonCompleteAggregateExpressions.map(_.mode).distinct.headOption ->
completeAggregateExpressions.map(_.mode).distinct.headOption
  // Initialize all AggregateFunctions by binding references if necessary,
  // and set inputBufferOffset and mutableBufferOffset.
  protected val allAggregateFunctions: Array[AggregateFunction] = {
    // Running offset into the mutable (output) aggregation buffer.
    var mutableBufferOffset = 0
    // Running offset into the input buffer rows; starts after any grouping columns.
    var inputBufferOffset: Int = initialInputBufferOffset
    val functions = new Array[AggregateFunction](allAggregateExpressions.length)
    var i = 0
    while (i < allAggregateExpressions.length) {
      val func = allAggregateExpressions(i).aggregateFunction
      val funcWithBoundReferences: AggregateFunction = allAggregateExpressions(i).mode match {
        case Partial | Complete if func.isInstanceOf[ImperativeAggregate] =>
          // We need to create BoundReferences if the function is not an
          // expression-based aggregate function (it does not support code-gen) and the mode of
          // this function is Partial or Complete because we will call eval of this
          // function's children in the update method of this aggregate function.
          // Those eval calls require BoundReferences to work.
          BindReferences.bindReference(func, valueAttributes)
        case _ =>
          // We only need to set inputBufferOffset for aggregate functions with mode
          // PartialMerge and Final.
          val updatedFunc = func match {
            case function: ImperativeAggregate =>
              function.withNewInputAggBufferOffset(inputBufferOffset)
            case function => function
          }
          // Advance past this function's slots regardless of its kind so that
          // subsequent functions see the correct starting offset.
          inputBufferOffset += func.aggBufferSchema.length
          updatedFunc
      }
      val funcWithUpdatedAggBufferOffset = funcWithBoundReferences match {
        case function: ImperativeAggregate =>
          // Set mutableBufferOffset for this function. It is important that setting
          // mutableBufferOffset happens after all potential bindReference operations
          // because bindReference will create a new instance of the function.
          function.withNewMutableAggBufferOffset(mutableBufferOffset)
        case function => function
      }
      mutableBufferOffset += funcWithUpdatedAggBufferOffset.aggBufferSchema.length
      functions(i) = funcWithUpdatedAggBufferOffset
      i += 1
    }
    functions
  }
// Positions of those imperative aggregate functions in allAggregateFunctions.
// For example, we have func1, func2, func3, func4 in aggregateFunctions, and
// func2 and func3 are imperative aggregate functions.
// ImperativeAggregateFunctionPositions will be [1, 2].
private[this] val allImperativeAggregateFunctionPositions: Array[Int] = {
val positions = new ArrayBuffer[Int]()
var i = 0
while (i < allAggregateFunctions.length) {
allAggregateFunctions(i) match {
case agg: DeclarativeAggregate =>
case _ => positions += i
}
i += 1
}
positions.toArray
}
// All AggregateFunctions functions with mode Partial, PartialMerge, or Final.
private[this] val nonCompleteAggregateFunctions: Array[AggregateFunction] =
allAggregateFunctions.take(nonCompleteAggregateExpressions.length)
// All imperative aggregate functions with mode Partial, PartialMerge, or Final.
private[this] val nonCompleteImperativeAggregateFunctions: Array[ImperativeAggregate] =
nonCompleteAggregateFunctions.collect { case func: ImperativeAggregate => func }
// The projection used to initialize buffer values for all expression-based aggregates.
private[this] val expressionAggInitialProjection = {
val initExpressions = allAggregateFunctions.flatMap {
case ae: DeclarativeAggregate => ae.initialValues
// For the positions corresponding to imperative aggregate functions, we'll use special
// no-op expressions which are ignored during projection code-generation.
case i: ImperativeAggregate => Seq.fill(i.aggBufferAttributes.length)(NoOp)
}
newMutableProjection(initExpressions, Nil)()
}
// All imperative AggregateFunctions.
private[this] val allImperativeAggregateFunctions: Array[ImperativeAggregate] =
allImperativeAggregateFunctionPositions
.map(allAggregateFunctions)
.map(_.asInstanceOf[ImperativeAggregate])
  ///////////////////////////////////////////////////////////////////////////
  // Methods and fields used by sub-classes.
  ///////////////////////////////////////////////////////////////////////////
  // Initializing functions used to process a row.
  // The returned closure folds one input row (second argument) into the mutable
  // aggregation buffer (first argument). The right closure is selected once,
  // based on aggregationMode, instead of dispatching per row.
  protected val processRow: (MutableRow, InternalRow) => Unit = {
    val rowToBeProcessed = new JoinedRow
    val aggregationBufferSchema = allAggregateFunctions.flatMap(_.aggBufferAttributes)
    aggregationMode match {
      // Partial-only: update buffers from raw input values.
      case (Some(Partial), None) =>
        val updateExpressions = nonCompleteAggregateFunctions.flatMap {
          case ae: DeclarativeAggregate => ae.updateExpressions
          case agg: AggregateFunction => Seq.fill(agg.aggBufferAttributes.length)(NoOp)
        }
        val expressionAggUpdateProjection =
          newMutableProjection(updateExpressions, aggregationBufferSchema ++ valueAttributes)()
        (currentBuffer: MutableRow, row: InternalRow) => {
          expressionAggUpdateProjection.target(currentBuffer)
          // Process all expression-based aggregate functions.
          expressionAggUpdateProjection(rowToBeProcessed(currentBuffer, row))
          // Process all imperative aggregate functions.
          var i = 0
          while (i < nonCompleteImperativeAggregateFunctions.length) {
            nonCompleteImperativeAggregateFunctions(i).update(currentBuffer, row)
            i += 1
          }
        }
      // PartialMerge-only or Final-only: merge incoming buffers into the current buffer.
      case (Some(PartialMerge), None) | (Some(Final), None) =>
        val inputAggregationBufferSchema = if (initialInputBufferOffset == 0) {
          // If initialInputBufferOffset == 0, the input rows do not contain
          // grouping keys (they hold only the serialized aggregation buffers).
          // This part is pretty hacky.
          allAggregateFunctions.flatMap(_.inputAggBufferAttributes).toSeq
        } else {
          groupingKeyAttributes ++ allAggregateFunctions.flatMap(_.inputAggBufferAttributes)
        }
        // val inputAggregationBufferSchema =
        //  groupingKeyAttributes ++
        //    allAggregateFunctions.flatMap(_.cloneBufferAttributes)
        val mergeExpressions = nonCompleteAggregateFunctions.flatMap {
          case ae: DeclarativeAggregate => ae.mergeExpressions
          case agg: AggregateFunction => Seq.fill(agg.aggBufferAttributes.length)(NoOp)
        }
        // This projection is used to merge buffer values for all expression-based aggregates.
        val expressionAggMergeProjection =
          newMutableProjection(
            mergeExpressions,
            aggregationBufferSchema ++ inputAggregationBufferSchema)()
        (currentBuffer: MutableRow, row: InternalRow) => {
          // Process all expression-based aggregate functions.
          expressionAggMergeProjection.target(currentBuffer)(rowToBeProcessed(currentBuffer, row))
          // Process all imperative aggregate functions.
          var i = 0
          while (i < nonCompleteImperativeAggregateFunctions.length) {
            nonCompleteImperativeAggregateFunctions(i).merge(currentBuffer, row)
            i += 1
          }
        }
      // Final-Complete: merge Final buffers and update Complete buffers from the same row.
      case (Some(Final), Some(Complete)) =>
        val completeAggregateFunctions: Array[AggregateFunction] =
          allAggregateFunctions.takeRight(completeAggregateExpressions.length)
        // All imperative aggregate functions with mode Complete.
        val completeImperativeAggregateFunctions: Array[ImperativeAggregate] =
          completeAggregateFunctions.collect { case func: ImperativeAggregate => func }
        // The first initialInputBufferOffset values of the input aggregation buffer is
        // for grouping expressions and distinct columns.
        val groupingAttributesAndDistinctColumns = valueAttributes.take(initialInputBufferOffset)
        // NoOp padding for the Complete slots while merging Final functions.
        val completeOffsetExpressions =
          Seq.fill(completeAggregateFunctions.map(_.aggBufferAttributes.length).sum)(NoOp)
        // We do not touch buffer values of aggregate functions with the Final mode.
        val finalOffsetExpressions =
          Seq.fill(nonCompleteAggregateFunctions.map(_.aggBufferAttributes.length).sum)(NoOp)
        val mergeInputSchema =
          aggregationBufferSchema ++
            groupingAttributesAndDistinctColumns ++
            nonCompleteAggregateFunctions.flatMap(_.inputAggBufferAttributes)
        val mergeExpressions =
          nonCompleteAggregateFunctions.flatMap {
            case ae: DeclarativeAggregate => ae.mergeExpressions
            case agg: AggregateFunction => Seq.fill(agg.aggBufferAttributes.length)(NoOp)
          } ++ completeOffsetExpressions
        val finalExpressionAggMergeProjection =
          newMutableProjection(mergeExpressions, mergeInputSchema)()
        val updateExpressions =
          finalOffsetExpressions ++ completeAggregateFunctions.flatMap {
            case ae: DeclarativeAggregate => ae.updateExpressions
            case agg: AggregateFunction => Seq.fill(agg.aggBufferAttributes.length)(NoOp)
          }
        val completeExpressionAggUpdateProjection =
          newMutableProjection(updateExpressions, aggregationBufferSchema ++ valueAttributes)()
        (currentBuffer: MutableRow, row: InternalRow) => {
          val input = rowToBeProcessed(currentBuffer, row)
          // For all aggregate functions with mode Complete, update buffers.
          completeExpressionAggUpdateProjection.target(currentBuffer)(input)
          var i = 0
          while (i < completeImperativeAggregateFunctions.length) {
            completeImperativeAggregateFunctions(i).update(currentBuffer, row)
            i += 1
          }
          // For all aggregate functions with mode Final, merge buffers.
          finalExpressionAggMergeProjection.target(currentBuffer)(input)
          i = 0
          while (i < nonCompleteImperativeAggregateFunctions.length) {
            nonCompleteImperativeAggregateFunctions(i).merge(currentBuffer, row)
            i += 1
          }
        }
      // Complete-only: update buffers directly from raw input values.
      case (None, Some(Complete)) =>
        val completeAggregateFunctions: Array[AggregateFunction] =
          allAggregateFunctions.takeRight(completeAggregateExpressions.length)
        // All imperative aggregate functions with mode Complete.
        val completeImperativeAggregateFunctions: Array[ImperativeAggregate] =
          completeAggregateFunctions.collect { case func: ImperativeAggregate => func }
        val updateExpressions =
          completeAggregateFunctions.flatMap {
            case ae: DeclarativeAggregate => ae.updateExpressions
            case agg: AggregateFunction => Seq.fill(agg.aggBufferAttributes.length)(NoOp)
          }
        val completeExpressionAggUpdateProjection =
          newMutableProjection(updateExpressions, aggregationBufferSchema ++ valueAttributes)()
        (currentBuffer: MutableRow, row: InternalRow) => {
          val input = rowToBeProcessed(currentBuffer, row)
          // For all aggregate functions with mode Complete, update buffers.
          completeExpressionAggUpdateProjection.target(currentBuffer)(input)
          var i = 0
          while (i < completeImperativeAggregateFunctions.length) {
            completeImperativeAggregateFunctions(i).update(currentBuffer, row)
            i += 1
          }
        }
      // Grouping only: nothing to aggregate per row.
      case (None, None) => (currentBuffer: MutableRow, row: InternalRow) => {}
      case other =>
        sys.error(
          s"Could not evaluate ${nonCompleteAggregateExpressions} because we do not " +
            s"support evaluate modes $other in this iterator.")
    }
  }
  // Initializing the function used to generate the output row.
  // Given the current grouping key and its final aggregation buffer, the returned
  // closure produces one output row. As with processRow, the closure is chosen
  // once based on aggregationMode.
  protected val generateOutput: (InternalRow, MutableRow) => InternalRow = {
    val rowToBeEvaluated = new JoinedRow
    val safeOutputRow = new SpecificMutableRow(resultExpressions.map(_.dataType))
    val mutableOutput = if (outputsUnsafeRows) {
      UnsafeProjection.create(resultExpressions.map(_.dataType).toArray).apply(safeOutputRow)
    } else {
      safeOutputRow
    }
    aggregationMode match {
      // Partial-only or PartialMerge-only: every output row is basically the values of
      // the grouping expressions and the corresponding aggregation buffer.
      case (Some(Partial), None) | (Some(PartialMerge), None) =>
        // Because we cannot copy a joinedRow containing a UnsafeRow (UnsafeRow does not
        // support generic getter), we create a mutable projection to output the
        // JoinedRow(currentGroupingKey, currentBuffer)
        val bufferSchema = nonCompleteAggregateFunctions.flatMap(_.aggBufferAttributes)
        val resultProjection =
          newMutableProjection(
            groupingKeyAttributes ++ bufferSchema,
            groupingKeyAttributes ++ bufferSchema)()
        resultProjection.target(mutableOutput)
        (currentGroupingKey: InternalRow, currentBuffer: MutableRow) => {
          resultProjection(rowToBeEvaluated(currentGroupingKey, currentBuffer))
          // rowToBeEvaluated(currentGroupingKey, currentBuffer)
        }
      // Final-only, Complete-only and Final-Complete: every output row contains values representing
      // resultExpressions.
      case (Some(Final), None) | (Some(Final) | None, Some(Complete)) =>
        val bufferSchemata =
          allAggregateFunctions.flatMap(_.aggBufferAttributes)
        // Declarative aggregates evaluate via expressions; imperative ones get NoOp
        // placeholders here and are evaluated by calling eval() below.
        val evalExpressions = allAggregateFunctions.map {
          case ae: DeclarativeAggregate => ae.evaluateExpression
          case agg: AggregateFunction => NoOp
        }
        val expressionAggEvalProjection = newMutableProjection(evalExpressions, bufferSchemata)()
        val aggregateResultSchema = nonCompleteAggregateAttributes ++ completeAggregateAttributes
        // TODO: Use unsafe row.
        val aggregateResult = new SpecificMutableRow(aggregateResultSchema.map(_.dataType))
        expressionAggEvalProjection.target(aggregateResult)
        val resultProjection =
          newMutableProjection(
            resultExpressions, groupingKeyAttributes ++ aggregateResultSchema)()
        resultProjection.target(mutableOutput)
        (currentGroupingKey: InternalRow, currentBuffer: MutableRow) => {
          // Generate results for all expression-based aggregate functions.
          expressionAggEvalProjection(currentBuffer)
          // Generate results for all imperative aggregate functions.
          var i = 0
          while (i < allImperativeAggregateFunctions.length) {
            aggregateResult.update(
              allImperativeAggregateFunctionPositions(i),
              allImperativeAggregateFunctions(i).eval(currentBuffer))
            i += 1
          }
          resultProjection(rowToBeEvaluated(currentGroupingKey, aggregateResult))
        }
      // Grouping-only: we only output values of grouping expressions.
      case (None, None) =>
        val resultProjection =
          newMutableProjection(resultExpressions, groupingKeyAttributes)()
        resultProjection.target(mutableOutput)
        (currentGroupingKey: InternalRow, currentBuffer: MutableRow) => {
          resultProjection(currentGroupingKey)
        }
      case other =>
        sys.error(
          s"Could not evaluate ${nonCompleteAggregateExpressions} because we do not " +
            s"support evaluate modes $other in this iterator.")
    }
  }
/** Initializes buffer values for all aggregate functions. */
protected def initializeBuffer(buffer: MutableRow): Unit = {
expressionAggInitialProjection.target(buffer)(EmptyRow)
var i = 0
while (i < allImperativeAggregateFunctions.length) {
allImperativeAggregateFunctions(i).initialize(buffer)
i += 1
}
}
  /**
   * Creates a new aggregation buffer and initializes buffer values
   * for all aggregate functions. Implemented by concrete iterator
   * subclasses, which choose the buffer representation.
   */
  protected def newBuffer: MutableRow
}
| chenc10/Spark-PAF | sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregationIterator.scala | Scala | apache-2.0 | 19,611 |
package com.twitter.inject.thrift.internal.filters
import com.twitter.finagle.{FailureFlags, Service, SimpleFilter}
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.util.{Throw, Try, Stopwatch, Future}
import java.util.concurrent.TimeUnit
private[thrift] class LatencyFilter[Req, Rep](
statsReceiver: StatsReceiver,
statName: String = "latency",
timeUnit: TimeUnit = TimeUnit.MILLISECONDS)
extends SimpleFilter[Req, Rep] {
private val latencyStat = statsReceiver.stat(s"${statName}_$latencyStatSuffix")
override def apply(request: Req, service: Service[Req, Rep]): Future[Rep] = {
val elapsed = Stopwatch.start()
service(request).respond { response =>
if (!isBlackHoleResponse(response)) {
latencyStat.add(elapsed().inUnit(timeUnit).toFloat)
}
}
}
/* Private */
// Based on `c.t.finagle.service.StatsFilter#isBlackholeResponse`
private def isBlackHoleResponse(rep: Try[Rep]): Boolean = rep match {
case Throw(f: FailureFlags[_]) if f.isFlagged(FailureFlags.Ignorable) =>
// We blackhole this request. It doesn't count for anything.
true
case _ =>
false
}
// Based on `c.t.finagle.service.StatsFilter#latencyStatSuffix`
private def latencyStatSuffix: String = {
timeUnit match {
case TimeUnit.NANOSECONDS => "ns"
case TimeUnit.MICROSECONDS => "us"
case TimeUnit.MILLISECONDS => "ms"
case TimeUnit.SECONDS => "secs"
case _ => timeUnit.toString.toLowerCase
}
}
}
| twitter/finatra | inject/inject-thrift-client/src/main/scala/com/twitter/inject/thrift/internal/filters/LatencyFilter.scala | Scala | apache-2.0 | 1,511 |
package com.twitter.finagle.thrift.transport.netty3
import org.jboss.netty.buffer.ChannelBuffer
import org.junit.runner.RunWith
import org.mockito.Mockito.{verify, when}
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
@RunWith(classOf[JUnitRunner])
class ChannelBufferTransportTest extends FunSuite with MockitoSugar {

  /** Fresh mock buffer plus a transport wrapping it, one instance per test. */
  class ChannelContext {
    val buf = mock[ChannelBuffer]
    lazy val t = new ChannelBufferToTransport(buf)
  }

  val bb = "hello".getBytes

  test("ChannelBufferToTransport writes bytes to the underlying ChannelBuffer") {
    val c = new ChannelContext
    import c._
    t.write(bb, 0, 1)
    verify(buf).writeBytes(bb, 0, 1)
    t.write(bb, 1, 2)
    verify(buf).writeBytes(bb, 1, 2)
    t.write(bb, 0, 5)
    // Fixed copy-paste error: this previously re-verified (bb, 1, 2),
    // leaving the third write unchecked.
    verify(buf).writeBytes(bb, 0, 5)
  }

  test("ChannelBufferToTransport reads bytes from the underlying ChannelBuffer") {
    val c = new ChannelContext
    import c._
    val nReadable = 5
    when(buf.readableBytes).thenReturn(nReadable)
    val b = new Array[Byte](nReadable)
    // Reads are capped at the number of readable bytes in the buffer.
    assert(t.read(b, 0, 10) == nReadable)
    assert(t.read(b, 0, 3) == 3)
  }
}
| spockz/finagle | finagle-thrift/src/test/scala/com/twitter/finagle/thrift/transport/netty3/ChannelBufferTransportTest.scala | Scala | apache-2.0 | 1,160 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.evaluation
import org.scalatest.FunSuite
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.rdd.RDD
class MultilabelMetricsSuite extends FunSuite with MLlibTestSparkContext {
  test("Multilabel evaluation metrics") {
    /*
     * Documents true labels (5x class0, 3x class1, 4x class2):
     * doc 0 - predict 0, 1 - class 0, 2
     * doc 1 - predict 0, 2 - class 0, 1
     * doc 2 - predict none - class 0
     * doc 3 - predict 2 - class 2
     * doc 4 - predict 2, 0 - class 2, 0
     * doc 5 - predict 0, 1, 2 - class 0, 1
     * doc 6 - predict 1 - class 1, 2
     *
     * predicted classes
     * class 0 - doc 0, 1, 4, 5 (total 4)
     * class 1 - doc 0, 5, 6 (total 3)
     * class 2 - doc 1, 3, 4, 5 (total 4)
     *
     * true classes
     * class 0 - doc 0, 1, 2, 4, 5 (total 5)
     * class 1 - doc 1, 5, 6 (total 3)
     * class 2 - doc 0, 3, 4, 6 (total 4)
     *
     */
    // Pairs of (predicted labels, true labels), one entry per document above.
    val scoreAndLabels: RDD[(Array[Double], Array[Double])] = sc.parallelize(
      Seq((Array(0.0, 1.0), Array(0.0, 2.0)),
        (Array(0.0, 2.0), Array(0.0, 1.0)),
        (Array(), Array(0.0)),
        (Array(2.0), Array(2.0)),
        (Array(2.0, 0.0), Array(2.0, 0.0)),
        (Array(0.0, 1.0, 2.0), Array(0.0, 1.0)),
        (Array(1.0), Array(1.0, 2.0))), 2)
    val metrics = new MultilabelMetrics(scoreAndLabels)
    val delta = 0.00001
    // Per-class precision: true positives / predicted positives.
    val precision0 = 4.0 / (4 + 0)
    val precision1 = 2.0 / (2 + 1)
    val precision2 = 2.0 / (2 + 2)
    // Per-class recall: true positives / actual positives.
    val recall0 = 4.0 / (4 + 1)
    val recall1 = 2.0 / (2 + 1)
    val recall2 = 2.0 / (2 + 2)
    // Per-class F1: harmonic mean of precision and recall.
    val f1measure0 = 2 * precision0 * recall0 / (precision0 + recall0)
    val f1measure1 = 2 * precision1 * recall1 / (precision1 + recall1)
    val f1measure2 = 2 * precision2 * recall2 / (precision2 + recall2)
    // Micro-averaged metrics pool true/false positives across all classes.
    val sumTp = 4 + 2 + 2
    assert(sumTp == (1 + 1 + 0 + 1 + 2 + 2 + 1))
    val microPrecisionClass = sumTp.toDouble / (4 + 0 + 2 + 1 + 2 + 2)
    val microRecallClass = sumTp.toDouble / (4 + 1 + 2 + 1 + 2 + 2)
    val microF1MeasureClass = 2.0 * sumTp.toDouble /
      (2 * sumTp.toDouble + (1 + 1 + 2) + (0 + 1 + 2))
    // Document-based (macro) averages: one term per document, averaged over 7 docs.
    val macroPrecisionDoc = 1.0 / 7 *
      (1.0 / 2 + 1.0 / 2 + 0 + 1.0 / 1 + 2.0 / 2 + 2.0 / 3 + 1.0 / 1.0)
    val macroRecallDoc = 1.0 / 7 *
      (1.0 / 2 + 1.0 / 2 + 0 / 1 + 1.0 / 1 + 2.0 / 2 + 2.0 / 2 + 1.0 / 2)
    val macroF1MeasureDoc = (1.0 / 7) *
      2 * ( 1.0 / (2 + 2) + 1.0 / (2 + 2) + 0 + 1.0 / (1 + 1) +
        2.0 / (2 + 2) + 2.0 / (3 + 2) + 1.0 / (1 + 2) )
    // Hamming loss: fraction of (document, label) cells that disagree.
    val hammingLoss = (1.0 / (7 * 3)) * (2 + 2 + 1 + 0 + 0 + 1 + 1)
    // Subset accuracy: fraction of documents predicted exactly right (docs 3 and 4).
    val strictAccuracy = 2.0 / 7
    val accuracy = 1.0 / 7 * (1.0 / 3 + 1.0 /3 + 0 + 1.0 / 1 + 2.0 / 2 + 2.0 / 3 + 1.0 / 2)
    assert(math.abs(metrics.precision(0.0) - precision0) < delta)
    assert(math.abs(metrics.precision(1.0) - precision1) < delta)
    assert(math.abs(metrics.precision(2.0) - precision2) < delta)
    assert(math.abs(metrics.recall(0.0) - recall0) < delta)
    assert(math.abs(metrics.recall(1.0) - recall1) < delta)
    assert(math.abs(metrics.recall(2.0) - recall2) < delta)
    assert(math.abs(metrics.f1Measure(0.0) - f1measure0) < delta)
    assert(math.abs(metrics.f1Measure(1.0) - f1measure1) < delta)
    assert(math.abs(metrics.f1Measure(2.0) - f1measure2) < delta)
    assert(math.abs(metrics.microPrecision - microPrecisionClass) < delta)
    assert(math.abs(metrics.microRecall - microRecallClass) < delta)
    assert(math.abs(metrics.microF1Measure - microF1MeasureClass) < delta)
    assert(math.abs(metrics.precision - macroPrecisionDoc) < delta)
    assert(math.abs(metrics.recall - macroRecallDoc) < delta)
    assert(math.abs(metrics.f1Measure - macroF1MeasureDoc) < delta)
    assert(math.abs(metrics.hammingLoss - hammingLoss) < delta)
    assert(math.abs(metrics.subsetAccuracy - strictAccuracy) < delta)
    assert(math.abs(metrics.accuracy - accuracy) < delta)
    assert(metrics.labels.sameElements(Array(0.0, 1.0, 2.0)))
  }
}
| trueyao/spark-lever | mllib/src/test/scala/org/apache/spark/mllib/evaluation/MultilabelMetricsSuite.scala | Scala | apache-2.0 | 4,745 |
/**
* Copyright (C) 2007 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.xbl
import org.apache.commons.lang3.StringUtils
import org.orbeon.oxf.util.ScalaUtils.CodePointsOps
// Poor man's CSS selector parser. See XBLTransformerTest for the supported subset of CSS.
// TODO: handle [att], [att=val], [att~=val], [att|=val]
object CSSParser {

  // Convert a CSS selector to XPath.
  // Comma-separated selectors become a union ("|") of XPath expressions.
  // Within one selector, a descendant combinator (whitespace) maps to "//" and
  // a child combinator (">") maps to "/". ":root" maps to the context node ".".
  def toXPath(cssSelector: String): String = {
    val sb = new StringBuilder
    val selectors = StringUtils.split(cssSelector, ',')
    var firstSelector = true
    for (selector ← selectors) {
      if (! firstSelector)
        sb append '|'
      val pathsElements = StringUtils.split(selector.trimAllToEmpty, ' ')
      var firstElement = true
      // Tracks whether the previous token was ">", so the next element is
      // appended directly after "/" instead of "//".
      var wasChildAxis = false
      for (pathElement ← pathsElements) {
        // Appends one path element; CSS namespace separator "|" becomes XPath ":".
        // Always returns false (i.e. "the element just written was not a '>'").
        def appendPathElement() = {
          if (Set(":root", "*:root")(pathElement))
            sb append "."
          else
            sb append pathElement.replace('|', ':').trimAllToEmpty
          false
        }
        wasChildAxis =
          if (firstElement) {
            // First path element
            if (Set(":root", "*:root")(pathElement)) {
              appendPathElement()
            } else if (pathElement == ">") {
              sb append "./"
              true
            } else {
              // Match anywhere in the tree by default.
              sb append "descendant-or-self::"
              appendPathElement()
            }
          } else {
            // Subsequent path element
            if (pathElement == ">") {
              sb append '/'
              true
            } else if (! wasChildAxis) {
              sb append "//"
              appendPathElement()
            } else {
              appendPathElement()
            }
          }
        firstElement = false
      }
      firstSelector = false
    }
    sb.toString
  }
}
| joansmith/orbeon-forms | src/main/scala/org/orbeon/oxf/xforms/xbl/CSSParser.scala | Scala | lgpl-2.1 | 2,444 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.harness
import org.apache.flink.api.common.time.Time
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.api.bridge.scala.internal.StreamTableEnvironmentImpl
import org.apache.flink.table.runtime.types.CRow
import org.apache.flink.table.utils.{Top3WithEmitRetractValue, Top3WithMapView}
import org.apache.flink.types.Row
import org.junit.Test
import java.lang.{Integer => JInt}
import java.util.concurrent.ConcurrentLinkedQueue
import scala.collection.mutable
/**
 * Harness tests for table aggregate functions (flatAggregate). Each test feeds
 * rows through a keyed group-by operator and checks the exact sequence of
 * retract/accumulate records emitted, including state-cleanup behavior driven
 * by the 2-second idle state retention configured below.
 */
class TableAggregateHarnessTest extends HarnessTestBase {
  // Idle state retention of 2s; with processing time starting at 1 the cleanup
  // timer fires once the clock is advanced past 3001.
  private val tableConfig = new TableConfig {
    override def getMinIdleStateRetentionTime: Long = Time.seconds(2).toMilliseconds
    override def getMaxIdleStateRetentionTime: Long = Time.seconds(2).toMilliseconds
  }
  val data = new mutable.MutableList[(Int, Int)]
  @Test
  def testTableAggregate(): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tEnv = StreamTableEnvironmentImpl.create(
      env,
      EnvironmentSettings.newInstance().useOldPlanner().build(),
      tableConfig)
    val top3 = new Top3WithMapView
    tEnv.registerFunction("top3", top3)
    val source = env.fromCollection(data).toTable(tEnv, 'a, 'b)
    val resultTable = source
      .groupBy('a)
      .flatAggregate(top3('b) as ('b1, 'b2))
      .select('a, 'b1, 'b2)
    val testHarness = createHarnessTester[Int, CRow, CRow](
      resultTable.toRetractStream[Row], "groupBy: (a)")
    testHarness.open()
    val expectedOutput = new ConcurrentLinkedQueue[Object]()
    // register cleanup timer with 3001
    testHarness.setProcessingTime(1)
    // input with two columns: key and value
    testHarness.processElement(new StreamRecord(CRow(1: JInt, 1: JInt), 1))
    // output with three columns: key, value, value. The value is in the top3 of the key
    expectedOutput.add(new StreamRecord(CRow(1: JInt, 1: JInt, 1: JInt), 1))
    // Each new element causes a full retraction of the previous top3 followed
    // by re-emission of the updated top3.
    testHarness.processElement(new StreamRecord(CRow(1: JInt, 2: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(false, 1: JInt, 1: JInt, 1: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(1: JInt, 1: JInt, 1: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(1: JInt, 2: JInt, 2: JInt), 1))
    testHarness.processElement(new StreamRecord(CRow(1: JInt, 3: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(false, 1: JInt, 1: JInt, 1: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(false, 1: JInt, 2: JInt, 2: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(1: JInt, 1: JInt, 1: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(1: JInt, 2: JInt, 2: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(1: JInt, 3: JInt, 3: JInt), 1))
    testHarness.processElement(new StreamRecord(CRow(1: JInt, 2: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(false, 1: JInt, 1: JInt, 1: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(false, 1: JInt, 2: JInt, 2: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(false, 1: JInt, 3: JInt, 3: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(1: JInt, 2: JInt, 2: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(1: JInt, 2: JInt, 2: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(1: JInt, 3: JInt, 3: JInt), 1))
    // ingest data with key value of 2
    testHarness.processElement(new StreamRecord(CRow(2: JInt, 2: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(2: JInt, 2: JInt, 2: JInt), 1))
    // trigger cleanup timer; state is dropped, so the next element starts fresh
    testHarness.setProcessingTime(3002)
    testHarness.processElement(new StreamRecord(CRow(1: JInt, 2: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(1: JInt, 2: JInt, 2: JInt), 1))
    val result = testHarness.getOutput
    verify(expectedOutput, result)
    testHarness.close()
  }
  @Test
  def testTableAggregateEmitRetractValueIncrementally(): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tEnv = StreamTableEnvironmentImpl.create(
      env,
      EnvironmentSettings.newInstance().useOldPlanner().build(),
      tableConfig)
    // This function emits incremental retractions only, so no full
    // retract-and-re-emit sequences are expected below.
    val top3 = new Top3WithEmitRetractValue
    val source = env.fromCollection(data).toTable(tEnv, 'a, 'b)
    val resultTable = source
      .groupBy('a)
      .flatAggregate(top3('b) as ('b1, 'b2))
      .select('a, 'b1, 'b2)
    val testHarness = createHarnessTester[Int, CRow, CRow](
      resultTable.toRetractStream[Row], "groupBy: (a)")
    testHarness.open()
    val expectedOutput = new ConcurrentLinkedQueue[Object]()
    // register cleanup timer with 3001
    testHarness.setProcessingTime(1)
    // input with two columns: key and value
    testHarness.processElement(new StreamRecord(CRow(1: JInt, 1: JInt), 1))
    // output with three columns: key, value, value. The value is in the top3 of the key
    expectedOutput.add(new StreamRecord(CRow(1: JInt, 1: JInt, 1: JInt), 1))
    testHarness.processElement(new StreamRecord(CRow(1: JInt, 2: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(1: JInt, 2: JInt, 2: JInt), 1))
    testHarness.processElement(new StreamRecord(CRow(1: JInt, 3: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(1: JInt, 3: JInt, 3: JInt), 1))
    testHarness.processElement(new StreamRecord(CRow(1: JInt, 2: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(false, 1: JInt, 1: JInt, 1: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(1: JInt, 2: JInt, 2: JInt), 1))
    // ingest data with key value of 2
    testHarness.processElement(new StreamRecord(CRow(2: JInt, 2: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(2: JInt, 2: JInt, 2: JInt), 1))
    // trigger cleanup timer
    testHarness.setProcessingTime(3002)
    testHarness.processElement(new StreamRecord(CRow(1: JInt, 2: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(1: JInt, 2: JInt, 2: JInt), 1))
    val result = testHarness.getOutput
    verify(expectedOutput, result)
    testHarness.close()
  }
  @Test
  def testTableAggregateWithRetractInput(): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tEnv = StreamTableEnvironmentImpl.create(
      env,
      EnvironmentSettings.newInstance().useOldPlanner().build(),
      tableConfig)
    val top3 = new Top3WithMapView
    tEnv.registerFunction("top3", top3)
    val source = env.fromCollection(data).toTable(tEnv, 'a, 'b)
    // The upstream sum aggregation produces a retract stream, so the table
    // aggregate here must handle retraction (false-flagged) input records.
    val resultTable = source
      .groupBy('a)
      .select('b.sum as 'b)
      .flatAggregate(top3('b) as ('b1, 'b2))
      .select('b1, 'b2)
    val testHarness = createHarnessTester[Int, CRow, CRow](
      resultTable.toRetractStream[Row], "select: (Top3WithMapView")
    testHarness.open()
    val expectedOutput = new ConcurrentLinkedQueue[Object]()
    // register cleanup timer with 3001
    testHarness.setProcessingTime(1)
    // input with one column: the (summed) value
    testHarness.processElement(new StreamRecord(CRow(1: JInt), 1))
    // output with two columns: value, value. The value is in the top3
    expectedOutput.add(new StreamRecord(CRow(1: JInt, 1: JInt), 1))
    // A retraction record removes the value from the top3 state.
    testHarness.processElement(new StreamRecord(CRow(false, 1: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(false, 1: JInt, 1: JInt), 1))
    testHarness.processElement(new StreamRecord(CRow(3: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(3: JInt, 3: JInt), 1))
    testHarness.processElement(new StreamRecord(CRow(4: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(false, 3: JInt, 3: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(3: JInt, 3: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(4: JInt, 4: JInt), 1))
    testHarness.processElement(new StreamRecord(CRow(false, 3: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(false, 3: JInt, 3: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(false, 4: JInt, 4: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(4: JInt, 4: JInt), 1))
    testHarness.processElement(new StreamRecord(CRow(5: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(false, 4: JInt, 4: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(4: JInt, 4: JInt), 1))
    expectedOutput.add(new StreamRecord(CRow(5: JInt, 5: JInt), 1))
    val result = testHarness.getOutput
    verify(expectedOutput, result)
    testHarness.close()
  }
}
| GJL/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/runtime/harness/TableAggregateHarnessTest.scala | Scala | apache-2.0 | 9,347 |
/*******************************************************************************/
/* */
/* Copyright (C) 2017 by Max Lv <max.c.lv@gmail.com> */
/* Copyright (C) 2017 by Mygod Studio <contact-shadowsocks-android@mygod.be> */
/* */
/* This program is free software: you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation, either version 3 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* */
/*******************************************************************************/
package com.github.shadowsocks.acl
import android.util.Log
import com.evernote.android.job.JobCreator
/**
* “I create jobs all day long.
* - Donald Trump, 2015
*
* Source: http://www.cnn.com/2015/09/24/politics/donald-trump-marco-rubio-foreign-policy/
*
* @author !Mygod
*/
object DonaldTrump extends JobCreator {
  /**
   * Builds the job registered under a colon-delimited tag of the form
   * `<jobTag>:<argument>`.
   *
   * @param tag the tag the job was scheduled with
   * @return the matching [[AclSyncJob]], or `null` when the tag is unrecognised
   *         (the JobCreator contract treats `null` as "not mine")
   */
  def create(tag: String): AclSyncJob = {
    val pieces = tag.split(":")
    if (pieces(0) == AclSyncJob.TAG) {
      // pieces(1) is the job argument; a tag without a ':' will throw here,
      // exactly as the original stable-identifier match did.
      new AclSyncJob(pieces(1))
    } else {
      Log.w("DonaldTrump", "Unknown job tag: " + tag)
      null
    }
  }
}
| hangox/shadowsocks-android | mobile/src/main/scala/com/github/shadowsocks/acl/DonaldTrump.scala | Scala | gpl-3.0 | 2,136 |
/*
* Copyright 2014 Treode, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.treode.disk.stubs
import scala.util.Random
import com.treode.async.{Async, Scheduler}
import com.treode.disk.Disk
/** A [[Disk]] variant used in tests; exposes hooks to drive checkpoints and
  * drains directly rather than waiting for the real system's triggers.
  */
trait StubDisk extends Disk {
  /** Force a checkpoint. */
  def checkpoint(): Async [Unit]
  /** Simulate draining a disk; compacts some object generations. */
  def drain()
}
object StubDisk {

  /** Begins recovery of a stub disk.
    *
    * @param random source of randomness for the stub's simulated behavior
    * @param scheduler schedules the asynchronous recovery work
    * @return a recovery handle from which the stub disk can be reattached
    */
  def recover () (implicit random: Random, scheduler: Scheduler): StubDiskRecovery = {
    // The agent picks up the event sink implicitly.
    implicit val stubEvents: StubDiskEvents = new StubDiskEvents
    new StubRecoveryAgent()
  }
}
| Treode/store | disk/stub/com/treode/disk/stubs/StubDisk.scala | Scala | apache-2.0 | 1,091 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// Licence: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.indexer
import org.ensime.fixture._
import org.scalatest._
import org.ensime.util.file._
import scala.concurrent._
import scala.concurrent.duration._
/**
 * Integration checks for the search service: refreshing the index against
 * compiled classfiles, and fuzzy class/method searching over the JDK,
 * dependencies, and the test project. The service instance is supplied by
 * [[SharedSearchServiceFixture]]; assertion helpers come from
 * [[SearchServiceTestUtils]].
 */
class SearchServiceSpec extends WordSpec with Matchers
    with SharedTestKitFixture
    with SharedSearchServiceFixture
    with SearchServiceTestUtils {
  // Project configuration used by the shared fixture.
  def original = EnsimeConfigFixture.SimpleTestProject
  "search refreshing" should {
    "parse all files on a pristine structure" in withSearchService { implicit service =>
      val (deleted, indexed) = refresh()
      deleted shouldBe 0
      indexed should be > 0
    }
    // A second refresh on an unchanged tree must be a no-op.
    "not refresh files that have not changed" in withSearchService { implicit service =>
      refresh() shouldBe ((0, 0))
    }
    "refresh files that have 'changed'" in {
      withSearchService { (config, service) =>
        implicit val s = service
        val now = System.currentTimeMillis()
        // Bump the mtime of every target classfile so the service sees them as stale.
        for {
          m <- config.modules.values
          r <- m.targetDirs ++ m.testTargetDirs
          f <- r.tree
        } {
          // simulate a full recompile
          f.setLastModified(now)
        }
        val (deleted, indexed) = refresh()
        deleted should be > 0
        indexed should be > 0
      }
    }
    "remove classfiles that have been deleted" in {
      withSearchService { (config, service) =>
        implicit val s = service
        val classfile = config.subprojects.head.targetDirs.head / "org/example/Foo.class"
        classfile shouldBe 'exists
        classfile.delete()
        // One deletion picked up, nothing re-indexed.
        refresh() shouldBe ((1, 0))
      }
    }
  }
  "class searching" should {
    "return results from J2SE" in withSearchService { implicit service =>
      searchesClasses(
        "java.lang.String",
        "String", "string",
        "j.l.str", "j l str"
      )
    }
    "return results from dependencies" in withSearchService { implicit service =>
      searchesClasses(
        "org.scalatest.FunSuite",
        "FunSuite", "funsuite", "funsu",
        "o s Fun"
      )
    }
    "return results from the project" in withSearchService { implicit service =>
      searchesClasses(
        "org.example.Bloo",
        "o e bloo"
      )
      searchesClasses(
        "org.example.Blue$",
        "o e blue"
      )
      searchesClasses(
        "org.example.CaseClassWithCamelCaseName",
        "CaseClassWith", "caseclasswith",
        "o e Case", "o.e.caseclasswith",
        "CCWC" // <= CamelCaseAwesomeNess
      )
    }
    "return results from package objects" in withSearchService { implicit service =>
      searchClasses(
        "org.example.Blip$",
        "Blip"
      )
      searchClasses(
        "org.example.Blop",
        "Blop"
      )
    }
  }
  "class and method searching" should {
    "return results from classes" in withSearchService { implicit service =>
      searchesClassesAndMethods(
        "java.lang.String",
        "String", "string",
        "j.l.str", "j l str"
      )
    }
    "return results from static fields" in withSearchService { implicit service =>
      // NOTE(review): despite the name, this expects NO hits for field queries.
      searchesEmpty(
        "CASE_INSENSITIVE", "case_insensitive",
        "case_"
      )
    }
    "not return results from instance fields" in withSearchService { implicit service =>
      searchesEmpty(
        "java.awt.Point.x"
      )
    }
    "return results from static methods" in withSearchService { implicit service =>
      searchesClassesAndMethods(
        "java.lang.Runtime.addShutdownHook",
        "addShutdownHook"
      )
    }
    "return results from instance methods" in withSearchService { implicit service =>
      searchesClassesAndMethods(
        "java.lang.Runtime.availableProcessors",
        "availableProcessors", "availableP"
      )
    }
  }
  "exact searches" should {
    "find type aliases" in withSearchService { implicit service =>
      assert(service.findUnique("org.scalatest.fixture.ConfigMapFixture$FixtureParam").isDefined)
    }
  }
}
/**
 * Assertion helpers shared by search-service specs. All helpers take the
 * service implicitly so specs can bind it once per fixture.
 */
trait SearchServiceTestUtils {

  /** Blocks until the index refresh completes and returns (deleted, indexed). */
  def refresh()(implicit service: SearchService): (Int, Int) =
    Await.result(service.refresh(), Duration.Inf)

  /** Common assertions for a single query: result count bounded by `max`,
    * at least one hit, and `expect` present among the returned FQNs.
    * Extracted because class and class+method searches shared this verbatim.
    */
  private def assertFound(expect: String, query: String, max: Int, fqns: Seq[String]): Unit = {
    val info = s"'$query' expected '$expect')"
    assert(fqns.size <= max, s"${fqns.size} $info")
    assert(fqns.nonEmpty, s"$info but was empty")
    // when we improve the search quality, we could
    // make this really look only at #1
    assert(fqns contains expect, s"$info got '$fqns'")
  }

  /** Runs a class search for `query` and asserts `expect` is among the hits. */
  def searchClasses(expect: String, query: String)(implicit service: SearchService) = {
    val max = 10
    val results = service.searchClasses(query, max)
    assertFound(expect, query, max, results.map(_.fqn))
    results
  }

  /** Asserts `expect` is found by its own name and by every extra query. */
  def searchesClasses(expect: String, queries: String*)(implicit service: SearchService) =
    (expect :: queries.toList).foreach(searchClasses(expect, _))

  /** Runs a combined class+method search for `query` and asserts `expect` is among the hits. */
  def searchClassesAndMethods(expect: String, query: String)(implicit service: SearchService) = {
    val max = 10
    val results = service.searchClassesMethods(List(query), max)
    assertFound(expect, query, max, results.map(_.fqn))
    results
  }

  /** Asserts that `query` yields no class or method hits at all. */
  def searchExpectEmpty(query: String)(implicit service: SearchService) = {
    val max = 1
    val results = service.searchClassesMethods(List(query), max)
    assert(results.isEmpty, "expected empty results from %s".format(query))
    results
  }

  /** Asserts every query yields no hits. */
  def searchesEmpty(queries: String*)(implicit service: SearchService) =
    queries.toList.foreach(searchExpectEmpty)

  /** Asserts `expect` is found by its own name and by every extra query (classes + methods). */
  def searchesClassesAndMethods(expect: String, queries: String*)(implicit service: SearchService) =
    (expect :: queries.toList).foreach(searchClassesAndMethods(expect, _))
}
| j-mckitrick/ensime-sbt | src/sbt-test/ensime-sbt/ensime-server/core/src/it/scala/org/ensime/indexer/SearchServiceSpec.scala | Scala | apache-2.0 | 5,962 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import java.io.File
import java.util.{Locale, TimeZone}
import scala.util.Try
import org.scalatest.BeforeAndAfter
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.spark.{SparkFiles, SparkException}
import org.apache.spark.sql.{AnalysisException, DataFrame, Row}
import org.apache.spark.sql.catalyst.expressions.Cast
import org.apache.spark.sql.catalyst.plans.logical.Project
import org.apache.spark.sql.hive._
import org.apache.spark.sql.hive.test.TestHive
import org.apache.spark.sql.hive.test.TestHive._
/** Minimal two-column row used to register temp tables in these tests. */
case class TestData(a: Int, b: String)
/**
* A set of test cases expressed in Hive QL that are not covered by the tests
* included in the hive distribution.
*/
class HiveQuerySuite extends HiveComparisonTest with BeforeAndAfter {
private val originalTimeZone = TimeZone.getDefault
private val originalLocale = Locale.getDefault
import org.apache.spark.sql.hive.test.TestHive.implicits._
  // Pin JVM-wide timezone and locale so golden answers for timestamp/format
  // sensitive queries are stable across machines; restored in afterAll().
  override def beforeAll() {
    TestHive.cacheTables = true
    // Timezone is fixed to America/Los_Angeles for those timezone sensitive tests (timestamp_*)
    TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"))
    // Add Locale setting
    Locale.setDefault(Locale.US)
  }
  // Restore the JVM defaults captured at construction time and drop the
  // temporary UDTF — NOTE(review): its registration is not visible in this chunk.
  override def afterAll() {
    TestHive.cacheTables = false
    TimeZone.setDefault(originalTimeZone)
    Locale.setDefault(originalLocale)
    sql("DROP TEMPORARY FUNCTION udtf_count2")
  }
test("SPARK-4908: concurrent hive native commands") {
(1 to 100).par.map { _ =>
sql("USE default")
sql("SHOW DATABASES")
}
}
createQueryTest("SPARK-8976 Wrong Result for Rollup #1",
"""
SELECT count(*) AS cnt, key % 5,GROUPING__ID FROM src group by key%5 WITH ROLLUP
""".stripMargin)
createQueryTest("SPARK-8976 Wrong Result for Rollup #2",
"""
SELECT
count(*) AS cnt,
key % 5 as k1,
key-5 as k2,
GROUPING__ID as k3
FROM src group by key%5, key-5
WITH ROLLUP ORDER BY cnt, k1, k2, k3 LIMIT 10
""".stripMargin)
createQueryTest("SPARK-8976 Wrong Result for Rollup #3",
"""
SELECT
count(*) AS cnt,
key % 5 as k1,
key-5 as k2,
GROUPING__ID as k3
FROM (SELECT key, key%2, key - 5 FROM src) t group by key%5, key-5
WITH ROLLUP ORDER BY cnt, k1, k2, k3 LIMIT 10
""".stripMargin)
createQueryTest("SPARK-8976 Wrong Result for CUBE #1",
"""
SELECT count(*) AS cnt, key % 5,GROUPING__ID FROM src group by key%5 WITH CUBE
""".stripMargin)
createQueryTest("SPARK-8976 Wrong Result for CUBE #2",
"""
SELECT
count(*) AS cnt,
key % 5 as k1,
key-5 as k2,
GROUPING__ID as k3
FROM (SELECT key, key%2, key - 5 FROM src) t group by key%5, key-5
WITH CUBE ORDER BY cnt, k1, k2, k3 LIMIT 10
""".stripMargin)
createQueryTest("SPARK-8976 Wrong Result for GroupingSet",
"""
SELECT
count(*) AS cnt,
key % 5 as k1,
key-5 as k2,
GROUPING__ID as k3
FROM (SELECT key, key%2, key - 5 FROM src) t group by key%5, key-5
GROUPING SETS (key%5, key-5) ORDER BY cnt, k1, k2, k3 LIMIT 10
""".stripMargin)
createQueryTest("insert table with generator with column name",
"""
| CREATE TABLE gen_tmp (key Int);
| INSERT OVERWRITE TABLE gen_tmp
| SELECT explode(array(1,2,3)) AS val FROM src LIMIT 3;
| SELECT key FROM gen_tmp ORDER BY key ASC;
""".stripMargin)
createQueryTest("insert table with generator with multiple column names",
"""
| CREATE TABLE gen_tmp (key Int, value String);
| INSERT OVERWRITE TABLE gen_tmp
| SELECT explode(map(key, value)) as (k1, k2) FROM src LIMIT 3;
| SELECT key, value FROM gen_tmp ORDER BY key, value ASC;
""".stripMargin)
createQueryTest("insert table with generator without column name",
"""
| CREATE TABLE gen_tmp (key Int);
| INSERT OVERWRITE TABLE gen_tmp
| SELECT explode(array(1,2,3)) FROM src LIMIT 3;
| SELECT key FROM gen_tmp ORDER BY key ASC;
""".stripMargin)
test("multiple generators in projection") {
intercept[AnalysisException] {
sql("SELECT explode(array(key, key)), explode(array(key, key)) FROM src").collect()
}
intercept[AnalysisException] {
sql("SELECT explode(array(key, key)) as k1, explode(array(key, key)) FROM src").collect()
}
}
createQueryTest("! operator",
"""
|SELECT a FROM (
| SELECT 1 AS a UNION ALL SELECT 2 AS a) t
|WHERE !(a>1)
""".stripMargin)
createQueryTest("constant object inspector for generic udf",
"""SELECT named_struct(
lower("AA"), "10",
repeat(lower("AA"), 3), "11",
lower(repeat("AA", 3)), "12",
printf("bb%d", 12), "13",
repeat(printf("s%d", 14), 2), "14") FROM src LIMIT 1""")
createQueryTest("NaN to Decimal",
"SELECT CAST(CAST('NaN' AS DOUBLE) AS DECIMAL(1,1)) FROM src LIMIT 1")
createQueryTest("constant null testing",
"""SELECT
|IF(FALSE, CAST(NULL AS STRING), CAST(1 AS STRING)) AS COL1,
|IF(TRUE, CAST(NULL AS STRING), CAST(1 AS STRING)) AS COL2,
|IF(FALSE, CAST(NULL AS INT), CAST(1 AS INT)) AS COL3,
|IF(TRUE, CAST(NULL AS INT), CAST(1 AS INT)) AS COL4,
|IF(FALSE, CAST(NULL AS DOUBLE), CAST(1 AS DOUBLE)) AS COL5,
|IF(TRUE, CAST(NULL AS DOUBLE), CAST(1 AS DOUBLE)) AS COL6,
|IF(FALSE, CAST(NULL AS BOOLEAN), CAST(1 AS BOOLEAN)) AS COL7,
|IF(TRUE, CAST(NULL AS BOOLEAN), CAST(1 AS BOOLEAN)) AS COL8,
|IF(FALSE, CAST(NULL AS BIGINT), CAST(1 AS BIGINT)) AS COL9,
|IF(TRUE, CAST(NULL AS BIGINT), CAST(1 AS BIGINT)) AS COL10,
|IF(FALSE, CAST(NULL AS FLOAT), CAST(1 AS FLOAT)) AS COL11,
|IF(TRUE, CAST(NULL AS FLOAT), CAST(1 AS FLOAT)) AS COL12,
|IF(FALSE, CAST(NULL AS SMALLINT), CAST(1 AS SMALLINT)) AS COL13,
|IF(TRUE, CAST(NULL AS SMALLINT), CAST(1 AS SMALLINT)) AS COL14,
|IF(FALSE, CAST(NULL AS TINYINT), CAST(1 AS TINYINT)) AS COL15,
|IF(TRUE, CAST(NULL AS TINYINT), CAST(1 AS TINYINT)) AS COL16,
|IF(FALSE, CAST(NULL AS BINARY), CAST("1" AS BINARY)) AS COL17,
|IF(TRUE, CAST(NULL AS BINARY), CAST("1" AS BINARY)) AS COL18,
|IF(FALSE, CAST(NULL AS DATE), CAST("1970-01-01" AS DATE)) AS COL19,
|IF(TRUE, CAST(NULL AS DATE), CAST("1970-01-01" AS DATE)) AS COL20,
|IF(FALSE, CAST(NULL AS TIMESTAMP), CAST(1 AS TIMESTAMP)) AS COL21,
|IF(TRUE, CAST(NULL AS TIMESTAMP), CAST(1 AS TIMESTAMP)) AS COL22,
|IF(FALSE, CAST(NULL AS DECIMAL), CAST(1 AS DECIMAL)) AS COL23,
|IF(TRUE, CAST(NULL AS DECIMAL), CAST(1 AS DECIMAL)) AS COL24
|FROM src LIMIT 1""".stripMargin)
createQueryTest("constant array",
"""
|SELECT sort_array(
| sort_array(
| array("hadoop distributed file system",
| "enterprise databases", "hadoop map-reduce")))
|FROM src LIMIT 1;
""".stripMargin)
createQueryTest("null case",
"SELECT case when(true) then 1 else null end FROM src LIMIT 1")
createQueryTest("single case",
"""SELECT case when true then 1 else 2 end FROM src LIMIT 1""")
createQueryTest("double case",
"""SELECT case when 1 = 2 then 1 when 2 = 2 then 3 else 2 end FROM src LIMIT 1""")
createQueryTest("case else null",
"""SELECT case when 1 = 2 then 1 when 2 = 2 then 3 else null end FROM src LIMIT 1""")
createQueryTest("having no references",
"SELECT key FROM src GROUP BY key HAVING COUNT(*) > 1")
createQueryTest("no from clause",
"SELECT 1, +1, -1")
createQueryTest("boolean = number",
"""
|SELECT
| 1 = true, 1L = true, 1Y = true, true = 1, true = 1L, true = 1Y,
| 0 = true, 0L = true, 0Y = true, true = 0, true = 0L, true = 0Y,
| 1 = false, 1L = false, 1Y = false, false = 1, false = 1L, false = 1Y,
| 0 = false, 0L = false, 0Y = false, false = 0, false = 0L, false = 0Y,
| 2 = true, 2L = true, 2Y = true, true = 2, true = 2L, true = 2Y,
| 2 = false, 2L = false, 2Y = false, false = 2, false = 2L, false = 2Y
|FROM src LIMIT 1
""".stripMargin)
test("CREATE TABLE AS runs once") {
sql("CREATE TABLE foo AS SELECT 1 FROM src LIMIT 1").collect()
assert(sql("SELECT COUNT(*) FROM foo").collect().head.getLong(0) === 1,
"Incorrect number of rows in created table")
}
createQueryTest("between",
"SELECT * FROM src WHERE key Between 1 and 2")
createQueryTest("div",
"SELECT 1 DIV 2, 1 div 2, 1 dIv 2, 100 DIV 51, 100 DIV 49 FROM src LIMIT 1")
// Jdk version leads to different query output for double, so not use createQueryTest here
test("division") {
val res = sql("SELECT 2 / 1, 1 / 2, 1 / 3, 1 / COUNT(*) FROM src LIMIT 1").collect().head
Seq(2.0, 0.5, 0.3333333333333333, 0.002).zip(res.toSeq).foreach( x =>
assert(x._1 == x._2.asInstanceOf[Double]))
}
createQueryTest("modulus",
"SELECT 11 % 10, IF((101.1 % 100.0) BETWEEN 1.01 AND 1.11, \\"true\\", \\"false\\"), " +
"(101 / 2) % 10 FROM src LIMIT 1")
test("Query expressed in SQL") {
setConf("spark.sql.dialect", "sql")
assert(sql("SELECT 1").collect() === Array(Row(1)))
setConf("spark.sql.dialect", "hiveql")
}
test("Query expressed in HiveQL") {
sql("FROM src SELECT key").collect()
}
test("Query with constant folding the CAST") {
sql("SELECT CAST(CAST('123' AS binary) AS binary) FROM src LIMIT 1").collect()
}
createQueryTest("Constant Folding Optimization for AVG_SUM_COUNT",
"SELECT AVG(0), SUM(0), COUNT(null), COUNT(value) FROM src GROUP BY key")
createQueryTest("Cast Timestamp to Timestamp in UDF",
"""
| SELECT DATEDIFF(CAST(value AS timestamp), CAST('2002-03-21 00:00:00' AS timestamp))
| FROM src LIMIT 1
""".stripMargin)
createQueryTest("Date comparison test 1",
"""
| SELECT
| CAST(CAST('1970-01-01 22:00:00' AS timestamp) AS date) ==
| CAST(CAST('1970-01-01 23:00:00' AS timestamp) AS date)
| FROM src LIMIT 1
""".stripMargin)
createQueryTest("Simple Average",
"SELECT AVG(key) FROM src")
createQueryTest("Simple Average + 1",
"SELECT AVG(key) + 1.0 FROM src")
createQueryTest("Simple Average + 1 with group",
"SELECT AVG(key) + 1.0, value FROM src group by value")
createQueryTest("string literal",
"SELECT 'test' FROM src")
createQueryTest("Escape sequences",
"""SELECT key, '\\\\\\t\\\\' FROM src WHERE key = 86""")
createQueryTest("IgnoreExplain",
"""EXPLAIN SELECT key FROM src""")
createQueryTest("trivial join where clause",
"SELECT * FROM src a JOIN src b WHERE a.key = b.key")
createQueryTest("trivial join ON clause",
"SELECT * FROM src a JOIN src b ON a.key = b.key")
createQueryTest("small.cartesian",
"SELECT a.key, b.key FROM (SELECT key FROM src WHERE key < 1) a JOIN " +
"(SELECT key FROM src WHERE key = 2) b")
createQueryTest("length.udf",
"SELECT length(\\"test\\") FROM src LIMIT 1")
createQueryTest("partitioned table scan",
"SELECT ds, hr, key, value FROM srcpart")
createQueryTest("hash",
"SELECT hash('test') FROM src LIMIT 1")
createQueryTest("create table as",
"""
|CREATE TABLE createdtable AS SELECT * FROM src;
|SELECT * FROM createdtable
""".stripMargin)
createQueryTest("create table as with db name",
"""
|CREATE DATABASE IF NOT EXISTS testdb;
|CREATE TABLE testdb.createdtable AS SELECT * FROM default.src;
|SELECT * FROM testdb.createdtable;
|DROP DATABASE IF EXISTS testdb CASCADE
""".stripMargin)
createQueryTest("create table as with db name within backticks",
"""
|CREATE DATABASE IF NOT EXISTS testdb;
|CREATE TABLE `testdb`.`createdtable` AS SELECT * FROM default.src;
|SELECT * FROM testdb.createdtable;
|DROP DATABASE IF EXISTS testdb CASCADE
""".stripMargin)
createQueryTest("insert table with db name",
"""
|CREATE DATABASE IF NOT EXISTS testdb;
|CREATE TABLE testdb.createdtable like default.src;
|INSERT INTO TABLE testdb.createdtable SELECT * FROM default.src;
|SELECT * FROM testdb.createdtable;
|DROP DATABASE IF EXISTS testdb CASCADE
""".stripMargin)
createQueryTest("insert into and insert overwrite",
"""
|CREATE TABLE createdtable like src;
|INSERT INTO TABLE createdtable SELECT * FROM src;
|INSERT INTO TABLE createdtable SELECT * FROM src1;
|SELECT * FROM createdtable;
|INSERT OVERWRITE TABLE createdtable SELECT * FROM src WHERE key = 86;
|SELECT * FROM createdtable;
""".stripMargin)
test("SPARK-7270: consider dynamic partition when comparing table output") {
sql(s"CREATE TABLE test_partition (a STRING) PARTITIONED BY (b BIGINT, c STRING)")
sql(s"CREATE TABLE ptest (a STRING, b BIGINT, c STRING)")
val analyzedPlan = sql(
"""
|INSERT OVERWRITE table test_partition PARTITION (b=1, c)
|SELECT 'a', 'c' from ptest
""".stripMargin).queryExecution.analyzed
assertResult(false, "Incorrect cast detected\\n" + analyzedPlan) {
var hasCast = false
analyzedPlan.collect {
case p: Project => p.transformExpressionsUp { case c: Cast => hasCast = true; c }
}
hasCast
}
}
createQueryTest("transform",
"SELECT TRANSFORM (key) USING 'cat' AS (tKey) FROM src")
createQueryTest("schema-less transform",
"""
|SELECT TRANSFORM (key, value) USING 'cat' FROM src;
|SELECT TRANSFORM (*) USING 'cat' FROM src;
""".stripMargin)
val delimiter = "'\\t'"
createQueryTest("transform with custom field delimiter",
s"""
|SELECT TRANSFORM (key) ROW FORMAT DELIMITED FIELDS TERMINATED BY ${delimiter}
|USING 'cat' AS (tKey) ROW FORMAT DELIMITED FIELDS TERMINATED BY ${delimiter} FROM src;
""".stripMargin.replaceAll("\\n", " "))
createQueryTest("transform with custom field delimiter2",
s"""
|SELECT TRANSFORM (key, value) ROW FORMAT DELIMITED FIELDS TERMINATED BY ${delimiter}
|USING 'cat' ROW FORMAT DELIMITED FIELDS TERMINATED BY ${delimiter} FROM src;
""".stripMargin.replaceAll("\\n", " "))
createQueryTest("transform with custom field delimiter3",
s"""
|SELECT TRANSFORM (*) ROW FORMAT DELIMITED FIELDS TERMINATED BY ${delimiter}
|USING 'cat' ROW FORMAT DELIMITED FIELDS TERMINATED BY ${delimiter} FROM src;
""".stripMargin.replaceAll("\\n", " "))
createQueryTest("transform with SerDe",
"""
|SELECT TRANSFORM (key, value) ROW FORMAT SERDE
|'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|USING 'cat' AS (tKey, tValue) ROW FORMAT SERDE
|'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' FROM src;
""".stripMargin.replaceAll(System.lineSeparator(), " "))
test("transform with SerDe2") {
sql("CREATE TABLE small_src(key INT, value STRING)")
sql("INSERT OVERWRITE TABLE small_src SELECT key, value FROM src LIMIT 10")
val expected = sql("SELECT key FROM small_src").collect().head
val res = sql(
"""
|SELECT TRANSFORM (key) ROW FORMAT SERDE
|'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
|WITH SERDEPROPERTIES ('avro.schema.literal'='{"namespace":
|"testing.hive.avro.serde","name": "src","type": "record","fields":
|[{"name":"key","type":"int"}]}') USING 'cat' AS (tKey INT) ROW FORMAT SERDE
|'org.apache.hadoop.hive.serde2.avro.AvroSerDe' WITH SERDEPROPERTIES
|('avro.schema.literal'='{"namespace": "testing.hive.avro.serde","name":
|"src","type": "record","fields": [{"name":"key","type":"int"}]}')
|FROM small_src
""".stripMargin.replaceAll(System.lineSeparator(), " ")).collect().head
assert(expected(0) === res(0))
}
createQueryTest("transform with SerDe3",
"""
|SELECT TRANSFORM (*) ROW FORMAT SERDE
|'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' WITH SERDEPROPERTIES
|('serialization.last.column.takes.rest'='true') USING 'cat' AS (tKey, tValue)
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|WITH SERDEPROPERTIES ('serialization.last.column.takes.rest'='true') FROM src;
""".stripMargin.replaceAll(System.lineSeparator(), " "))
createQueryTest("transform with SerDe4",
"""
|SELECT TRANSFORM (*) ROW FORMAT SERDE
|'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' WITH SERDEPROPERTIES
|('serialization.last.column.takes.rest'='true') USING 'cat' ROW FORMAT SERDE
|'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' WITH SERDEPROPERTIES
|('serialization.last.column.takes.rest'='true') FROM src;
""".stripMargin.replaceAll(System.lineSeparator(), " "))
createQueryTest("LIKE",
"SELECT * FROM src WHERE value LIKE '%1%'")
createQueryTest("DISTINCT",
"SELECT DISTINCT key, value FROM src")
createQueryTest("empty aggregate input",
"SELECT SUM(key) FROM (SELECT * FROM src LIMIT 0) a")
createQueryTest("lateral view1",
"SELECT tbl.* FROM src LATERAL VIEW explode(array(1,2)) tbl as a")
createQueryTest("lateral view2",
"SELECT * FROM src LATERAL VIEW explode(array(1,2)) tbl")
createQueryTest("lateral view3",
"FROM src SELECT key, D.* lateral view explode(array(key+3, key+4)) D as CX")
// scalastyle:off
createQueryTest("lateral view4",
"""
|create table src_lv1 (key string, value string);
|create table src_lv2 (key string, value string);
|
|FROM src
|insert overwrite table src_lv1 SELECT key, D.* lateral view explode(array(key+3, key+4)) D as CX
|insert overwrite table src_lv2 SELECT key, D.* lateral view explode(array(key+3, key+4)) D as CX
""".stripMargin)
// scalastyle:on
createQueryTest("lateral view5",
"FROM src SELECT explode(array(key+3, key+4))")
createQueryTest("lateral view6",
"SELECT * FROM src LATERAL VIEW explode(map(key+3,key+4)) D as k, v")
createQueryTest("Specify the udtf output",
"SELECT d FROM (SELECT explode(array(1,1)) d FROM src LIMIT 1) t")
test("sampling") {
sql("SELECT * FROM src TABLESAMPLE(0.1 PERCENT) s")
sql("SELECT * FROM src TABLESAMPLE(100 PERCENT) s")
}
test("DataFrame toString") {
sql("SHOW TABLES").toString
sql("SELECT * FROM src").toString
}
createQueryTest("case statements with key #1",
"SELECT (CASE 1 WHEN 2 THEN 3 END) FROM src where key < 15")
createQueryTest("case statements with key #2",
"SELECT (CASE key WHEN 2 THEN 3 ELSE 0 END) FROM src WHERE key < 15")
createQueryTest("case statements with key #3",
"SELECT (CASE key WHEN 2 THEN 3 WHEN NULL THEN 4 END) FROM src WHERE key < 15")
createQueryTest("case statements with key #4",
"SELECT (CASE key WHEN 2 THEN 3 WHEN NULL THEN 4 ELSE 0 END) FROM src WHERE key < 15")
createQueryTest("case statements WITHOUT key #1",
"SELECT (CASE WHEN key > 2 THEN 3 END) FROM src WHERE key < 15")
createQueryTest("case statements WITHOUT key #2",
"SELECT (CASE WHEN key > 2 THEN 3 ELSE 4 END) FROM src WHERE key < 15")
createQueryTest("case statements WITHOUT key #3",
"SELECT (CASE WHEN key > 2 THEN 3 WHEN 2 > key THEN 2 END) FROM src WHERE key < 15")
createQueryTest("case statements WITHOUT key #4",
"SELECT (CASE WHEN key > 2 THEN 3 WHEN 2 > key THEN 2 ELSE 0 END) FROM src WHERE key < 15")
// Jdk version leads to different query output for double, so not use createQueryTest here
test("timestamp cast #1") {
val res = sql("SELECT CAST(CAST(1 AS TIMESTAMP) AS DOUBLE) FROM src LIMIT 1").collect().head
assert(0.001 == res.getDouble(0))
}
createQueryTest("timestamp cast #2",
"SELECT CAST(CAST(1.2 AS TIMESTAMP) AS DOUBLE) FROM src LIMIT 1")
createQueryTest("timestamp cast #3",
"SELECT CAST(CAST(1200 AS TIMESTAMP) AS INT) FROM src LIMIT 1")
createQueryTest("timestamp cast #4",
"SELECT CAST(CAST(1.2 AS TIMESTAMP) AS DOUBLE) FROM src LIMIT 1")
createQueryTest("timestamp cast #5",
"SELECT CAST(CAST(-1 AS TIMESTAMP) AS DOUBLE) FROM src LIMIT 1")
createQueryTest("timestamp cast #6",
"SELECT CAST(CAST(-1.2 AS TIMESTAMP) AS DOUBLE) FROM src LIMIT 1")
createQueryTest("timestamp cast #7",
"SELECT CAST(CAST(-1200 AS TIMESTAMP) AS INT) FROM src LIMIT 1")
createQueryTest("timestamp cast #8",
"SELECT CAST(CAST(-1.2 AS TIMESTAMP) AS DOUBLE) FROM src LIMIT 1")
createQueryTest("select null from table",
"SELECT null FROM src LIMIT 1")
createQueryTest("CTE feature #1",
"with q1 as (select key from src) select * from q1 where key = 5")
createQueryTest("CTE feature #2",
"""with q1 as (select * from src where key= 5),
|q2 as (select * from src s2 where key = 4)
|select value from q1 union all select value from q2
""".stripMargin)
createQueryTest("CTE feature #3",
"""with q1 as (select key from src)
|from q1
|select * where key = 4
""".stripMargin)
// test get_json_object again Hive, because the HiveCompatabilitySuite cannot handle result
// with newline in it.
createQueryTest("get_json_object #1",
"SELECT get_json_object(src_json.json, '$') FROM src_json")
createQueryTest("get_json_object #2",
"SELECT get_json_object(src_json.json, '$.owner'), get_json_object(src_json.json, '$.store')" +
" FROM src_json")
createQueryTest("get_json_object #3",
"SELECT get_json_object(src_json.json, '$.store.bicycle'), " +
"get_json_object(src_json.json, '$.store.book') FROM src_json")
createQueryTest("get_json_object #4",
"SELECT get_json_object(src_json.json, '$.store.book[0]'), " +
"get_json_object(src_json.json, '$.store.book[*]') FROM src_json")
createQueryTest("get_json_object #5",
"SELECT get_json_object(src_json.json, '$.store.book[0].category'), " +
"get_json_object(src_json.json, '$.store.book[*].category'), " +
"get_json_object(src_json.json, '$.store.book[*].isbn'), " +
"get_json_object(src_json.json, '$.store.book[*].reader') FROM src_json")
createQueryTest("get_json_object #6",
"SELECT get_json_object(src_json.json, '$.store.book[*].reader[0].age'), " +
"get_json_object(src_json.json, '$.store.book[*].reader[*].age') FROM src_json")
createQueryTest("get_json_object #7",
"SELECT get_json_object(src_json.json, '$.store.basket[0][1]'), " +
"get_json_object(src_json.json, '$.store.basket[*]'), " +
// Hive returns wrong result with [*][0], so this expression is change to make test pass
"get_json_object(src_json.json, '$.store.basket[0][0]'), " +
"get_json_object(src_json.json, '$.store.basket[0][*]'), " +
"get_json_object(src_json.json, '$.store.basket[*][*]'), " +
"get_json_object(src_json.json, '$.store.basket[0][2].b'), " +
"get_json_object(src_json.json, '$.store.basket[0][*].b') FROM src_json")
createQueryTest("get_json_object #8",
"SELECT get_json_object(src_json.json, '$.non_exist_key'), " +
"get_json_object(src_json.json, '$..no_recursive'), " +
"get_json_object(src_json.json, '$.store.book[10]'), " +
"get_json_object(src_json.json, '$.store.book[0].non_exist_key'), " +
"get_json_object(src_json.json, '$.store.basket[*].non_exist_key'), " +
"get_json_object(src_json.json, '$.store.basket[0][*].non_exist_key') FROM src_json")
createQueryTest("get_json_object #9",
"SELECT get_json_object(src_json.json, '$.zip code') FROM src_json")
createQueryTest("get_json_object #10",
"SELECT get_json_object(src_json.json, '$.fb:testid') FROM src_json")
test("predicates contains an empty AttributeSet() references") {
sql(
"""
|SELECT a FROM (
| SELECT 1 AS a FROM src LIMIT 1 ) t
|WHERE abs(20141202) is not null
""".stripMargin).collect()
}
test("implement identity function using case statement") {
val actual = sql("SELECT (CASE key WHEN key THEN key END) FROM src")
.map { case Row(i: Int) => i }
.collect()
.toSet
val expected = sql("SELECT key FROM src")
.map { case Row(i: Int) => i }
.collect()
.toSet
assert(actual === expected)
}
// TODO: adopt this test when Spark SQL has the functionality / framework to report errors.
// See https://github.com/apache/spark/pull/1055#issuecomment-45820167 for a discussion.
ignore("non-boolean conditions in a CaseWhen are illegal") {
intercept[Exception] {
sql("SELECT (CASE WHEN key > 2 THEN 3 WHEN 1 THEN 2 ELSE 0 END) FROM src").collect()
}
}
createQueryTest("case sensitivity when query Hive table",
"SELECT srcalias.KEY, SRCALIAS.value FROM sRc SrCAlias WHERE SrCAlias.kEy < 15")
test("case sensitivity: registered table") {
val testData =
TestHive.sparkContext.parallelize(
TestData(1, "str1") ::
TestData(2, "str2") :: Nil)
testData.toDF().registerTempTable("REGisteredTABle")
assertResult(Array(Row(2, "str2"))) {
sql("SELECT tablealias.A, TABLEALIAS.b FROM reGisteredTABle TableAlias " +
"WHERE TableAliaS.a > 1").collect()
}
}
def isExplanation(result: DataFrame): Boolean = {
val explanation = result.select('plan).collect().map { case Row(plan: String) => plan }
explanation.contains("== Physical Plan ==")
}
test("SPARK-1704: Explain commands as a DataFrame") {
sql("CREATE TABLE IF NOT EXISTS src (key INT, value STRING)")
val df = sql("explain select key, count(value) from src group by key")
assert(isExplanation(df))
TestHive.reset()
}
test("SPARK-2180: HAVING support in GROUP BY clauses (positive)") {
val fixture = List(("foo", 2), ("bar", 1), ("foo", 4), ("bar", 3))
.zipWithIndex.map {case Pair(Pair(value, attr), key) => HavingRow(key, value, attr)}
TestHive.sparkContext.parallelize(fixture).toDF().registerTempTable("having_test")
val results =
sql("SELECT value, max(attr) AS attr FROM having_test GROUP BY value HAVING attr > 3")
.collect()
.map(x => Pair(x.getString(0), x.getInt(1)))
assert(results === Array(Pair("foo", 4)))
TestHive.reset()
}
test("SPARK-2180: HAVING with non-boolean clause raises no exceptions") {
sql("select key, count(*) c from src group by key having c").collect()
}
test("SPARK-2225: turn HAVING without GROUP BY into a simple filter") {
assert(sql("select key from src having key > 490").collect().size < 100)
}
test("SPARK-5383 alias for udfs with multi output columns") {
assert(
sql("select stack(2, key, value, key, value) as (a, b) from src limit 5")
.collect()
.size == 5)
assert(
sql("select a, b from (select stack(2, key, value, key, value) as (a, b) from src) t limit 5")
.collect()
.size == 5)
}
test("SPARK-5367: resolve star expression in udf") {
assert(sql("select concat(*) from src limit 5").collect().size == 5)
assert(sql("select array(*) from src limit 5").collect().size == 5)
assert(sql("select concat(key, *) from src limit 5").collect().size == 5)
assert(sql("select array(key, *) from src limit 5").collect().size == 5)
}
// Native Hive commands (DROP/CREATE DATABASE, SHOW DATABASES, EXPLAIN) should
// execute and surface their results through the 'result column.
test("Query Hive native command execution result") {
  val databaseName = "test_native_commands"
  assertResult(0) {
    sql(s"DROP DATABASE IF EXISTS $databaseName").count()
  }
  assertResult(0) {
    sql(s"CREATE DATABASE $databaseName").count()
  }
  assert(
    sql("SHOW DATABASES")
      .select('result)
      .collect()
      .map(_.getString(0))
      .contains(databaseName))
  assert(isExplanation(sql(s"EXPLAIN SELECT key, COUNT(*) FROM src GROUP BY key")))
  TestHive.reset()
}

// A DDL command is executed when first evaluated and must not re-run on reuse.
test("Exactly once semantics for DDL and command statements") {
  val tableName = "test_exactly_once"
  val q0 = sql(s"CREATE TABLE $tableName(key INT, value STRING)")
  // If the table was not created, the following assertion would fail
  assert(Try(table(tableName)).isSuccess)
  // If the CREATE TABLE command got executed again, the following assertion would fail
  assert(Try(q0.count()).isSuccess)
}

// Exercises DESCRIBE on: a partitioned table, a db-qualified table, a single
// column, a specific partition, and a registered temporary table.
test("DESCRIBE commands") {
  sql(s"CREATE TABLE test_describe_commands1 (key INT, value STRING) PARTITIONED BY (dt STRING)")
  sql(
    """FROM src INSERT OVERWRITE TABLE test_describe_commands1 PARTITION (dt='2008-06-08')
      |SELECT key, value
    """.stripMargin)
  // Describe a table
  assertResult(
    Array(
      Row("key", "int", null),
      Row("value", "string", null),
      Row("dt", "string", null),
      Row("# Partition Information", "", ""),
      Row("# col_name", "data_type", "comment"),
      Row("dt", "string", null))
  ) {
    sql("DESCRIBE test_describe_commands1")
      .select('col_name, 'data_type, 'comment)
      .collect()
  }
  // Describe a table with a fully qualified table name
  assertResult(
    Array(
      Row("key", "int", null),
      Row("value", "string", null),
      Row("dt", "string", null),
      Row("# Partition Information", "", ""),
      Row("# col_name", "data_type", "comment"),
      Row("dt", "string", null))
  ) {
    sql("DESCRIBE default.test_describe_commands1")
      .select('col_name, 'data_type, 'comment)
      .collect()
  }
  // Describe a column is a native command
  assertResult(Array(Array("value", "string", "from deserializer"))) {
    sql("DESCRIBE test_describe_commands1 value")
      .select('result)
      .collect()
      .map(_.getString(0).split("\\t").map(_.trim))
  }
  // Describe a column is a native command
  assertResult(Array(Array("value", "string", "from deserializer"))) {
    sql("DESCRIBE default.test_describe_commands1 value")
      .select('result)
      .collect()
      .map(_.getString(0).split("\\t").map(_.trim))
  }
  // Describe a partition is a native command
  assertResult(
    Array(
      Array("key", "int"),
      Array("value", "string"),
      Array("dt", "string"),
      Array(""),
      Array("# Partition Information"),
      Array("# col_name", "data_type", "comment"),
      Array(""),
      Array("dt", "string"))
  ) {
    sql("DESCRIBE test_describe_commands1 PARTITION (dt='2008-06-08')")
      .select('result)
      .collect()
      .map(_.getString(0).replaceAll("None", "").trim.split("\\t").map(_.trim))
  }
  // Describe a registered temporary table.
  val testData =
    TestHive.sparkContext.parallelize(
      TestData(1, "str1") ::
      TestData(1, "str2") :: Nil)
  testData.toDF().registerTempTable("test_describe_commands2")
  assertResult(
    Array(
      Row("a", "int", ""),
      Row("b", "string", ""))
  ) {
    sql("DESCRIBE test_describe_commands2")
      .select('col_name, 'data_type, 'comment)
      .collect()
  }
}
// SPARK-2263: MAP<K, V> values can be written to and read back from a Hive table.
test("SPARK-2263: Insert Map<K, V> values") {
  sql("CREATE TABLE m(value MAP<INT, STRING>)")
  sql("INSERT OVERWRITE TABLE m SELECT MAP(key, value) FROM src LIMIT 10")
  sql("SELECT * FROM m").collect().zip(sql("SELECT * FROM src LIMIT 10").collect()).map {
    case (Row(map: Map[_, _]), Row(key: Int, value: String)) =>
      // Each inserted map holds exactly the (key, value) pair of its source row.
      assert(map.size === 1)
      assert(map.head === (key, value))
  }
}

// Using a SerDe class that is not on the classpath must fail.
// NOTE(review): despite the test name, no ADD JAR is issued here, so the
// ALTER TABLE is expected to throw — presumably intentional; confirm.
test("ADD JAR command") {
  val testJar = TestHive.getHiveFile("data/files/TestSerDe.jar").getCanonicalPath
  sql("CREATE TABLE alter1(a INT, b INT)")
  intercept[Exception] {
    sql(
      """ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.TestSerDe'
        |WITH serdeproperties('s1'='9')
      """.stripMargin)
  }
  sql("DROP TABLE alter1")
}

// After ADD JAR, a SerDe class from the jar is usable for table DDL and queries.
test("ADD JAR command 2") {
  // this is a test case from mapjoin_addjar.q
  val testJar = TestHive.getHiveFile("hive-hcatalog-core-0.13.1.jar").getCanonicalPath
  val testData = TestHive.getHiveFile("data/files/sample.json").getCanonicalPath
  sql(s"ADD JAR $testJar")
  sql(
    """CREATE TABLE t1(a string, b string)
      |ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe'""".stripMargin)
  sql(s"""LOAD DATA LOCAL INPATH "$testData" INTO TABLE t1""")
  sql("select * from src join t1 on src.key = t1.a")
  sql("DROP TABLE t1")
}

// ADD FILE must distribute the file so executors can read it via SparkFiles.
test("ADD FILE command") {
  val testFile = TestHive.getHiveFile("data/files/v1.txt").getCanonicalFile
  sql(s"ADD FILE $testFile")
  // Single-partition RDD checks readability on the executor side.
  val checkAddFileRDD = sparkContext.parallelize(1 to 2, 1).mapPartitions { _ =>
    Iterator.single(new File(SparkFiles.get("v1.txt")).canRead)
  }
  assert(checkAddFileRDD.first())
}
// Row types used by the SPARK-3414 temp-table test further below.
case class LogEntry(filename: String, message: String)
case class LogFile(name: String)

// Golden-file test covering dynamic-partition INSERTs, including NULL values
// for one or both partition columns.
// NOTE(review): the "TABLe" casing in the last INSERT is in the original
// script; HiveQL keywords are case-insensitive, so it is harmless.
createQueryTest("dynamic_partition",
  """
    |DROP TABLE IF EXISTS dynamic_part_table;
    |CREATE TABLE dynamic_part_table(intcol INT) PARTITIONED BY (partcol1 INT, partcol2 INT);
    |
    |SET hive.exec.dynamic.partition.mode=nonstrict;
    |
    |INSERT INTO TABLE dynamic_part_table PARTITION(partcol1, partcol2)
    |SELECT 1, 1, 1 FROM src WHERE key=150;
    |
    |INSERT INTO TABLE dynamic_part_table PARTITION(partcol1, partcol2)
    |SELECT 1, NULL, 1 FROM src WHERE key=150;
    |
    |INSERT INTO TABLE dynamic_part_table PARTITION(partcol1, partcol2)
    |SELECT 1, 1, NULL FROM src WHERE key=150;
    |
    |INSERT INTO TABLe dynamic_part_table PARTITION(partcol1, partcol2)
    |SELECT 1, NULL, NULL FROM src WHERE key=150;
    |
    |DROP TABLE IF EXISTS dynamic_part_table;
  """.stripMargin)

// Verifies the on-disk folder layout of dynamic partitions, with NULL partition
// values mapped to the configured default partition name.
ignore("Dynamic partition folder layout") {
  sql("DROP TABLE IF EXISTS dynamic_part_table")
  sql("CREATE TABLE dynamic_part_table(intcol INT) PARTITIONED BY (partcol1 INT, partcol2 INT)")
  sql("SET hive.exec.dynamic.partition.mode=nonstrict")
  // Partition-value pairs -> distinct marker value inserted for that partition.
  val data = Map(
    Seq("1", "1") -> 1,
    Seq("1", "NULL") -> 2,
    Seq("NULL", "1") -> 3,
    Seq("NULL", "NULL") -> 4)
  data.foreach { case (parts, value) =>
    sql(
      s"""INSERT INTO TABLE dynamic_part_table PARTITION(partcol1, partcol2)
         |SELECT $value, ${parts.mkString(", ")} FROM src WHERE key=150
       """.stripMargin)
    // Expected partition directory, e.g. partcol1=1/partcol2=__HIVE_DEFAULT_PARTITION__.
    val partFolder = Seq("partcol1", "partcol2")
      .zip(parts)
      .map { case (k, v) =>
        if (v == "NULL") {
          s"$k=${ConfVars.DEFAULTPARTITIONNAME.defaultStrVal}"
        } else {
          s"$k=$v"
        }
      }
      .mkString("/")
    // Loads partition data to a temporary table to verify contents
    val path = s"$warehousePath/dynamic_part_table/$partFolder/part-00000"
    sql("DROP TABLE IF EXISTS dp_verify")
    sql("CREATE TABLE dp_verify(intcol INT)")
    sql(s"LOAD DATA LOCAL INPATH '$path' INTO TABLE dp_verify")
    assert(sql("SELECT * FROM dp_verify").collect() === Array(Row(value)))
  }
}
// SPARK-5592: partition values containing characters such as ':' must not
// produce a URISyntaxException when building partition paths.
test("SPARK-5592: get java.net.URISyntaxException when dynamic partitioning") {
  sql("""
    |create table sc as select *
    |from (select '2011-01-11', '2011-01-11+14:18:26' from src tablesample (1 rows)
    |union all
    |select '2011-01-11', '2011-01-11+15:18:26' from src tablesample (1 rows)
    |union all
    |select '2011-01-11', '2011-01-11+16:18:26' from src tablesample (1 rows) ) s
  """.stripMargin)
  sql("create table sc_part (key string) partitioned by (ts string) stored as rcfile")
  sql("set hive.exec.dynamic.partition=true")
  sql("set hive.exec.dynamic.partition.mode=nonstrict")
  sql("insert overwrite table sc_part partition(ts) select * from sc")
  sql("drop table sc_part")
}

// Invalid dynamic-partition specs must be rejected at execution time.
test("Partition spec validation") {
  sql("DROP TABLE IF EXISTS dp_test")
  sql("CREATE TABLE dp_test(key INT, value STRING) PARTITIONED BY (dp INT, sp INT)")
  sql("SET hive.exec.dynamic.partition.mode=strict")
  // Should throw when using strict dynamic partition mode without any static partition
  intercept[SparkException] {
    sql(
      """INSERT INTO TABLE dp_test PARTITION(dp)
        |SELECT key, value, key % 5 FROM src
      """.stripMargin)
  }
  sql("SET hive.exec.dynamic.partition.mode=nonstrict")
  // Should throw when a static partition appears after a dynamic partition
  intercept[SparkException] {
    sql(
      """INSERT INTO TABLE dp_test PARTITION(dp, sp = 1)
        |SELECT key, value, key % 5 FROM src
      """.stripMargin)
  }
}
// SPARK-3414: registering a temp table must store the analyzed plan, so a
// query built from unqualified column names still resolves later.
test("SPARK-3414 regression: should store analyzed logical plan when registering a temp table") {
  sparkContext.makeRDD(Seq.empty[LogEntry]).toDF().registerTempTable("rawLogs")
  sparkContext.makeRDD(Seq.empty[LogFile]).toDF().registerTempTable("logFiles")
  sql(
    """
    SELECT name, message
    FROM rawLogs
    JOIN (
      SELECT name
      FROM logFiles
    ) files
    ON rawLogs.filename = files.name
    """).registerTempTable("boom")
  // This should be successfully analyzed
  sql("SELECT * FROM boom").queryExecution.analyzed
}

// SPARK-3810: pre-insertion casts must not duplicate Project nodes when
// inserting with fully static partition values.
test("SPARK-3810: PreInsertionCasts static partitioning support") {
  val analyzedPlan = {
    loadTestTable("srcpart")
    sql("DROP TABLE IF EXISTS withparts")
    sql("CREATE TABLE withparts LIKE srcpart")
    sql("INSERT INTO TABLE withparts PARTITION(ds='1', hr='2') SELECT key, value FROM src")
      .queryExecution.analyzed
  }
  assertResult(1, "Duplicated project detected\\n" + analyzedPlan) {
    analyzedPlan.collect {
      case _: Project => ()
    }.size
  }
}

// Same as above but with dynamic partition columns.
test("SPARK-3810: PreInsertionCasts dynamic partitioning support") {
  val analyzedPlan = {
    loadTestTable("srcpart")
    sql("DROP TABLE IF EXISTS withparts")
    sql("CREATE TABLE withparts LIKE srcpart")
    sql("SET hive.exec.dynamic.partition.mode=nonstrict")
    sql("CREATE TABLE IF NOT EXISTS withparts LIKE srcpart")
    sql("INSERT INTO TABLE withparts PARTITION(ds, hr) SELECT key, value FROM src")
      .queryExecution.analyzed
  }
  assertResult(1, "Duplicated project detected\\n" + analyzedPlan) {
    analyzedPlan.collect {
      case _: Project => ()
    }.size
  }
}
// SET key=value parsing in HQL, including spaces around '=' and empty values.
test("parse HQL set commands") {
  // Adapted from its SQL counterpart.
  val testKey = "spark.sql.key.usedfortestonly"
  val testVal = "val0,val_1,val2.3,my_table"
  sql(s"set $testKey=$testVal")
  assert(getConf(testKey, testVal + "_") == testVal)
  sql("set some.property=20")
  assert(getConf("some.property", "0") == "20")
  sql("set some.property = 40")
  assert(getConf("some.property", "0") == "40")
  sql(s"set $testKey=$testVal")
  assert(getConf(testKey, "0") == testVal)
  sql(s"set $testKey=")
  assert(getConf(testKey, "0") == "")
}

// SET / SET -v / SET key semantics, checking both SQLConf and the Hive conf.
test("SET commands semantics for a HiveContext") {
  // Adapted from its SQL counterpart.
  val testKey = "spark.sql.key.usedfortestonly"
  val testVal = "test.val.0"
  val nonexistentKey = "nonexistent"
  // Normalizes SET output rows (key/value pairs, or key/default/doc triples
  // from SET -v) into a Set for order-insensitive comparison.
  def collectResults(df: DataFrame): Set[Any] =
    df.collect().map {
      case Row(key: String, value: String) => key -> value
      case Row(key: String, defaultValue: String, doc: String) => (key, defaultValue, doc)
    }.toSet
  conf.clear()
  val expectedConfs = conf.getAllDefinedConfs.toSet
  assertResult(expectedConfs)(collectResults(sql("SET -v")))
  // "SET" itself returns all config variables currently specified in SQLConf.
  // TODO: Should we be listing the default here always? probably...
  assert(sql("SET").collect().size == 0)
  assertResult(Set(testKey -> testVal)) {
    collectResults(sql(s"SET $testKey=$testVal"))
  }
  assert(hiveconf.get(testKey, "") == testVal)
  assertResult(Set(testKey -> testVal))(collectResults(sql("SET")))
  sql(s"SET ${testKey + testKey}=${testVal + testVal}")
  assert(hiveconf.get(testKey + testKey, "") == testVal + testVal)
  assertResult(Set(testKey -> testVal, (testKey + testKey) -> (testVal + testVal))) {
    collectResults(sql("SET"))
  }
  // "SET key"
  assertResult(Set(testKey -> testVal)) {
    collectResults(sql(s"SET $testKey"))
  }
  assertResult(Set(nonexistentKey -> "<undefined>")) {
    collectResults(sql(s"SET $nonexistentKey"))
  }
  conf.clear()
}

// Golden-file test: read a table backed by a thrift-based SerDe.
createQueryTest("select from thrift based table",
  "SELECT * from src_thrift")
// Put tests that depend on specific Hive settings before these last two test,
// since they modify /clear stuff.
}
// for SPARK-2180 test
// Schema of the rows registered as "having_test" in the HAVING tests.
case class HavingRow(key: Int, value: String, attr: Int)
| ArvinDevel/onlineAggregationOnSparkV2 | sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala | Scala | apache-2.0 | 41,286 |
package ch09
/*
1. Write a Scala code snippet that reverses the lines in a file (making the last
line the first one, and so on).
*/
import scala.io.Source
object ex01 extends App {
  // Choose the input: the file named on the command line, or stdin as a fallback.
  // A val with an if-expression replaces the original `var source = null` pattern.
  val source: Source =
    if (args.length < 1) {
      println("No input from the command line. Now reading from stdin.")
      Source.stdin
    } else {
      Source.fromFile(args(0), "UTF-8")
    }
  try {
    // Materialise all lines so the sequence can be reversed, then print last-to-first.
    for (l <- source.getLines().toArray.reverse) println(l)
  } finally {
    // Release the underlying stream even if reading fails.
    source.close()
  }
}
| tuxdna/scala-for-the-impatient-exercises | src/main/scala/ch09/ex01.scala | Scala | apache-2.0 | 452 |
import sbt._
// sbt 0.7.x plugin definition: pulls in sbt-eclipsify for generating
// Eclipse project files.
class RetColProjectSbtPlugins(info: ProjectInfo) extends PluginDefinition(info) {
  // Eclipse project-file generator plugin.
  val eclipse = "de.element34" % "sbt-eclipsify" % "0.7.0"
}
| melezov/iorc | project/plugins/RetColProjectSbtPlugins.scala | Scala | unlicense | 162 |
package org.apache.spark.streaming
import org.apache.spark.util.ManualClock
/**
 * Test helper living in the `org.apache.spark.streaming` package so it can
 * reach the package-private scheduler of a StreamingContext.
 */
object ManualClockHelper {

  /**
   * Advances the ManualClock driving the given StreamingContext's scheduler
   * by `timeToAdd` milliseconds. The scheduler's clock is assumed to be a
   * ManualClock; the cast fails otherwise.
   */
  def addToTime(ssc: StreamingContext, timeToAdd: Long): Unit =
    ssc.scheduler.clock.asInstanceOf[ManualClock].advance(timeToAdd)
}
| nchandrappa/incubator-geode | gemfire-spark-connector/gemfire-spark-connector/src/it/scala/org/apache/spark/streaming/ManualClockHelper.scala | Scala | apache-2.0 | 272 |
import cats.Monad
import cats.implicits._
import scala.language.higherKinds
@SuppressWarnings(Array("org.wartremover.warts.Any"))
object KVSApp extends App {
  import KVStore.ops._

  /**
   * A small key-value program, polymorphic in the effect F: writes two keys,
   * bumps one, reads it back, and deletes the other.
   */
  def program1[F[_]: Monad: KVStore]: F[Option[Int]] =
    for {
      _        <- KVStore.put("wild-cats", 2)
      _        <- KVStore.update[Int, Int]("wild-cats", _ + 12)
      _        <- KVStore.put("tame-cats", 5)
      wildCats <- KVStore.get[Int]("wild-cats")
      _        <- KVStore.delete("tame-cats")
    } yield wildCats

  import KVStore.KVStoreState

  // Run the program twice through the State-based interpreter, printing each result.
  val prog = for {
    firstResult  <- program1[KVStoreState]
    _             = println(s"Result 1: $firstResult")
    secondResult <- program1[KVStoreState]
    _             = println(s"Result 2: $secondResult")
  } yield ()

  // Kick off evaluation with an empty store; the yielded unit is discarded.
  val _ = prog.run(Map.empty).value
}
| lloydmeta/diesel | examples/src/main/scala/KVSApp.scala | Scala | mit | 764 |
import sbt._
import Keys._
import org.scalatra.sbt._
import org.scalatra.sbt.PluginKeys._
import com.mojolly.scalate.ScalatePlugin._
import ScalateKeys._
// sbt Build definition for the EnWiz Scalatra web application.
object EnwizBuild extends Build {
  // Project coordinates and library versions.
  val Organization = "ru.wordmetrix"
  val Name = "EnWiz"
  val Version = "0.1.0-SNAPSHOT"
  val ScalaVersion = "2.10.0"
  val ScalatraVersion = "2.2.2"

  lazy val project = Project (
    "enwiz",
    file("."),
    settings = Defaults.defaultSettings ++ ScalatraPlugin.scalatraWithJRebel ++ scalateSettings ++ Seq(
      organization := Organization,
      name := Name,
      version := Version,
      scalaVersion := ScalaVersion,
      resolvers += Classpaths.typesafeReleases,
      // Scalatra stack plus logback and an embedded Jetty for the container scope.
      libraryDependencies ++= Seq(
        "org.scalatra" %% "scalatra" % ScalatraVersion,
        "org.scalatra" %% "scalatra-scalate" % ScalatraVersion,
        "org.scalatra" %% "scalatra-specs2" % ScalatraVersion % "test",
        "ch.qos.logback" % "logback-classic" % "1.0.6" % "runtime",
        "org.eclipse.jetty" % "jetty-webapp" % "8.1.8.v20121106" % "container",
        "org.eclipse.jetty.orbit" % "javax.servlet" % "3.0.0.v201112011016" % "container;provided;test" artifacts (Artifact("javax.servlet", "jar", "jar"))
      ),
      // Precompile Scalate templates under WEB-INF/templates, binding the
      // Scalatra render context as an implicit in every template.
      scalateTemplateConfig in Compile <<= (sourceDirectory in Compile){ base =>
        Seq(
          TemplateConfig(
            base / "webapp" / "WEB-INF" / "templates",
            Seq.empty, /* default imports should be added here */
            Seq(
              Binding("context", "_root_.org.scalatra.scalate.ScalatraRenderContext", importMembers = true, isImplicit = true)
            ), /* add extra bindings here */
            Some("templates")
          )
        )
      }
    )
  )
}
/*
lazy val scalate_plugin = "org.fusesource.scalate" % "sbt-scalate-plugin_2.10" % "1.6.1"
And then in your WebProject, you will need to add the org.fusesource.scalate.sbt.PrecompilerWebProject trait. And then make sure the Scalate dependencies are added to the project. For example:
class Project(info: ProjectInfo) extends
DefaultWebProject(info) with
PrecompilerWebProject {
lazy val scalate_core = "org.fusesource.scalate" % "scalate-core_2.10" % "1.6.1"
lazy val servlet = "javax.servlet" % "servlet-api"% "2.5"
lazy val logback = "ch.qos.logback" % "logback-classic" % "0.9.26"
*/ | electricmind/enwiz | project/build.scala | Scala | apache-2.0 | 2,327 |
package com.iz2use.express.parser
import fastparse.all._
import utest._
import scala.io.Source
import scala.util._
// JVM-only test: parse the full IFC4 EXPRESS schema from disk.
object ExpressParserTestsJVM extends TestSuite {
  val tests = TestSuite {
    'schema{
      val fileParser = ExpressParser.file
      'file{
        // Read the whole schema into one string; wrapped in Try so a missing
        // IFC4.exp does not throw while building the fixture.
        val fileContent = Try(Source.fromFile("IFC4.exp").getLines().mkString)
        fileContent match {
          case Success(data) =>
            // The irrefutable pattern fails the test if parsing does not succeed.
            val Parsed.Success(value, successIndex) = fileParser.parse(data)
          // NOTE(review): a Failure (e.g. file not found) silently skips the
          // assertion — presumably an intentional soft-skip; confirm.
        }
      }
    }
  }
}
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webtrends.service
import com.webtrends.harness.component.netty.WebSocket
import io.netty.channel.ChannelHandlerContext
import io.netty.handler.codec.http.websocketx._
/**
* Created by wallinm on 12/9/14.
*/
/**
 * Example WebSocket handler: logs each incoming text frame and echoes it
 * back to the client with a fixed prefix.
 */
trait NettyExampleWebSocket extends WebSocket {
  override def WebSocketFrame(ctx: ChannelHandlerContext, frame: TextWebSocketFrame): Unit = {
    val incoming = frame.text()
    log.info(s"request: $incoming")
    // Reply on the same channel, prefixing the original payload.
    ctx.writeAndFlush(new TextWebSocketFrame(s"Echo from example server: $incoming"))
  }
}
| Webtrends/wookiee-netty | example-netty/src/main/scala/com/webtrends/service/NettyExampleWebSocket.scala | Scala | apache-2.0 | 1,281 |
package net.benchmark.akka.http.fortune
import io.circe.Codec, io.circe.generic.semiauto.deriveCodec
// A single fortune record: numeric id plus its message text.
case class Fortune(id: Int, message: String)

object Fortune {
  // circe JSON codec derived from the case-class fields.
  implicit val fortuneCodec: Codec[Fortune] = deriveCodec
  // Tupled constructor, e.g. for mapping (Int, String) rows to Fortune.
  def tupled = (this.apply _).tupled
}
| sumeetchhetri/FrameworkBenchmarks | frameworks/Scala/akka-http/akka-http-slick-postgres/src/main/scala/net/benchmark/akka/http/fortune/Fortune.scala | Scala | bsd-3-clause | 264 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.bforms.models
import cats.data.State
import java.time.LocalDateTime
import java.time.format.DateTimeFormatter
import play.api.libs.json.{ Format, JsError, JsString, JsSuccess, Reads, Writes }
import scala.util.Random
import uk.gov.hmrc.bforms.typeclasses.Now
// Zero-allocation wrapper for a reconciliation identifier string; toString
// yields the bare value so it can be embedded directly in messages.
case class ReconciliationId(value: String) extends AnyVal {
  override def toString = value
}
object ReconciliationId {

  /**
   * Builds a reconciliation id of the form "<submissionRef>-yyyyMMddHHmmss",
   * taking the timestamp from the injected clock.
   */
  def create(submissionRef: SubmissionRef)(implicit now: Now[LocalDateTime]): ReconciliationId = {
    val formattedTimestamp = now().format(DateTimeFormatter.ofPattern("yyyyMMddHHmmss"))
    ReconciliationId(s"$submissionRef-$formattedTimestamp")
  }
}
| VlachJosef/bforms | app/uk/gov/hmrc/bforms/models/ReconciliationId.scala | Scala | apache-2.0 | 1,255 |
/*
* Copyright (c) 2015 Goldman Sachs.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* and Eclipse Distribution License v. 1.0 which accompany this distribution.
* The Eclipse Public License is available at http://www.eclipse.org/legal/epl-v10.html
* and the Eclipse Distribution License is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*/
package org.eclipse.collections.impl.jmh
import org.junit.Assert
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
// Scala counterpart of a JMH functional-interface benchmark: filters/maps one
// million ints serially, lazily (view), and in parallel, asserting 999800
// survivors each time (every 10000th element, shifted, is dropped twice).
object FunctionalInterfaceScalaTest
{
    private val SIZE: Int = 1000000
    // 1..SIZE as a mutable buffer shared by every benchmark method.
    private val integers: ArrayBuffer[Int] = new ArrayBuffer[Int]() ++ (1 to SIZE)

    // Warms up the JIT with several distinct lambda call sites so the
    // benchmarked call sites become megamorphic.
    def megamorphic(megamorphicWarmupLevel: Int)
    {
        val predicate1: (Int) => Boolean = each => (each + 2) % 10000 != 0
        val predicate2: (Int) => Boolean = each => (each + 3) % 10000 != 0
        val predicate3: (Int) => Boolean = each => (each + 4) % 10000 != 0
        val predicate4: (Int) => Boolean = each => (each + 5) % 10000 != 0
        val function1: (Int) => String = each =>
        {
            Assert.assertNotNull(each)
            String.valueOf(each)
        }
        val function2: (String) => Int = each =>
        {
            Assert.assertNotNull(each)
            Integer.valueOf(each)
        }
        val function3: (Int) => String = each =>
        {
            Assert.assertEquals(each, each)
            String.valueOf(each)
        }
        val function4: (String) => Int = each =>
        {
            Assert.assertEquals(each, each)
            Integer.valueOf(each)
        }
        if (megamorphicWarmupLevel > 0)
        {
            // serial, lazy, EC
            {
                val set = this.integers.view.filter(predicate1).map(function1).map(function2).filter(predicate2).toSet
                Assert.assertEquals(999800, set.size)
                val buffer = this.integers.view.filter(predicate3).map(function3).map(function4).filter(predicate4).toBuffer
                Assert.assertEquals(999800, buffer.size)
            }
            // parallel, lazy, EC
            {
                val set = this.integers.par.filter(predicate1).map(function1).map(function2).filter(predicate2).toSet
                Assert.assertEquals(999800, set.size)
                val buffer = this.integers.par.filter(predicate3).map(function3).map(function4).filter(predicate4).toBuffer
                Assert.assertEquals(999800, buffer.size)
            }
            // serial, eager, EC
            {
                val set = this.integers.filter(predicate1).map(function1).map(function2).filter(predicate2).toSet
                Assert.assertEquals(999800, set.size)
                val buffer = this.integers.filter(predicate3).map(function3).map(function4).filter(predicate4).toBuffer
                Assert.assertEquals(999800, buffer.size)
            }
        }
    }

    // Eager pipeline: filter -> Int->String -> String->Int -> filter.
    def serial_eager_scala(): ArrayBuffer[Integer] =
    {
        val list = this.integers
            .filter(each => each % 10000 != 0)
            .map(String.valueOf)
            .map(Integer.valueOf)
            .filter(each => (each + 1) % 10000 != 0)
        Assert.assertEquals(999800, list.size)
        list
    }

    def test_serial_eager_scala()
    {
        Assert.assertEquals(1.to(1000000, 10000).flatMap(each => each.to(each + 9997)).toBuffer, this.serial_eager_scala())
    }

    // Same pipeline through a lazy view, materialised at the end.
    def serial_lazy_scala(): mutable.Buffer[Integer] =
    {
        val list = this.integers
            .view
            .filter(each => each % 10000 != 0)
            .map(String.valueOf)
            .map(Integer.valueOf)
            .filter(each => (each + 1) % 10000 != 0)
            .toBuffer
        Assert.assertEquals(999800, list.size)
        list
    }

    def test_serial_lazy_scala()
    {
        Assert.assertEquals(1.to(1000000, 10000).flatMap(each => each.to(each + 9997)).toBuffer, this.serial_lazy_scala())
    }

    // Same pipeline over a parallel collection.
    def parallel_lazy_scala(): mutable.Buffer[Integer] =
    {
        val list = this.integers
            .par
            .filter(each => each % 10000 != 0)
            .map(String.valueOf)
            .map(Integer.valueOf)
            .filter(each => (each + 1) % 10000 != 0)
            .toBuffer
        Assert.assertEquals(999800, list.size)
        list
    }

    def test_parallel_lazy_scala()
    {
        Assert.assertEquals(1.to(1000000, 10000).flatMap(each => each.to(each + 9997)).toBuffer, this.parallel_lazy_scala())
    }

    // Hand-fused version: both filters and the round-trip collapsed into one predicate.
    def parallel_lazy_scala_hand_coded(): mutable.Buffer[Int] =
    {
        val list = this.integers
            .par
            .filter(integer => integer % 10000 != 0 && (Integer.valueOf(String.valueOf(integer)) + 1) % 10000 != 0)
            .toBuffer
        Assert.assertEquals(999800, list.size)
        list
    }

    def test_parallel_lazy_scala_hand_coded()
    {
        Assert.assertEquals(1.to(1000000, 10000).flatMap(each => each.to(each + 9997)).toBuffer, this.parallel_lazy_scala_hand_coded())
    }
}
| g-votte/eclipse-collections | jmh-scala-tests/src/main/scala/org/eclipse/collections/impl/jmh/FunctionalInterfaceScalaTest.scala | Scala | bsd-3-clause | 5,177 |
/*
* Copyright (c) 2008, Michael Pradel
* All rights reserved. See LICENSE for details.
*/
package applications
import scala.roles._
// Demonstrates attaching a role to a core object created via a structural
// refinement (an anonymous subclass adding an extra member).
object RefinementTypes {
  trait A {
    var state = 23
    def f = 1
  }

  trait MyCollab extends TransientCollaboration {
    val r = new R{}
    trait R extends Role[A] {
      // Role behaviour reads both a method and mutable state of its core.
      def blubb = core.f + core.state
    }
  }

  def main (args: Array[String]) = {
    val c = new MyCollab{}
    // Core instance refined with an extra member not declared on A.
    val a = new A{
      def tralala = 7
    }
    println((a -: c.r).blubb)
    // known issue: causes exception, since refinement types have no (Java) interface...
    println((a -: c.r).tralala)
  }
}
| tupshin/Scala-Roles | examples/applications/RefinementTypes.scala | Scala | bsd-3-clause | 648 |
package com.twitter.scalding.hraven.reducer_estimation
import java.io.IOException
import cascading.flow.FlowStep
import com.twitter.hraven.{ Flow, JobDetails }
import com.twitter.hraven.rest.client.HRavenRestClient
import com.twitter.scalding.reducer_estimation._
import org.apache.hadoop.mapred.JobConf
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._
import com.twitter.hraven.JobDescFactory.{ JOBTRACKER_KEY, RESOURCE_MANAGER_KEY }
import scala.util.{ Failure, Success, Try }
// Factory for hRaven REST clients, configured entirely from a Hadoop JobConf.
object HRavenClient {
  import HRavenHistoryService.jobConfToRichConfig

  val apiHostnameKey = "hraven.api.hostname"
  val clientConnectTimeoutKey = "hraven.client.connect.timeout"
  val clientReadTimeoutKey = "hraven.client.read.timeout"

  // Timeout defaults in milliseconds.
  private final val clientConnectTimeoutDefault = 30000
  private final val clientReadTimeoutDefault = 30000

  // Returns Failure if `hraven.api.hostname` is not set in the JobConf.
  def apply(conf: JobConf): Try[HRavenRestClient] =
    conf.getFirstKey(apiHostnameKey)
      .map(new HRavenRestClient(_,
        conf.getInt(clientConnectTimeoutKey, clientConnectTimeoutDefault),
        conf.getInt(clientReadTimeoutKey, clientReadTimeoutDefault)))
}
/**
* Mixin for ReducerEstimators to give them the ability to query hRaven for
* info about past runs.
*/
object HRavenHistoryService extends HistoryService {
  private val LOG = LoggerFactory.getLogger(this.getClass)

  // List of fields that we consume from fetchTaskDetails api.
  // This is sent to hraven service to filter the response data
  // and avoid hitting http content length limit on hraven side.
  private val TaskDetailFields = List(
    "taskType",
    "status",
    "startTime",
    "finishTime").asJava

  // Job configs that must be present in fetched flows so past steps can be
  // matched to the current one by step number.
  val RequiredJobConfigs = Seq("cascading.flow.step.num")

  case class MissingFieldsException(fields: Seq[String]) extends Exception

  /**
   * Add some helper methods to JobConf
   */
  case class RichConfig(conf: JobConf) {

    val MaxFetch = "hraven.reducer.estimator.max.flow.history"
    val MaxFetchDefault = 8

    // How many past flows to request from hRaven per lookup.
    def maxFetch: Int = conf.getInt(MaxFetch, MaxFetchDefault)

    /**
     * Try fields in order until one returns a value.
     * Logs a warning if nothing was found.
     */
    def getFirstKey(fields: String*): Try[String] =
      fields.collectFirst {
        case f if conf.get(f) != null => Success(conf.get(f))
      }.getOrElse {
        LOG.warn("Missing required config param: " + fields.mkString(" or "))
        Failure(MissingFieldsException(fields))
      }
  }
  implicit def jobConfToRichConfig(conf: JobConf): RichConfig = RichConfig(conf)

  /**
   * Fetch flows until it finds one that was successful
   * (using "HdfsBytesRead > 0" as a marker for successful jobs since it seems
   * that this is only set on completion of jobs)
   *
   * TODO: query hRaven for successful jobs (first need to add ability to filter
   *       results in hRaven REST API)
   */
  private def fetchSuccessfulFlows(client: HRavenRestClient, cluster: String, user: String, batch: String, signature: String, max: Int, nFetch: Int): Try[Seq[Flow]] =
    Try(client.fetchFlowsWithConfig(cluster, user, batch, signature, nFetch, RequiredJobConfigs: _*))
      .flatMap { flows =>
        Try {
          // Ugly mutable code to add task info to flows
          flows.asScala.foreach { flow =>
            flow.getJobs.asScala.foreach { job =>
              // client.fetchTaskDetails might throw IOException
              val tasks = client.fetchTaskDetails(flow.getCluster, job.getJobId, TaskDetailFields)
              job.addTasks(tasks)
            }
          }
          val successfulFlows = flows.asScala.filter(_.getHdfsBytesRead > 0).take(max)
          if (successfulFlows.isEmpty) {
            LOG.warn("Unable to find any successful flows in the last " + nFetch + " jobs.")
          }
          successfulFlows
        }
      }.recoverWith {
        case e: IOException =>
          LOG.error("Error making API request to hRaven. HRavenHistoryService will be disabled.")
          Failure(e)
      }

  /**
   * Fetch info from hRaven for the last time the given JobStep ran.
   * Finds the last successful complete flow and selects the corresponding
   * step from it.
   *
   * @param step FlowStep to get info for
   * @return Details about the previous successful run.
   */
  def fetchPastJobDetails(step: FlowStep[JobConf], max: Int): Try[Seq[JobDetails]] = {
    val conf = step.getConfig
    val stepNum = step.getStepNum

    // Selects the job in a past flow whose recorded step number matches ours.
    def findMatchingJobStep(pastFlow: Flow) =
      pastFlow.getJobs.asScala.find { step =>
        try {
          step.getConfiguration.get("cascading.flow.step.num").toInt == stepNum
        } catch {
          case _: NumberFormatException => false
        }
      } orElse {
        LOG.warn("No matching job step in the retrieved hRaven flow.")
        None
      }

    // Maps the configured jobtracker/resource-manager host to hRaven's cluster name.
    def lookupClusterName(client: HRavenRestClient): Try[String] = {
      // regex for case matching URL to get hostname out
      val hostRegex = """(.*):\d+""".r
      // first try resource manager (for Hadoop v2), then fallback to job tracker
      conf.getFirstKey(RESOURCE_MANAGER_KEY, JOBTRACKER_KEY).flatMap {
        // extract hostname from hostname:port
        case hostRegex(host) =>
          // convert hostname -> cluster name (e.g. dw2@smf1)
          Try(client.getCluster(host))
      }
    }

    val flowsTry = for {
      // connect to hRaven REST API
      client <- HRavenClient(conf)
      // lookup cluster name used by hRaven
      cluster <- lookupClusterName(client)
      // get identifying info for this job
      user <- conf.getFirstKey("hraven.history.user.name", "user.name")
      batch <- conf.getFirstKey("batch.desc")
      signature <- conf.getFirstKey("scalding.flow.class.signature")
      // query hRaven for matching flows
      flows <- fetchSuccessfulFlows(client, cluster, user, batch, signature, max, conf.maxFetch)
    } yield flows

    // Find the FlowStep in the hRaven flow that corresponds to the current step
    // *Note*: when hRaven says "Job" it means "FlowStep"
    flowsTry.map(flows => flows.flatMap(findMatchingJobStep))
  }

  // HistoryService entry point: converts past JobDetails into FlowStepHistory records.
  override def fetchHistory(info: FlowStrategyInfo, maxHistory: Int): Try[Seq[FlowStepHistory]] =
    fetchPastJobDetails(info.step, maxHistory).map { history =>
      for {
        step <- history
        keys = FlowStepKeys(step.getJobName, step.getUser, step.getPriority, step.getStatus, step.getVersion, "")
        // update HRavenHistoryService.TaskDetailFields when consuming additional task fields from hraven below
        tasks = step.getTasks.asScala.map { t => Task(t.getType, t.getStatus, t.getStartTime, t.getFinishTime) }
      } yield toFlowStepHistory(keys, step, tasks)
    }

  // Copies counters from an hRaven JobDetails into the estimator's history record.
  private def toFlowStepHistory(keys: FlowStepKeys, step: JobDetails, tasks: Seq[Task]) =
    FlowStepHistory(
      keys = keys,
      submitTime = step.getSubmitTime,
      launchTime = step.getLaunchTime,
      finishTime = step.getFinishTime,
      totalMaps = step.getTotalMaps,
      totalReduces = step.getTotalReduces,
      finishedMaps = step.getFinishedMaps,
      finishedReduces = step.getFinishedReduces,
      failedMaps = step.getFailedMaps,
      failedReduces = step.getFailedReduces,
      mapFileBytesRead = step.getMapFileBytesRead,
      mapFileBytesWritten = step.getMapFileBytesWritten,
      reduceFileBytesRead = step.getReduceFileBytesRead,
      hdfsBytesRead = step.getHdfsBytesRead,
      hdfsBytesWritten = step.getHdfsBytesWritten,
      mapperTimeMillis = step.getMapSlotMillis,
      reducerTimeMillis = step.getReduceSlotMillis,
      reduceShuffleBytes = step.getReduceShuffleBytes,
      cost = 0,
      tasks = tasks)
}
// Ratio-based reducer estimator backed by hRaven job history.
class HRavenRatioBasedEstimator extends RatioBasedEstimator {
  override val historyService = HRavenHistoryService
}

// Runtime-based reducer estimator backed by hRaven job history.
class HRavenRuntimeBasedEstimator extends RuntimeReducerEstimator {
  override val historyService = HRavenHistoryService
}
| tglstory/scalding | scalding-hraven/src/main/scala/com/twitter/scalding/hraven/reducer_estimation/HRavenHistoryService.scala | Scala | apache-2.0 | 7,893 |
package com.twitter.inject.thrift.integration.inheritance
import com.twitter.finagle.Service
import com.twitter.inject.thrift.integration.AbstractThriftService
import com.twitter.serviceA.thriftscala.ServiceA
import com.twitter.serviceB.thriftscala.ServiceB
import com.twitter.util.Future
import com.twitter.util.logging.Logging
// Test implementation of ServiceB (which inherits ServiceA's endpoints).
// Every endpoint first verifies the caller presented the expected client id.
class ServiceBThriftService(
  clientId: String)
  extends AbstractThriftService
  with ServiceB.ServicePerEndpoint
  with Logging {

  // ServiceB endpoint: always answers "pong" after the client-id check.
  val ping: Service[ServiceB.Ping.Args, ServiceB.Ping.SuccessType] =
    Service.mk { args: ServiceB.Ping.Args =>
      assertClientId(clientId)
      Future.value("pong")
    }

  // Inherited ServiceA endpoint: echoes the request message back.
  val echo: Service[ServiceA.Echo.Args, ServiceA.Echo.SuccessType] =
    Service.mk { args: ServiceA.Echo.Args =>
      assertClientId(clientId)
      Future.value(args.msg)
    }
}
| twitter/finatra | inject/inject-thrift-client/src/test/scala/com/twitter/inject/thrift/integration/inheritance/ServiceBThriftService.scala | Scala | apache-2.0 | 832 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.compat.java8.converterImpl
import scala.annotation.switch
import scala.compat.java8.collectionImpl._
/////////////////////////////
// Stepper implementations //
/////////////////////////////
// Stepper over a generic Scala Iterator. Once `proxied` (inherited from
// StepsLikeIterator) is non-null, elements are drawn from the proxy instead of
// `underlying` — presumably after a split/substep; confirm in StepsLikeIterator.
private[java8] class StepsAnyIterator[A](_underlying: Iterator[A])
  extends StepsLikeIterator[A, StepsAnyIterator[A]](_underlying) {
  // Empty sibling used by the parent's splitting machinery.
  def semiclone() = new StepsAnyIterator(null)
  def next() = if (proxied ne null) proxied.nextStep else underlying.next
}
// Same pattern as StepsAnyIterator, specialized to unboxed Double elements.
private[java8] class StepsDoubleIterator(_underlying: Iterator[Double])
  extends StepsDoubleLikeIterator[StepsDoubleIterator](_underlying) {
  def semiclone() = new StepsDoubleIterator(null)
  def nextDouble() = if (proxied ne null) proxied.nextStep else underlying.next
}
// Same pattern as StepsAnyIterator, specialized to unboxed Int elements.
private[java8] class StepsIntIterator(_underlying: Iterator[Int])
  extends StepsIntLikeIterator[StepsIntIterator](_underlying) {
  def semiclone() = new StepsIntIterator(null)
  def nextInt() = if (proxied ne null) proxied.nextStep else underlying.next
}
// Same pattern as StepsAnyIterator, specialized to unboxed Long elements.
private[java8] class StepsLongIterator(_underlying: Iterator[Long])
  extends StepsLongLikeIterator[StepsLongIterator](_underlying) {
  def semiclone() = new StepsLongIterator(null)
  def nextLong() = if (proxied ne null) proxied.nextStep else underlying.next
}
//////////////////////////
// Value class adapters //
//////////////////////////
// Value-class adapter that gives any Scala Iterator a `stepper` method.
// Dispatches on the implicit StepperShape: for Int/Long/Double it builds the
// primitive-specialized stepper (the Iterator casts are safe because the shape
// evidence fixes T); anything else falls back to the generic object stepper.
// The final asInstanceOf[S] is the usual shape-indexed cast of this pattern.
final class RichIteratorCanStep[T](private val underlying: Iterator[T]) extends AnyVal with MakesStepper[T, Any] {
  def stepper[S <: Stepper[_]](implicit ss: StepperShape[T, S]) = ((ss.shape: @switch) match {
    case StepperShape.IntValue => new StepsIntIterator (underlying.asInstanceOf[Iterator[Int]])
    case StepperShape.LongValue => new StepsLongIterator (underlying.asInstanceOf[Iterator[Long]])
    case StepperShape.DoubleValue => new StepsDoubleIterator(underlying.asInstanceOf[Iterator[Double]])
    case _ => ss.seqUnbox(new StepsAnyIterator[T](underlying))
  }).asInstanceOf[S]
}
| scala/scala-java8-compat | src/main/scala-2.13-/scala/compat/java8/converterImpl/StepsIterator.scala | Scala | apache-2.0 | 2,240 |
package lore.compiler.assembly
import lore.compiler.poem.Poem
import java.util.concurrent.atomic.AtomicInteger
/** Hands out unique, sequential register IDs for Poem bytecode assembly. */
class RegisterProvider {
  // Monotonically increasing source of IDs; thread-safe via AtomicInteger.
  private val nextId: AtomicInteger = new AtomicInteger(0)

  /**
   * Provides a fresh, unique register ID.
   *
   * The first ID is guaranteed to be 0.
   */
  def fresh(): Poem.Register = Poem.Register(nextId.getAndIncrement())
}
| marcopennekamp/lore | compiler/src/lore/compiler/assembly/RegisterProvider.scala | Scala | mit | 379 |
/*
Copyright 2014 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.serialization.macros.impl.ordered_serialization.providers
import scala.reflect.macros.blackbox.Context
import com.twitter.scalding.serialization.macros.impl.ordered_serialization.{ProductLike, TreeOrderedBuf}
@SuppressWarnings(Array("org.wartremover.warts.MergeMaps"))
// Macro implementation of OrderedSerialization for case classes: builds a
// TreeOrderedBuf whose compare/hash/put/get/length are generated per field
// via ProductLike, recursing into the dispatcher for each field's type.
object CaseClassOrderedBuf {
  // Partial function that accepts real case classes (not case objects) and
  // delegates to apply below.
  def dispatch(c: Context)(
      buildDispatcher: => PartialFunction[c.Type, TreeOrderedBuf[c.type]]
  ): PartialFunction[c.Type, TreeOrderedBuf[c.type]] = {
    case tpe
        if tpe.typeSymbol.isClass && tpe.typeSymbol.asClass.isCaseClass && !tpe.typeSymbol.asClass.isModuleClass =>
      CaseClassOrderedBuf(c)(buildDispatcher, tpe)
  }
  def apply(c: Context)(
      buildDispatcher: => PartialFunction[c.Type, TreeOrderedBuf[c.type]],
      outerType: c.Type
  ): TreeOrderedBuf[c.type] = {
    import c.universe._
    def freshT(id: String) = TermName(c.freshName(id))
    val dispatcher = buildDispatcher
    // One (field type, accessor name, nested buf) triple per case accessor,
    // with the field type re-seen from the concrete outer type (handles
    // inherited/parameterized members).
    val elementData: List[(c.universe.Type, TermName, TreeOrderedBuf[c.type])] =
      outerType.decls
        .collect { case m: MethodSymbol if m.isCaseAccessor => m }
        .map { accessorMethod =>
          val fieldType =
            accessorMethod.returnType.asSeenFrom(outerType, outerType.typeSymbol.asClass)
          val b: TreeOrderedBuf[c.type] = dispatcher(fieldType)
          (fieldType, accessorMethod.name, b)
        }
        .toList
    new TreeOrderedBuf[c.type] {
      override val ctx: c.type = c
      override val tpe = outerType
      override def compareBinary(inputStreamA: ctx.TermName, inputStreamB: ctx.TermName) =
        ProductLike.compareBinary(c)(inputStreamA, inputStreamB)(elementData)
      override def hash(element: ctx.TermName): ctx.Tree =
        ProductLike.hash(c)(element)(elementData)
      override def put(inputStream: ctx.TermName, element: ctx.TermName) =
        ProductLike.put(c)(inputStream, element)(elementData)
      // Deserialization: read each field in declaration order into a fresh
      // local, then invoke the companion's apply with all of them.
      override def get(inputStream: ctx.TermName): ctx.Tree = {
        val getValProcessor = elementData.map { case (tpe, accessorSymbol, tBuf) =>
          val curR = freshT("curR")
          val builderTree = q"""
          val $curR: ${tBuf.tpe} = {
            ${tBuf.get(inputStream)}
          }
          """
          (builderTree, curR)
        }
        q"""
        ..${getValProcessor.map(_._1)}
        ${outerType.typeSymbol.companionSymbol}(..${getValProcessor.map(_._2)})
        """
      }
      override def compare(elementA: ctx.TermName, elementB: ctx.TermName): ctx.Tree =
        ProductLike.compare(c)(elementA, elementB)(elementData)
      // Merge the lazily-defined outer helpers contributed by every field's buf.
      override val lazyOuterVariables: Map[String, ctx.Tree] =
        elementData.map(_._3.lazyOuterVariables).reduce(_ ++ _)
      override def length(element: Tree) =
        ProductLike.length(c)(element)(elementData)
    }
  }
}
| twitter/scalding | scalding-serialization/src/main/scala/com/twitter/scalding/serialization/macros/impl/ordered_serialization/providers/CaseClassOrderedBuf.scala | Scala | apache-2.0 | 3,385 |
package score.discord.canti.discord.permissions
import net.dv8tion.jda.api.Permission
import net.dv8tion.jda.api.entities.PermissionOverride
import scala.jdk.CollectionConverters.*
import PermissionValue.*
/**
 * An immutable overlay of Discord permission values: each [[Permission]] is
 * either explicitly allowed, explicitly denied, or inherited from the
 * surrounding context (absent from the map).
 */
case class PermissionAttachment(permissions: Map[Permission, PermissionValue]):
  /** A copy in which every permission in `perms` is explicitly allowed. */
  def allow(perms: Iterable[Permission]): PermissionAttachment =
    copy(permissions = permissions ++ perms.iterator.map(p => p -> Allow))

  /** A copy in which every permission in `perms` is explicitly denied. */
  def deny(perms: Iterable[Permission]): PermissionAttachment =
    copy(permissions = permissions ++ perms.iterator.map(p => p -> Deny))

  /** A copy with any explicit value for the given permissions removed. */
  def clear(perms: Iterable[Permission]): PermissionAttachment =
    copy(permissions = permissions -- perms)

  /** Overlays `other` on this attachment; `Inherit` entries in `other` leave ours untouched. */
  def merge(other: PermissionAttachment): PermissionAttachment =
    copy(permissions = permissions ++ other.permissions.filterNot(_._2 == Inherit))

  /** All permissions explicitly allowed here. */
  def allows: Set[Permission] =
    permissions.iterator.collect { case (p, Allow) => p }.toSet

  /** All permissions explicitly denied here. */
  def denies: Set[Permission] =
    permissions.iterator.collect { case (p, Deny) => p }.toSet

  /** The explicit value recorded for `perm`, or `Inherit` when there is none. */
  def get(perm: Permission): PermissionValue =
    permissions.get(perm).getOrElse(Inherit)

  export permissions.isEmpty
// Companion: construction helpers plus the canonical empty attachment.
object PermissionAttachment:
  // Mirrors a JDA PermissionOverride's allowed/denied sets into an attachment.
  def apply(ov: PermissionOverride): PermissionAttachment =
    empty.allow(ov.getAllowed.asScala).deny(ov.getDenied.asScala)
  // No explicit values; the default value makes every lookup yield Inherit.
  val empty: PermissionAttachment =
    PermissionAttachment(Map.empty.withDefaultValue(Inherit))
  // Re-export `empty.allow` / `empty.deny` as object-level factory methods.
  export empty.{allow, deny}
| ScoreUnder/canti-bot | src/main/scala/score/discord/canti/discord/permissions/PermissionAttachment.scala | Scala | agpl-3.0 | 1,412 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.s2graph.loader.spark
import java.io.Serializable
import org.apache.hadoop.hbase.util.Bytes
/**
 * Composite key used for sorting and shuffling during HFile bulk loading.
 *
 * Partitioning happens on the row key alone, but sorting considers all three
 * components so cells arrive in HFile order: (rowKey, family, qualifier).
 *
 * @param rowKey    record row key
 * @param family    record column family
 * @param qualifier cell qualifier
 */
class KeyFamilyQualifier(val rowKey:Array[Byte], val family:Array[Byte], val qualifier:Array[Byte])
  extends Comparable[KeyFamilyQualifier] with Serializable {

  /** Lexicographic byte-wise comparison on row key, then family, then qualifier. */
  override def compareTo(o: KeyFamilyQualifier): Int = {
    val byRow = Bytes.compareTo(rowKey, o.rowKey)
    if (byRow != 0) byRow
    else {
      val byFamily = Bytes.compareTo(family, o.family)
      if (byFamily != 0) byFamily
      else Bytes.compareTo(qualifier, o.qualifier)
    }
  }

  /** Renders the three components as `row:family:qualifier` decoded strings. */
  override def toString: String =
    Bytes.toString(rowKey) + ":" + Bytes.toString(family) + ":" + Bytes.toString(qualifier)
}
| jongwook/incubator-s2graph | loader/src/main/scala/org/apache/s2graph/loader/spark/KeyFamilyQualifier.scala | Scala | apache-2.0 | 1,733 |
package com.mtraina.ps
import java.io.{File, PrintWriter}
import java.util.Date
import java.util.function.Predicate
// Worked examples for a "control abstraction" chapter: higher-order functions,
// currying, custom control structures and by-name parameters.
// NOTE(review): initializing this object executes the top-level statements
// below, including `boolAssert(x / 0 == 0)`, which throws ArithmeticException
// on purpose (see the comment at that line) — the object is not usable at runtime.
object Chapter09 {
  /**
    * We define a singleton to match the files in a directory following certain criteria
    */
  object FileMatcher {
    private def filesHere = new java.io.File(".").listFiles()
    // returns all the files in the directory that ends with a specific suffix
    def fileEnding(query: String) =
      for(file <- filesHere; if file.getName.endsWith(query))
        yield file
    //returns all the files containing a specific token in their name
    def filesContaining(query: String) =
      for(file <- filesHere; if file.getName.contains(query))
        yield file
    //returns all the files matching a specific regex in their name
    def filesRegex(query: String) =
      for(file <- filesHere; if file.getName.matches(query))
        yield file
  }
  /**
    * Refactored file matcher where we reuse the common part of matching the file name
    */
  object FileMatcherRefactored {
    private def filesHere = new java.io.File(".").listFiles()
    def filesMatching(query: String, matcher: (String, String) => Boolean) = {
      for(file <- filesHere; if matcher(file.getName, query))
        yield file
    }
    // the first placeholder (_) will be fulfilled with the value of "file.getName" being it the first parameter passed
    // to the function matcher inside the method fileMatching
    // similarly the second placeholder will get the value of "query", being it the second parameter passed
    def fileEnding(query: String) = filesMatching(query, _.endsWith(_))
    def fileContaining(query: String) = filesMatching(query, _.contains(_))
    def fileRegex(query: String) = filesMatching(query, _.matches(_))
  }
  /**
    * Refactored file matcher using closures: query is the free variable
    */
  object FileMatcherClosure {
    private def filesHere = new java.io.File(".").listFiles()
    private def filesMatching(matcher: String => Boolean) =
      for(file <- filesHere; if matcher(file.getName))
        yield file
    def fileEnding(query: String) = filesMatching(_.endsWith(query))
    def fileContaining(query: String) = filesMatching(_.contains(query))
    def fileRegex(query: String) = filesMatching(_.matches(query))
  }
  /**
    * Simplify client code using high order functions
    */
  def containsNeg(nums: List[Int]): Boolean = {
    var exists = false
    for(num <- nums)
      if(num < 0)
        exists = true
    exists
  }
  containsNeg(List(1,2,3,-4)) // true
  // the following method give us the same result of the one above, in a more concise way
  def containsNegRefactored(nums: List[Int]) = nums.exists(_ < 0)
  /**
    * Currying
    */
  // in this example we show a typical function where we pass the two arguments and the body of the function sums them
  def plainOldSum(x: Int, y: Int) = x + y
  plainOldSum(1, 2) // 3
  // in the example shown below, instead, we show create a function that takes an integer as parameter and returns
  // a function. Passing the second parameter to the last function yields the sum
  def curriedSum(x: Int)(y: Int) = x + y
  curriedSum(1)(2) // 3
  /**
    * Writing new control structures
    */
  def twice(op: Double => Double, x: Double) = op(op(x))
  twice(_ + 1, 5) // 7 (5 + 1 + 1)
  // in any method invocation in which we are passing in exactly one argument, we can use curly braces in stead of parentheses
  println("Hello world!")
  println { "Hello world!" }
  // the advantage of using curly braces is that enables client programmers to write function literals in between
  def withPrintWriter(file: File, op: PrintWriter => Unit) ={
    val writer = new PrintWriter(file)
    try {
      op(writer)
    } finally {
      writer.close()
    }
  }
  withPrintWriter(new File("a.txt"),
    writer => writer.println(new Date))
  // the same function as above refactored using the loan pattern
  def withPrintWriterRefactored(file: File)(op: PrintWriter => Unit) ={
    val writer = new PrintWriter(file)
    try {
      op(writer)
    } finally {
      writer.close()
    }
  }
  // refactoring the function in this way let us use the curly braces for passing the last parameter
  withPrintWriterRefactored(new File("a.txt")){ writer =>
    writer.println(new Date)
  }
  /**
    * By-name parameters
    */
  var assertionsEnabled = true
  // in this case we pass a function, it won't be evaluated if assertionsEnabled is false
  def myAssert(predicate: () => Boolean) =
    if(assertionsEnabled && !predicate())
      throw new AssertionError
  // using this function is a bit awkward
  myAssert(() => 5 > 3)
  // the next function is equivalent to the first
  def byNameAssert(predicate: => Boolean) =
    if(assertionsEnabled && !predicate)
      throw new AssertionError
  // calling it looks more natural
  byNameAssert(5 > 3)
  // this last case is similar but the parameter "predicate" will be evaluate before the call to the boolAssert method
  def boolAssert(predicate: Boolean) =
    if(assertionsEnabled && !predicate)
      throw new AssertionError
  val x = 5
  assertionsEnabled = false
  boolAssert(x / 0 == 0) // this will produce an arithmetic exception because it will be evaluated before entering the method
  byNameAssert(x / 0 == 0) // it won't throw any exception because with assertionsEnable to false, the predicate won't be evaluated
}
| mtraina/programming-in-scala | src/main/scala/com/mtraina/ps/Chapter09.scala | Scala | mit | 5,461 |
/**
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.datasource.util
import scala.language.reflectiveCalls
import scala.util.Try
/**
 * DSL helper that runs a block of code against a closeable resource and
 * guarantees the resource is closed afterwards, e.g.:{{{
 * import java.io._
 * val writer = new PrintWriter(new File("test.txt" ))
 * using(writer){ w =>
 *   w.append("hi!")
 * }
 * }}}
 */
object using {
  /** Any value exposing a parameterless `close(): Unit` (structural type, called reflectively). */
  type AutoClosable = { def close(): Unit }

  /**
   * Applies `code` to `resource`, closing the resource whether the block
   * completes normally or throws. Exceptions raised by `close()` itself are
   * swallowed via `Try`, so they never mask the block's result or failure.
   */
  def apply[A <: AutoClosable, B](resource: A)(code: A => B): B =
    try code(resource)
    finally Try(resource.close())
}
| pfcoperez/spark-mongodb | spark-mongodb/src/main/scala/com/stratio/datasource/util/using.scala | Scala | apache-2.0 | 1,217 |
package pl.touk.nussknacker.ui.api.helpers
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, RequestEntity}
import io.circe.Encoder
import io.circe.generic.extras.semiauto.deriveConfiguredEncoder
import io.circe.syntax._
import pl.touk.nussknacker.engine.graph.EspProcess
import pl.touk.nussknacker.restmodel.displayedgraph.{DisplayableProcess, ProcessProperties}
import pl.touk.nussknacker.ui.process.ProcessService.UpdateProcessCommand
import pl.touk.nussknacker.engine.api.CirceUtil._
import pl.touk.nussknacker.ui.process.marshall.ProcessConverter
/** Builds JSON HTTP request entities for the various process payloads used in tests. */
class ProcessPosting {

  private implicit val ptsEncoder: Encoder[UpdateProcessCommand] = deriveConfiguredEncoder

  /** Serializes any encodable value as a pretty-printed `application/json` entity. */
  def toRequest[T: Encoder](value: T): RequestEntity = {
    val json = value.asJson.spaces2
    HttpEntity(ContentTypes.`application/json`, json)
  }

  /** Entity for a raw [[EspProcess]], converted to its displayable form first. */
  def toEntity(process: EspProcess): RequestEntity = {
    val displayable = ProcessConverter.toDisplayable(process.toCanonicalProcess, TestProcessingTypes.Streaming)
    toRequest(displayable)
  }

  /** Entity wrapping an [[EspProcess]] in an [[UpdateProcessCommand]] with the given comment. */
  def toEntityAsProcessToSave(process: EspProcess, comment: String = ""): RequestEntity = {
    val displayable = ProcessConverter.toDisplayable(process.toCanonicalProcess, TestProcessingTypes.Streaming)
    toRequest(UpdateProcessCommand(displayable, comment = comment))
  }

  /** Entity for process properties alone. */
  def toEntity(properties: ProcessProperties): RequestEntity =
    toRequest(properties)

  /** Entity for an already-displayable process. */
  def toEntity(process: DisplayableProcess): RequestEntity =
    toRequest(process)

  /** Entity for a ready-made update command. */
  def toEntity(process: UpdateProcessCommand): RequestEntity =
    toRequest(process)

  /** Entity wrapping a displayable process in an update command with an empty comment. */
  def toEntityAsProcessToSave(process: DisplayableProcess): RequestEntity =
    toRequest(UpdateProcessCommand(process, comment = ""))
}
| TouK/nussknacker | ui/server/src/test/scala/pl/touk/nussknacker/ui/api/helpers/ProcessPosting.scala | Scala | apache-2.0 | 1,666 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate.osgi
import java.io.{ InputStream, IOException, File }
import scala.reflect.io.AbstractFile
import java.net.URL
import java.lang.String
import org.osgi.framework.{ ServiceReference, Bundle }
import collection.mutable.{ ListBuffer, LinkedHashSet }
import org.osgi.service.packageadmin.PackageAdmin
import org.fusesource.scalate.util.{ Log, Strings }
/**
* Helper methods to transform OSGi bundles into {@link AbstractFile} implementations
* suitable for use with the Scala compiler
*/
object BundleClassPathBuilder {
  val log = Log(getClass); import log._
  // These were removed in Scala 2.11. We still use them.
  private trait AbstractFileCompatibility { this: AbstractFile =>
    // Walks `path` one separator-delimited segment at a time via lookupName.
    def lookupPath(path: String, directory: Boolean): AbstractFile = {
      lookup((f, p, dir) => f.lookupName(p, dir), path, directory)
    }
    private def lookup(
      getFile: (AbstractFile, String, Boolean) => AbstractFile,
      path0: String,
      directory: Boolean
    ): AbstractFile = {
      val separator = java.io.File.separatorChar
      // trim trailing '/'s
      val path: String = if (path0.last == separator) path0 dropRight 1 else path0
      val length = path.length()
      assert(length > 0 && !(path.last == separator), path)
      var file: AbstractFile = this
      var start = 0
      while (true) {
        val index = path.indexOf(separator, start)
        assert(index < 0 || start < index, ((path, directory, start, index)))
        val name = path.substring(start, if (index < 0) length else index)
        // Intermediate segments are always looked up as directories; only the
        // final segment honours the caller's `directory` flag.
        file = getFile(file, name, if (index < 0) directory else true)
        if ((file eq null) || index < 0) return file
        start = index + 1
      }
      file
    }
  }
  /**
   * Create a list of AbstractFile instances, representing the bundle and its wired dependencies
   */
  def fromBundle(bundle: Bundle): List[AbstractFile] = {
    require(bundle != null, "Bundle should not be null")
    // add the bundle itself
    val files = ListBuffer(create(bundle))
    // also add all bundles that have exports wired to imports from this bundle
    files.appendAll(fromWires(bundle))
    files.toList
  }
  /**
   * Find bundles that have exports wired to the given bundle.
   * Returns an empty list when the PackageAdmin service cannot be obtained.
   */
  def fromWires(bundle: Bundle): List[AbstractFile] = {
    debug("Checking OSGi bundle wiring for %s", bundle)
    val context = bundle.getBundleContext
    val ref: ServiceReference[_] = context.getServiceReference(classOf[PackageAdmin].getName)
    if (ref == null) {
      warn("PackageAdmin service is unavailable - unable to check bundle wiring information")
      return List()
    }
    try {
      var admin: PackageAdmin = context.getService(ref).asInstanceOf[PackageAdmin]
      if (admin == null) {
        warn("PackageAdmin service is unavailable - unable to check bundle wiring information")
        return List()
      }
      return fromWires(admin, bundle)
    } finally {
      // Always release the service reference obtained above.
      context.ungetService(ref)
    }
  }
  // Collects every non-system bundle exporting a package that `bundle` imports.
  def fromWires(admin: PackageAdmin, bundle: Bundle): List[AbstractFile] = {
    val exported = admin.getExportedPackages(null: Bundle)
    val set = new LinkedHashSet[Bundle]
    for (pkg <- exported; if pkg.getExportingBundle.getBundleId != 0) {
      val bundles = pkg.getImportingBundles();
      if (bundles != null) {
        for (b <- bundles; if b.getBundleId == bundle.getBundleId) {
          debug("Bundle imports %s from %s", pkg, pkg.getExportingBundle)
          if (b.getBundleId == 0) {
            debug("Ignoring system bundle")
          } else {
            set += pkg.getExportingBundle
          }
        }
      }
    }
    set.map(create(_)).toList
  }
  /**
   * Create a new { @link AbstractFile } instance representing an
   * { @link org.osgi.framework.Bundle }
   *
   * @param bundle the bundle
   */
  def create(bundle: Bundle): AbstractFile = {
    require(bundle != null, "Bundle should not be null")
    // Common behaviour for both files and directories backed by bundle entries.
    abstract class BundleEntry(url: URL, parent: DirEntry) extends AbstractFile with AbstractFileCompatibility {
      require(url != null, "url must not be null")
      lazy val (path: String, name: String) = getPathAndName(url)
      lazy val fullName: String = (path :: name :: Nil).filter(n => !Strings.isEmpty(n)).mkString("/")
      /**
       * @return null
       */
      def file: File = null
      /**
       * @return last modification time or 0 if not known
       */
      def lastModified: Long =
        try { url.openConnection.getLastModified }
        catch { case _: Exception => 0 }
      @throws(classOf[IOException])
      def container: AbstractFile =
        valueOrElse(parent) {
          throw new IOException("No container")
        }
      @throws(classOf[IOException])
      def input: InputStream = url.openStream
      /**
       * Not supported. Always throws an IOException.
       * @throws IOException
       */
      @throws(classOf[IOException])
      def output = throw new IOException("not supported: output")
      // Splits the URL path into (parent path, last segment), dropping any
      // trailing '/'s and the leading '/' of the parent.
      private def getPathAndName(url: URL): (String, String) = {
        val u = url.getPath
        var k = u.length
        while ((k > 0) && (u(k - 1) == '/'))
          k = k - 1
        var j = k
        while ((j > 0) && (u(j - 1) != '/'))
          j = j - 1
        (u.substring(if (j > 0) 1 else 0, if (j > 1) j - 1 else j), u.substring(j, k))
      }
      override def toString = fullName
    }
    // Directory entry: iterates the bundle's entry paths beneath `fullName`.
    class DirEntry(url: URL, parent: DirEntry) extends BundleEntry(url, parent) {
      /**
       * @return true
       */
      def isDirectory: Boolean = true
      override def iterator: Iterator[AbstractFile] = {
        new Iterator[AbstractFile]() {
          val dirs = bundle.getEntryPaths(fullName)
          // One-element lookahead; entries that cannot be resolved via
          // getResource are skipped by returning null from prefetch and
          // retrying in hasNext.
          var nextEntry = prefetch()
          def hasNext = {
            if (nextEntry == null)
              nextEntry = prefetch()
            nextEntry != null
          }
          def next() = {
            if (hasNext) {
              val entry = nextEntry
              nextEntry = null
              entry
            } else {
              throw new NoSuchElementException()
            }
          }
          private def prefetch() = {
            if (dirs.hasMoreElements) {
              val entry = dirs.nextElement.asInstanceOf[String]
              var entryUrl = bundle.getResource("/" + entry)
              // Bundle.getResource seems to be inconsistent with respect to requiring
              // a trailing slash
              if (entryUrl == null)
                entryUrl = bundle.getResource("/" + removeTralingSlash(entry))
              // If still null OSGi wont let use load that resource for some reason
              if (entryUrl == null) {
                null
              } else {
                if (entry.endsWith(".class"))
                  new FileEntry(entryUrl, DirEntry.this)
                else
                  new DirEntry(entryUrl, DirEntry.this)
              }
            } else
              null
          }
          private def removeTralingSlash(s: String): String =
            if (s == null || s.length == 0)
              s
            else if (s.last == '/')
              removeTralingSlash(s.substring(0, s.length - 1))
            else
              s
        }
      }
      def lookupName(name: String, directory: Boolean): AbstractFile = {
        val entry = bundle.getEntry(fullName + "/" + name)
        nullOrElse(entry) { entry =>
          if (directory)
            new DirEntry(entry, DirEntry.this)
          else
            new FileEntry(entry, DirEntry.this)
        }
      }
      override def lookupPathUnchecked(path: String, directory: Boolean) = lookupPath(path, directory)
      // NOTE(review): passes the entry's `path` field, not the `name`
      // parameter — looks like a copy-paste bug; confirm before changing.
      def lookupNameUnchecked(name: String, directory: Boolean) = lookupName(path, directory)
      def absolute = unsupported("absolute() is unsupported")
      def create = unsupported("create() is unsupported")
      // NOTE(review): message says "create()" but this is delete().
      def delete = unsupported("create() is unsupported")
    }
    // Leaf (.class or other file) entry backed by a single bundle resource.
    class FileEntry(url: URL, parent: DirEntry) extends BundleEntry(url, parent) {
      /**
       * @return false
       */
      def isDirectory: Boolean = false
      override def sizeOption: Option[Int] = Some(bundle.getEntry(fullName).openConnection().getContentLength())
      def lookupName(name: String, directory: Boolean): AbstractFile = null
      override def lookupPathUnchecked(path: String, directory: Boolean) = lookupPath(path, directory)
      // NOTE(review): same `path` vs `name` issue as DirEntry above.
      def lookupNameUnchecked(name: String, directory: Boolean) = lookupName(path, directory)
      def iterator = Iterator.empty
      def absolute = unsupported("absolute() is unsupported")
      def create = unsupported("create() is unsupported")
      // NOTE(review): message says "create()" but this is delete().
      def delete = unsupported("create() is unsupported")
    }
    // Root of the bundle's resource tree.
    new DirEntry(bundle.getResource("/"), null) {
      override def toString = "AbstractFile[" + bundle + "]"
    }
  }
  /**
   * Evaluate <code>f</code> on <code>s</code> if <code>s</code> is not null.
   * @param s
   * @param f
   * @return <code>f(s)</code> if s is not <code>null</code>, <code>null</code> otherwise.
   */
  def nullOrElse[S, T](s: S)(f: S => T): T =
    if (s == null) null.asInstanceOf[T]
    else f(s)
  /**
   * @param t
   * @param default
   * @return <code>t</code> or <code>default</code> if <code>null</code>.
   */
  def valueOrElse[T](t: T)(default: => T) =
    if (t == null) default
    else t
}
| maslovalex/scalate | scalate-core/src/main/scala_2.12/org/fusesource/scalate/osgi/BundleClassPathBuilder.scala | Scala | apache-2.0 | 10,103 |
package akka.codepot.engine.search.tiered.top
import java.util.Locale
import akka.actor.{ActorLogging, Props, Stash}
import akka.codepot.engine.index.Indexing
import akka.codepot.engine.search.tiered.TieredSearchProtocol
import akka.persistence.{SnapshotOffer, PersistentActor}
import akka.stream.scaladsl.{ImplicitMaterializer, Sink}
import akka.util.ByteString
import scala.collection.immutable
// Companion: Props factory and protocol message for the sharded index entity.
object ShardedPersistentFromFileTopActor {
  def props() =
    Props(classOf[ShardedPersistentFromFileTopActor])
  // Request that the index for the given character be prepared.
  final case class PrepareIndex(char: Char)
}
/**
 * Cluster-sharded, persistent entity actor that builds an in-memory keyword
 * index for its shard key (the entity name). On start it streams the cached
 * wikipedia keywords matching the key into itself, persisting each word; once
 * indexing completes it snapshots the index and starts serving [[Search]]
 * requests. Commands arriving while indexing are stashed.
 */
class ShardedPersistentFromFileTopActor extends PersistentActor with ActorLogging
  with Stash
  with ImplicitMaterializer
  with Indexing {
  import TieredSearchProtocol._

  // Sharding key = entity name; also used as the persistence id.
  val key = self.path.name

  override def persistenceId: String = key

  // In-memory view of all words indexed under this key.
  var inMemIndex: immutable.Set[String] = Set.empty

  override def preStart() = {
    log.info("Started Entity Actor for key [{}]...", key)
    doIndex(key)
  }

  // Recovery applies the snapshot / replayed events directly.
  // FIX: previously this was `indexing("recovering") orElse { case SnapshotOffer... }`,
  // but `indexing` ends in a catch-all `case _ => stash()`, which made the
  // SnapshotOffer handler unreachable; it also re-`persist`ed replayed events,
  // which is not allowed during recovery.
  override def receiveRecover: Receive = {
    case SnapshotOffer(_, index: Set[String]) =>
      log.info("Recovered using snapshot.")
      inMemIndex = index
    case word: String =>
      inMemIndex += word
  }

  override def receiveCommand: Receive = indexing("indexing")

  // Indexing phase: persist incoming words, stash everything else until done.
  def indexing(action: String): Receive = {
    case word: String =>
      persist(word) { inMemIndex += _ }
    case word: ByteString =>
      // FIX: was `word.toString()`, which persists ByteString's debug
      // rendering ("ByteString(...)"); decode the bytes instead, consistent
      // with doIndex's `.map(_.utf8String)`.
      persist(word.utf8String) { inMemIndex += _ }
    case IndexingCompleted =>
      log.info("Finished {} for key [{}] (entries: {}), snapshotting...", action, key, inMemIndex.size)
      saveSnapshot(inMemIndex)
      unstashAll()
      context become ready
    case _ => stash()
  }

  // Serving phase: substring search over the in-memory index.
  def ready: Receive = {
    case Search(keyword, maxResults) =>
      val results = inMemIndex
        .filter(_ contains keyword).take(maxResults).toList
      log.info("Search for: [{}], resulted in [{}] results on [{}]", keyword, results.size, key)
      sender() ! SearchResults(results)
  }

  // Streams cached keywords containing `part` (case-insensitive) into self;
  // completion is signalled with IndexingCompleted.
  private def doIndex(part: String): Unit =
    wikipediaCachedKeywordsSource
      .map(_.utf8String)
      .filter(_.toLowerCase(Locale.ROOT) contains part)
      .runWith(Sink.actorRef(self, onCompleteMessage = IndexingCompleted))
}
| LIttleAncientForestKami/akka-codepot-workshop | src/main/scala/akka/codepot/engine/search/tiered/top/ShardedPersistentFromFileTopActor.scala | Scala | apache-2.0 | 2,209 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.factories
import org.apache.flink.table.api.{BatchTableEnvironment, StreamTableEnvironment, TableEnvironment, TableException}
import org.apache.flink.table.descriptors.Descriptor
import org.apache.flink.table.sinks.TableSink
import org.apache.flink.table.sources.TableSource
/**
 * Utility for dealing with [[TableFactory]] using the [[TableFactoryService]].
 */
object TableFactoryUtil {

  /**
   * Returns a table source for a table environment, choosing the batch or
   * stream factory based on the environment's concrete type.
   *
   * @throws TableException for an unsupported environment type
   */
  def findAndCreateTableSource[T](
      tableEnvironment: TableEnvironment,
      descriptor: Descriptor)
    : TableSource[T] = {
    val properties = descriptor.toProperties
    tableEnvironment match {
      case _: BatchTableEnvironment =>
        TableFactoryService
          .find(classOf[BatchTableSourceFactory[T]], properties)
          .createBatchTableSource(properties)
      case _: StreamTableEnvironment =>
        TableFactoryService
          .find(classOf[StreamTableSourceFactory[T]], properties)
          .createStreamTableSource(properties)
      case other =>
        throw new TableException(s"Unsupported table environment: ${other.getClass.getName}")
    }
  }

  /**
   * Returns a table sink for a table environment, choosing the batch or
   * stream factory based on the environment's concrete type.
   *
   * @throws TableException for an unsupported environment type
   */
  def findAndCreateTableSink[T](
      tableEnvironment: TableEnvironment,
      descriptor: Descriptor)
    : TableSink[T] = {
    val properties = descriptor.toProperties
    tableEnvironment match {
      case _: BatchTableEnvironment =>
        TableFactoryService
          .find(classOf[BatchTableSinkFactory[T]], properties)
          .createBatchTableSink(properties)
      case _: StreamTableEnvironment =>
        TableFactoryService
          .find(classOf[StreamTableSinkFactory[T]], properties)
          .createStreamTableSink(properties)
      case other =>
        throw new TableException(s"Unsupported table environment: ${other.getClass.getName}")
    }
  }
}
| ueshin/apache-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/factories/TableFactoryUtil.scala | Scala | apache-2.0 | 2,699 |
package org.workcraft.graphics
import java.awt.geom.AffineTransform
import java.awt.geom.Point2D
import java.awt.geom.Rectangle2D
import java.awt.geom.Path2D
import java.awt.BasicStroke
import java.awt.Color
import org.workcraft.graphics.Java2DDecoration._
import org.workcraft.graphics.Graphics.HorizontalAlignment
import org.workcraft.graphics.Graphics.VerticalAlignment
import org.workcraft.graphics.Graphics._
import java.awt.Stroke
import java.awt.Font
import org.workcraft.gui.CommonVisualSettings
import scalaz._
import Scalaz._
package object stg {
  // "Rich" content is a function from the current visual settings to concrete
  // renderable content, so rendering can react to settings changes.
  type RichGraphicalContent = CommonVisualSettings => NotSoRichGraphicalContent
  object RichGraphicalContent {
    // Adds helpers to RichGraphicalContent; the anonymous structural result is
    // accessed reflectively. `map` here is presumably scalaz's functor syntax
    // on Function1 (mapping over the result) — confirm against the imports.
    implicit def decorateRGC(rgc : RichGraphicalContent) = new {
      def zeroCentered : RichGraphicalContent = rgc.map(_.zeroCentered)
    }
  }
  // Renderable content (bounds + colourisable graphics) paired with its
  // touchable (hit-test) area and a designated centre point.
  case class NotSoRichGraphicalContent(val bcgc: BoundedColorisableGraphicalContent, val touchable: TouchableC) {
    // Aligns the content's centre onto the origin and pins the touch centre to (0,0).
    def zeroCentered = align(new Rectangle2D.Double(0,0,0,0), HorizontalAlignment.Center,VerticalAlignment.Center).
    overrideCenter(new Point2D.Double(0,0))
    def translate(p : Point2D.Double) = transform(AffineTransform.getTranslateInstance(p.getX(), p.getY()))
    // Aligns this content's bounding box relative to the given rectangle.
    def align (to: Rectangle2D.Double, horizontalAlignment: HorizontalAlignment, verticalAlignment: VerticalAlignment): NotSoRichGraphicalContent =
      transform(alignTransform(touchable.touchable.boundingBox.rect, to, horizontalAlignment, verticalAlignment))
    def align (to: NotSoRichGraphicalContent, horizontalAlignment: HorizontalAlignment, verticalAlignment: VerticalAlignment): NotSoRichGraphicalContent =
      align(to.touchable.touchable.boundingBox.rect, horizontalAlignment, verticalAlignment)
    // Applies the same affine transform to both the graphics and the touchable.
    def transform(x: AffineTransform): NotSoRichGraphicalContent =
      new NotSoRichGraphicalContent(bcgc.transform(x), touchable.transform(x))
    // Replaces only the touchable's centre point; graphics are unchanged.
    def overrideCenter(center : Point2D.Double) : NotSoRichGraphicalContent = {
      copy(touchable = touchable.copy(center = center))
    }
    // Layering: `over` draws this content above `bot`; `under` draws it below `top`.
    def over(bot : BoundedColorisableGraphicalContent) : NotSoRichGraphicalContent = copy (bcgc = bot.compose(bcgc))
    def under(top : BoundedColorisableGraphicalContent) : NotSoRichGraphicalContent = copy (bcgc = bcgc.compose(top))
    def under(top : Option[BoundedColorisableGraphicalContent]) : NotSoRichGraphicalContent = copy (bcgc = top match {
      case None => bcgc
      case Some(top) => bcgc compose top
    })
    //def over (x: NotSoRichGraphicalContent, touchableOverride: Touchable) = debugOver (x, touchableOverride) */
  }
// Factory methods producing NotSoRichGraphicalContent for basic primitives.
// Shapes record the origin as their center; labels use their visual-bounds center.
object NotSoRichGraphicalContent {
  // Axis-aligned rectangle with optional stroke and fill.
  def rectangle(width: Double, height: Double, stroke: Option[(Stroke, Color)], fill: Option[Color]): NotSoRichGraphicalContent = {
    val drawn = Graphics.rectangle(width, height, stroke, fill)
    val origin = new Point2D.Double(0, 0)
    NotSoRichGraphicalContent(drawn.boundedColorisableGraphicalContent, TouchableC(drawn.touchable, origin))
  }
  // Text label in the given font and color; centered on its visual bounds.
  def label(text : String, font : Font, color : Color) : NotSoRichGraphicalContent = {
    val drawn = Graphics.label(text, font, color)
    NotSoRichGraphicalContent(drawn.boundedColorisableGraphicalContent, TouchableC(drawn.touchable, drawn.visualBounds.center))
  }
  // Circle of the given diameter with optional stroke and fill.
  def circle(diameter: Double, stroke: Option[(Stroke, Color)], fill: Option[Color]): NotSoRichGraphicalContent = {
    val drawn = Graphics.circle(diameter, stroke, fill)
    val origin = new Point2D.Double(0, 0)
    NotSoRichGraphicalContent(drawn.boundedColorisableGraphicalContent, TouchableC(drawn.touchable, origin))
  }
}
/*
def translate(tx: Double, ty: Double): NotSoRichGraphicalContent = transform(AffineTransform.getTranslateInstance(tx, ty))
def translate(position: Point2D): NotSoRichGraphicalContent = transform(AffineTransform.getTranslateInstance(position.getX, position.getY))
def compose(b: NotSoRichGraphicalContent): NotSoRichGraphicalContent =
new NotSoRichGraphicalContent(Graphics.compose(colorisableGraphicalContent, b.colorisableGraphicalContent),
visualBounds.createUnionD(b.visualBounds),
TouchableUtil.compose(touchable, b.touchable))
def compose(b: NotSoRichGraphicalContent, touchableOverride: Touchable): NotSoRichGraphicalContent =
new NotSoRichGraphicalContent(Graphics.compose(colorisableGraphicalContent, b.colorisableGraphicalContent),
visualBounds.createUnionD(b.visualBounds),
touchableOverride)
def alignSideways (relativeTo: NotSoRichGraphicalContent, position: LabelPositioning): NotSoRichGraphicalContent =
transform(LabelPositioning.positionRelative(touchable.getBoundingBox, relativeTo.touchable.getBoundingBox, position))
private def releaseOver(x: NotSoRichGraphicalContent) = compose(x, this)
private def releaseOver(x: NotSoRichGraphicalContent, touchableOverride: Touchable) = x.compose(this, touchableOverride)
private def debugOver (x: NotSoRichGraphicalContent) = {
val redstroke = new BasicStroke(0.01f, BasicStroke.CAP_BUTT, BasicStroke.JOIN_MITER, 1.0f, Array(0.18f, 0.18f), 0.0f)
val stroke = new BasicStroke (0.01f)
((shape (x.touchable.getBoundingBox, Some((redstroke,Color.RED)), None)) releaseOver
(shape (x.visualBounds, Some((stroke,Color.BLUE)), None)) releaseOver
(shape (touchable.getBoundingBox, Some((redstroke,Color.RED)), None)) releaseOver
(shape (visualBounds, Some((stroke,Color.BLUE)), None)) releaseOver
x).compose(this, TouchableUtil.compose(touchable, x.touchable))
}
private def debugOver (x: NotSoRichGraphicalContent, touchableOverride: Touchable) = {
val redstroke = new BasicStroke(0.01f, BasicStroke.CAP_BUTT, BasicStroke.JOIN_MITER, 1.0f, Array(0.18f, 0.18f), 0.0f)
val stroke = new BasicStroke (0.01f)
((shape (x.touchable.getBoundingBox, Some((redstroke,Color.RED)), None)) releaseOver
(shape (x.visualBounds, Some((stroke,Color.BLUE)), None)) releaseOver
(shape (touchable.getBoundingBox, Some((redstroke,Color.RED)), None)) releaseOver
(shape (visualBounds, Some((stroke,Color.BLUE)), None)) releaseOver
x).compose(this, touchableOverride)
}
*/
} | tuura/workcraft-2.2 | STGPlugin21/src/main/scala/org/workcraft/graphics/RichGraphicalContent.scala | Scala | gpl-3.0 | 5,941 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.clustering
import scala.util.Random
import org.apache.spark.SparkFunSuite
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.mllib.util.TestingUtils._
/**
 * Exercises [[DistanceMeasure]] on a fixed random data set: the statistics-accelerated
 * closest-center lookup must agree with the plain one, and statistics computed
 * distributedly must match the locally computed ones.
 */
class DistanceMeasureSuite extends SparkFunSuite with MLlibTestSparkContext {

  private val seed = 42
  private val k = 10
  private val dim = 8

  private var centers: Array[VectorWithNorm] = _
  private var data: Array[VectorWithNorm] = _

  override def beforeAll(): Unit = {
    super.beforeAll()
    // A fixed seed keeps every run of the suite deterministic.
    val random = new Random(seed)
    def gaussianVector(): VectorWithNorm =
      new VectorWithNorm(Vectors.dense(Array.fill(dim)(random.nextGaussian)))
    centers = Array.fill(k)(gaussianVector())
    data = Array.fill(1000)(gaussianVector())
  }

  test("predict with statistics") {
    for (measureName <- Seq(DistanceMeasure.COSINE, DistanceMeasure.EUCLIDEAN)) {
      val measure = DistanceMeasure.decodeFromString(measureName)
      val statistics = measure.computeStatistics(centers)
      for (point <- data) {
        // The precomputed-statistics fast path must pick the same center at
        // (numerically) the same cost as the plain search.
        val (plainIndex, plainCost) = measure.findClosest(centers, point)
        val (fastIndex, fastCost) = measure.findClosest(centers, statistics, point)
        assert(plainIndex == fastIndex)
        assert(plainCost ~== fastCost relTol 1E-10)
      }
    }
  }

  test("compute statistics distributedly") {
    for (measureName <- Seq(DistanceMeasure.COSINE, DistanceMeasure.EUCLIDEAN)) {
      val measure = DistanceMeasure.decodeFromString(measureName)
      val local = measure.computeStatistics(centers)
      val sc = spark.sparkContext
      val bcCenters = sc.broadcast(centers)
      val distributed = measure.computeStatisticsDistributedly(sc, bcCenters)
      bcCenters.destroy()
      assert(Vectors.dense(local) ~== Vectors.dense(distributed) relTol 1E-10)
    }
  }
}
| shaneknapp/spark | mllib/src/test/scala/org/apache/spark/mllib/clustering/DistanceMeasureSuite.scala | Scala | apache-2.0 | 2,767 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.util.Locale
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.{FunctionRegistry, TypeCheckResult, TypeCoercion}
import org.apache.spark.sql.catalyst.expressions.aggregate.DeclarativeAggregate
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.expressions.codegen.Block._
import org.apache.spark.sql.catalyst.trees.{BinaryLike, LeafLike, QuaternaryLike, TernaryLike, TreeNode, UnaryLike}
import org.apache.spark.sql.catalyst.trees.TreePattern.{RUNTIME_REPLACEABLE, TreePattern}
import org.apache.spark.sql.catalyst.util.truncatedString
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
////////////////////////////////////////////////////////////////////////////////////////////////////
// This file defines the basic expression abstract classes in Catalyst.
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* An expression in Catalyst.
*
* If an expression wants to be exposed in the function registry (so users can call it with
* "name(arguments...)", the concrete implementation must be a case class whose constructor
* arguments are all Expressions types. See [[Substring]] for an example.
*
* There are a few important traits or abstract classes:
*
* - [[Nondeterministic]]: an expression that is not deterministic.
* - [[Stateful]]: an expression that contains mutable state. For example, MonotonicallyIncreasingID
* and Rand. A stateful expression is always non-deterministic.
* - [[Unevaluable]]: an expression that is not supposed to be evaluated.
* - [[CodegenFallback]]: an expression that does not have code gen implemented and falls back to
* interpreted mode.
* - [[NullIntolerant]]: an expression that is null intolerant (i.e. any null input will result in
* null output).
* - [[NonSQLExpression]]: a common base trait for the expressions that do not have SQL
* expressions like representation. For example, `ScalaUDF`, `ScalaUDAF`,
* and object `MapObjects` and `Invoke`.
* - [[UserDefinedExpression]]: a common base trait for user-defined functions, including
* UDF/UDAF/UDTF.
* - [[HigherOrderFunction]]: a common base trait for higher order functions that take one or more
* (lambda) functions and applies these to some objects. The function
* produces a number of variables which can be consumed by some lambda
* functions.
* - [[NamedExpression]]: An [[Expression]] that is named.
* - [[TimeZoneAwareExpression]]: A common base trait for time zone aware expressions.
* - [[SubqueryExpression]]: A base interface for expressions that contain a
* [[org.apache.spark.sql.catalyst.plans.logical.LogicalPlan]].
*
* - [[LeafExpression]]: an expression that has no child.
* - [[UnaryExpression]]: an expression that has one child.
* - [[BinaryExpression]]: an expression that has two children.
* - [[TernaryExpression]]: an expression that has three children.
* - [[QuaternaryExpression]]: an expression that has four children.
* - [[BinaryOperator]]: a special case of [[BinaryExpression]] that requires two children to have
* the same output data type.
*
* A few important traits used for type coercion rules:
* - [[ExpectsInputTypes]]: an expression that has the expected input types. This trait is typically
* used by operator expressions (e.g. [[Add]], [[Subtract]]) to define
* expected input types without any implicit casting.
* - [[ImplicitCastInputTypes]]: an expression that has the expected input types, which can be
* implicitly castable using [[TypeCoercion.ImplicitTypeCasts]].
* - [[ComplexTypeMergingExpression]]: to resolve output types of the complex expressions
* (e.g., [[CaseWhen]]).
*/
abstract class Expression extends TreeNode[Expression] {

  /**
   * Returns true when an expression is a candidate for static evaluation before the query is
   * executed. A typical use case: [[org.apache.spark.sql.catalyst.optimizer.ConstantFolding]]
   *
   * The following conditions are used to determine suitability for constant folding:
   *  - A [[Coalesce]] is foldable if all of its children are foldable
   *  - A [[BinaryExpression]] is foldable if its both left and right child are foldable
   *  - A [[Not]], [[IsNull]], or [[IsNotNull]] is foldable if its child is foldable
   *  - A [[Literal]] is foldable
   *  - A [[Cast]] or [[UnaryMinus]] is foldable if its child is foldable
   */
  def foldable: Boolean = false

  /**
   * Returns true when the current expression always return the same result for fixed inputs from
   * children. The non-deterministic expressions should not change in number and order. They should
   * not be evaluated during the query planning.
   *
   * Note that this means that an expression should be considered as non-deterministic if:
   * - it relies on some mutable internal state, or
   * - it relies on some implicit input that is not part of the children expression list.
   * - it has non-deterministic child or children.
   * - it assumes the input satisfies some certain condition via the child operator.
   *
   * An example would be `SparkPartitionID` that relies on the partition id returned by TaskContext.
   * By default leaf expressions are deterministic as Nil.forall(_.deterministic) returns true.
   */
  lazy val deterministic: Boolean = children.forall(_.deterministic)

  /** Whether this expression can evaluate to null; drives null-check emission in codegen. */
  def nullable: Boolean

  /**
   * Workaround scala compiler so that we can call super on lazy vals
   */
  @transient
  private lazy val _references: AttributeSet =
    AttributeSet.fromAttributeSets(children.map(_.references))

  /** Union of all attributes referenced by this expression's subtree. */
  def references: AttributeSet = _references

  /** Returns the result of evaluating this expression on a given input Row */
  def eval(input: InternalRow = null): Any

  /**
   * Returns an [[ExprCode]], that contains the Java source code to generate the result of
   * evaluating the expression on an input row.
   *
   * @param ctx a [[CodegenContext]]
   * @return [[ExprCode]]
   */
  def genCode(ctx: CodegenContext): ExprCode = {
    ctx.subExprEliminationExprs.get(this).map { subExprState =>
      // This expression is repeated which means that the code to evaluate it has already been added
      // as a function before. In that case, we just re-use it.
      ExprCode(
        ctx.registerComment(this.toString),
        subExprState.eval.isNull,
        subExprState.eval.value)
    }.getOrElse {
      val isNull = ctx.freshName("isNull")
      val value = ctx.freshName("value")
      val eval = doGenCode(ctx, ExprCode(
        JavaCode.isNullVariable(isNull),
        JavaCode.variable(value, dataType)))
      // Oversized generated code gets split into a private method; mutates `eval` in place.
      reduceCodeSize(ctx, eval)
      if (eval.code.toString.nonEmpty) {
        // Add `this` in the comment.
        eval.copy(code = ctx.registerComment(this.toString) + eval.code)
      } else {
        eval
      }
    }
  }

  // Splits the generated evaluation code out into a separate generated method when it
  // exceeds the configured method-split threshold. Only applies to row-based codegen:
  // requires ctx.INPUT_ROW to be set and no bound current variables.
  private def reduceCodeSize(ctx: CodegenContext, eval: ExprCode): Unit = {
    // TODO: support whole stage codegen too
    val splitThreshold = SQLConf.get.methodSplitThreshold
    if (eval.code.length > splitThreshold && ctx.INPUT_ROW != null && ctx.currentVars == null) {
      val setIsNull = if (!eval.isNull.isInstanceOf[LiteralValue]) {
        // The null flag must survive outside the extracted method, so promote it to a
        // global mutable field and assign it inside the method.
        val globalIsNull = ctx.addMutableState(CodeGenerator.JAVA_BOOLEAN, "globalIsNull")
        val localIsNull = eval.isNull
        eval.isNull = JavaCode.isNullGlobal(globalIsNull)
        s"$globalIsNull = $localIsNull;"
      } else {
        ""
      }

      val javaType = CodeGenerator.javaType(dataType)
      val newValue = ctx.freshName("value")

      val funcName = ctx.freshName(nodeName)
      val funcFullName = ctx.addNewFunction(funcName,
        s"""
           |private $javaType $funcName(InternalRow ${ctx.INPUT_ROW}) {
           |  ${eval.code}
           |  $setIsNull
           |  return ${eval.value};
           |}
           """.stripMargin)

      // Caller's code shrinks to a single call into the extracted method.
      eval.value = JavaCode.variable(newValue, dataType)
      eval.code = code"$javaType $newValue = $funcFullName(${ctx.INPUT_ROW});"
    }
  }

  /**
   * Returns Java source code that can be compiled to evaluate this expression.
   * The default behavior is to call the eval method of the expression. Concrete expression
   * implementations should override this to do actual code generation.
   *
   * @param ctx a [[CodegenContext]]
   * @param ev an [[ExprCode]] with unique terms.
   * @return an [[ExprCode]] containing the Java source code to generate the given expression
   */
  protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode

  /**
   * Returns `true` if this expression and all its children have been resolved to a specific schema
   * and input data types checking passed, and `false` if it still contains any unresolved
   * placeholders or has data types mismatch.
   * Implementations of expressions should override this if the resolution of this type of
   * expression involves more than just the resolution of its children and type checking.
   */
  lazy val resolved: Boolean = childrenResolved && checkInputDataTypes().isSuccess

  /**
   * Returns the [[DataType]] of the result of evaluating this expression. It is
   * invalid to query the dataType of an unresolved expression (i.e., when `resolved` == false).
   */
  def dataType: DataType

  /**
   * Returns true if all the children of this expression have been resolved to a specific schema
   * and false if any still contains any unresolved placeholders.
   */
  def childrenResolved: Boolean = children.forall(_.resolved)

  /**
   * Returns an expression where a best effort attempt has been made to transform `this` in a way
   * that preserves the result but removes cosmetic variations (case sensitivity, ordering for
   * commutative operations, etc.) See [[Canonicalize]] for more details.
   *
   * `deterministic` expressions where `this.canonicalized == other.canonicalized` will always
   * evaluate to the same result.
   */
  lazy val canonicalized: Expression = {
    // Canonicalize bottom-up so the parent's canonicalization sees canonical children.
    val canonicalizedChildren = children.map(_.canonicalized)
    Canonicalize.execute(withNewChildren(canonicalizedChildren))
  }

  /**
   * Returns true when two expressions will always compute the same result, even if they differ
   * cosmetically (i.e. capitalization of names in attributes may be different).
   *
   * See [[Canonicalize]] for more details.
   */
  final def semanticEquals(other: Expression): Boolean =
    deterministic && other.deterministic && canonicalized == other.canonicalized

  /**
   * Returns a `hashCode` for the calculation performed by this expression. Unlike the standard
   * `hashCode`, an attempt has been made to eliminate cosmetic differences.
   *
   * See [[Canonicalize]] for more details.
   */
  def semanticHash(): Int = canonicalized.hashCode()

  /**
   * Checks the input data types, returns `TypeCheckResult.success` if it's valid,
   * or returns a `TypeCheckResult` with an error message if invalid.
   * Note: it's not valid to call this method until `childrenResolved == true`.
   */
  def checkInputDataTypes(): TypeCheckResult = TypeCheckResult.TypeCheckSuccess

  /**
   * Returns a user-facing string representation of this expression's name.
   * This should usually match the name of the function in SQL.
   */
  def prettyName: String =
    getTagValue(FunctionRegistry.FUNC_ALIAS).getOrElse(nodeName.toLowerCase(Locale.ROOT))

  // Flattens `stringArgs` one level so collections of children print as individual items.
  protected def flatArguments: Iterator[Any] = stringArgs.flatMap {
    case t: Iterable[_] => t
    case single => single :: Nil
  }

  // Marks this as final, Expression.verboseString should never be called, and thus shouldn't be
  // overridden by concrete classes.
  final override def verboseString(maxFields: Int): String = simpleString(maxFields)

  override def simpleString(maxFields: Int): String = toString

  override def toString: String = prettyName + truncatedString(
    flatArguments.toSeq, "(", ", ", ")", SQLConf.get.maxToStringFields)

  /**
   * Returns SQL representation of this expression. For expressions extending [[NonSQLExpression]],
   * this method may return an arbitrary user facing string.
   */
  def sql: String = {
    val childrenSQL = children.map(_.sql).mkString(", ")
    s"$prettyName($childrenSQL)"
  }

  override def simpleStringWithNodeId(): String = {
    // Expressions carry no plan-node id; only query-plan nodes support this rendering.
    throw QueryExecutionErrors.simpleStringWithNodeIdUnsupportedError(nodeName)
  }
}
/**
 * An expression that must never be evaluated. Such expressions only exist during analysis or
 * optimization (e.g. Star) and are expected to be eliminated before query execution.
 */
trait Unevaluable extends Expression {

  /** Never foldable: constant folding would require calling `eval`, which is unsupported. */
  final override def foldable: Boolean = false

  /** Always fails: this expression should have been removed before evaluation. */
  final override def eval(input: InternalRow = null): Any = {
    throw QueryExecutionErrors.cannotEvaluateExpressionError(this)
  }

  /** Always fails: no Java code can be generated for this expression. */
  final override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    throw QueryExecutionErrors.cannotGenerateCodeForExpressionError(this)
  }
}
/**
 * An expression that the optimizer replaces with a different expression for evaluation,
 * mainly to provide compatibility with other databases. For example, "nvl" is supported
 * by replacing it with "coalesce".
 *
 * Implementations keep the original parameters plus the pre-built replacement as the single
 * `child` in the case-class constructor, and define a normal constructor accepting only the
 * original parameters. To keep EXPLAIN output and generated SQL faithful to what the user
 * wrote, implementations should also override [[flatArguments]] and [[sql]]. See [[Nvl]]
 * for an example.
 */
trait RuntimeReplaceable extends UnaryExpression with Unevaluable {
  // Nullability and type come from the replacement, since that is what actually runs.
  override def nullable: Boolean = child.nullable
  override def dataType: DataType = child.dataType

  // Because this node is swapped for its `child` during optimization, semantic equality
  // is decided entirely by the canonical form of the replacement expression.
  override lazy val canonicalized: Expression = child.canonicalized

  /**
   * Only used to generate SQL representation of this expression.
   *
   * Implementations should override this with the original (user-written) parameters.
   */
  def exprsReplaced: Seq[Expression]

  override def sql: String = {
    val renderedArgs = exprsReplaced.map(_.sql)
    mkString(renderedArgs)
  }

  final override val nodePatterns: Seq[TreePattern] = Seq(RUNTIME_REPLACEABLE)

  /** Renders `prettyName(arg1, arg2, ...)` from already-rendered argument strings. */
  def mkString(childrenString: Seq[String]): String =
    childrenString.mkString(s"$prettyName(", ", ", ")")
}
/**
 * An aggregate expression that is rewritten (currently by the optimizer) into a different
 * aggregate for evaluation, providing compatibility with other databases. For example,
 * every and any/some aggregates are rewritten into Min and Max respectively.
 */
trait UnevaluableAggregate extends DeclarativeAggregate {

  override def nullable: Boolean = true

  // Shared failure path: every DeclarativeAggregate hook below is unsupported here,
  // since this aggregate must be rewritten away before execution.
  private def unsupported(methodName: String): Nothing =
    throw QueryExecutionErrors.evaluateUnevaluableAggregateUnsupportedError(methodName, this)

  override lazy val aggBufferAttributes = unsupported("aggBufferAttributes")

  override lazy val initialValues: Seq[Expression] = unsupported("initialValues")

  override lazy val updateExpressions: Seq[Expression] = unsupported("updateExpressions")

  override lazy val mergeExpressions: Seq[Expression] = unsupported("mergeExpressions")

  override lazy val evaluateExpression: Expression = unsupported("evaluateExpression")
}
/**
 * Base trait for expressions with no faithful SQL representation, e.g. `ScalaUDF`,
 * `ScalaUDAF`, and object expressions like `MapObjects` and `Invoke`.
 */
trait NonSQLExpression extends Expression {
  final override def sql: String = {
    // Substitute pretty placeholders for attributes and aliases so the rendered
    // tree reads cleanly, then fall back to toString — no true SQL form exists.
    val prettified = transform {
      case a: Attribute => new PrettyAttribute(a)
      case a: Alias => PrettyAttribute(a.sql, a.dataType)
    }
    prettified.toString
  }
}
/**
 * An expression that is nondeterministic.
 */
trait Nondeterministic extends Expression {
  // Nondeterministic expressions are by definition never deterministic nor foldable.
  final override lazy val deterministic: Boolean = false
  final override def foldable: Boolean = false

  // Guards eval(): set to true only after initialize() has run for this partition.
  @transient
  private[this] var initialized = false

  /**
   * Initializes internal states given the current partition index and mark this as initialized.
   * Subclasses should override [[initializeInternal()]].
   */
  final def initialize(partitionIndex: Int): Unit = {
    initializeInternal(partitionIndex)
    initialized = true
  }

  // Hook for subclasses to set up partition-dependent state (e.g. seeding an RNG).
  protected def initializeInternal(partitionIndex: Int): Unit

  /**
   * @inheritdoc
   * Throws an exception if [[initialize()]] is not called yet.
   * Subclasses should override [[evalInternal()]].
   */
  final override def eval(input: InternalRow = null): Any = {
    require(initialized,
      s"Nondeterministic expression ${this.getClass.getName} should be initialized before eval.")
    evalInternal(input)
  }

  // Actual evaluation logic; only invoked after successful initialization.
  protected def evalInternal(input: InternalRow): Any
}
/**
 * An expression holding mutable internal state, e.g. MonotonicallyIncreasingID and Rand.
 * It is always nondeterministic because results depend on that state, not just the input.
 *
 * Since the state is generally not part of the parameter list, two similar stateful
 * expressions with different internal state would otherwise compare equal — which is
 * particularly dangerous during tree transformations. `fastEquals` therefore accepts only
 * the exact same reference.
 *
 * A stateful expression must not be evaluated multiple times for a single row (mainly a
 * concern for interpreted execution); create fresh copies via [[freshCopy()]] before
 * executing.
 */
trait Stateful extends Nondeterministic {
  /**
   * Return a fresh uninitialized copy of the stateful expression.
   */
  def freshCopy(): Stateful

  /** Reference equality only: distinct copies never compare equal. */
  override def fastEquals(other: TreeNode[_]): Boolean = this.eq(other)
}
/**
 * A leaf expression, i.e. one without any child expressions
 * (e.g. literals and attribute references).
 */
abstract class LeafExpression extends Expression with LeafLike[Expression]
/**
 * An expression with one input and one output. The output is by default evaluated to null
 * if the input is evaluated to null.
 */
abstract class UnaryExpression extends Expression with UnaryLike[Expression] {

  // Foldability and nullability follow the single child by default.
  override def foldable: Boolean = child.foldable
  override def nullable: Boolean = child.nullable

  /**
   * Default behavior of evaluation according to the default nullability of UnaryExpression.
   * If subclass of UnaryExpression override nullable, probably should also override this.
   */
  override def eval(input: InternalRow): Any = {
    val value = child.eval(input)
    if (value == null) {
      null
    } else {
      nullSafeEval(value)
    }
  }

  /**
   * Called by default [[eval]] implementation. If subclass of UnaryExpression keep the default
   * nullability, they can override this method to save null-check code. If we need full control
   * of evaluation process, we should override [[eval]].
   */
  protected def nullSafeEval(input: Any): Any =
    throw QueryExecutionErrors.notOverrideExpectedMethodsError("UnaryExpressions",
      "eval", "nullSafeEval")

  /**
   * Called by unary expressions to generate a code block that returns null if its parent returns
   * null, and if not null, use `f` to generate the expression.
   *
   * As an example, the following does a boolean inversion (i.e. NOT).
   * {{{
   *   defineCodeGen(ctx, ev, c => s"!($c)")
   * }}}
   *
   * @param f function that accepts a variable name and returns Java code to compute the output.
   */
  protected def defineCodeGen(
      ctx: CodegenContext,
      ev: ExprCode,
      f: String => String): ExprCode = {
    // Delegate to nullSafeCodeGen, wrapping `f` into a value assignment.
    nullSafeCodeGen(ctx, ev, eval => {
      s"${ev.value} = ${f(eval)};"
    })
  }

  /**
   * Called by unary expressions to generate a code block that returns null if its parent returns
   * null, and if not null, use `f` to generate the expression.
   *
   * @param f function that accepts the non-null evaluation result name of child and returns Java
   *          code to compute the output.
   */
  protected def nullSafeCodeGen(
      ctx: CodegenContext,
      ev: ExprCode,
      f: String => String): ExprCode = {
    val childGen = child.genCode(ctx)
    val resultCode = f(childGen.value)

    if (nullable) {
      // Propagate the child's null flag and skip `resultCode` for null input.
      val nullSafeEval = ctx.nullSafeExec(child.nullable, childGen.isNull)(resultCode)
      ev.copy(code = code"""
        ${childGen.code}
        boolean ${ev.isNull} = ${childGen.isNull};
        ${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
        $nullSafeEval
      """)
    } else {
      // Non-nullable result: no runtime null flag; isNull is statically false.
      ev.copy(code = code"""
        ${childGen.code}
        ${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
        $resultCode""", isNull = FalseLiteral)
    }
  }
}
/** Extractor that matches any [[UnaryExpression]] and exposes its only child. */
object UnaryExpression {
  def unapply(e: UnaryExpression): Option[Expression] = {
    Some(e.child)
  }
}
/**
 * An expression with two inputs and one output. The output is by default evaluated to null
 * if any input is evaluated to null.
 */
abstract class BinaryExpression extends Expression with BinaryLike[Expression] {

  // Foldable only when both children are; nullable when either child is.
  override def foldable: Boolean = left.foldable && right.foldable

  override def nullable: Boolean = left.nullable || right.nullable

  /**
   * Default behavior of evaluation according to the default nullability of BinaryExpression.
   * If subclass of BinaryExpression override nullable, probably should also override this.
   */
  override def eval(input: InternalRow): Any = {
    // Short-circuits: the right child is not evaluated when the left is null.
    val value1 = left.eval(input)
    if (value1 == null) {
      null
    } else {
      val value2 = right.eval(input)
      if (value2 == null) {
        null
      } else {
        nullSafeEval(value1, value2)
      }
    }
  }

  /**
   * Called by default [[eval]] implementation. If subclass of BinaryExpression keep the default
   * nullability, they can override this method to save null-check code. If we need full control
   * of evaluation process, we should override [[eval]].
   */
  protected def nullSafeEval(input1: Any, input2: Any): Any =
    throw QueryExecutionErrors.notOverrideExpectedMethodsError("BinaryExpressions",
      "eval", "nullSafeEval")

  /**
   * Short hand for generating binary evaluation code.
   * If either of the sub-expressions is null, the result of this computation
   * is assumed to be null.
   *
   * @param f accepts two variable names and returns Java code to compute the output.
   */
  protected def defineCodeGen(
      ctx: CodegenContext,
      ev: ExprCode,
      f: (String, String) => String): ExprCode = {
    // Delegate to nullSafeCodeGen, wrapping `f` into a value assignment.
    nullSafeCodeGen(ctx, ev, (eval1, eval2) => {
      s"${ev.value} = ${f(eval1, eval2)};"
    })
  }

  /**
   * Short hand for generating binary evaluation code.
   * If either of the sub-expressions is null, the result of this computation
   * is assumed to be null.
   *
   * @param f function that accepts the 2 non-null evaluation result names of children
   *          and returns Java code to compute the output.
   */
  protected def nullSafeCodeGen(
      ctx: CodegenContext,
      ev: ExprCode,
      f: (String, String) => String): ExprCode = {
    val leftGen = left.genCode(ctx)
    val rightGen = right.genCode(ctx)
    val resultCode = f(leftGen.value, rightGen.value)

    if (nullable) {
      // Nested null guards mirror eval(): the right child's code only runs when
      // the left value is non-null, and `resultCode` only when both are.
      val nullSafeEval =
        leftGen.code + ctx.nullSafeExec(left.nullable, leftGen.isNull) {
          rightGen.code + ctx.nullSafeExec(right.nullable, rightGen.isNull) {
            s"""
              ${ev.isNull} = false; // resultCode could change nullability.
              $resultCode
            """
          }
        }

      ev.copy(code = code"""
        boolean ${ev.isNull} = true;
        ${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
        $nullSafeEval
      """)
    } else {
      // Non-nullable result: no runtime null flag; isNull is statically false.
      ev.copy(code = code"""
        ${leftGen.code}
        ${rightGen.code}
        ${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
        $resultCode""", isNull = FalseLiteral)
    }
  }
}
/** Extractor that matches any [[BinaryExpression]] and exposes its two children. */
object BinaryExpression {
  def unapply(e: BinaryExpression): Option[(Expression, Expression)] = {
    Some((e.left, e.right))
  }
}
/**
 * A [[BinaryExpression]] that is an operator, with two properties:
 *
 * 1. The string representation is "x symbol y", rather than "funcName(x, y)".
 * 2. Both inputs are expected to be of the same type. If they differ, the analyzer finds
 *    the tightest common type and inserts the appropriate casts.
 */
abstract class BinaryOperator extends BinaryExpression with ExpectsInputTypes {

  /** Expected type of both operands, similar to the [[ImplicitCastInputTypes]] trait. */
  def inputType: AbstractDataType

  /** Operator symbol used for `toString`, e.g. "+". */
  def symbol: String

  /** Operator spelling used in SQL output; defaults to [[symbol]]. */
  def sqlOperator: String = symbol

  override def toString: String = s"($left $sqlOperator $right)"

  override def inputTypes: Seq[AbstractDataType] = Seq.fill(2)(inputType)

  override def checkInputDataTypes(): TypeCheckResult = {
    val leftType = left.dataType
    val rightType = right.dataType
    if (!leftType.sameType(rightType)) {
      // Operands must agree on a common type before acceptability is even considered.
      TypeCheckResult.TypeCheckFailure(
        s"differing types in '$sql' (${leftType.catalogString} and ${rightType.catalogString}).")
    } else if (!inputType.acceptsType(leftType)) {
      // Same type on both sides, but not one this operator supports.
      TypeCheckResult.TypeCheckFailure(
        s"'$sql' requires ${inputType.simpleString} type, not ${leftType.catalogString}")
    } else {
      TypeCheckResult.TypeCheckSuccess
    }
  }

  override def sql: String = s"(${left.sql} $sqlOperator ${right.sql})"
}
/** Enables `case BinaryOperator(l, r)` matching over any binary operator. */
object BinaryOperator {
  def unapply(e: BinaryOperator): Option[(Expression, Expression)] =
    Some(e.left -> e.right)
}
/**
 * An expression with three inputs and one output. The output is by default evaluated to null
 * if any input is evaluated to null.
 */
abstract class TernaryExpression extends Expression with TernaryLike[Expression] {

  override def foldable: Boolean = children.forall(_.foldable)

  override def nullable: Boolean = children.exists(_.nullable)

  /**
   * Default behavior of evaluation according to the default nullability of TernaryExpression.
   * If subclass of TernaryExpression override nullable, probably should also override this.
   * Children are evaluated left-to-right, short-circuiting to null on the first null result.
   */
  override def eval(input: InternalRow): Any = {
    val value1 = first.eval(input)
    if (value1 != null) {
      val value2 = second.eval(input)
      if (value2 != null) {
        val value3 = third.eval(input)
        if (value3 != null) {
          return nullSafeEval(value1, value2, value3)
        }
      }
    }
    null
  }

  /**
   * Called by default [[eval]] implementation. If subclass of TernaryExpression keep the default
   * nullability, they can override this method to save null-check code. If we need full control
   * of evaluation process, we should override [[eval]].
   */
  protected def nullSafeEval(input1: Any, input2: Any, input3: Any): Any =
    throw QueryExecutionErrors.notOverrideExpectedMethodsError("TernaryExpressions",
      "eval", "nullSafeEval")

  /**
   * Short hand for generating ternary evaluation code.
   * If either of the sub-expressions is null, the result of this computation
   * is assumed to be null.
   *
   * @param f accepts three variable names and returns Java code to compute the output.
   */
  protected def defineCodeGen(
      ctx: CodegenContext,
      ev: ExprCode,
      f: (String, String, String) => String): ExprCode = {
    nullSafeCodeGen(ctx, ev, (eval1, eval2, eval3) => {
      s"${ev.value} = ${f(eval1, eval2, eval3)};"
    })
  }

  /**
   * Short hand for generating ternary evaluation code.
   * If either of the sub-expressions is null, the result of this computation
   * is assumed to be null.
   *
   * @param f function that accepts the 3 non-null evaluation result names of children
   *          and returns Java code to compute the output.
   */
  protected def nullSafeCodeGen(
      ctx: CodegenContext,
      ev: ExprCode,
      f: (String, String, String) => String): ExprCode = {
    // Use the TernaryLike accessors (as eval above does) rather than positional
    // children(i) indexing, and name the generated-code holders after operand
    // position for consistency with QuaternaryExpression.
    val firstGen = first.genCode(ctx)
    val secondGen = second.genCode(ctx)
    val thirdGen = third.genCode(ctx)
    val resultCode = f(firstGen.value, secondGen.value, thirdGen.value)
    if (nullable) {
      // Each child's code is only reached when every preceding child evaluated
      // to a non-null value; nullSafeExec emits a guard only for nullable children.
      val nullSafeEval =
        firstGen.code + ctx.nullSafeExec(first.nullable, firstGen.isNull) {
          secondGen.code + ctx.nullSafeExec(second.nullable, secondGen.isNull) {
            thirdGen.code + ctx.nullSafeExec(third.nullable, thirdGen.isNull) {
              s"""
                ${ev.isNull} = false; // resultCode could change nullability.
                $resultCode
              """
            }
          }
        }

      ev.copy(code = code"""
        boolean ${ev.isNull} = true;
        ${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
        $nullSafeEval""")
    } else {
      // Non-nullable: evaluate all three children unconditionally; the result's
      // isNull is statically false.
      ev.copy(code = code"""
        ${firstGen.code}
        ${secondGen.code}
        ${thirdGen.code}
        ${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
        $resultCode""", isNull = FalseLiteral)
    }
  }
}
/**
 * An expression with four inputs and one output. The output is by default evaluated to null
 * if any input is evaluated to null.
 */
abstract class QuaternaryExpression extends Expression with QuaternaryLike[Expression] {

  override def foldable: Boolean = children.forall(_.foldable)

  override def nullable: Boolean = children.exists(_.nullable)

  /**
   * Default behavior of evaluation according to the default nullability of QuaternaryExpression.
   * If subclass of QuaternaryExpression override nullable, probably should also override this.
   * Children are evaluated left-to-right, short-circuiting to null on the first null result.
   */
  override def eval(input: InternalRow): Any = {
    val value1 = first.eval(input)
    if (value1 != null) {
      val value2 = second.eval(input)
      if (value2 != null) {
        val value3 = third.eval(input)
        if (value3 != null) {
          val value4 = fourth.eval(input)
          if (value4 != null) {
            return nullSafeEval(value1, value2, value3, value4)
          }
        }
      }
    }
    null
  }

  /**
   * Called by default [[eval]] implementation. If subclass of QuaternaryExpression keep the
   * default nullability, they can override this method to save null-check code. If we need
   * full control of evaluation process, we should override [[eval]].
   */
  protected def nullSafeEval(input1: Any, input2: Any, input3: Any, input4: Any): Any =
    throw QueryExecutionErrors.notOverrideExpectedMethodsError("QuaternaryExpressions",
      "eval", "nullSafeEval")

  /**
   * Short hand for generating quaternary evaluation code.
   * If either of the sub-expressions is null, the result of this computation
   * is assumed to be null.
   *
   * @param f accepts four variable names and returns Java code to compute the output.
   */
  protected def defineCodeGen(
      ctx: CodegenContext,
      ev: ExprCode,
      f: (String, String, String, String) => String): ExprCode = {
    nullSafeCodeGen(ctx, ev, (eval1, eval2, eval3, eval4) => {
      s"${ev.value} = ${f(eval1, eval2, eval3, eval4)};"
    })
  }

  /**
   * Short hand for generating quaternary evaluation code.
   * If either of the sub-expressions is null, the result of this computation
   * is assumed to be null.
   *
   * @param f function that accepts the 4 non-null evaluation result names of children
   *          and returns Java code to compute the output.
   */
  protected def nullSafeCodeGen(
      ctx: CodegenContext,
      ev: ExprCode,
      f: (String, String, String, String) => String): ExprCode = {
    // Use the QuaternaryLike accessors (as eval above does) instead of
    // children(i) indexing. Also fixes the previous "thridGen" misspelling.
    val firstGen = first.genCode(ctx)
    val secondGen = second.genCode(ctx)
    val thirdGen = third.genCode(ctx)
    val fourthGen = fourth.genCode(ctx)
    val resultCode = f(firstGen.value, secondGen.value, thirdGen.value, fourthGen.value)
    if (nullable) {
      // Each child's code is only reached when every preceding child evaluated
      // to a non-null value; nullSafeExec emits a guard only for nullable children.
      val nullSafeEval =
        firstGen.code + ctx.nullSafeExec(first.nullable, firstGen.isNull) {
          secondGen.code + ctx.nullSafeExec(second.nullable, secondGen.isNull) {
            thirdGen.code + ctx.nullSafeExec(third.nullable, thirdGen.isNull) {
              fourthGen.code + ctx.nullSafeExec(fourth.nullable, fourthGen.isNull) {
                s"""
                  ${ev.isNull} = false; // resultCode could change nullability.
                  $resultCode
                """
              }
            }
          }
        }

      ev.copy(code = code"""
        boolean ${ev.isNull} = true;
        ${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
        $nullSafeEval""")
    } else {
      // Non-nullable: evaluate all four children unconditionally; the result's
      // isNull is statically false.
      ev.copy(code = code"""
        ${firstGen.code}
        ${secondGen.code}
        ${thirdGen.code}
        ${fourthGen.code}
        ${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
        $resultCode""", isNull = FalseLiteral)
    }
  }
}
/**
 * An expression with six inputs + 7th optional input and one output.
 * The output is by default evaluated to null if any input is evaluated to null.
 */
abstract class SeptenaryExpression extends Expression {

  override def foldable: Boolean = children.forall(_.foldable)

  override def nullable: Boolean = children.exists(_.nullable)

  /**
   * Default behavior of evaluation according to the default nullability of SeptenaryExpression.
   * If subclass of SeptenaryExpression override nullable, probably should also override this.
   * Children are evaluated left-to-right, short-circuiting to null on the first null result.
   */
  override def eval(input: InternalRow): Any = {
    val exprs = children
    val v1 = exprs(0).eval(input)
    if (v1 != null) {
      val v2 = exprs(1).eval(input)
      if (v2 != null) {
        val v3 = exprs(2).eval(input)
        if (v3 != null) {
          val v4 = exprs(3).eval(input)
          if (v4 != null) {
            val v5 = exprs(4).eval(input)
            if (v5 != null) {
              val v6 = exprs(5).eval(input)
              if (v6 != null) {
                // The seventh input is optional: when present it must also be
                // non-null; when absent, None is forwarded to nullSafeEval.
                if (exprs.length > 6) {
                  val v7 = exprs(6).eval(input)
                  if (v7 != null) {
                    return nullSafeEval(v1, v2, v3, v4, v5, v6, Some(v7))
                  }
                } else {
                  return nullSafeEval(v1, v2, v3, v4, v5, v6, None)
                }
              }
            }
          }
        }
      }
    }
    null
  }

  /**
   * Called by default [[eval]] implementation. If subclass of SeptenaryExpression keep the
   * default nullability, they can override this method to save null-check code. If we need
   * full control of evaluation process, we should override [[eval]].
   */
  protected def nullSafeEval(
      input1: Any,
      input2: Any,
      input3: Any,
      input4: Any,
      input5: Any,
      input6: Any,
      input7: Option[Any]): Any = {
    throw QueryExecutionErrors.notOverrideExpectedMethodsError("SeptenaryExpression",
      "eval", "nullSafeEval")
  }

  /**
   * Short hand for generating septenary evaluation code.
   * If either of the sub-expressions is null, the result of this computation
   * is assumed to be null.
   *
   * @param f accepts seven variable names (the seventh optional) and returns Java code to
   *          compute the output.
   */
  protected def defineCodeGen(
      ctx: CodegenContext,
      ev: ExprCode,
      f: (String, String, String, String, String, String, Option[String]) => String
    ): ExprCode = {
    nullSafeCodeGen(ctx, ev, (eval1, eval2, eval3, eval4, eval5, eval6, eval7) => {
      s"${ev.value} = ${f(eval1, eval2, eval3, eval4, eval5, eval6, eval7)};"
    })
  }

  /**
   * Short hand for generating septenary evaluation code.
   * If either of the sub-expressions is null, the result of this computation
   * is assumed to be null.
   *
   * @param f function that accepts the 7 non-null evaluation result names of children
   *          (the seventh optional) and returns Java code to compute the output.
   */
  protected def nullSafeCodeGen(
      ctx: CodegenContext,
      ev: ExprCode,
      f: (String, String, String, String, String, String, Option[String]) => String
    ): ExprCode = {
    val firstGen = children(0).genCode(ctx)
    val secondGen = children(1).genCode(ctx)
    val thirdGen = children(2).genCode(ctx)
    val fourthGen = children(3).genCode(ctx)
    val fifthGen = children(4).genCode(ctx)
    val sixthGen = children(5).genCode(ctx)
    // Seventh child is optional; generate its code only when it exists.
    val seventhGen = if (children.length > 6) Some(children(6).genCode(ctx)) else None
    val resultCode = f(
      firstGen.value,
      secondGen.value,
      thirdGen.value,
      fourthGen.value,
      fifthGen.value,
      sixthGen.value,
      seventhGen.map(_.value))
    if (nullable) {
      // One nested null guard per nullable child; the optional seventh child's
      // guard additionally wraps the result code only when that child exists.
      val nullSafeEval =
        firstGen.code + ctx.nullSafeExec(children(0).nullable, firstGen.isNull) {
          secondGen.code + ctx.nullSafeExec(children(1).nullable, secondGen.isNull) {
            thirdGen.code + ctx.nullSafeExec(children(2).nullable, thirdGen.isNull) {
              fourthGen.code + ctx.nullSafeExec(children(3).nullable, fourthGen.isNull) {
                fifthGen.code + ctx.nullSafeExec(children(4).nullable, fifthGen.isNull) {
                  sixthGen.code + ctx.nullSafeExec(children(5).nullable, sixthGen.isNull) {
                    val nullSafeResultCode =
                      s"""
                      ${ev.isNull} = false; // resultCode could change nullability.
                      $resultCode
                      """
                    seventhGen.map { gen =>
                      gen.code + ctx.nullSafeExec(children(6).nullable, gen.isNull) {
                        nullSafeResultCode
                      }
                    }.getOrElse(nullSafeResultCode)
                  }
                }
              }
            }
          }
        }

      ev.copy(code = code"""
        boolean ${ev.isNull} = true;
        ${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
        $nullSafeEval""")
    } else {
      // Non-nullable: evaluate every present child unconditionally; the
      // result's isNull is statically false.
      ev.copy(code = code"""
        ${firstGen.code}
        ${secondGen.code}
        ${thirdGen.code}
        ${fourthGen.code}
        ${fifthGen.code}
        ${sixthGen.code}
        ${seventhGen.map(_.code).getOrElse("")}
        ${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
        $resultCode""", isNull = FalseLiteral)
    }
  }
}
/**
 * A trait that resolves the null-related flags of the output type — `nullable`,
 * `containsNull` of [[ArrayType]] and `valueContainsNull` of [[MapType]] — for
 * expressions (e.g. [[CaseWhen]]) that combine data from multiple child
 * expressions of non-primitive types.
 */
trait ComplexTypeMergingExpression extends Expression {

  /**
   * Data types whose null-related flags are merged to produce this expression's
   * output type. Defaults to the children's data types; must never be empty.
   */
  @transient
  lazy val inputTypesForMerging: Seq[DataType] = children.map(_.dataType)

  def dataTypeCheck: Unit = {
    require(
      inputTypesForMerging.nonEmpty,
      "The collection of input data types must not be empty.")
    require(
      TypeCoercion.haveSameType(inputTypesForMerging),
      "All input types must be the same except nullable, containsNull, valueContainsNull flags." +
        s" The input types found are\n\t${inputTypesForMerging.mkString("\n\t")}")
  }

  // Memoized merge of all input types; the validity of the inputs is checked
  // once, on first access. `.get` is safe here because `dataTypeCheck` has
  // already established that all inputs share the same underlying type.
  private lazy val internalDataType: DataType = {
    dataTypeCheck
    inputTypesForMerging.reduceLeft { (merged, next) =>
      TypeCoercion.findCommonTypeDifferentOnlyInNullFlags(merged, next).get
    }
  }

  override def dataType: DataType = internalDataType
}
/**
 * Common base trait for user-defined functions, including UDF/UDAF/UDTF of different languages
 * and Hive function wrappers.
 */
trait UserDefinedExpression {
  // Name of the user-defined function; presumably the name it was registered
  // under / shown to users — TODO confirm against implementors.
  def name: String
}
| wangmiao1981/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala | Scala | apache-2.0 | 41,960 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.