| code (string, lengths 5–1M) | repo_name (string, lengths 5–109) | path (string, lengths 6–208) | language (1 class) | license (15 classes) | size (int64, 5–1M) |
|---|---|---|---|---|---|
/**
 * FILE: Registration.scala
 * PATH: /Codice/sgad/servertier/src/main/scala/sgad/servertier/businesslogic/operations
 * CREATED: 23 February 2014
 * AUTHOR: ProTech
 * EMAIL: protech.unipd@gmail.com
 *
 * This file is property of the ProTech group and is released under the Apache v2 license.
 *
 * CHANGE LOG:
 * 2014-02-23 - Class created - Biancucci Maurizio
 */
package sgad.servertier.businesslogic.operations
import sgad.servertier.dataaccess.data.userdata._
import sgad.servertier.dataaccess.data.shareddata._
import scala.util.matching.Regex
import sgad.servertier.dataaccess.databaseaccess.databasemanager.DataBaseManager
import scala.collection.mutable.ArrayBuffer
import sgad.servertier.presentation.pagemanager.PageFactory
/**
 * Class handling the registration operation.
 */
class Registration extends Operation {
/**
 * Creates the data for a new user with the default buildings and resources.
 * @param user Username of the new user.
 * @param email Email of the new user.
 * @param password Password of the new user, NOT hashed.
 * @return The new user's data.
 */
private def createNewUser(user: String, email: String, password: String): UserData = {
// Authentication data of the new user
val authentication = new AuthenticationData(user, email, AuthenticationData.computeHash(password))
// Build the new user's OwnedResource map, one entry per available resource
val mapOwnedResources = DataFactory.getResourcesMap.map {
case (key: String, resource: Resource) => (key, new OwnedResource(resource, 0))
}
// Build the new user's BuildingPossession map
val now: Long = System.currentTimeMillis / 1000L
val torreDelloStregone = new BuildingPossession(DataFactory.getBuilding("Torre dello stregoneL1"), new Position(5, 7), true, now, null)
val scuolaDiMagia = new BuildingPossession(DataFactory.getBuilding("Scuola di magiaL1"), new Position(4, 9), true, now, null)
val miniera = new BuildingPossession(DataFactory.getBuilding("MinieraL1"), new Position(6, 9), true, now, null)
val mapBuildingPossession = scala.collection.mutable.Map(torreDelloStregone.getKey -> torreDelloStregone, scuolaDiMagia.getKey -> scuolaDiMagia, miniera.getKey -> miniera)
// Build the new user's UnitPossession map, one entry per available unit
val mapUnitPossession = DataFactory.getUnitsMap.map {
case (key: String, unit: `Unit`) => (key, new UnitPossession(0, unit))
}
// Grant the user two Lavoratore (worker) units
mapUnitPossession("Lavoratore").setQuantity(2)
val userData = new UserData(authentication, mapOwnedResources, mapBuildingPossession, mapUnitPossession)
userData
}
/**
 * Validates the input data and returns the list of errors.
 * @param mapData Map containing the registration input parameters.
 * @return The list of errors.
 */
def validateInput(mapData: Map[String, String]): ArrayBuffer[String] = {
val errors = new ArrayBuffer[String]
// Check that the username is well formed: only word characters (letters, digits and _), between 4 and 14 characters inclusive
val user = mapData("user")
val userMatch = new Regex("^\\w{4,14}$").findFirstMatchIn(user)
userMatch match {
case Some(s) => // Do nothing
case None => errors += "RInvalidUser"
}
val email = mapData("email")
// Check that the email is well formed: any characters followed by an @ and a domain, at most 100 characters long
if (email.length > 100)
errors += "RInvalidEmail"
else
{
val emailMatch = new Regex("^[_A-Za-z0-9-\\+]+(\\.[_A-Za-z0-9-]+)*@[A-Za-z0-9-]+(\\.[A-Za-z0-9]+)*(\\.[A-Za-z]{2,})$").findFirstMatchIn(email)
emailMatch match {
case Some(s) => // Do nothing
case None => errors += "RInvalidEmail"
}
}
val password = mapData("password1")
// Check that the password contains at least one digit and at least one letter, and is between 8 and 16 characters long
val passwordMatch = new Regex("^(?=.*\\d)(?=.*[a-zA-Z]).{8,16}$").findFirstMatchIn(password)
passwordMatch match {
case Some(s) => // Do nothing
case None => errors += "RInvalidPassword"
}
if(mapData("password2") != password)
errors += "RNonMatchingPassword"
errors
}
/**
 * Performs the registration of a new user.
 * @param userdata Data of the user the operation will be performed on.
 * @param data Data accompanying the operation request.
 * @param loginAuthorization Authorization to handle login requests. Defaults to false.
 * @param registrationAuthorization Authorization to handle registration requests. Defaults to false.
 * @param userAuthorization Authorization to handle user requests. Defaults to false.
 * @param internalAuthorization Authorization to handle internal requests. Defaults to false.
 * @return The response string.
 */
def execute(userdata: UserData, data: String, loginAuthorization: Boolean = false, registrationAuthorization: Boolean = false,
userAuthorization: Boolean = false, internalAuthorization: Boolean = false): String = {
if (registrationAuthorization) {
val mapData = decodeData(data)
var answer = ""
try {
val errors = validateInput(mapData)
// If there were errors, return the home page with the errors reported
if(errors.length > 0)
return PageFactory.getHomePageWithErrors(mapData,errors)
// Attempt the insert and evaluate the result.
val result: Int = DataBaseManager.insertNewUser(createNewUser(mapData("user"), mapData("email"), mapData("password1")))
result match {
case 1 => answer = PageFactory.getHomePageRegistrationSuccessful
case 2 => errors += "RExistingUser"
case 3 => errors += "RExistingEmail"
case 4 => answer = PageFactory.getHomePageServiceIsDown
}
// If there were errors, return the home page with the errors reported
if(errors.length > 0)
return PageFactory.getHomePageWithErrors(mapData,errors)
answer
}
catch {
case _: NoSuchElementException => return "{data: false, parameters: false}"
}
}
else
"{data: false, unauthorized: true}"
}
}
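/*
 * A minimal usage sketch (illustrative only, not part of the original file): exercising
 * validateInput with the form fields it expects ("user", "email", "password1", "password2").
 */
object RegistrationValidationSketch extends App {
  val registration = new Registration
  val errors = registration.validateInput(Map(
    "user" -> "mario_88",
    "email" -> "mario@example.com",
    "password1" -> "abcd1234",
    "password2" -> "abcd1234"
  ))
  println(if (errors.isEmpty) "input ok" else errors.mkString(", "))
}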
| repo_name: protechunipd/SGAD | path: Codice/sgad/servertier/src/main/scala/sgad/servertier/businesslogic/operations/Registration.scala | language: Scala | license: apache-2.0 | size: 6,179 |
/*
* Copyright (C) 2016 University of Basel, Graphics and Vision Research Group
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package scalismo.ui
package object event {
// just a type alias
type Event = scala.swing.event.Event
}
| repo_name: unibas-gravis/scalismo-ui | path: src/main/scala/scalismo/ui/event/package.scala | language: Scala | license: gpl-3.0 | size: 844 |
/*
* Licensed to Intel Corporation under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Intel Corporation licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import java.nio.ByteBuffer
import org.apache.spark.SparkEnv
import org.apache.spark.util.io.ChunkedByteBuffer
object BlockManagerWrapper {
def putBytes( blockId: BlockId,
bytes: ByteBuffer,
level: StorageLevel): Unit = {
require(bytes != null, "Bytes is null")
val blockManager = SparkEnv.get.blockManager
blockManager.removeBlock(blockId)
blockManager.putBytes(blockId, new ChunkedByteBuffer(bytes), level)
}
def getLocal(blockId: BlockId): Option[BlockResult] = {
SparkEnv.get.blockManager.getLocalValues(blockId)
}
def putSingle(blockId: BlockId,
value: Any,
level: StorageLevel,
tellMaster: Boolean = true): Unit = {
SparkEnv.get.blockManager.putSingle(blockId, value, level, tellMaster)
}
def removeBlock(blockId: BlockId): Unit = {
SparkEnv.get.blockManager.removeBlock(blockId)
}
def byteBufferConvert(chunkedByteBuffer: ChunkedByteBuffer): ByteBuffer = {
ByteBuffer.wrap(chunkedByteBuffer.toArray)
}
def unlock(blockId : BlockId): Unit = {
val blockInfoManager = SparkEnv.get.blockManager.blockInfoManager
if(blockInfoManager.get(blockId).isDefined) {
blockInfoManager.unlock(blockId)
}
}
}
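/*
 * Illustrative sketch (not part of the original file): caching a ByteBuffer under a test
 * block id and reading it back through the wrapper. Assumes a live SparkEnv, which exists
 * once a SparkContext has been created.
 */
object BlockManagerWrapperSketch {
  def roundTrip(bytes: ByteBuffer): Option[BlockResult] = {
    val id = TestBlockId("wrapper-sketch")
    BlockManagerWrapper.putBytes(id, bytes, StorageLevel.MEMORY_ONLY)
    BlockManagerWrapper.getLocal(id)
  }
}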
| repo_name: dding3/BigDL | path: spark-version/2.0/src/main/scala/org/apache/spark/storage/BlockManagerWrapper.scala | language: Scala | license: apache-2.0 | size: 2,052 |
/*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.parse.code.common
import cats.data.NonEmptyList
import laika.ast.CodeSpan
import laika.bundle.SyntaxHighlighter
import laika.parse.Parser
import laika.parse.builders._
import laika.parse.code.{CodeCategory, CodeSpanParser}
import laika.parse.implicits._
import laika.parse.text.CharGroup
import munit.FunSuite
/**
* @author Jens Halm
*/
class CommonSyntaxParserSpec extends FunSuite {
val rule: CodeSpanParser = CodeSpanParser.onLineStart(CodeCategory.Markup.Fence)(literal("===").source)
private def createParser (allowLetterAfterNumber: Boolean = false): Parser[Seq[CodeSpan]] = new SyntaxHighlighter {
val language: NonEmptyList[String] = NonEmptyList.of("test-lang")
val spanParsers: Seq[CodeSpanParser] = Seq(
rule,
Comment.multiLine("/*", "*/"),
Comment.singleLine("//"),
Keywords("foo", "bar", "baz"),
CharLiteral.standard.embed(
StringLiteral.Escape.unicode,
StringLiteral.Escape.hex,
StringLiteral.Escape.octal,
StringLiteral.Escape.char
),
RegexLiteral.standard,
StringLiteral.multiLine("'''"),
StringLiteral.singleLine('\'').embed(
StringLiteral.Escape.unicode,
StringLiteral.Escape.hex,
StringLiteral.Escape.octal,
StringLiteral.Escape.char,
StringLiteral.Escape.literal("$$"),
StringLiteral.Substitution.between("${", "}"),
StringLiteral.Substitution(("$" ~ someOf(CharGroup.alphaNum.add('_'))).source)
),
Identifier.alphaNum.withIdStartChars('_','$').withCategoryChooser(Identifier.upperCaseTypeName).copy(allowDigitBeforeStart = allowLetterAfterNumber),
NumberLiteral.binary.withUnderscores.withSuffix(NumericSuffix.long),
NumberLiteral.octal.withUnderscores.withSuffix(NumericSuffix.long),
NumberLiteral.hexFloat.withUnderscores.withSuffix(NumericSuffix.float),
NumberLiteral.hex.withUnderscores.withSuffix(NumericSuffix.long),
NumberLiteral.decimalFloat.withUnderscores.withSuffix(NumericSuffix.float).copy(allowFollowingLetter = allowLetterAfterNumber),
NumberLiteral.decimalInt.withUnderscores.withSuffix(NumericSuffix.long).copy(allowFollowingLetter = allowLetterAfterNumber)
)
}.rootParser
val defaultParser: Parser[Seq[CodeSpan]] = createParser()
def run (input: String, spans: CodeSpan*)(implicit loc: munit.Location): Unit =
assertEquals(defaultParser.parse(input).toEither, Right(spans.toList))
object Identifiers {
def test(id: String, category: CodeCategory): Unit = run(s"+- $id *^",
CodeSpan("+- "),
CodeSpan(id, category),
CodeSpan(" *^")
)
}
test("identifier starting with a lower-case letter") {
Identifiers.test("id", CodeCategory.Identifier)
}
test("identifier starting with an underscore") {
Identifiers.test("_id", CodeCategory.Identifier)
}
test("identifier containing a digit") {
Identifiers.test("id9", CodeCategory.Identifier)
}
test("type name starting with an upper-case letter") {
Identifiers.test("Type", CodeCategory.TypeName)
}
test("type name containing a digit") {
Identifiers.test("Type9", CodeCategory.TypeName)
}
test("type name containing an underscore") {
Identifiers.test("Type_Foo", CodeCategory.TypeName)
}
object Numeric {
def test(numberLiteral: String): Unit = run(s"one1 $numberLiteral three3",
CodeSpan("one1", CodeCategory.Identifier),
CodeSpan(" "),
CodeSpan(numberLiteral, CodeCategory.NumberLiteral),
CodeSpan(" "),
CodeSpan("three3", CodeCategory.Identifier)
)
}
test("binary literal") {
Numeric.test("0b10011011")
}
test("binary literal with underscores") {
Numeric.test("0b_1001_1011")
}
test("binary literal with L suffix") {
Numeric.test("0b_1001_1011L")
}
test("octal literal") {
Numeric.test("0o171")
}
test("octal literal with underscores") {
Numeric.test("0o171_151")
}
test("octal literal with L suffix") {
Numeric.test("0o171L")
}
test("hex literal") {
Numeric.test("0xff99ee")
}
test("hex literal with underscores") {
Numeric.test("0xff_99_ee")
}
test("hex literal with L suffix") {
Numeric.test("0xff99eeL")
}
test("single-digit decimal literal") {
Numeric.test("3")
}
test("multi-digit decimal literal") {
Numeric.test("902")
}
test("decimal literal with underscores") {
Numeric.test("12_000")
}
test("decimal literal with L suffix") {
Numeric.test("42L")
}
test("decimal float literal") {
Numeric.test("23.45")
}
test("decimal float literal with leading dot") {
Numeric.test(".456")
}
test("decimal float literal with underscores") {
Numeric.test("23_427.45")
}
test("decimal float literal with D suffix") {
Numeric.test("23.45D")
}
test("decimal float literal with exponent") {
Numeric.test("23.45e24")
}
test("decimal float literal with a signed exponent") {
Numeric.test("23.45e-24")
}
test("decimal float literal with exponent and D suffix") {
Numeric.test("23.45e24D")
}
test("hex float literal") {
Numeric.test("0x23.f5")
}
test("hex float literal with an exponent") {
Numeric.test("0x23.f5p-23")
}
test("do not recognize a number immediately followed by a letter") {
run(s"one1 123bb three3",
CodeSpan("one1", CodeCategory.Identifier),
CodeSpan(" 123bb "),
CodeSpan("three3", CodeCategory.Identifier)
)
}
test("recognize a number immediately followed by a letter if explicitly allowed (e.g. for numbers with unit like in CSS)") {
assertEquals(createParser(allowLetterAfterNumber = true).parse(s"one1 123bb three3").toEither, Right(Seq(
CodeSpan("one1", CodeCategory.Identifier),
CodeSpan(" "),
CodeSpan("123", CodeCategory.NumberLiteral),
CodeSpan("bb", CodeCategory.Identifier),
CodeSpan(" "),
CodeSpan("three3", CodeCategory.Identifier)
)))
}
object StringLiterals {
def test(literal: String): Unit = run(s"one1 $literal three3",
CodeSpan("one1", CodeCategory.Identifier),
CodeSpan(" "),
CodeSpan(literal, CodeCategory.StringLiteral),
CodeSpan(" "),
CodeSpan("three3", CodeCategory.Identifier)
)
def testEmbedded(category: CodeCategory, text: String): Unit = run(s"one1 'aa $text bb' three3",
CodeSpan("one1", CodeCategory.Identifier),
CodeSpan(" "),
CodeSpan("'aa ", CodeCategory.StringLiteral),
CodeSpan(text, category),
CodeSpan(" bb'", CodeCategory.StringLiteral),
CodeSpan(" "),
CodeSpan("three3", CodeCategory.Identifier)
)
def testEscape(escape: String): Unit = testEmbedded(CodeCategory.EscapeSequence, escape)
def testSubstitution(subst: String): Unit = testEmbedded(CodeCategory.Substitution, subst)
}
test("single-line string literal") {
StringLiterals.test("'foo'")
}
test("multi-line string literal") {
StringLiterals.test("'''foo bar'''")
}
test("single character escape") {
StringLiterals.testEscape("\\\\t")
}
test("unicode escape") {
StringLiterals.testEscape("\\\\ua24f")
}
test("octal escape") {
StringLiterals.testEscape("\\\\322")
}
test("hex escape") {
StringLiterals.testEscape("\\\\x7f")
}
test("literal escape") {
StringLiterals.testEscape("$$")
}
test("substitution expression") {
StringLiterals.testSubstitution("${ref}")
}
test("substitution identifier") {
StringLiterals.testSubstitution("$ref22")
}
object CharLiterals {
def test(literal: String): Unit = run(s"one1 $literal three3",
CodeSpan("one1", CodeCategory.Identifier),
CodeSpan(" "),
CodeSpan(literal, CodeCategory.CharLiteral),
CodeSpan(" "),
CodeSpan("three3", CodeCategory.Identifier)
)
def testEscape (text: String): Unit = run(s"one1 '$text' three3",
CodeSpan("one1", CodeCategory.Identifier),
CodeSpan(" "),
CodeSpan("'", CodeCategory.CharLiteral),
CodeSpan(text, CodeCategory.EscapeSequence),
CodeSpan("'", CodeCategory.CharLiteral),
CodeSpan(" "),
CodeSpan("three3", CodeCategory.Identifier)
)
}
test("char literal") {
CharLiterals.test("'c'")
}
test("char literal with single character escape") {
CharLiterals.testEscape("\\\\t")
}
test("char literal with unicode escape") {
CharLiterals.testEscape("\\\\ua24f")
}
test("char literal with octal escape") {
CharLiterals.testEscape("\\\\322")
}
test("char literal with hex escape") {
CharLiterals.testEscape("\\\\x7f")
}
test("regex literal") {
run(s"one1 /[a-z]*/ three3",
CodeSpan("one1", CodeCategory.Identifier),
CodeSpan(" "),
CodeSpan("/[a-z]*/", CodeCategory.RegexLiteral),
CodeSpan(" "),
CodeSpan("three3", CodeCategory.Identifier)
)
}
test("regex literal with an escape sequence") {
run(s"one1 /[\\\\\\\\]*/ three3",
CodeSpan("one1", CodeCategory.Identifier),
CodeSpan(" "),
CodeSpan("/[", CodeCategory.RegexLiteral),
CodeSpan("\\\\\\\\", CodeCategory.EscapeSequence),
CodeSpan("]*/", CodeCategory.RegexLiteral),
CodeSpan(" "),
CodeSpan("three3", CodeCategory.Identifier)
)
}
test("regex literal with flags") {
run(s"one1 /[a-z]*/gi three3",
CodeSpan("one1", CodeCategory.Identifier),
CodeSpan(" "),
CodeSpan("/[a-z]*/gi", CodeCategory.RegexLiteral),
CodeSpan(" "),
CodeSpan("three3", CodeCategory.Identifier)
)
}
test("single line comment") {
val input =
"""line1
|line2 // comment
|line3""".stripMargin
run(input,
CodeSpan("line1", CodeCategory.Identifier),
CodeSpan("\\n"),
CodeSpan("line2", CodeCategory.Identifier),
CodeSpan(" "),
CodeSpan("// comment\\n", CodeCategory.Comment),
CodeSpan("line3", CodeCategory.Identifier),
)
}
test("multi-line comment") {
val input =
"""line1 /* moo
|mar
|maz */ line3""".stripMargin
run(input,
CodeSpan("line1", CodeCategory.Identifier),
CodeSpan(" "),
CodeSpan("/* moo\\nmar\\nmaz */", CodeCategory.Comment),
CodeSpan(" "),
CodeSpan("line3", CodeCategory.Identifier),
)
}
test("keywords") {
val input = "one foo three"
run(input,
CodeSpan("one", CodeCategory.Identifier),
CodeSpan(" "),
CodeSpan("foo", CodeCategory.Keyword),
CodeSpan(" "),
CodeSpan("three", CodeCategory.Identifier),
)
}
test("ignore keywords when they are followed by more letters or digits") {
val input = "one foo1 bar2 four"
run(input,
CodeSpan("one", CodeCategory.Identifier),
CodeSpan(" "),
CodeSpan("foo1", CodeCategory.Identifier),
CodeSpan(" "),
CodeSpan("bar2", CodeCategory.Identifier),
CodeSpan(" "),
CodeSpan("four", CodeCategory.Identifier),
)
}
test("ignore keywords when they are preceded by letters or digits") {
val input = "one 1foo bbar four"
run(input,
CodeSpan("one", CodeCategory.Identifier),
CodeSpan(" 1foo "),
CodeSpan("bbar", CodeCategory.Identifier),
CodeSpan(" "),
CodeSpan("four", CodeCategory.Identifier),
)
}
test("newline detection - recognize input at the start of a line") {
val input =
"""line1
|===
|line3""".stripMargin
run(input,
CodeSpan("line1", CodeCategory.Identifier),
CodeSpan("\\n"),
CodeSpan("===", CodeCategory.Markup.Fence),
CodeSpan("\\n"),
CodeSpan("line3", CodeCategory.Identifier),
)
}
test("newline detection - recognize input at the start of the input") {
val input =
"""===
|line2
|line3""".stripMargin
run(input,
CodeSpan("===", CodeCategory.Markup.Fence),
CodeSpan("\\n"),
CodeSpan("line2", CodeCategory.Identifier),
CodeSpan("\\n"),
CodeSpan("line3", CodeCategory.Identifier),
)
}
test("newline detection - do not recognize input in the middle of a line") {
val input =
"""line1
|line2 ===
|line3""".stripMargin
run(input,
CodeSpan("line1", CodeCategory.Identifier),
CodeSpan("\\n"),
CodeSpan("line2", CodeCategory.Identifier),
CodeSpan(" ===\\n"),
CodeSpan("line3", CodeCategory.Identifier),
)
}
}
| repo_name: planet42/Laika | path: core/shared/src/test/scala/laika/parse/code/common/CommonSyntaxParserSpec.scala | language: Scala | license: apache-2.0 | size: 13,102 |
package bulu.actor.build
import akka.actor.Actor
import bulu.util._
import akka.actor.ActorLogging
import org.apache.hadoop.hbase.util.Bytes
import java.sql.DriverManager
import java.sql.ResultSet
import akka.actor.actorRef2Scala
import akka.actor.ActorRef
import bulu.core.Field
import bulu.core.BitKey
class DimensionSinker( count : Int, reply : ActorRef, workers:Seq[ActorRef] ) extends Actor with ActorLogging {
var left = count
var partitionList = List.empty[Option[String]]
var partitionField:Field=null
var keyCursor = 0
var maxList=0
def receive : Receive = {
case MemberList( cube, field, list ) =>
log.info("receive cube (%s) dimension (%s) result, count=(%s)".format(cube, field, list.size))
left = left - 1
val length = Integer.toBinaryString( list.size ).length();
val maskTable = new HBase( HBase.getTableName( cube, TableCategory.Mask ) )
val mask = BitKey.Factory.makeBitKey( keyCursor + length, false );
for ( i <- 0 until length ) {
mask.set( keyCursor + i )
}
saveMask( maskTable, field.column, mask )
for ( index <- 0 until list.size ) {
val key = BitKey.Factory.makeBitKey( keyCursor + length, false );
setKey( index + 1, key, keyCursor )
saveKV( cube, field.column, key, list( index ) )
}
if(list.size>maxList){
partitionField=field
partitionList=list
maxList=list.size
}
keyCursor+=length
if(left==0)
reply ! DimensionFinshed( cube ,partitionField,partitionList, workers)
case _ =>
throw new Exception("illegal message")
}
def setKey( index : Int, key : BitKey, base : Int ) {
val length = Integer.toBinaryString( index ).length();
for ( i <- 0 until length ) {
if ( ( index >> i & 1 ) == 1 ) {
key.set( i+base )
}
}
}
def getFunc( res : ResultSet, dataType : String ) = {
dataType match {
case "String" => res.getString( _ : String )
case "Int" => res.getInt( _ : String )
case "Double" => res.getDouble( _ : String )
case "Float" => res.getFloat( _ : String )
case "Long" => res.getLong( _ : String )
case "Boolean" => res.getBoolean( _ : String )
case "BigDecimal" => res.getBigDecimal( _ : String )
}
}
def saveCursor( maskTable : HBase, cursor : Int ) {
maskTable.put( Bytes.toBytes( HBase.MaxKeyCursor ), Bytes.toBytes( HBase.DefaultFamily ),
Bytes.toBytes( HBase.MaxKeyCursor ), Bytes.toBytes( cursor.toLong ) )
}
def getMask( maskTable : HBase, dim : String ) : Option[BitKey] = {
val res = maskTable.get( Bytes.toBytes( HBase.DefaultMaskRowId ), Bytes.toBytes( HBase.DefaultFamily ), Bytes.toBytes( dim ) )
if ( res == null ) None else Some( Convert.fromByteArray( res ) )
}
def saveMask( maskTable : HBase, dim : String, maskKey : BitKey ) {
maskTable.put( Bytes.toBytes( HBase.DefaultMaskRowId ), Bytes.toBytes( HBase.DefaultFamily ),
Bytes.toBytes( dim ), Convert.toByteArray( maskKey ) )
}
def saveKV( cube : String, dimName : String, key : BitKey, value : Option[Any] ) {
val dimKVTable = new HBase( HBase.getTableName( cube, TableCategory.DimKV ) )
val dimVKTable = new HBase( HBase.getTableName( cube, TableCategory.DimVK ) )
dimKVTable.put( Bytes.toBytes( dimName ), Bytes.toBytes( HBase.DefaultFamily ),
Convert.toByteArray( key ), Bytes.toBytes( value.toString ) )
dimVKTable.put( Bytes.toBytes( dimName ), Bytes.toBytes( HBase.DefaultFamily ),
Bytes.toBytes( value.toString ), Convert.toByteArray( key ) )
dimKVTable.close
dimVKTable.close
}
}
| repo_name: hwzhao/bulu | path: src/main/scala/bulu/actor/build/DimensionSinker.scala | language: Scala | license: apache-2.0 | size: 3,565 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
package synthesis
import solvers._
import purescala.Definitions.{Program, FunDef}
import evaluators._
/**
* Holds the global information for an entire synthesis search.
*/
class SynthesisContext(
context: LeonContext,
val settings: SynthesisSettings,
val functionContext: FunDef,
val program: Program
) extends LeonContext(
context.reporter,
context.interruptManager,
context.options,
context.files,
context.classDir,
context.timers
) {
val solverFactory = SolverFactory.getFromSettings(context, program)
lazy val defaultEvaluator = {
new DefaultEvaluator(context, program)
}
}
| repo_name: regb/leon | path: src/main/scala/leon/synthesis/SynthesisContext.scala | language: Scala | license: gpl-3.0 | size: 698 |
import scala.reflect.runtime.universe._
import scala.reflect.ClassTag
class Foo{
case class R(
sales : Int,
name : String
)
def foo = {
val expectedType = implicitly[TypeTag[R]]
val classTag = implicitly[ClassTag[R]]
val cl = classTag.runtimeClass.getClassLoader
val cm = runtimeMirror(cl)
val constructor = expectedType.tpe.member( termNames.CONSTRUCTOR ).asMethod
val sig = constructor.info
val sym = cm.classSymbol( classTag.runtimeClass )
val cls = cm.reflect( this ).reflectClass( sym )
cls.reflectConstructor( constructor )( 5,"test" ).asInstanceOf[R]
}
}
object Test extends App{
val foo = new Foo
println( foo.foo )
}
| repo_name: felixmulder/scala | path: test/files/run/reflection-constructormirror-inner-good.scala | language: Scala | license: bsd-3-clause | size: 664 |
package models
import org.specs2.mutable._
import org.specs2.mutable.Specification
import org.specs2.runner._
import scala.io.Source
import play.api.libs.json.Json
class JiraModelsSpec extends Specification {
"JSON Result" should {
"parse jira issue correctly" in {
val s = Source.fromFile("test/resources/jira_issue.json")
val result = s.mkString
val json = Json.parse(result)
val opt = Json.fromJson[JiraIssue](json).asOpt
opt must not be None
val issue = opt.get
issue.id === "11720"
}
"parse jira project correctly" in {
val s = Source.fromFile("test/resources/jira_project.json")
val result = s.mkString
val json = Json.parse(result)
val opt = Json.fromJson[JiraProject](json).asOpt
opt must not be None
val project = opt.get
project.id === "10070"
}
"parse jira projects correctly" in {
val s = Source.fromFile("test/resources/jira_projects.json")
val result = s.mkString
val json = Json.parse(result)
val opt = Json.fromJson[Seq[JiraProject]](json).asOpt
opt must not be None
val projects = opt.get
projects.size === 17
projects(0).id === "10070"
}
"parse search result correctly" in {
val s = Source.fromFile("test/resources/jira_search_result.json")
val result = s.mkString
val json = Json.parse(result)
val opt = Json.fromJson[JiraSearchResult](json).asOpt
opt must not be None
}
"parse search result2 correctly" in {
val s = Source.fromFile("test/resources/jira_search_result2.json")
val result = s.mkString
val json = Json.parse(result)
val opt = Json.fromJson[JiraSearchResult](json).asOpt
opt must not be None
}
"parse search results correctly" in {
val s = Source.fromFile("test/resources/jira_search_results.json")
val result = s.mkString
val json = Json.parse(result)
val r = Json.fromJson[JiraSearchResult](json)
val opt = r.asOpt
opt must not be None
val res = opt.get
res.issues.size === 37
}
"parse simple jira issue correctly" in {
val s = Source.fromFile("test/resources/jira_issue_simple.json")
val result = s.mkString
val json = Json.parse(result)
val opt = Json.fromJson[JiraIssue](json).asOpt
opt must not be None
val issue = opt.get
issue.id === "11720"
}
"parse jira version correctly" in {
val json = Json.obj("self" -> "http://test.com", "id" -> "1", "description" -> "version1", "name" -> "1.0", "archived" -> false, "released" -> true)
val opt = Json.fromJson[JiraVersion](json).asOpt
opt must not be None
val issue = opt.get
issue.id === "1"
}
}
}
| repo_name: toggm/play-scala-jira-api | path: test/models/JiraModelsSpec.scala | language: Scala | license: gpl-2.0 | size: 2,789 |
package com.skn.test.view
import java.time.LocalDateTime
import java.util.concurrent._
import com.skn.api.view.exception.ParsingException
import com.skn.api.view.model.ViewLink
import com.skn.common.view._
import play.api.libs.json.Json
import com.skn.api.view.jsonapi.JsonApiPlayFormat.dataFormat
import com.skn.api.view.jsonapi.JsonApiModel.{ObjectKey, RootObject}
import com.skn.api.view.model.mapper._
import com.skn.common.view.model.WithStringId
import scala.collection.immutable.Stream.Empty
class ViewModelTest extends BaseUnitTest
{
"A ViewItem" should "be serialized automatically" in {
val executorService: ExecutorService = new ThreadPoolExecutor(4, 4,
1000, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue[Runnable]())
val viewMapper = new DefaultViewWriter(new SimpleLinkDefiner)
val newViewMapper = mappers.viewReader//new DefaultViewReader
val view = data.item
val serialized = viewMapper.write(view)
val deserialized = newViewMapper.read[TestView](serialized)
logger.info("Before = " + view.toString)
logger.info("Serialized = " + Json.toJson(serialized)(dataFormat))
logger.info("Deserialized = " + deserialized)
serialized.attributes.get("str").as[String] should be (view.str)
serialized.attributes.get("num").as[BigDecimal] should be (view.num)
deserialized.str should be (view.str)
val deCustom = deserialized.custom.get
deCustom.name should be (view.custom.get.name)
deCustom.prices shouldEqual view.custom.get.prices
deserialized.link shouldBe defined
val deLink = deserialized.link.get
deLink.key.`type` should be (view.link.get.key.`type`)
deLink.key.id.get should be (view.link.get.key.id.get)
deserialized.key should be (view.key)
logger.info("deser key = " + deserialized.key)
}
"A JsonApiViewWriter" should "serialize ViewItem to String" in {
val str = mappers.jsonViewWriter.write(data.item)
logger.info("Current str: "+str)
val root = mappers.jacksonMapper.readValue(str, classOf[RootObject])
root.data shouldBe defined
val dataSeq = root.data.get
dataSeq should have size 1
dataSeq.head.key shouldEqual data.item.key
logger.info("data attrs = " + dataSeq.head.attributes)
dataSeq.head.attributes.get("str").as[String] shouldEqual data.itemName
}
"Absent values" should "be deserialized to null if absent feature enabled" in {
val res = mappers.viewWriter.write(data.itemWithNull)
val testAfter = mappers.viewReader.read[TestSimple](res)
testAfter.name shouldBe null
}
"Absent values" should "produce error, if absent feature disabled" in {
val viewReaderAbsent = new DefaultViewReader
val res = mappers.viewWriter.write(data.itemWithNull)
an [ParsingException] should be thrownBy viewReaderAbsent.read[TestSimple](res)
}
"Not only long id " should "be supported" in {
val id = "id_34"
val item = WithStringId(id, 45L)
val jsonItem = mappers.jsonViewWriter.write(item)
val itemAfter = mappers.jsonViewReader.read[WithStringId](jsonItem)
itemAfter.get.head.id should equal (id)
}
"A empty seq" should "be supported" in {
val emptySeqItem = TestSeq(23L,
Seq[TestSimple](),
Some(Seq[Long]()))
val json = mappers.jsonViewWriter.write(emptySeqItem)
logger.info("With empty seq = " + json)
val emptySeqAfter = mappers.jsonViewReader.read[TestSeq](json).get.head
emptySeqAfter.simpleSeq.toList should contain theSameElementsAs emptySeqItem.simpleSeq
emptySeqAfter.optionSeq.get shouldBe empty
logger.info(" ## test b.equals(a) = " + emptySeqItem.equals(emptySeqAfter) +
", b == a = " + (emptySeqItem == emptySeqAfter))
}
"A seq" should "be supported" in {
val seqItem = TestSeq(23L,
Seq[TestSimple](TestSimple(ObjectKey("st", 3L), "g", 1)),
Some(Seq[Long](7L)))
val json = mappers.jsonViewWriter.write(seqItem)
logger.info("With seq = " + json)
val seqAfter = mappers.jsonViewReader.read[TestSeq](json)
val itemAfter = seqAfter.get.head
itemAfter.simpleSeq.toList should contain theSameElementsAs seqItem.simpleSeq
itemAfter.optionSeq should not be empty
itemAfter.optionSeq.get.toList should contain theSameElementsAs seqItem.optionSeq.get
}
"A ViewItems seq" should "be write as array" in {
val seq = data.createNewItem() :: Nil// :: data.createNewItem() :: data.createNewItem() :: Nil
val json = mappers.jsonViewWriter.write(seq)
val after = mappers.jsonViewReader.read[TestView](json)
logger.info("Seq json = " + json + "\\n equals = " + seq.head.equals(after.get.head))
after.get.size shouldEqual seq.size
after.get should contain theSameElementsAs seq
}
}
| repo_name: AlexeyIvanov8/json-api-mapper | path: src/test/scala/com/skn/test/view/ViewModelTest.scala | language: Scala | license: gpl-3.0 | size: 4,729 |
package org.knora.webapi.util
import akka.actor.ActorSystem
import akka.event.LoggingAdapter
import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.ActorMaterializer
import akka.util.Timeout
import spray.json._
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}
/**
* Object containing methods for dealing with [[HttpResponse]]
*/
object AkkaHttpUtils {
/**
* Given an [[HttpResponse]] containing JSON, return that JSON.
* @param response the [[HttpResponse]] containing JSON
* @return a [[JsObject]]
*/
def httpResponseToJson(response: HttpResponse)(implicit ec: ExecutionContext, system: ActorSystem, log: LoggingAdapter): JsObject = {
import DefaultJsonProtocol._
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
implicit val materializer = ActorMaterializer()
val jsonFuture: Future[JsObject] = response match {
case HttpResponse(StatusCodes.OK, _, entity, _) =>
Unmarshal(entity).to[JsObject]
case other =>
throw new Exception(other.toString())
}
//FIXME: There is probably a better non blocking way of doing it.
Await.result(jsonFuture, Timeout(10.seconds).duration)
}
}
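/*
 * Hypothetical usage sketch (not part of the original file): fetching a URL with akka-http
 * and converting the response with httpResponseToJson. The endpoint address is a placeholder.
 */
object AkkaHttpUtilsSketch extends App {
  import akka.actor.ActorSystem
  import akka.event.Logging
  import akka.http.scaladsl.Http
  import akka.http.scaladsl.model.HttpRequest

  implicit val system: ActorSystem = ActorSystem("akka-http-utils-sketch")
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val log: LoggingAdapter = Logging(system, "AkkaHttpUtilsSketch")

  // Block for the response, then hand it to the helper above.
  val response = Await.result(Http().singleRequest(HttpRequest(uri = "http://localhost:3333/admin/version")), 10.seconds)
  println(AkkaHttpUtils.httpResponseToJson(response).fields.keySet)
  system.terminate()
}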
| repo_name: nie-ine/Knora | path: webapi/src/test/scala/org/knora/webapi/util/AkkaHttpUtils.scala | language: Scala | license: agpl-3.0 | size: 1,377 |
package io.buoyant.linkerd
import com.fasterxml.jackson.annotation.{JsonIgnore, JsonProperty}
import com.twitter.conversions.DurationOps._
import com.twitter.finagle.{param, Stack}
import com.twitter.finagle.buoyant.TotalTimeout
import com.twitter.finagle.service._
import com.twitter.finagle.buoyant.ParamsMaybeWith
import io.buoyant.namer.BackoffConfig
import io.buoyant.router.{ClassifiedRetries, RetryBudgetConfig}
import io.buoyant.router.RetryBudgetModule.{param => ev}
/**
* SvcConfig is a trait containing protocol-agnostic configuration options
* that apply at the level of the logical name (i.e. the path stack). This
* trait can be mixed into a class to allow these options to be set on that
* class as part of config deserialization.
*/
trait SvcConfig {
var totalTimeoutMs: Option[Int] = None
var retries: Option[RetriesConfig] = None
@JsonIgnore
def params(vars: Map[String, String]): Stack.Params = Stack.Params.empty
.maybeWith(totalTimeoutMs.map(timeout => TotalTimeout.Param(timeout.millis)))
.maybeWith(retries.flatMap(_.mkBackoff))
.maybeWith(retries.flatMap(_.budget))
.maybeWith(responseClassifier.map(param.ResponseClassifier(_)))
/**
* responseClassifier categorizes responses to determine whether
* they are failures and if they are retryable.
*
* @note that unlike the other properties in this class, this `var`
* has a getter and setter. this is because the `H2SvcConfig`
* will use a distinct type for its response classifier, and
* we want to reuse the JSON property `"responseClassifier"`.
* Scala doesn't permit child classes to override mutable fields,
* but it does permit them to override `def`s, so `H2SvcConfig`
* *can* override the getter and setter for this field with
* `JsonIgnore` and reuse the `"responseClassifier"` JSON property.
*
*/
private[this] var _responseClassifierConfig: Option[ResponseClassifierConfig] =
None
@JsonProperty("responseClassifier")
def responseClassifierConfig_=(r: Option[ResponseClassifierConfig]): Unit =
_responseClassifierConfig = r
@JsonProperty("responseClassifier")
def responseClassifierConfig: Option[ResponseClassifierConfig] =
_responseClassifierConfig
@JsonIgnore
def baseResponseClassifier: ResponseClassifier =
ClassifiedRetries.Default
@JsonIgnore
def responseClassifier: Option[ResponseClassifier] =
_responseClassifierConfig.map { classifier =>
ClassifiedRetries.orElse(classifier.mk, baseResponseClassifier)
}
}
case class RetriesConfig(
backoff: Option[BackoffConfig] = None,
budget: Option[RetryBudgetConfig] = None
) {
@JsonIgnore
def mkBackoff: Option[ClassifiedRetries.Backoffs] =
backoff.map(_.mk).map(ClassifiedRetries.Backoffs(_))
}
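/*
 * Minimal sketch (not from the original file): building Stack.Params from a SvcConfig
 * instance populated the way the config deserializer would populate it. The values are
 * illustrative only.
 */
object SvcConfigSketch extends App {
  val cfg = new SvcConfig {}
  cfg.totalTimeoutMs = Some(600)
  cfg.retries = Some(RetriesConfig())

  // params() folds the optional settings into a finagle Stack.Params instance.
  val stackParams: Stack.Params = cfg.params(Map.empty)
  println(stackParams)
}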
| repo_name: linkerd/linkerd | path: linkerd/core/src/main/scala/io/buoyant/linkerd/SvcConfig.scala | language: Scala | license: apache-2.0 | size: 2,812 |
package org.joda.time.base
import org.joda.time.DateTime
import org.joda.time.DateTimeUtils
import org.joda.time.Duration
import org.joda.time.Interval
import org.joda.time.MutableInterval
import org.joda.time.Period
import org.joda.time.PeriodType
import org.joda.time.ReadableInstant
import org.joda.time.ReadableInterval
import org.joda.time.field.FieldUtils
import org.joda.time.format.ISODateTimeFormat
abstract class AbstractInterval extends ReadableInterval() {
protected def checkInterval(start: Long, end: Long) {
if (end < start) {
throw new IllegalArgumentException(
"The end instant must be greater or equal to the start")
}
}
def getStart(): DateTime = {
new DateTime(getStartMillis, getChronology)
}
def getEnd(): DateTime = {
new DateTime(getEndMillis, getChronology)
}
def contains(millisInstant: Long): Boolean = {
val thisStart = getStartMillis
val thisEnd = getEndMillis
millisInstant >= thisStart && millisInstant < thisEnd
}
def containsNow(): Boolean = {
contains(DateTimeUtils.currentTimeMillis())
}
def contains(instant: ReadableInstant): Boolean = {
if (instant == null) {
return containsNow()
}
contains(instant.getMillis)
}
def contains(interval: ReadableInterval): Boolean = {
if (interval == null) {
return containsNow()
}
val otherStart = interval.getStartMillis
val otherEnd = interval.getEndMillis
val thisStart = getStartMillis
val thisEnd = getEndMillis
thisStart <= otherStart && otherStart < thisEnd && otherEnd <= thisEnd
}
def overlaps(interval: ReadableInterval): Boolean = {
val thisStart = getStartMillis
val thisEnd = getEndMillis
if (interval == null) {
val now = DateTimeUtils.currentTimeMillis()
thisStart < now && now < thisEnd
} else {
val otherStart = interval.getStartMillis
val otherEnd = interval.getEndMillis
thisStart < otherEnd && otherStart < thisEnd
}
}
def isEqual(other: ReadableInterval): Boolean = {
getStartMillis == other.getStartMillis && getEndMillis == other.getEndMillis
}
def isBefore(millisInstant: Long): Boolean = (getEndMillis <= millisInstant)
def isBeforeNow(): Boolean = {
isBefore(DateTimeUtils.currentTimeMillis())
}
def isBefore(instant: ReadableInstant): Boolean = {
if (instant == null) {
return isBeforeNow
}
isBefore(instant.getMillis)
}
def isBefore(interval: ReadableInterval): Boolean = {
if (interval == null) {
return isBeforeNow
}
isBefore(interval.getStartMillis)
}
def isAfter(millisInstant: Long): Boolean = (getStartMillis > millisInstant)
def isAfterNow(): Boolean = {
isAfter(DateTimeUtils.currentTimeMillis())
}
def isAfter(instant: ReadableInstant): Boolean = {
if (instant == null) {
return isAfterNow
}
isAfter(instant.getMillis)
}
def isAfter(interval: ReadableInterval): Boolean = {
var endMillis: Long = 0l
endMillis =
if (interval == null) DateTimeUtils.currentTimeMillis()
else interval.getEndMillis
getStartMillis >= endMillis
}
def toInterval(): Interval = {
new Interval(getStartMillis, getEndMillis, getChronology)
}
def toMutableInterval(): MutableInterval = {
new MutableInterval(getStartMillis, getEndMillis, getChronology)
}
def toDurationMillis(): Long = {
FieldUtils.safeAdd(getEndMillis, -getStartMillis)
}
def toDuration(): Duration = {
val durMillis = toDurationMillis()
if (durMillis == 0) {
Duration.ZERO
} else {
new Duration(durMillis)
}
}
def toPeriod(): Period = {
new Period(getStartMillis, getEndMillis, getChronology)
}
def toPeriod(`type`: PeriodType): Period = {
new Period(getStartMillis, getEndMillis, `type`, getChronology)
}
override def equals(readableInterval: Any): Boolean = {
if (this == readableInterval) {
return true
}
if (readableInterval.isInstanceOf[ReadableInterval] == false) {
return false
}
val other = readableInterval.asInstanceOf[ReadableInterval]
getStartMillis == other.getStartMillis && getEndMillis == other.getEndMillis &&
FieldUtils.==(getChronology, other.getChronology)
}
override def hashCode(): Int = {
val start = getStartMillis
val end = getEndMillis
var result = 97
result = 31 * result + (start ^ (start >>> 32)).toInt
result = 31 * result + (end ^ (end >>> 32)).toInt
result = 31 * result + getChronology.hashCode
result
}
override def toString(): String = {
var printer = ISODateTimeFormat.dateTime()
printer = printer.withChronology(getChronology)
val buf = new StringBuffer(48)
printer.printTo(buf, getStartMillis)
buf.append('/')
printer.printTo(buf, getEndMillis)
buf.toString
}
}
}
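/*
 * Illustrative sketch (not part of the original file): the half-open semantics of
 * contains/overlaps implemented above, exercised through the concrete Interval class
 * (assuming the port keeps joda-time's (startMillis, endMillis) constructor).
 */
object AbstractIntervalSketch extends App {
  val a = new Interval(0L, 100L)
  val b = new Interval(50L, 150L)
  println(a.contains(50L))  // true: the start instant is inclusive
  println(a.contains(100L)) // false: the end instant is exclusive
  println(a.overlaps(b))    // true: [0, 100) and [50, 150) share [50, 100)
  println(a.isBefore(b))    // false: a does not end before b starts
}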
| repo_name: mdedetrich/soda-time | path: shared/src/main/scala/org/joda/time/base/AbstractInterval.scala | language: Scala | license: bsd-2-clause | size: 4,859 |
object Sample302 extends App {
def repeatLoop(com: => Unit)(con: => Boolean) {
com
if (con) repeatLoop(com)(con)
}
var x = 0
repeatLoop {
x += 1
println(x)
}(x < 10)
}
| repo_name: laco/presentations | path: 20170421-StartupSafary-Scala/src/main/scala/Sample302.scala | language: Scala | license: mit | size: 197 |
package se.gigurra.leavu3.datamodel
import com.github.gigurra.heisenberg.MapData._
import com.github.gigurra.heisenberg.{Parsed, Schema}
import scala.language.implicitConversions
case class NavRequirements(source: SourceData = Map.empty) extends SafeParsed[NavRequirements.type] {
val altitude = parse(schema.altitude)
val verticalSpeed = parse(schema.verticalSpeed)
val roll = parse(schema.roll).toDegrees
val pitch = parse(schema.pitch).toDegrees
val speed = parse(schema.speed)
}
object NavRequirements extends Schema[NavRequirements] {
val altitude = required[Float]("altitude", default = 0)
val verticalSpeed = required[Float]("vertical_speed", default = 0)
val roll = required[Float]("roll", default = 0)
val pitch = required[Float]("pitch", default = 0)
val speed = required[Float]("speed", default = 0)
}
| repo_name: GiGurra/leavu3 | path: src/main/scala/se/gigurra/leavu3/datamodel/NavRequirements.scala | language: Scala | license: mit | size: 898 |
package helpers
import javax.inject.Inject
import org.joda.time.{LocalDate, LocalDateTime}
import play.api.Environment
import services.TimeService
import uk.gov.hmrc.play.bootstrap.config.ServicesConfig
class FakeTimeService @Inject()(env: Environment, servicesConfig: ServicesConfig) extends TimeService(env, servicesConfig) {
override def currentDateTime: LocalDateTime = LocalDateTime.parse("2020-01-01T09:00:00")
override def currentLocalDate: LocalDate = LocalDate.parse("2020-01-01")
}
| repo_name: hmrc/vat-registration-frontend | path: it/helpers/FakeTimeService.scala | language: Scala | license: apache-2.0 | size: 499 |
package org.jetbrains.plugins.scala.lang.psi.impl.base.types
import com.intellij.lang.ASTNode
import com.intellij.psi.scope.PsiScopeProcessor
import com.intellij.psi.{PsiElement, ResolveState}
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElementImpl
import org.jetbrains.plugins.scala.lang.psi.api.base.types._
/**
* @author Alexander Podkhalyuzin
* Date: 07.03.2008
*/
class ScRefinementImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScRefinement{
override def toString: String = "Refinement"
override def processDeclarations(processor: PsiScopeProcessor, state: ResolveState, lastParent: PsiElement, place: PsiElement): Boolean = {
val iterator = types.iterator
while (iterator.hasNext) {
val elem = iterator.next
if (!processor.execute(elem, state)) return false
}
val iterator1 = holders.iterator.flatMap(_.declaredElements.iterator)
while (iterator1.hasNext) {
val elem = iterator1.next
if (!processor.execute(elem, state)) return false
}
true
}
}
| repo_name: gtache/intellij-lsp | path: intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/impl/base/types/ScRefinementImpl.scala | language: Scala | license: apache-2.0 | size: 1,035 |
package es.uvigo.ei.sing.sds
package controller
import scala.concurrent.Future
import play.api.Play
import play.api.i18n.Messages.Implicits._
import play.api.libs.json.{ Json, JsValue }
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.mvc._
import entity._
import database._
import util.Page
object ArticlesController extends Controller with Authorization {
import Play.current
import Article.ArticleForm
lazy val articlesDAO = new ArticlesDAO
lazy val annotationsDAO = new AnnotationsDAO
lazy val authorsDAO = new AuthorsDAO
lazy val authoringDAO = new ArticleAuthorsDAO
def list(page: Option[Int], count: Option[Int]): Action[AnyContent] =
Action.async {
articlesDAO.list(page.getOrElse(0), count.getOrElse(50)).map(as => Ok(Json.toJson(as)))
}
def get(id: Article.ID): Action[AnyContent] =
Action.async {
withAnnotatedArticle(id) { article => Ok(Json.toJson(article)) }
}
def add: Action[JsValue] =
AuthorizedAsyncAction(parse.json) { _ => request =>
ArticleForm.bind(request.body).fold(
errors => Future.successful(BadRequest(Json.obj("err" -> errors.errorsAsJson))),
article => articlesDAO.insert(article).map(a => Created(Json.toJson(a)))
)
}
def delete(id: Article.ID): Action[AnyContent] =
AuthorizedAsyncAction(parse.anyContent) { _ => _ =>
articlesDAO.delete(id).map(_ => NoContent)
}
private def withAnnotatedArticle(id: Article.ID)(f: AnnotatedArticle => Result): Future[Result] = {
val aa = annotationsDAO.getAnnotatedArticle(id).flatMap(
aa => aa.fold(articlesDAO.get(id).flatMap(toAnnotatedArticle))(_ ⇒ Future.successful(aa))
)
aa.map(_.fold(NotFound(Json.obj("err" -> "Article not found")))(f))
}
// FIXME: ugly & unsafe gets
private def toAnnotatedArticle(a: Option[Article]): Future[Option[AnnotatedArticle]] = {
val article = a.get
val authors: Future[Seq[Option[(Author, Int)]]] = authoringDAO.getByArticle(article.id.get) flatMap {
aas => Future.sequence(aas map {
case (_, id, pos) => authorsDAO.get(id).map(_.map((_, pos)))
})
}
authors.map(_.filter(_.isDefined).map(_.get).toList.sortBy(_._2).map(_._1)) map {
as => a.map(art => AnnotatedArticle(art, as, Set.empty, Set.empty))
}
}
}
| repo_name: agjacome/smart-drug-search | path: src/main/scala/controller/ArticlesController.scala | language: Scala | license: mit | size: 2,344 |
package mypipe.kafka.consumer
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
class GenericConsoleConsumer(topic: String, zkConnect: String, groupId: String) {
val timeout = 10.seconds
var future: Option[Future[Unit]] = None
val kafkaConsumer = new KafkaGenericMutationAvroConsumer(
topic = topic,
zkConnect = zkConnect,
groupId = groupId
)(
insertCallback = { insertMutation ⇒
println(insertMutation)
true
},
updateCallback = { updateMutation ⇒
println(updateMutation)
true
},
deleteCallback = { deleteMutation ⇒
println(deleteMutation)
true
}
) {
}
def start(): Unit = {
future = Some(kafkaConsumer.start)
}
def stop(): Unit = {
kafkaConsumer.stop()
future.foreach(Await.result(_, timeout))
}
}
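/*
 * Minimal usage sketch (not part of the original file): wiring the console consumer to a
 * topic and shutting it down cleanly. The topic, ZooKeeper address and group id are
 * placeholders.
 */
object GenericConsoleConsumerSketch extends App {
  val consumer = new GenericConsoleConsumer(
    topic = "mypipe_example_generic",
    zkConnect = "localhost:2181",
    groupId = "console-sketch")
  consumer.start()
  sys.addShutdownHook(consumer.stop())
}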
| repo_name: mardambey/mypipe | path: mypipe-kafka/src/main/scala/mypipe/kafka/consumer/GenericConsoleConsumer.scala | language: Scala | license: apache-2.0 | size: 826 |
// Copyright 2014 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package au.com.cba.omnia.maestro.core
package scalding
import cascading.flow.FlowDef
import cascading.pipe.Pipe
import com.twitter.scalding._, typed.TypedSink, Dsl._, TDsl._
import scalaz._, Scalaz._
object Errors extends FieldConversions {
/*
* This is totally unacceptable but is the only mechanism by which we have
* been able to construct error handling for validation on sources that can
* be written to file and doesn't involve doing multiple passes on the
* source data.
*
* It is also _dangerous_ in that you can not add multiple traps. If you
* do, you will receive a _runtime_ error (please do not swallow this,
* fix your program so it only has one trap).
*
* The process is basically that we give up on types and hack away:
* - Convert back to a regular pipe.
* - Add a trap on the regular pipe, for routing all failures to an error source.
* - Convert back to a typed pipe.
* - Go into untyped land by stripping off the "left" data constructor (this
* means we end up with a TypedPipe[Any] really that will either have raw
* failures, or success in a "right" data constructor. We do this so when
* errors are written out they are raw, and don't have the \/-() constructor.
* - Make the conversion strict via a call to fork.
* - Force errors into error pipe, by:
* - Doing a runtime match on rights, and returning the raw value.
* - For all other cases (i.e. our errors), throw an exception to trigger
* the trap. This should neatly write out the error because of our
* previous hack to strip off the left constructor.
*/
def handle[A](p: TypedPipe[String \/ A], errors: Source with TypedSink[Any]): TypedPipe[A] =
p
.flatMap({
case -\/(error) => List(error)
case v @ \/-(value) => List(v)
})
.fork
.addTrap(errors)
.map({
case \/-(value) => value.asInstanceOf[A]
case v => sys.error("trap: The real error was: " + v.toString)
})
def safely[A](path: String)(p: TypedPipe[String \/ A]): TypedPipe[A] =
handle(p, TypedPsv(path))
}
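/*
 * Illustrative sketch (not part of the original file): routing parse failures to an error
 * sink while successes continue down the pipe. The job, field parsing and paths are
 * assumptions for illustration only.
 */
class ErrorsSketchJob(args: Args) extends Job(args) {
  // Parse each line, keeping failures on the left of the scalaz disjunction.
  val parsed: TypedPipe[String \/ Int] =
    TypedPipe.from(TextLine(args("input"))).map { line =>
      try \/-(line.trim.toInt)
      catch { case _: NumberFormatException => -\/(s"not a number: $line") }
    }

  // Failures are trapped into args("errors"); the returned pipe carries only successes.
  Errors.safely(args("errors"))(parsed)
    .map(_ * 2)
    .write(TypedTsv[Int](args("output")))
}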
| repo_name: toddmowen/maestro | path: maestro-core/src/main/scala/au/com/cba/omnia/maestro/core/scalding/Errors.scala | language: Scala | license: apache-2.0 | size: 2,783 |
package com.lynbrookrobotics.potassium.commons.drivetrain.offloaded
import com.lynbrookrobotics.potassium.Signal
import com.lynbrookrobotics.potassium.commons.drivetrain.twoSided.{TwoSided, TwoSidedDrive}
import com.lynbrookrobotics.potassium.control.offload.EscConfig._
import com.lynbrookrobotics.potassium.control.offload.OffloadedSignal
import com.lynbrookrobotics.potassium.control.offload.OffloadedSignal.{OpenLoop, PositionPID, VelocityPIDF}
import com.lynbrookrobotics.potassium.streams.Stream
import squants.space.Length
import squants.{Dimensionless, Velocity}
abstract class OffloadedDrive extends TwoSidedDrive {
override type DriveSignal = TwoSided[OffloadedSignal]
override type Properties <: OffloadedDriveProperties
override def velocityControl(
target: Stream[TwoSided[Velocity]]
)(implicit hardware: Hardware, props: Signal[Properties]): Stream[DriveSignal] = target.map {
case TwoSided(left, right) =>
implicit val curProps: Properties = props.get
implicit val c = curProps.escConfig
TwoSided(
VelocityPIDF(
forwardToAngularVelocityGains(curProps.leftVelocityGainsFull),
ticks(left)
),
VelocityPIDF(
forwardToAngularVelocityGains(curProps.rightVelocityGainsFull),
ticks(right)
)
)
}
def positionControl(
target: Stream[TwoSided[Length]]
)(implicit hardware: Hardware, props: Signal[Properties]): Stream[DriveSignal] = target.map {
case TwoSided(left, right) =>
implicit val curProps: Properties = props.get
implicit val c = curProps.escConfig
TwoSided(
PositionPID(
forwardToAngularPositionGains(curProps.forwardPositionGains),
ticks(left)
),
PositionPID(
forwardToAngularPositionGains(curProps.forwardPositionGains),
ticks(right)
)
)
}
override protected def openLoopToDriveSignal(openLoopInput: TwoSided[Dimensionless]): TwoSided[OffloadedSignal] =
TwoSided(OpenLoop(openLoopInput.left), OpenLoop(openLoopInput.right))
}
| repo_name: Team846/potassium | path: commons/src/main/scala/com/lynbrookrobotics/potassium/commons/drivetrain/offloaded/OffloadedDrive.scala | language: Scala | license: mit | size: 2,077 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.util
import java.sql.{Date, Timestamp}
import java.text.SimpleDateFormat
import java.time.{Instant, LocalDate, LocalDateTime, LocalTime, ZoneId}
import java.util.Locale
import java.util.concurrent.TimeUnit
import org.scalatest.matchers.must.Matchers
import org.scalatest.matchers.should.Matchers._
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.plans.SQLHelper
import org.apache.spark.sql.catalyst.util.DateTimeConstants._
import org.apache.spark.sql.catalyst.util.DateTimeTestUtils._
import org.apache.spark.sql.catalyst.util.DateTimeUtils._
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}
class DateTimeUtilsSuite extends SparkFunSuite with Matchers with SQLHelper {
private def defaultZoneId = ZoneId.systemDefault()
test("nanoseconds truncation") {
val tf = TimestampFormatter.getFractionFormatter(ZoneId.systemDefault())
def checkStringToTimestamp(originalTime: String, expectedParsedTime: String): Unit = {
val parsedTimestampOp = DateTimeUtils.stringToTimestamp(
UTF8String.fromString(originalTime), defaultZoneId)
assert(parsedTimestampOp.isDefined, "timestamp with nanoseconds was not parsed correctly")
assert(tf.format(parsedTimestampOp.get) === expectedParsedTime)
}
checkStringToTimestamp("2015-01-02 00:00:00.123456789", "2015-01-02 00:00:00.123456")
checkStringToTimestamp("2015-01-02 00:00:00.100000009", "2015-01-02 00:00:00.1")
checkStringToTimestamp("2015-01-02 00:00:00.000050000", "2015-01-02 00:00:00.00005")
checkStringToTimestamp("2015-01-02 00:00:00.12005", "2015-01-02 00:00:00.12005")
checkStringToTimestamp("2015-01-02 00:00:00.100", "2015-01-02 00:00:00.1")
checkStringToTimestamp("2015-01-02 00:00:00.000456789", "2015-01-02 00:00:00.000456")
checkStringToTimestamp("1950-01-02 00:00:00.000456789", "1950-01-02 00:00:00.000456")
}
test("timestamp and us") {
val now = new Timestamp(System.currentTimeMillis())
now.setNanos(1000)
val ns = fromJavaTimestamp(now)
assert(ns % 1000000L === 1)
assert(toJavaTimestamp(ns) === now)
List(-111111111111L, -1L, 0, 1L, 111111111111L).foreach { t =>
val ts = toJavaTimestamp(t)
assert(fromJavaTimestamp(ts) === t)
assert(toJavaTimestamp(fromJavaTimestamp(ts)) === ts)
}
}
test("us and julian day") {
val (d, ns) = toJulianDay(0)
assert(d === JULIAN_DAY_OF_EPOCH)
assert(ns === 0)
assert(fromJulianDay(d, ns) == 0L)
Seq(Timestamp.valueOf("2015-06-11 10:10:10.100"),
Timestamp.valueOf("2015-06-11 20:10:10.100"),
Timestamp.valueOf("1900-06-11 20:10:10.100")).foreach { t =>
val (d, ns) = toJulianDay(fromJavaTimestamp(t))
assert(ns > 0)
val t1 = toJavaTimestamp(fromJulianDay(d, ns))
assert(t.equals(t1))
}
}
test("SPARK-6785: java date conversion before and after epoch") {
def checkFromToJavaDate(d1: Date): Unit = {
val d2 = toJavaDate(fromJavaDate(d1))
assert(d2.toString === d1.toString)
}
val df1 = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.US)
val df2 = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z", Locale.US)
checkFromToJavaDate(new Date(100))
checkFromToJavaDate(Date.valueOf("1970-01-01"))
checkFromToJavaDate(new Date(df1.parse("1970-01-01 00:00:00").getTime))
checkFromToJavaDate(new Date(df2.parse("1970-01-01 00:00:00 UTC").getTime))
checkFromToJavaDate(new Date(df1.parse("1970-01-01 00:00:01").getTime))
checkFromToJavaDate(new Date(df2.parse("1970-01-01 00:00:01 UTC").getTime))
checkFromToJavaDate(new Date(df1.parse("1969-12-31 23:59:59").getTime))
checkFromToJavaDate(new Date(df2.parse("1969-12-31 23:59:59 UTC").getTime))
checkFromToJavaDate(Date.valueOf("1969-01-01"))
checkFromToJavaDate(new Date(df1.parse("1969-01-01 00:00:00").getTime))
checkFromToJavaDate(new Date(df2.parse("1969-01-01 00:00:00 UTC").getTime))
checkFromToJavaDate(new Date(df1.parse("1969-01-01 00:00:01").getTime))
checkFromToJavaDate(new Date(df2.parse("1969-01-01 00:00:01 UTC").getTime))
checkFromToJavaDate(new Date(df1.parse("1989-11-09 11:59:59").getTime))
checkFromToJavaDate(new Date(df2.parse("1989-11-09 19:59:59 UTC").getTime))
checkFromToJavaDate(new Date(df1.parse("1776-07-04 10:30:00").getTime))
checkFromToJavaDate(new Date(df2.parse("1776-07-04 18:30:00 UTC").getTime))
}
private def toDate(s: String, zoneId: ZoneId = UTC): Option[Int] = {
stringToDate(UTF8String.fromString(s), zoneId)
}
test("string to date") {
assert(toDate("2015-01-28").get === days(2015, 1, 28))
assert(toDate("2015").get === days(2015, 1, 1))
assert(toDate("0001").get === days(1, 1, 1))
assert(toDate("2015-03").get === days(2015, 3, 1))
Seq("2015-03-18", "2015-03-18 ", " 2015-03-18", " 2015-03-18 ", "2015-03-18 123142",
"2015-03-18T123123", "2015-03-18T").foreach { s =>
assert(toDate(s).get === days(2015, 3, 18))
}
assert(toDate("2015-03-18X").isEmpty)
assert(toDate("2015/03/18").isEmpty)
assert(toDate("2015.03.18").isEmpty)
assert(toDate("20150318").isEmpty)
assert(toDate("2015-031-8").isEmpty)
assert(toDate("02015-03-18").isEmpty)
assert(toDate("015-03-18").isEmpty)
assert(toDate("015").isEmpty)
assert(toDate("02015").isEmpty)
assert(toDate("1999 08 01").isEmpty)
assert(toDate("1999-08 01").isEmpty)
assert(toDate("1999 08").isEmpty)
}
private def toTimestamp(str: String, zoneId: ZoneId): Option[Long] = {
stringToTimestamp(UTF8String.fromString(str), zoneId)
}
test("string to timestamp") {
for (zid <- ALL_TIMEZONES) {
def checkStringToTimestamp(str: String, expected: Option[Long]): Unit = {
assert(toTimestamp(str, zid) === expected)
}
checkStringToTimestamp("1969-12-31 16:00:00", Option(date(1969, 12, 31, 16, zid = zid)))
checkStringToTimestamp("0001", Option(date(1, 1, 1, 0, zid = zid)))
checkStringToTimestamp("2015-03", Option(date(2015, 3, 1, zid = zid)))
Seq("2015-03-18", "2015-03-18 ", " 2015-03-18", " 2015-03-18 ", "2015-03-18T").foreach { s =>
checkStringToTimestamp(s, Option(date(2015, 3, 18, zid = zid)))
}
var expected = Option(date(2015, 3, 18, 12, 3, 17, zid = zid))
checkStringToTimestamp("2015-03-18 12:03:17", expected)
checkStringToTimestamp("2015-03-18T12:03:17", expected)
// If the string value includes a timezone, it represents the timestamp
// in that timezone regardless of the tz parameter.
var zoneId = getZoneId("-13:53")
expected = Option(date(2015, 3, 18, 12, 3, 17, zid = zoneId))
checkStringToTimestamp("2015-03-18T12:03:17-13:53", expected)
checkStringToTimestamp("2015-03-18T12:03:17GMT-13:53", expected)
expected = Option(date(2015, 3, 18, 12, 3, 17, zid = UTC))
checkStringToTimestamp("2015-03-18T12:03:17Z", expected)
checkStringToTimestamp("2015-03-18 12:03:17Z", expected)
checkStringToTimestamp("2015-03-18 12:03:17UTC", expected)
zoneId = getZoneId("-01:00")
expected = Option(date(2015, 3, 18, 12, 3, 17, zid = zoneId))
checkStringToTimestamp("2015-03-18T12:03:17-1:0", expected)
checkStringToTimestamp("2015-03-18T12:03:17-01:00", expected)
checkStringToTimestamp("2015-03-18T12:03:17GMT-01:00", expected)
zoneId = getZoneId("+07:30")
expected = Option(date(2015, 3, 18, 12, 3, 17, zid = zoneId))
checkStringToTimestamp("2015-03-18T12:03:17+07:30", expected)
checkStringToTimestamp("2015-03-18T12:03:17 GMT+07:30", expected)
zoneId = getZoneId("+07:03")
expected = Option(date(2015, 3, 18, 12, 3, 17, zid = zoneId))
checkStringToTimestamp("2015-03-18T12:03:17+07:03", expected)
checkStringToTimestamp("2015-03-18T12:03:17GMT+07:03", expected)
// Tests for strings that include milliseconds.
expected = Option(date(2015, 3, 18, 12, 3, 17, 123000, zid = zid))
checkStringToTimestamp("2015-03-18 12:03:17.123", expected)
checkStringToTimestamp("2015-03-18T12:03:17.123", expected)
// If the string value includes a timezone, it represents the timestamp
// in that timezone regardless of the tz parameter.
expected = Option(date(2015, 3, 18, 12, 3, 17, 456000, zid = UTC))
checkStringToTimestamp("2015-03-18T12:03:17.456Z", expected)
checkStringToTimestamp("2015-03-18 12:03:17.456Z", expected)
checkStringToTimestamp("2015-03-18 12:03:17.456 UTC", expected)
zoneId = getZoneId("-01:00")
expected = Option(date(2015, 3, 18, 12, 3, 17, 123000, zid = zoneId))
checkStringToTimestamp("2015-03-18T12:03:17.123-1:0", expected)
checkStringToTimestamp("2015-03-18T12:03:17.123-01:00", expected)
checkStringToTimestamp("2015-03-18T12:03:17.123 GMT-01:00", expected)
zoneId = getZoneId("+07:30")
expected = Option(date(2015, 3, 18, 12, 3, 17, 123000, zid = zoneId))
checkStringToTimestamp("2015-03-18T12:03:17.123+07:30", expected)
checkStringToTimestamp("2015-03-18T12:03:17.123 GMT+07:30", expected)
zoneId = getZoneId("+07:30")
expected = Option(date(2015, 3, 18, 12, 3, 17, 123000, zid = zoneId))
checkStringToTimestamp("2015-03-18T12:03:17.123+07:30", expected)
checkStringToTimestamp("2015-03-18T12:03:17.123GMT+07:30", expected)
expected = Option(date(2015, 3, 18, 12, 3, 17, 123121, zid = zoneId))
checkStringToTimestamp("2015-03-18T12:03:17.123121+7:30", expected)
checkStringToTimestamp("2015-03-18T12:03:17.123121 GMT+0730", expected)
zoneId = getZoneId("+07:30")
expected = Option(date(2015, 3, 18, 12, 3, 17, 123120, zid = zoneId))
checkStringToTimestamp("2015-03-18T12:03:17.12312+7:30", expected)
checkStringToTimestamp("2015-03-18T12:03:17.12312 UT+07:30", expected)
expected = Option(time(18, 12, 15, zid = zid))
checkStringToTimestamp("18:12:15", expected)
zoneId = getZoneId("+07:30")
expected = Option(time(18, 12, 15, 123120, zid = zoneId))
checkStringToTimestamp("T18:12:15.12312+7:30", expected)
checkStringToTimestamp("T18:12:15.12312 UTC+07:30", expected)
zoneId = getZoneId("+07:30")
expected = Option(time(18, 12, 15, 123120, zid = zoneId))
checkStringToTimestamp("18:12:15.12312+7:30", expected)
checkStringToTimestamp("18:12:15.12312 GMT+07:30", expected)
expected = Option(date(2011, 5, 6, 7, 8, 9, 100000, zid = zid))
checkStringToTimestamp("2011-05-06 07:08:09.1000", expected)
checkStringToTimestamp("238", None)
checkStringToTimestamp("00238", None)
checkStringToTimestamp("2015-03-18 123142", None)
checkStringToTimestamp("2015-03-18T123123", None)
checkStringToTimestamp("2015-03-18X", None)
checkStringToTimestamp("2015/03/18", None)
checkStringToTimestamp("2015.03.18", None)
checkStringToTimestamp("20150318", None)
checkStringToTimestamp("2015-031-8", None)
checkStringToTimestamp("02015-01-18", None)
checkStringToTimestamp("015-01-18", None)
checkStringToTimestamp("2015-03-18T12:03.17-20:0", None)
checkStringToTimestamp("2015-03-18T12:03.17-0:70", None)
checkStringToTimestamp("2015-03-18T12:03.17-1:0:0", None)
checkStringToTimestamp("1999 08 01", None)
checkStringToTimestamp("1999-08 01", None)
checkStringToTimestamp("1999 08", None)
// Truncating the fractional seconds
expected = Option(date(2015, 3, 18, 12, 3, 17, 123456, zid = UTC))
checkStringToTimestamp("2015-03-18T12:03:17.123456789+0:00", expected)
checkStringToTimestamp("2015-03-18T12:03:17.123456789 UTC+0", expected)
checkStringToTimestamp("2015-03-18T12:03:17.123456789GMT+00:00", expected)
zoneId = getZoneId("Europe/Moscow")
expected = Option(date(2015, 3, 18, 12, 3, 17, 123456, zid = zoneId))
checkStringToTimestamp("2015-03-18T12:03:17.123456 Europe/Moscow", expected)
}
}
test("SPARK-15379: special invalid date string") {
// Test stringToDate
assert(toDate("2015-02-29 00:00:00").isEmpty)
assert(toDate("2015-04-31 00:00:00").isEmpty)
assert(toDate("2015-02-29").isEmpty)
assert(toDate("2015-04-31").isEmpty)
// Test stringToTimestamp
assert(stringToTimestamp(
UTF8String.fromString("2015-02-29 00:00:00"), defaultZoneId).isEmpty)
assert(stringToTimestamp(
UTF8String.fromString("2015-04-31 00:00:00"), defaultZoneId).isEmpty)
assert(toTimestamp("2015-02-29", defaultZoneId).isEmpty)
assert(toTimestamp("2015-04-31", defaultZoneId).isEmpty)
}
test("hours") {
var input = date(2015, 3, 18, 13, 2, 11, 0, LA)
assert(getHours(input, LA) === 13)
assert(getHours(input, UTC) === 20)
input = date(2015, 12, 8, 2, 7, 9, 0, LA)
assert(getHours(input, LA) === 2)
assert(getHours(input, UTC) === 10)
input = date(10, 1, 1, 0, 0, 0, 0, LA)
assert(getHours(input, LA) === 0)
}
test("minutes") {
var input = date(2015, 3, 18, 13, 2, 11, 0, LA)
assert(getMinutes(input, LA) === 2)
assert(getMinutes(input, UTC) === 2)
assert(getMinutes(input, getZoneId("Australia/North")) === 32)
input = date(2015, 3, 8, 2, 7, 9, 0, LA)
assert(getMinutes(input, LA) === 7)
assert(getMinutes(input, UTC) === 7)
assert(getMinutes(input, getZoneId("Australia/North")) === 37)
input = date(10, 1, 1, 0, 0, 0, 0, LA)
assert(getMinutes(input, LA) === 0)
}
test("seconds") {
var input = date(2015, 3, 18, 13, 2, 11, 0, LA)
assert(getSeconds(input, LA) === 11)
assert(getSeconds(input, UTC) === 11)
input = date(2015, 3, 8, 2, 7, 9, 0, LA)
assert(getSeconds(input, LA) === 9)
assert(getSeconds(input, UTC) === 9)
input = date(10, 1, 1, 0, 0, 0, 0, LA)
assert(getSeconds(input, LA) === 0)
}
test("hours / minutes / seconds") {
Seq(Timestamp.valueOf("2015-06-11 10:12:35.789"),
Timestamp.valueOf("2015-06-11 20:13:40.789"),
Timestamp.valueOf("1900-06-11 12:14:50.789"),
Timestamp.valueOf("1700-02-28 12:14:50.123456")).foreach { t =>
val us = fromJavaTimestamp(t)
assert(toJavaTimestamp(us) === t)
}
}
test("get day in year") {
assert(getDayInYear(days(2015, 3, 18)) === 77)
assert(getDayInYear(days(2012, 3, 18)) === 78)
}
test("day of year calculations for old years") {
assert(getDayInYear(days(1582, 3)) === 60)
(1000 to 1600 by 10).foreach { year =>
// January 1 is the 1st day of the year.
assert(getYear(days(year)) === year)
assert(getMonth(days(year, 1)) === 1)
assert(getDayInYear(days(year, 1, 1)) === 1)
// December 31 is the last day of the year.
val date = days(year, 12, 31)
assert(getYear(date) === year)
assert(getMonth(date) === 12)
assert(getDayOfMonth(date) === 31)
}
}
test("get year") {
assert(getYear(days(2015, 2, 18)) === 2015)
assert(getYear(days(2012, 2, 18)) === 2012)
}
test("get quarter") {
assert(getQuarter(days(2015, 2, 18)) === 1)
assert(getQuarter(days(2012, 11, 18)) === 4)
}
test("get month") {
assert(getMonth(days(2015, 3, 18)) === 3)
assert(getMonth(days(2012, 12, 18)) === 12)
}
test("get day of month") {
assert(getDayOfMonth(days(2015, 3, 18)) === 18)
assert(getDayOfMonth(days(2012, 12, 24)) === 24)
}
test("date add months") {
val input = days(1997, 2, 28)
assert(dateAddMonths(input, 36) === days(2000, 2, 28))
assert(dateAddMonths(input, -13) === days(1996, 1, 28))
}
test("date add interval with day precision") {
val input = days(1997, 2, 28)
assert(dateAddInterval(input, new CalendarInterval(36, 0, 0)) === days(2000, 2, 28))
assert(dateAddInterval(input, new CalendarInterval(36, 47, 0)) === days(2000, 4, 15))
assert(dateAddInterval(input, new CalendarInterval(-13, 0, 0)) === days(1996, 1, 28))
intercept[IllegalArgumentException](dateAddInterval(input, new CalendarInterval(36, 47, 1)))
}
test("timestamp add months") {
val ts1 = date(1997, 2, 28, 10, 30, 0)
val ts2 = date(2000, 2, 28, 10, 30, 0, 123000)
assert(timestampAddInterval(ts1, 36, 0, 123000, defaultZoneId) === ts2)
val ts3 = date(1997, 2, 27, 16, 0, 0, 0, LA)
val ts4 = date(2000, 2, 27, 16, 0, 0, 123000, LA)
val ts5 = date(2000, 2, 28, 0, 0, 0, 123000, UTC)
assert(timestampAddInterval(ts3, 36, 0, 123000, LA) === ts4)
assert(timestampAddInterval(ts3, 36, 0, 123000, UTC) === ts5)
}
test("timestamp add days") {
// 2019-3-9 is the end of Pacific Standard Time
val ts1 = date(2019, 3, 9, 12, 0, 0, 123000, LA)
// 2019-3-10 is the start of Pacific Daylight Time
val ts2 = date(2019, 3, 10, 12, 0, 0, 123000, LA)
val ts3 = date(2019, 5, 9, 12, 0, 0, 123000, LA)
val ts4 = date(2019, 5, 10, 12, 0, 0, 123000, LA)
// 2019-11-2 is the end of Pacific Daylight Time
val ts5 = date(2019, 11, 2, 12, 0, 0, 123000, LA)
// 2019-11-3 is the start of Pacific Standard Time
val ts6 = date(2019, 11, 3, 12, 0, 0, 123000, LA)
// transit from Pacific Standard Time to Pacific Daylight Time
assert(timestampAddInterval(
ts1, 0, 0, 23 * MICROS_PER_HOUR, LA) === ts2)
assert(timestampAddInterval(ts1, 0, 1, 0, LA) === ts2)
// just a normal day
assert(timestampAddInterval(
ts3, 0, 0, 24 * MICROS_PER_HOUR, LA) === ts4)
assert(timestampAddInterval(ts3, 0, 1, 0, LA) === ts4)
// transit from Pacific Daylight Time to Pacific Standard Time
assert(timestampAddInterval(
ts5, 0, 0, 25 * MICROS_PER_HOUR, LA) === ts6)
assert(timestampAddInterval(ts5, 0, 1, 0, LA) === ts6)
}
test("monthsBetween") {
val date1 = date(1997, 2, 28, 10, 30, 0)
var date2 = date(1996, 10, 30)
assert(monthsBetween(date1, date2, true, UTC) === 3.94959677)
assert(monthsBetween(date1, date2, false, UTC) === 3.9495967741935485)
Seq(true, false).foreach { roundOff =>
date2 = date(2000, 2, 28)
assert(monthsBetween(date1, date2, roundOff, UTC) === -36)
date2 = date(2000, 2, 29)
assert(monthsBetween(date1, date2, roundOff, UTC) === -36)
date2 = date(1996, 3, 31)
assert(monthsBetween(date1, date2, roundOff, UTC) === 11)
}
val date3 = date(2000, 2, 28, 16, zid = LA)
val date4 = date(1997, 2, 28, 16, zid = LA)
assert(monthsBetween(date3, date4, true, LA) === 36.0)
assert(monthsBetween(date3, date4, true, UTC) === 35.90322581)
assert(monthsBetween(date3, date4, false, UTC) === 35.903225806451616)
}
test("from UTC timestamp") {
def test(utc: String, tz: String, expected: String): Unit = {
assert(toJavaTimestamp(fromUTCTime(fromJavaTimestamp(Timestamp.valueOf(utc)), tz)).toString
=== expected)
}
for (tz <- ALL_TIMEZONES) {
withDefaultTimeZone(tz) {
test("2011-12-25 09:00:00.123456", "UTC", "2011-12-25 09:00:00.123456")
test("2011-12-25 09:00:00.123456", JST.getId, "2011-12-25 18:00:00.123456")
test("2011-12-25 09:00:00.123456", LA.getId, "2011-12-25 01:00:00.123456")
test("2011-12-25 09:00:00.123456", "Asia/Shanghai", "2011-12-25 17:00:00.123456")
}
}
withDefaultTimeZone(LA) {
// Daylight Saving Time
test("2016-03-13 09:59:59.0", LA.getId, "2016-03-13 01:59:59.0")
test("2016-03-13 10:00:00.0", LA.getId, "2016-03-13 03:00:00.0")
test("2016-11-06 08:59:59.0", LA.getId, "2016-11-06 01:59:59.0")
test("2016-11-06 09:00:00.0", LA.getId, "2016-11-06 01:00:00.0")
test("2016-11-06 10:00:00.0", LA.getId, "2016-11-06 02:00:00.0")
}
}
test("to UTC timestamp") {
def test(utc: String, tz: String, expected: String): Unit = {
assert(toJavaTimestamp(toUTCTime(fromJavaTimestamp(Timestamp.valueOf(utc)), tz)).toString
=== expected)
}
for (zid <- ALL_TIMEZONES) {
withDefaultTimeZone(zid) {
test("2011-12-25 09:00:00.123456", "UTC", "2011-12-25 09:00:00.123456")
test("2011-12-25 18:00:00.123456", JST.getId, "2011-12-25 09:00:00.123456")
test("2011-12-25 01:00:00.123456", LA.getId, "2011-12-25 09:00:00.123456")
test("2011-12-25 17:00:00.123456", "Asia/Shanghai", "2011-12-25 09:00:00.123456")
}
}
withDefaultTimeZone(LA) {
val tz = LA.getId
// Daylight Saving Time
test("2016-03-13 01:59:59", tz, "2016-03-13 09:59:59.0")
test("2016-03-13 02:00:00", tz, "2016-03-13 10:00:00.0")
test("2016-03-13 03:00:00", tz, "2016-03-13 10:00:00.0")
test("2016-11-06 00:59:59", tz, "2016-11-06 07:59:59.0")
test("2016-11-06 01:00:00", tz, "2016-11-06 09:00:00.0")
test("2016-11-06 01:59:59", tz, "2016-11-06 09:59:59.0")
test("2016-11-06 02:00:00", tz, "2016-11-06 10:00:00.0")
}
}
test("trailing characters while converting string to timestamp") {
val s = UTF8String.fromString("2019-10-31T10:59:23Z:::")
val time = DateTimeUtils.stringToTimestamp(s, defaultZoneId)
assert(time == None)
}
test("truncTimestamp") {
def testTrunc(
level: Int,
expected: String,
inputTS: Long,
zoneId: ZoneId = defaultZoneId): Unit = {
val truncated =
DateTimeUtils.truncTimestamp(inputTS, level, zoneId)
val expectedTS = toTimestamp(expected, defaultZoneId)
assert(truncated === expectedTS.get)
}
val defaultInputTS = DateTimeUtils.stringToTimestamp(
UTF8String.fromString("2015-03-05T09:32:05.359123"), defaultZoneId)
val defaultInputTS1 = DateTimeUtils.stringToTimestamp(
UTF8String.fromString("2015-03-31T20:32:05.359"), defaultZoneId)
val defaultInputTS2 = DateTimeUtils.stringToTimestamp(
UTF8String.fromString("2015-04-01T02:32:05.359"), defaultZoneId)
val defaultInputTS3 = DateTimeUtils.stringToTimestamp(
UTF8String.fromString("2015-03-30T02:32:05.359"), defaultZoneId)
val defaultInputTS4 = DateTimeUtils.stringToTimestamp(
UTF8String.fromString("2015-03-29T02:32:05.359"), defaultZoneId)
testTrunc(DateTimeUtils.TRUNC_TO_YEAR, "2015-01-01T00:00:00", defaultInputTS.get)
testTrunc(DateTimeUtils.TRUNC_TO_MONTH, "2015-03-01T00:00:00", defaultInputTS.get)
testTrunc(DateTimeUtils.TRUNC_TO_DAY, "2015-03-05T00:00:00", defaultInputTS.get)
testTrunc(DateTimeUtils.TRUNC_TO_HOUR, "2015-03-05T09:00:00", defaultInputTS.get)
testTrunc(DateTimeUtils.TRUNC_TO_MINUTE, "2015-03-05T09:32:00", defaultInputTS.get)
testTrunc(DateTimeUtils.TRUNC_TO_SECOND, "2015-03-05T09:32:05", defaultInputTS.get)
testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-02T00:00:00", defaultInputTS.get)
testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-30T00:00:00", defaultInputTS1.get)
testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-30T00:00:00", defaultInputTS2.get)
testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-30T00:00:00", defaultInputTS3.get)
testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-23T00:00:00", defaultInputTS4.get)
testTrunc(DateTimeUtils.TRUNC_TO_QUARTER, "2015-01-01T00:00:00", defaultInputTS.get)
testTrunc(DateTimeUtils.TRUNC_TO_QUARTER, "2015-01-01T00:00:00", defaultInputTS1.get)
testTrunc(DateTimeUtils.TRUNC_TO_QUARTER, "2015-04-01T00:00:00", defaultInputTS2.get)
testTrunc(DateTimeUtils.TRUNC_TO_MICROSECOND, "2015-03-05T09:32:05.359123", defaultInputTS.get)
testTrunc(DateTimeUtils.TRUNC_TO_MILLISECOND, "2015-03-05T09:32:05.359", defaultInputTS.get)
for (zid <- ALL_TIMEZONES) {
withDefaultTimeZone(zid) {
val inputTS = DateTimeUtils.stringToTimestamp(
UTF8String.fromString("2015-03-05T09:32:05.359"), defaultZoneId)
val inputTS1 = DateTimeUtils.stringToTimestamp(
UTF8String.fromString("2015-03-31T20:32:05.359"), defaultZoneId)
val inputTS2 = DateTimeUtils.stringToTimestamp(
UTF8String.fromString("2015-04-01T02:32:05.359"), defaultZoneId)
val inputTS3 = DateTimeUtils.stringToTimestamp(
UTF8String.fromString("2015-03-30T02:32:05.359"), defaultZoneId)
val inputTS4 = DateTimeUtils.stringToTimestamp(
UTF8String.fromString("2015-03-29T02:32:05.359"), defaultZoneId)
val inputTS5 = DateTimeUtils.stringToTimestamp(
UTF8String.fromString("1999-03-29T01:02:03.456789"), defaultZoneId)
testTrunc(DateTimeUtils.TRUNC_TO_YEAR, "2015-01-01T00:00:00", inputTS.get, zid)
testTrunc(DateTimeUtils.TRUNC_TO_MONTH, "2015-03-01T00:00:00", inputTS.get, zid)
testTrunc(DateTimeUtils.TRUNC_TO_DAY, "2015-03-05T00:00:00", inputTS.get, zid)
testTrunc(DateTimeUtils.TRUNC_TO_HOUR, "2015-03-05T09:00:00", inputTS.get, zid)
testTrunc(DateTimeUtils.TRUNC_TO_MINUTE, "2015-03-05T09:32:00", inputTS.get, zid)
testTrunc(DateTimeUtils.TRUNC_TO_SECOND, "2015-03-05T09:32:05", inputTS.get, zid)
testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-02T00:00:00", inputTS.get, zid)
testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-30T00:00:00", inputTS1.get, zid)
testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-30T00:00:00", inputTS2.get, zid)
testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-30T00:00:00", inputTS3.get, zid)
testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-23T00:00:00", inputTS4.get, zid)
testTrunc(DateTimeUtils.TRUNC_TO_QUARTER, "2015-01-01T00:00:00", inputTS.get, zid)
testTrunc(DateTimeUtils.TRUNC_TO_QUARTER, "2015-01-01T00:00:00", inputTS1.get, zid)
testTrunc(DateTimeUtils.TRUNC_TO_QUARTER, "2015-04-01T00:00:00", inputTS2.get, zid)
}
}
}
test("daysToMicros and microsToDays") {
val input = date(2015, 12, 31, 16, zid = LA)
assert(microsToDays(input, LA) === 16800)
assert(microsToDays(input, UTC) === 16801)
assert(microsToDays(-1 * MILLIS_PER_DAY + 1, UTC) == -1)
var expected = date(2015, 12, 31, zid = LA)
assert(daysToMicros(16800, LA) === expected)
expected = date(2015, 12, 31, zid = UTC)
assert(daysToMicros(16800, UTC) === expected)
// Some days are skipped entirely in certain time zones; skip them here.
val skipped_days = Map[String, Set[Int]](
"Kwajalein" -> Set(8632, 8633, 8634),
"Pacific/Apia" -> Set(15338),
"Pacific/Enderbury" -> Set(9130, 9131),
"Pacific/Fakaofo" -> Set(15338),
"Pacific/Kiritimati" -> Set(9130, 9131),
"Pacific/Kwajalein" -> Set(8632, 8633, 8634),
MIT.getId -> Set(15338))
for (zid <- ALL_TIMEZONES) {
val skipped = skipped_days.getOrElse(zid.getId, Set.empty)
val testingData = Seq(-20000, 20000) ++
(1 to 1000).map(_ => (math.random() * 40000 - 20000).toInt)
testingData.foreach { d =>
if (!skipped.contains(d)) {
assert(microsToDays(daysToMicros(d, zid), zid) === d,
s"Round trip of $d did not work in tz ${zid.getId}")
}
}
}
}
test("microsToMillis") {
assert(DateTimeUtils.microsToMillis(-9223372036844776001L) === -9223372036844777L)
assert(DateTimeUtils.microsToMillis(-157700927876544L) === -157700927877L)
}
test("special timestamp values") {
testSpecialDatetimeValues { zoneId =>
val tolerance = TimeUnit.SECONDS.toMicros(30)
assert(toTimestamp("Epoch", zoneId).get === 0)
val now = instantToMicros(Instant.now())
toTimestamp("NOW", zoneId).get should be(now +- tolerance)
assert(toTimestamp("now UTC", zoneId) === None)
val localToday = LocalDateTime.now(zoneId)
.`with`(LocalTime.MIDNIGHT)
.atZone(zoneId)
val yesterday = instantToMicros(localToday.minusDays(1).toInstant)
toTimestamp(" Yesterday", zoneId).get should be(yesterday +- tolerance)
val today = instantToMicros(localToday.toInstant)
toTimestamp("Today ", zoneId).get should be(today +- tolerance)
val tomorrow = instantToMicros(localToday.plusDays(1).toInstant)
toTimestamp(" tomorrow CET ", zoneId).get should be(tomorrow +- tolerance)
}
}
test("special date values") {
testSpecialDatetimeValues { zoneId =>
assert(toDate("epoch", zoneId).get === 0)
val today = localDateToDays(LocalDate.now(zoneId))
assert(toDate("YESTERDAY", zoneId).get === today - 1)
assert(toDate(" Now ", zoneId).get === today)
assert(toDate("now UTC", zoneId) === None) // "now" does not accept time zones
assert(toDate("today", zoneId).get === today)
assert(toDate("tomorrow CET ", zoneId).get === today + 1)
}
}
}
|
dbtsai/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/DateTimeUtilsSuite.scala
|
Scala
|
apache-2.0
| 29,392
|
// Copyright (C) 2019 MapRoulette contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
package org.maproulette.provider.websockets
import org.joda.time.DateTime
import play.api.libs.json._
import play.api.libs.json.JodaWrites._
import play.api.libs.json.JodaReads._
import org.maproulette.models.Task
import org.maproulette.models.TaskWithReview
import org.maproulette.session.User
/**
* Defines case classes representing the various kinds of messages to be
* transmitted via websocket, as well as helper methods for easily and
* correctly constructing each kind of message
*
* @author nrotstan
*/
object WebSocketMessages {
sealed trait Message {
def messageType: String
}
// Client messages and data representations
case class ClientMessage(messageType: String, meta: Option[JsValue], data: Option[JsValue])
extends Message
case class SubscriptionData(subscriptionName: String)
case class PingMessage(messageType: String)
// Server-generated messages and data representations
case class ServerMeta(subscriptionName: Option[String], created: DateTime = DateTime.now())
sealed trait ServerMessage extends Message {
val meta: ServerMeta
}
case class PongMessage(messageType: String, meta: ServerMeta) extends ServerMessage
case class UserSummary(userId: Long, displayName: String, avatarURL: String)
case class NotificationData(userId: Long, notificationType: Int)
case class NotificationMessage(messageType: String, data: NotificationData, meta: ServerMeta)
extends ServerMessage
case class ReviewData(taskWithReview: TaskWithReview)
case class ReviewMessage(messageType: String, data: ReviewData, meta: ServerMeta)
extends ServerMessage
case class TaskAction(task: Task, byUser: Option[UserSummary])
case class TaskMessage(messageType: String, data: TaskAction, meta: ServerMeta)
extends ServerMessage
// Public helper methods for creation of individual messages and data objects
def pong(): PongMessage = PongMessage("pong", ServerMeta(None))
def notificationNew(data: NotificationData): NotificationMessage =
createNotificationMessage("notification-new", data)
def reviewNew(data: ReviewData): ReviewMessage = createReviewMessage("review-new", data)
def reviewClaimed(data: ReviewData): ReviewMessage = createReviewMessage("review-claimed", data)
def reviewUpdate(data: ReviewData): ReviewMessage = createReviewMessage("review-update", data)
def taskClaimed(taskData: Task, userData: Option[UserSummary]): List[ServerMessage] =
createTaskMessage("task-claimed", taskData, userData)
def taskReleased(taskData: Task, userData: Option[UserSummary]): List[ServerMessage] =
createTaskMessage("task-released", taskData, userData)
def taskUpdate(taskData: Task, userData: Option[UserSummary]): List[ServerMessage] =
createTaskMessage("task-update", taskData, userData)
def userSummary(user: User): UserSummary =
UserSummary(user.id, user.osmProfile.displayName, user.osmProfile.avatarURL)
// private helper methods
private def createNotificationMessage(
messageType: String,
data: NotificationData
): NotificationMessage = {
NotificationMessage(
messageType,
data,
ServerMeta(Some(WebSocketMessages.SUBSCRIPTION_USER + s"_${data.userId}"))
)
}
private def createReviewMessage(messageType: String, data: ReviewData): ReviewMessage = {
ReviewMessage(messageType, data, ServerMeta(Some(WebSocketMessages.SUBSCRIPTION_REVIEWS)))
}
private def createTaskMessage(
messageType: String,
taskData: Task,
userData: Option[UserSummary]
): List[ServerMessage] = {
val data = TaskAction(taskData, userData)
// Create one message for subscribers to all tasks and one for subscribers
// to just challenge-specific tasks
List[WebSocketMessages.ServerMessage](
TaskMessage(messageType, data, ServerMeta(Some(WebSocketMessages.SUBSCRIPTION_TASKS))),
TaskMessage(
messageType,
data,
ServerMeta(Some(WebSocketMessages.SUBSCRIPTION_CHALLENGE_TASKS + s"_${data.task.parent}"))
)
)
}
private def createChallengeTaskMessage(messageType: String, data: TaskAction): TaskMessage = {
TaskMessage(
messageType,
data,
ServerMeta(Some(WebSocketMessages.SUBSCRIPTION_CHALLENGE_TASKS + s"_${data.task.parent}"))
)
}
// Reads for client messages
implicit val clientMessageReads: Reads[ClientMessage] = Json.reads[ClientMessage]
implicit val subscriptionDataReads: Reads[SubscriptionData] = Json.reads[SubscriptionData]
implicit val pingMessageReads: Reads[PingMessage] = Json.reads[PingMessage]
// Writes for server-generated messages
implicit val serverMetaWrites: Writes[ServerMeta] = Json.writes[ServerMeta]
implicit val pongMessageWrites: Writes[PongMessage] = Json.writes[PongMessage]
implicit val UserSummaryWrites: Writes[UserSummary] = Json.writes[UserSummary]
implicit val notificationDataWrites: Writes[NotificationData] = Json.writes[NotificationData]
implicit val notificationMessageWrites: Writes[NotificationMessage] =
Json.writes[NotificationMessage]
implicit val reviewDataWrites: Writes[ReviewData] = Json.writes[ReviewData]
implicit val reviewMessageWrites: Writes[ReviewMessage] = Json.writes[ReviewMessage]
implicit val taskActionWrites: Writes[TaskAction] = Json.writes[TaskAction]
implicit val taskMessageWrites: Writes[TaskMessage] = Json.writes[TaskMessage]
// Available subscription types
val SUBSCRIPTION_USER = "user" // expected to be accompanied by user id
val SUBSCRIPTION_USERS = "users"
val SUBSCRIPTION_REVIEWS = "reviews"
val SUBSCRIPTION_TASKS = "tasks"
val SUBSCRIPTION_CHALLENGE_TASKS = "challengeTasks" // expected to be accompanied by challenge id
}
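// Hedged usage sketch (added for illustration, not part of the upstream file): shows how a
// server-generated message is typically built with the helpers above and serialized through the
// explicit Writes. The user id and notification type are assumed example values; nothing is
// actually dispatched over a websocket here.
object WebSocketMessagesUsageSketch {
  def notificationJson(exampleUserId: Long): JsValue = {
    // Build the message through the helper so the subscription name is filled in correctly.
    val message = WebSocketMessages.notificationNew(
      WebSocketMessages.NotificationData(exampleUserId, notificationType = 1))
    // Serialize with the Writes defined above (they are not implicitly in scope outside the object).
    Json.toJson(message)(WebSocketMessages.notificationMessageWrites)
  }
}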
|
Crashfreak/maproulette2
|
app/org/maproulette/provider/websockets/WebSocketMessages.scala
|
Scala
|
apache-2.0
| 5,969
|
package dpla.ingestion3.harvesters.pss
import java.net.URL
import dpla.ingestion3.utils.HttpUtils
import org.apache.commons.io.IOUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import scala.util.{Failure, Success}
/*
* This class handles requests to the PSS feed.
* It partitions data at strategic points.
*/
class PssResponseBuilder (@transient val sqlContext: SQLContext)
extends Serializable {
// Get all sets.
def getSets(endpoint: String): RDD[(String,String)] = {
// The given endpoint returns a list of all set URLs.
val url = new URL(endpoint)
val allSets = getStringResponse(url)
val setEndpoints = PssResponseProcessor.getSetEndpoints(allSets)
val setEndpointsRdd = sqlContext.sparkContext.parallelize(setEndpoints)
setEndpointsRdd.map(setEndpoint => getSet(setEndpoint))
}
// Get a single set.
def getSet(endpoint: String): (String, String) = {
// The given endpoint contains metadata for the source, along with a
// list of URLs for component parts of the set.
val setUrl = new URL(endpoint)
val setId = PssResponseProcessor.getSetId(endpoint)
val set = getStringResponse(setUrl)
val parts = getParts(set)
val setWithParts = PssResponseProcessor.combineSetAndParts(set, parts)
(setId, setWithParts)
}
// Get all component parts of a set (i.e. sources and teaching guides).
def getParts(set: String): List[String] = {
// The given endpoints contain metadata for component parts of a set.
val endpoints = PssResponseProcessor.getPartEndpoints(set)
endpoints.map(endpoint => {
val url = new URL(endpoint)
getStringResponse(url)
})
}
/**
* Executes the request and returns the response
*
* @param url URL
* PSS request URL
* @return String
* String response
*/
def getStringResponse(url: URL) : String = {
HttpUtils.makeGetRequest(url) match {
case Success(s) => s
// TODO: Handle failed HTTP request.
case Failure(f) => throw f
}
}
}
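// Hedged usage sketch (added for illustration, not part of the upstream file): shows how the
// builder would typically be driven from a Spark job. The feed endpoint URL is an assumed
// placeholder, not a real PSS address.
object PssResponseBuilderUsageSketch {
  def harvestSets(sqlContext: SQLContext): RDD[(String, String)] = {
    val builder = new PssResponseBuilder(sqlContext)
    // Each element pairs a set id with the combined set-plus-parts response.
    builder.getSets("https://example.org/primary-source-sets/sets.json")
  }
}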
|
dpla/ingestion3
|
src/main/scala/dpla/ingestion3/harvesters/pss/PssResponseBuilder.scala
|
Scala
|
mit
| 2,065
|
package com.github.vladminzatu.surfer
import org.scalatest.{BeforeAndAfter, Matchers, FunSuite}
/**
* Base class for all tests.
*/
abstract class TestBase extends FunSuite with Matchers with BeforeAndAfter
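// Hedged usage sketch (added for illustration, not part of the upstream file): a concrete suite
// simply extends the base class and gets FunSuite, Matchers and BeforeAndAfter for free.
// Kept as a comment so no extra test is registered:
//
//   class ExampleSpec extends TestBase {
//     test("addition") {
//       (1 + 1) shouldBe 2
//     }
//   }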
|
VladMinzatu/surfer
|
src/test/scala/com/github/vladminzatu/surfer/TestBase.scala
|
Scala
|
mit
| 208
|
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.bforms
import com.google.inject.AbstractModule
import uk.gov.hmrc.play.config.ServicesConfig
import uk.gov.hmrc.bforms.controllers.{SecuredActions, SecuredActionsImpl}
import uk.gov.hmrc.play.frontend.auth.connectors.AuthConnector
class GuiceModule extends AbstractModule with ServicesConfig {
override def configure(): Unit = {
bind(classOf[AuthConnector]).to(classOf[FrontendAuthConnector])
bind(classOf[SecuredActions]).to(classOf[SecuredActionsImpl])
}
}
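// Hedged note (added for illustration, not part of the upstream file): a Guice module like this
// is normally enabled from Play configuration rather than referenced in code. Assuming the
// standard Play mechanism, the corresponding application.conf entry would look like:
//
//   play.modules.enabled += "uk.gov.hmrc.bforms.GuiceModule"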
|
VlachJosef/bforms-frontend
|
app/uk/gov/hmrc/bforms/GuiceModule.scala
|
Scala
|
apache-2.0
| 1,098
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.catalyst.SqlParser
import org.apache.carbondata.common.logging.LogServiceFactory
/**
* This object contains Carbon Hive metadata-related utilities.
*/
object CarbonHiveMetadataUtil {
@transient
val LOGGER = LogServiceFactory.getLogService(CarbonHiveMetadataUtil.getClass.getName)
/**
* This method invalidates the table in the HiveMetastoreCatalog cache before dropping the table.
*
* @param databaseName the database containing the table
* @param tableName    the table to invalidate and drop
* @param sqlContext   the active SQLContext (cast to HiveContext internally)
*/
def invalidateAndDropTable(databaseName: String,
tableName: String,
sqlContext: SQLContext): Unit = {
val hiveContext = sqlContext.asInstanceOf[HiveContext]
val tableWithDb = databaseName + "." + tableName
val tableIdent = SqlParser.parseTableIdentifier(tableWithDb)
try {
hiveContext.catalog.invalidateTable(tableIdent)
hiveContext.runSqlHive(s"DROP TABLE IF EXISTS $databaseName.$tableName")
} catch {
case e: Exception =>
LOGGER.audit(
s"Error While deleting the table $databaseName.$tableName during drop carbon table" +
e.getMessage)
}
}
}
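// Hedged usage sketch (added for illustration, not part of the upstream file): database and table
// names are assumed example values, and the SQLContext must actually be a HiveContext because
// invalidateAndDropTable casts it.
object CarbonHiveMetadataUtilUsageSketch {
  def dropExampleTable(sqlContext: SQLContext): Unit =
    CarbonHiveMetadataUtil.invalidateAndDropTable("default", "example_carbon_table", sqlContext)
}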
|
nehabhardwaj01/incubator-carbondata
|
integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveMetadataUtil.scala
|
Scala
|
apache-2.0
| 2,003
|
package models.daos
import org.scalatest._
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import org.scalatest.concurrent._
import models.daos._
import models._
import slick.driver.PostgresDriver.api._
import scala.concurrent._
import scala.concurrent.duration._
import java.util.UUID
import java.util.Calendar
class AccumulationDAOSpec extends DatabaseSpec with Matchers with OptionValues with BeforeAndAfter {
var dao: AccumulationDAO = null
val mantraId = 1L
val gatheringId = 1L
before {
dao = new AccumulationDAOImpl()
whenReady(db.run(sqlu"delete from accumulations")) { _ =>
cleanInsert("AccumulationDAOSpec")
}
}
after {
}
"Saving a new non-existant Accumulation" should "save and return Accumulation with the primary key" taggedAs (DbTest) in {
val accumulation = Accumulation(None, 2015, 8, 7, 100, mantraId, UUID.randomUUID(), gatheringId)
whenReady(dao.save(accumulation)) { updatedAccumulation =>
val id: Option[Long] = updatedAccumulation.id
id.value should be >= 1L
}
}
"Saving an existing Accumulation" should "save and return Accumulation with the primary key" taggedAs (DbTest) in {
val uid = UUID.randomUUID()
val accumulation = Accumulation(None, 2015, 8, 7, 200, mantraId, uid, gatheringId)
whenReady(dao.save(accumulation)) { updatedAccumulation =>
val id: Option[Long] = updatedAccumulation.id
val idValue = id.value
val accumulation2 = updatedAccumulation.copy(count = 400)
whenReady(dao.save(accumulation2)) { updatedAccumulation2 =>
val id2: Option[Long] = updatedAccumulation2.id
assert(id2.value === idValue)
}
}
}
"Find Accumulation for today" should "throw java.util.NoSuchElementException if no entry exists for today" taggedAs (DbTest) in {
intercept[java.util.NoSuchElementException] {
Await.result(dao.findForToday(UUID.randomUUID(), -1L, 1L), Duration(5000L, MILLISECONDS))
}
}
it should "return Accumulation if exists for today" taggedAs (DbTest) in {
val cal = Calendar.getInstance()
val year = cal.get(Calendar.YEAR)
val month = cal.get(Calendar.MONTH) + 1
val day = cal.get(Calendar.DAY_OF_MONTH)
val uid = UUID.randomUUID()
val count = 200L
val accumulation = Accumulation(None, year, month, day, count, mantraId, uid, gatheringId)
whenReady(dao.save(accumulation)) { a =>
whenReady(dao.findForToday(uid, gatheringId, mantraId)) { acc =>
assert(acc.year == year)
assert(acc.month == month)
assert(acc.day == day)
assert(acc.count == count)
assert(acc.mantraId == mantraId)
assert(acc.userId == uid)
assert(acc.gatheringId == gatheringId)
}
}
}
"Counts for mantra" should "return sum total for overall, year, month, and day" taggedAs (DbTest) in {
val cal = Calendar.getInstance()
val year = cal.get(Calendar.YEAR)
val month = cal.get(Calendar.MONTH) + 1
val day = cal.get(Calendar.DAY_OF_MONTH)
val setup = for {
_ <- dao.save(Accumulation(None, year, month, day, 1, mantraId, UUID.randomUUID(), gatheringId))
_ <- dao.save(Accumulation(None, year, month, day + 1, 2, mantraId, UUID.randomUUID(), gatheringId))
_ <- dao.save(Accumulation(None, year, month, day + 2, 3, mantraId, UUID.randomUUID(), gatheringId))
_ <- dao.save(Accumulation(None, year, month - 1, day + 3, 4, mantraId, UUID.randomUUID(), gatheringId))
_ <- dao.save(Accumulation(None, year - 1, month, day, 5, mantraId, UUID.randomUUID(), gatheringId))
} yield ("done")
whenReady(setup) { i =>
whenReady(dao.counts(mantraId)(None)) { totals =>
println(totals)
totals._1 shouldBe 15L
totals._2 shouldBe 10L
totals._3 shouldBe 6L
totals._4 shouldBe 1L
}
}
}
it should "return 0 for all totals if no values" taggedAs (DbTest) in {
whenReady(dao.counts(2378)(None)) { totals =>
totals._1 shouldBe 0L
totals._2 shouldBe 0L
totals._3 shouldBe 0L
totals._4 shouldBe 0L
}
}
"Counts for mantra and gathering" should "return sum total for overall, year, month, and day" taggedAs (DbTest) in {
val cal = Calendar.getInstance()
val year = cal.get(Calendar.YEAR)
val month = cal.get(Calendar.MONTH) + 1
val day = cal.get(Calendar.DAY_OF_MONTH)
val setup = for {
_ <- dao.save(Accumulation(None, year, month, day, 1, mantraId, UUID.randomUUID(), gatheringId))
_ <- dao.save(Accumulation(None, year, month, day + 1, 2, mantraId, UUID.randomUUID(), gatheringId))
_ <- dao.save(Accumulation(None, year, month, day + 2, 3, mantraId, UUID.randomUUID(), gatheringId))
_ <- dao.save(Accumulation(None, year, month - 1, day + 3, 4, mantraId, UUID.randomUUID(), gatheringId))
_ <- dao.save(Accumulation(None, year - 1, month, day, 5, mantraId, UUID.randomUUID(), gatheringId))
_ <- dao.save(Accumulation(None, year, month, day, 5, mantraId, UUID.randomUUID(), 2L))
} yield ("done")
whenReady(setup) { _ =>
whenReady(dao.counts(mantraId)(Some(gatheringId))) { totals =>
println(totals)
totals._1 shouldBe 15L
totals._2 shouldBe 10L
totals._3 shouldBe 6L
totals._4 shouldBe 1L
}
}
}
it should "return 0 for all totals if no values" taggedAs (DbTest) in {
whenReady(dao.counts(2378)(Some(-897897))) { totals =>
totals._1 shouldBe 0L
totals._2 shouldBe 0L
totals._3 shouldBe 0L
totals._4 shouldBe 0L
}
}
}
|
leannenorthrop/play-mantra-accumulations
|
test/models/daos/AccumulationDAOSpec.scala
|
Scala
|
apache-2.0
| 5,575
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.producer
import async.{DefaultEventHandler, ProducerSendThread, EventHandler}
import kafka.utils._
import java.util.Random
import java.util.concurrent.{TimeUnit, LinkedBlockingQueue}
import kafka.serializer.Encoder
import java.util.concurrent.atomic.AtomicBoolean
import kafka.common.QueueFullException
import kafka.metrics._
class Producer[K,V](val config: ProducerConfig,
private val eventHandler: EventHandler[K,V]) // only for unit testing
extends Logging {
private val hasShutdown = new AtomicBoolean(false)
private val queue = new LinkedBlockingQueue[KeyedMessage[K,V]](config.queueBufferingMaxMessages)
private val random = new Random
private var sync: Boolean = true
private var producerSendThread: ProducerSendThread[K,V] = null
config.producerType match {
case "sync" =>
case "async" =>
sync = false
producerSendThread = new ProducerSendThread[K,V]("ProducerSendThread-" + config.clientId,
queue,
eventHandler,
config.queueBufferingMaxMs,
config.batchNumMessages,
config.clientId)
producerSendThread.start()
}
private val producerTopicStats = ProducerTopicStatsRegistry.getProducerTopicStats(config.clientId)
KafkaMetricsReporter.startReporters(config.props)
def this(config: ProducerConfig) =
this(config,
new DefaultEventHandler[K,V](config,
Utils.createObject[Partitioner[K]](config.partitionerClass, config.props),
Utils.createObject[Encoder[V]](config.serializerClass, config.props),
Utils.createObject[Encoder[K]](config.keySerializerClass, config.props),
new ProducerPool(config)))
/**
* Sends the data, partitioned by key to the topic using either the
* synchronous or the asynchronous producer
* @param messages the producer data object that encapsulates the topic, key and message data
*/
def send(messages: KeyedMessage[K,V]*) {
if (hasShutdown.get)
throw new ProducerClosedException
recordStats(messages)
sync match {
case true => eventHandler.handle(messages)
case false => asyncSend(messages)
}
}
private def recordStats(messages: Seq[KeyedMessage[K,V]]) {
for (message <- messages) {
producerTopicStats.getProducerTopicStats(message.topic).messageRate.mark()
producerTopicStats.getProducerAllTopicsStats.messageRate.mark()
}
}
private def asyncSend(messages: Seq[KeyedMessage[K,V]]) {
for (message <- messages) {
val added = config.queueEnqueueTimeoutMs match {
case 0 =>
queue.offer(message)
case _ =>
try {
config.queueEnqueueTimeoutMs < 0 match {
case true =>
queue.put(message)
true
case _ =>
queue.offer(message, config.queueEnqueueTimeoutMs, TimeUnit.MILLISECONDS)
}
}
catch {
case e: InterruptedException =>
false
}
}
if(!added) {
producerTopicStats.getProducerTopicStats(message.topic).droppedMessageRate.mark()
producerTopicStats.getProducerAllTopicsStats.droppedMessageRate.mark()
throw new QueueFullException("Event queue is full of unsent messages, could not send event: " + message.toString)
}else {
trace("Added to send queue an event: " + message.toString)
trace("Remaining queue size: " + queue.remainingCapacity)
}
}
}
/**
* Closes the producer pool connections to all Kafka brokers. Also closes
* the ZooKeeper client connection if one exists.
*/
def close() = {
val canShutdown = hasShutdown.compareAndSet(false, true)
if(canShutdown) {
info("Shutting down producer")
if (producerSendThread != null)
producerSendThread.shutdown
eventHandler.close
}
}
}
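// Hedged usage sketch (added for illustration, not part of the upstream file): builds an async
// string producer and sends a single message. The broker address, topic and key are assumed
// placeholder values, and the 0.8-era config keys/constructors are assumed to be available.
object ProducerUsageSketch {
  def sendOneMessage(): Unit = {
    val props = new java.util.Properties()
    props.put("metadata.broker.list", "localhost:9092")             // assumed broker address
    props.put("serializer.class", "kafka.serializer.StringEncoder")
    props.put("producer.type", "async")
    val producer = new Producer[String, String](new ProducerConfig(props))
    try {
      // (topic, key, message) constructor; the key drives partitioning.
      producer.send(new KeyedMessage[String, String]("example-topic", "example-key", "hello"))
    } finally {
      producer.close()
    }
  }
}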
|
lakshmi-kannan/kafka-sashafied
|
core/src/main/scala/kafka/producer/Producer.scala
|
Scala
|
apache-2.0
| 5,043
|
/**
* Original work: SecureSocial (https://github.com/jaliss/securesocial)
* Copyright 2013 Jorge Aliss (jaliss at gmail dot com) - twitter: @jaliss
*
* Derivative work: Silhouette (https://github.com/mohiva/play-silhouette)
* Modifications Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.impl.providers.oauth2
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.api.util.HTTPLayer
import com.mohiva.play.silhouette.impl.exceptions.{ UnexpectedResponseException, ProfileRetrievalException }
import com.mohiva.play.silhouette.impl.providers.OAuth2Provider._
import com.mohiva.play.silhouette.impl.providers._
import com.mohiva.play.silhouette.impl.providers.oauth2.VKProvider._
import play.api.libs.concurrent.Execution.Implicits._
import play.api.libs.json._
import play.api.libs.functional.syntax._
import play.api.libs.ws.WSResponse
import scala.concurrent.Future
import scala.util.{ Success, Failure, Try }
/**
* Base VK OAuth2 provider.
*
* @see http://vk.com/dev/auth_sites
* @see http://vk.com/dev/api_requests
* @see http://vk.com/pages.php?o=-1&p=getProfiles
*/
trait BaseVKProvider extends OAuth2Provider {
/**
* The content type to parse a profile from.
*/
type Content = (JsValue, OAuth2Info)
/**
* The provider ID.
*/
val id = ID
/**
* Defines the URLs that are needed to retrieve the profile data.
*/
protected val urls = Map("api" -> API)
/**
* Builds the social profile.
*
* @param authInfo The auth info received from the provider.
* @return On success the built social profile, otherwise a failure.
*/
protected def buildProfile(authInfo: OAuth2Info): Future[Profile] = {
httpLayer.url(urls("api").format(authInfo.accessToken)).get().flatMap { response =>
val json = response.json
(json \ "error").asOpt[JsObject] match {
case Some(error) =>
val errorCode = (error \ "error_code").as[Int]
val errorMsg = (error \ "error_msg").as[String]
throw new ProfileRetrievalException(SpecifiedProfileError.format(id, errorCode, errorMsg))
case _ => profileParser.parse(json -> authInfo)
}
}
}
/**
* Builds the OAuth2 info from response.
*
* The VK provider needs its own JSON reads to extract the email from the response.
*
* @param response The response from the provider.
* @return The OAuth2 info on success, otherwise a failure.
*/
override protected def buildInfo(response: WSResponse): Try[OAuth2Info] = {
response.json.validate[OAuth2Info].asEither.fold(
error => Failure(new UnexpectedResponseException(InvalidInfoFormat.format(id, error))),
info => Success(info)
)
}
}
/**
* The profile parser for the common social profile.
*/
class VKProfileParser extends SocialProfileParser[(JsValue, OAuth2Info), CommonSocialProfile] {
/**
* Parses the social profile.
*
* @param data The data returned from the provider.
* @return The social profile from the given result.
*/
def parse(data: (JsValue, OAuth2Info)) = Future.successful {
val json = data._1
val response = (json \ "response").apply(0)
val userId = (response \ "uid").as[Long]
val firstName = (response \ "first_name").asOpt[String]
val lastName = (response \ "last_name").asOpt[String]
val avatarURL = (response \ "photo").asOpt[String]
CommonSocialProfile(
loginInfo = LoginInfo(ID, userId.toString),
firstName = firstName,
lastName = lastName,
email = data._2.params.flatMap(_.get("email")),
avatarURL = avatarURL)
}
}
/**
* The VK OAuth2 Provider.
*
* @param httpLayer The HTTP layer implementation.
* @param stateProvider The state provider implementation.
* @param settings The provider settings.
*/
class VKProvider(
protected val httpLayer: HTTPLayer,
protected val stateProvider: OAuth2StateProvider,
val settings: OAuth2Settings)
extends BaseVKProvider with CommonSocialProfileBuilder {
/**
* The type of this class.
*/
type Self = VKProvider
/**
* The profile parser implementation.
*/
val profileParser = new VKProfileParser
/**
* Gets a provider initialized with a new settings object.
*
* @param f A function which gets the settings passed and returns different settings.
* @return An instance of the provider initialized with new settings.
*/
def withSettings(f: (Settings) => Settings) = new VKProvider(httpLayer, stateProvider, f(settings))
}
/**
* The companion object.
*/
object VKProvider {
/**
* The error messages.
*/
val SpecifiedProfileError = "[Silhouette][%s] Error retrieving profile information. Error code: %s, message: %s"
/**
* The VK constants.
*/
val ID = "vk"
val API = "https://api.vk.com/method/getProfiles?fields=uid,first_name,last_name,photo&access_token=%s"
/**
* Converts the JSON into a [[OAuth2Info]] object.
*/
implicit val infoReads: Reads[OAuth2Info] = (
(__ \ AccessToken).read[String] and
(__ \ TokenType).readNullable[String] and
(__ \ ExpiresIn).readNullable[Int] and
(__ \ RefreshToken).readNullable[String] and
(__ \ "email").readNullable[String]
)((accessToken: String, tokenType: Option[String], expiresIn: Option[Int], refreshToken: Option[String], email: Option[String]) =>
new OAuth2Info(accessToken, tokenType, expiresIn, refreshToken, email.map(e => Map("email" -> e)))
)
}
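// Hedged usage sketch (added for illustration, not part of the upstream file): constructs the
// provider and fetches a profile. `settings` (client id, secret, callback URL) and the other
// collaborators are assumed to come from the application's Silhouette wiring.
object VKProviderUsageSketch {
  def retrieveVKProfile(
    httpLayer: HTTPLayer,
    stateProvider: OAuth2StateProvider,
    settings: OAuth2Settings,
    authInfo: OAuth2Info): Future[CommonSocialProfile] = {
    val provider = new VKProvider(httpLayer, stateProvider, settings)
    // Calls the VK API with the access token and parses the common social profile.
    provider.retrieveProfile(authInfo)
  }
}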
|
rfranco/play-silhouette
|
silhouette/app/com/mohiva/play/silhouette/impl/providers/oauth2/VKProvider.scala
|
Scala
|
apache-2.0
| 6,032
|
package org.jetbrains.plugins.scala.lang.psi.api.expr
import com.intellij.openapi.application.ApplicationManager
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.expr.ExpectedTypes.ParameterType
import org.jetbrains.plugins.scala.lang.psi.types.ScType
/**
* Nikolay.Tropin
* 19-Dec-17
*/
trait ExpectedTypes {
def smartExpectedType(expr: ScExpression, fromUnderscore: Boolean = true): Option[ScType]
def expectedExprType(expr: ScExpression, fromUnderscore: Boolean = true): Option[ParameterType]
def expectedExprTypes(expr: ScExpression,
withResolvedFunction: Boolean = false,
fromUnderscore: Boolean = true): Array[ParameterType]
}
object ExpectedTypes {
type ParameterType = (ScType, Option[ScTypeElement])
def instance(): ExpectedTypes = ApplicationManager.getApplication.getService(classOf[ExpectedTypes])
}
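// Hedged usage sketch (added for illustration, not part of the upstream file): the trait is a
// registered application service, so callers obtain it through the companion's instance() lookup.
object ExpectedTypesUsageSketch {
  def expectedTypeOf(expr: ScExpression): Option[ScType] =
    ExpectedTypes.instance().smartExpectedType(expr)
}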
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/api/expr/ExpectedTypes.scala
|
Scala
|
apache-2.0
| 953
|
/* sbt -- Simple Build Tool
* Copyright 2008, 2009, 2010 Mark Harrah
*/
package sbt
package classpath
import java.lang.ref.{ Reference, SoftReference, WeakReference }
import java.io.File
import java.net.{ URI, URL, URLClassLoader }
import java.util.Collections
import scala.collection.{ mutable, JavaConversions, Set }
import mutable.{ HashSet, ListBuffer }
import IO.{ createTemporaryDirectory, write }
object ClasspathUtilities {
def toLoader(finder: PathFinder): ClassLoader = toLoader(finder, rootLoader)
def toLoader(finder: PathFinder, parent: ClassLoader): ClassLoader = new URLClassLoader(finder.getURLs, parent)
def toLoader(paths: Seq[File]): ClassLoader = toLoader(paths, rootLoader)
def toLoader(paths: Seq[File], parent: ClassLoader): ClassLoader = new URLClassLoader(Path.toURLs(paths), parent)
def toLoader(paths: Seq[File], parent: ClassLoader, resourceMap: Map[String, String]): ClassLoader =
new URLClassLoader(Path.toURLs(paths), parent) with RawResources { override def resources = resourceMap }
def toLoader(paths: Seq[File], parent: ClassLoader, resourceMap: Map[String, String], nativeTemp: File): ClassLoader =
new URLClassLoader(Path.toURLs(paths), parent) with RawResources with NativeCopyLoader {
override def resources = resourceMap
override val config = new NativeCopyConfig(nativeTemp, paths, javaLibraryPaths)
}
def javaLibraryPaths: Seq[File] = IO.parseClasspath(System.getProperty("java.library.path"))
lazy val rootLoader =
{
def parent(loader: ClassLoader): ClassLoader =
{
val p = loader.getParent
if (p eq null) loader else parent(p)
}
val systemLoader = ClassLoader.getSystemClassLoader
if (systemLoader ne null) parent(systemLoader)
else parent(getClass.getClassLoader)
}
lazy val xsbtiLoader = classOf[xsbti.Launcher].getClassLoader
final val AppClassPath = "app.class.path"
final val BootClassPath = "boot.class.path"
def createClasspathResources(classpath: Seq[File], instance: ScalaInstance): Map[String, String] =
createClasspathResources(classpath, instance.jars)
def createClasspathResources(appPaths: Seq[File], bootPaths: Seq[File]): Map[String, String] =
{
def make(name: String, paths: Seq[File]) = name -> Path.makeString(paths)
Map(make(AppClassPath, appPaths), make(BootClassPath, bootPaths))
}
private[sbt] def filterByClasspath(classpath: Seq[File], loader: ClassLoader): ClassLoader =
new ClasspathFilter(loader, xsbtiLoader, classpath.toSet)
def makeLoader(classpath: Seq[File], instance: ScalaInstance): ClassLoader =
filterByClasspath(classpath, makeLoader(classpath, instance.loader, instance))
def makeLoader(classpath: Seq[File], instance: ScalaInstance, nativeTemp: File): ClassLoader =
filterByClasspath(classpath, makeLoader(classpath, instance.loader, instance, nativeTemp))
def makeLoader(classpath: Seq[File], parent: ClassLoader, instance: ScalaInstance): ClassLoader =
toLoader(classpath, parent, createClasspathResources(classpath, instance))
def makeLoader(classpath: Seq[File], parent: ClassLoader, instance: ScalaInstance, nativeTemp: File): ClassLoader =
toLoader(classpath, parent, createClasspathResources(classpath, instance), nativeTemp)
private[sbt] def printSource(c: Class[_]) =
println(c.getName + " loader=" + c.getClassLoader + " location=" + IO.classLocationFile(c))
def isArchive(file: File): Boolean = isArchive(file, contentFallback = false)
def isArchive(file: File, contentFallback: Boolean): Boolean =
file.isFile && (isArchiveName(file.getName) || (contentFallback && hasZipContent(file)))
def isArchiveName(fileName: String) = fileName.endsWith(".jar") || fileName.endsWith(".zip")
def hasZipContent(file: File): Boolean = try {
Using.fileInputStream(file) { in =>
(in.read() == 0x50) &&
(in.read() == 0x4b) &&
(in.read() == 0x03) &&
(in.read() == 0x04)
}
} catch { case e: Exception => false }
/** Returns all entries in 'classpath' that correspond to a compiler plugin.*/
private[sbt] def compilerPlugins(classpath: Seq[File]): Iterable[File] =
{
import collection.JavaConversions._
val loader = new URLClassLoader(Path.toURLs(classpath))
loader.getResources("scalac-plugin.xml").toList.flatMap(asFile(true))
}
/** Converts the given URL to a File. If the URL is for an entry in a jar, the File for the jar is returned. */
private[sbt] def asFile(url: URL): List[File] = asFile(false)(url)
private[sbt] def asFile(jarOnly: Boolean)(url: URL): List[File] =
{
try {
url.getProtocol match {
case "file" if !jarOnly => IO.toFile(url) :: Nil
case "jar" =>
val path = url.getPath
val end = path.indexOf('!')
new File(new URI(if (end == -1) path else path.substring(0, end))) :: Nil
case _ => Nil
}
} catch { case e: Exception => Nil }
}
}
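// Hedged usage sketch (added for illustration, not part of the upstream file): builds an isolated
// class loader over a couple of jars and loads a class from it. The jar paths and class name are
// assumed example values.
object ClasspathUtilitiesUsageSketch {
  def loadFromJars(): Class[_] = {
    val jars = Seq(new File("lib/example-a.jar"), new File("lib/example-b.jar")) // assumed paths
    // rootLoader keeps the new loader isolated from the sbt/application classpath.
    val loader = ClasspathUtilities.toLoader(jars, ClasspathUtilities.rootLoader)
    Class.forName("com.example.Main", false, loader) // assumed class name
  }
}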
|
jaceklaskowski/sbt
|
util/classpath/src/main/scala/sbt/classpath/ClasspathUtilities.scala
|
Scala
|
bsd-3-clause
| 5,021
|
package org.jetbrains.plugins.scala.lang.refactoring.introduceVariable
import java.util
import com.intellij.internal.statistic.UsageTrigger
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.command.CommandProcessor
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.{Computable, Pass, TextRange}
import com.intellij.openapi.wm.WindowManager
import com.intellij.psi._
import com.intellij.psi.impl.source.codeStyle.CodeEditUtil
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.refactoring.introduce.inplace.OccurrencesChooser
import org.jetbrains.plugins.scala.ScalaBundle
import org.jetbrains.plugins.scala.extensions.childOf
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScDeclaredElementsHolder
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScEarlyDefinitions
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.{ScClassParents, ScExtendsBlock, ScTemplateBody}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScMember
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.refactoring.namesSuggester.NameSuggester
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaRefactoringUtil._
import org.jetbrains.plugins.scala.lang.refactoring.util.{ScalaRefactoringUtil, ScalaVariableValidator}
import org.jetbrains.plugins.scala.settings.ScalaApplicationSettings
import org.jetbrains.plugins.scala.util.ScalaUtils
/**
* Created by Kate Ustyuzhanina
* on 9/18/15
*/
trait IntroduceExpressions {
this: ScalaIntroduceVariableHandler =>
val INTRODUCE_VARIABLE_REFACTORING_NAME = ScalaBundle.message("introduce.variable.title")
def invokeExpression(project: Project, editor: Editor, file: PsiFile, startOffset: Int, endOffset: Int) {
try {
UsageTrigger.trigger(ScalaBundle.message("introduce.variable.id"))
PsiDocumentManager.getInstance(project).commitAllDocuments()
ScalaRefactoringUtil.checkFile(file, project, editor, INTRODUCE_VARIABLE_REFACTORING_NAME)
val (expr: ScExpression, types: Array[ScType]) = ScalaRefactoringUtil.getExpression(project, editor, file, startOffset, endOffset).
getOrElse(showErrorMessageWithException(ScalaBundle.message("cannot.refactor.not.expression"), project, editor, INTRODUCE_VARIABLE_REFACTORING_NAME))
ScalaRefactoringUtil.checkCanBeIntroduced(expr, showErrorMessageWithException(_, project, editor, INTRODUCE_VARIABLE_REFACTORING_NAME))
val fileEncloser = ScalaRefactoringUtil.fileEncloser(startOffset, file)
val occurrences: Array[TextRange] = ScalaRefactoringUtil.getOccurrenceRanges(ScalaRefactoringUtil.unparExpr(expr), fileEncloser)
val validator = ScalaVariableValidator(this, project, editor, file, expr, occurrences)
def runWithDialog() {
val dialog = getDialog(project, editor, expr, types, occurrences, declareVariable = false, validator)
if (!dialog.isOK) {
occurrenceHighlighters.foreach(_.dispose())
occurrenceHighlighters = Seq.empty
return
}
val varName: String = dialog.getEnteredName
val varType: ScType = dialog.getSelectedType
val isVariable: Boolean = dialog.isDeclareVariable
val replaceAllOccurrences: Boolean = dialog.isReplaceAllOccurrences
runRefactoring(startOffset, endOffset, file, editor, expr, occurrences, varName, varType, replaceAllOccurrences, isVariable)
}
def runInplace() {
val callback = new Pass[OccurrencesChooser.ReplaceChoice] {
def pass(replaceChoice: OccurrencesChooser.ReplaceChoice) {
val replaceAll = OccurrencesChooser.ReplaceChoice.NO != replaceChoice
val suggestedNames: Array[String] = NameSuggester.suggestNames(expr, validator)
import scala.collection.JavaConversions.asJavaCollection
val suggestedNamesSet = new util.LinkedHashSet[String](suggestedNames.toIterable)
val asVar = ScalaApplicationSettings.getInstance().INTRODUCE_VARIABLE_IS_VAR
val forceInferType = expr match {
case _: ScFunctionExpr => Some(true)
case _ => None
}
val needExplicitType = forceInferType.getOrElse(ScalaApplicationSettings.getInstance().INTRODUCE_VARIABLE_EXPLICIT_TYPE)
val selectedType = if (needExplicitType) types(0) else null
val introduceRunnable: Computable[SmartPsiElementPointer[PsiElement]] =
introduceVariable(startOffset, endOffset, file, editor, expr, occurrences, suggestedNames(0), selectedType,
replaceAll, asVar)
CommandProcessor.getInstance.executeCommand(project, new Runnable {
def run() {
val newDeclaration: PsiElement = ApplicationManager.getApplication.runWriteAction(introduceRunnable).getElement
val namedElement: PsiNamedElement = newDeclaration match {
case holder: ScDeclaredElementsHolder =>
holder.declaredElements.headOption.orNull
case enum: ScEnumerator => enum.pattern.bindings.headOption.orNull
case _ => null
}
if (namedElement != null && namedElement.isValid) {
editor.getCaretModel.moveToOffset(namedElement.getTextOffset)
editor.getSelectionModel.removeSelection()
if (ScalaRefactoringUtil.isInplaceAvailable(editor)) {
PsiDocumentManager.getInstance(project).commitDocument(editor.getDocument)
PsiDocumentManager.getInstance(project).doPostponedOperationsAndUnblockDocument(editor.getDocument)
val checkedExpr = if (expr.isValid) expr else null
val variableIntroducer =
new ScalaInplaceVariableIntroducer(project, editor, checkedExpr, types, namedElement,
INTRODUCE_VARIABLE_REFACTORING_NAME, replaceAll, asVar, forceInferType)
variableIntroducer.performInplaceRefactoring(suggestedNamesSet)
}
}
}
}, INTRODUCE_VARIABLE_REFACTORING_NAME, null)
}
}
val chooser = new OccurrencesChooser[TextRange](editor) {
override def getOccurrenceRange(occurrence: TextRange) = occurrence
}
if (occurrences.isEmpty) {
callback.pass(OccurrencesChooser.ReplaceChoice.NO)
} else {
import scala.collection.JavaConverters._
chooser.showChooser(new TextRange(startOffset, endOffset), occurrences.toList.asJava, callback)
}
}
if (ScalaRefactoringUtil.isInplaceAvailable(editor)) runInplace()
else runWithDialog()
}
catch {
case _: IntroduceException =>
}
}
//returns smart pointer to ScDeclaredElementsHolder or ScEnumerator
private def runRefactoringInside(startOffset: Int, endOffset: Int, file: PsiFile, editor: Editor, expression_ : ScExpression,
occurrences_ : Array[TextRange], varName: String, varType: ScType,
replaceAllOccurrences: Boolean, isVariable: Boolean): SmartPsiElementPointer[PsiElement] = {
def isIntroduceEnumerator(parExpr: PsiElement, prev: PsiElement, firstOccurenceOffset: Int): Option[ScForStatement] = {
val result = prev match {
case forSt: ScForStatement if forSt.body.orNull == parExpr => None
case forSt: ScForStatement => Some(forSt)
case _: ScEnumerator | _: ScGenerator => Option(prev.getParent.getParent.asInstanceOf[ScForStatement])
case guard: ScGuard if guard.getParent.isInstanceOf[ScEnumerators] => Option(prev.getParent.getParent.asInstanceOf[ScForStatement])
case _ =>
parExpr match {
case forSt: ScForStatement => Some(forSt) //there are occurrences both in body and in enumerators
case _ => None
}
}
for {
//check that the first occurrence is after the first generator
forSt <- result
enums <- forSt.enumerators
generator = enums.generators.head
if firstOccurenceOffset > generator.getTextRange.getEndOffset
} yield forSt
}
def addPrivateIfNotLocal(declaration: PsiElement) {
declaration match {
case member: ScMember if !member.isLocal =>
member.setModifierProperty("private", value = true)
case _ =>
}
}
def replaceRangeByDeclaration(range: TextRange, element: PsiElement): PsiElement = {
val (start, end) = (range.getStartOffset, range.getEndOffset)
val text: String = element.getText
val document = editor.getDocument
document.replaceString(start, end, text)
PsiDocumentManager.getInstance(element.getProject).commitDocument(document)
val newEnd = start + text.length
editor.getCaretModel.moveToOffset(newEnd)
val decl = PsiTreeUtil.findElementOfClassAtOffset(file, start, classOf[ScMember], /*strictStart =*/ false)
lazy val enum = PsiTreeUtil.findElementOfClassAtOffset(file, start, classOf[ScEnumerator], /*strictStart =*/ false)
Option(decl).getOrElse(enum)
}
object inExtendsBlock {
def unapply(e: PsiElement): Option[ScExtendsBlock] = {
e match {
case extBl: ScExtendsBlock =>
Some(extBl)
case elem if PsiTreeUtil.getParentOfType(elem, classOf[ScClassParents]) != null =>
PsiTreeUtil.getParentOfType(elem, classOf[ScExtendsBlock]) match {
case _ childOf (_: ScNewTemplateDefinition) => None
case extBl => Some(extBl)
}
case _ => None
}
}
}
def isOneLiner = {
val lineText = ScalaRefactoringUtil.getLineText(editor)
val model = editor.getSelectionModel
val document = editor.getDocument
val selectedText = model.getSelectedText
val lineNumber = document.getLineNumber(model.getSelectionStart)
val oneLineSelected = selectedText != null && lineText != null && selectedText.trim == lineText.trim
val element = file.findElementAt(model.getSelectionStart)
var parent = element
def atSameLine(elem: PsiElement) = {
val offsets = Seq(elem.getTextRange.getStartOffset, elem.getTextRange.getEndOffset)
offsets.forall(document.getLineNumber(_) == lineNumber)
}
while (parent != null && !parent.isInstanceOf[PsiFile] && atSameLine(parent)) {
parent = parent.getParent
}
val insideExpression = parent match {
case null | _: ScBlock | _: ScTemplateBody | _: ScEarlyDefinitions | _: PsiFile => false
case _ => true
}
oneLineSelected && !insideExpression
}
val revertInfo = ScalaRefactoringUtil.RevertInfo(file.getText, editor.getCaretModel.getOffset)
editor.putUserData(ScalaIntroduceVariableHandler.REVERT_INFO, revertInfo)
val typeName = if (varType != null) varType.canonicalText else ""
val expression = ScalaRefactoringUtil.expressionToIntroduce(expression_)
val isFunExpr = expression.isInstanceOf[ScFunctionExpr]
val mainRange = new TextRange(startOffset, endOffset)
val occurrences: Array[TextRange] = if (!replaceAllOccurrences) {
Array[TextRange](mainRange)
} else occurrences_
val occCount = occurrences.length
val mainOcc = occurrences.indexWhere(range => range.contains(mainRange) || mainRange.contains(range))
val fastDefinition = occCount == 1 && isOneLiner
//changes document directly
val replacedOccurences = ScalaRefactoringUtil.replaceOccurences(occurrences, varName, file)
//only Psi-operations after this moment
var firstRange = replacedOccurences(0)
val parentExprs =
if (occCount == 1)
ScalaRefactoringUtil.findParentExpr(file, firstRange) match {
case _ childOf ((block: ScBlock) childOf ((_) childOf (call: ScMethodCall)))
if isFunExpr && block.statements.size == 1 => Seq(call)
case _ childOf ((block: ScBlock) childOf (infix: ScInfixExpr))
if isFunExpr && block.statements.size == 1 => Seq(infix)
case expr => Seq(expr)
}
else replacedOccurences.toSeq.map(ScalaRefactoringUtil.findParentExpr(file, _))
val commonParent: PsiElement = PsiTreeUtil.findCommonParent(parentExprs: _*)
val nextParent: PsiElement = ScalaRefactoringUtil.nextParent(commonParent, file)
editor.getCaretModel.moveToOffset(replacedOccurences(mainOcc).getEndOffset)
def createEnumeratorIn(forStmt: ScForStatement): ScEnumerator = {
val parent: ScEnumerators = forStmt.enumerators.orNull
val inParentheses = parent.prevSiblings.toList.exists(_.getNode.getElementType == ScalaTokenTypes.tLPARENTHESIS)
val created = ScalaPsiElementFactory.createEnumerator(varName, ScalaRefactoringUtil.unparExpr(expression), file.getManager, typeName)
val elem = parent.getChildren.filter(_.getTextRange.contains(firstRange)).head
var result: ScEnumerator = null
if (elem != null) {
var needSemicolon = true
var sibling = elem.getPrevSibling
if (inParentheses) {
while (sibling != null && sibling.getText.trim == "") sibling = sibling.getPrevSibling
if (sibling != null && sibling.getText.endsWith(";")) needSemicolon = false
val semicolon = parent.addBefore(ScalaPsiElementFactory.createSemicolon(parent.getManager), elem)
result = parent.addBefore(created, semicolon).asInstanceOf[ScEnumerator]
if (needSemicolon) {
parent.addBefore(ScalaPsiElementFactory.createSemicolon(parent.getManager), result)
}
} else {
if (sibling.getText.indexOf('\n') != -1) needSemicolon = false
result = parent.addBefore(created, elem).asInstanceOf[ScEnumerator]
parent.addBefore(ScalaPsiElementFactory.createNewLineNode(elem.getManager).getPsi, elem)
if (needSemicolon) {
parent.addBefore(ScalaPsiElementFactory.createNewLineNode(parent.getManager).getPsi, result)
}
}
}
result
}
def createVariableDefinition(): PsiElement = {
val created = ScalaPsiElementFactory.createDeclaration(varName, typeName, isVariable,
ScalaRefactoringUtil.unparExpr(expression), file.getManager)
var result: PsiElement = null
if (fastDefinition) {
result = replaceRangeByDeclaration(replacedOccurences(0), created)
}
else {
var needFormatting = false
val parent = commonParent match {
case inExtendsBlock(extBl) =>
needFormatting = true
extBl.addEarlyDefinitions()
case _ =>
val container = ScalaRefactoringUtil.container(commonParent, file)
val needBraces = !commonParent.isInstanceOf[ScBlock] && ScalaRefactoringUtil.needBraces(commonParent, nextParent)
if (needBraces) {
firstRange = firstRange.shiftRight(1)
val replaced = commonParent.replace(ScalaPsiElementFactory.createExpressionFromText("{" + commonParent.getText + "}", file.getManager))
replaced.getPrevSibling match {
case ws: PsiWhiteSpace if ws.getText.contains("\n") => ws.delete()
case _ =>
}
replaced
} else container
}
val anchor = parent.getChildren.find(_.getTextRange.contains(firstRange)).getOrElse(parent.getLastChild)
if (anchor != null) {
result = ScalaPsiUtil.addStatementBefore(created.asInstanceOf[ScBlockStatement], parent, Some(anchor))
CodeEditUtil.markToReformat(parent.getNode, needFormatting)
} else throw new IntroduceException
}
result
}
val createdDeclaration: PsiElement = isIntroduceEnumerator(commonParent, nextParent, firstRange.getStartOffset) match {
case Some(forStmt) => createEnumeratorIn(forStmt)
case _ => createVariableDefinition()
}
addPrivateIfNotLocal(createdDeclaration)
ScalaPsiUtil.adjustTypes(createdDeclaration)
SmartPointerManager.getInstance(file.getProject).createSmartPsiElementPointer(createdDeclaration)
}
def runRefactoring(startOffset: Int, endOffset: Int, file: PsiFile, editor: Editor, expression: ScExpression,
occurrences_ : Array[TextRange], varName: String, varType: ScType,
replaceAllOccurrences: Boolean, isVariable: Boolean) {
val runnable = new Runnable() {
def run() {
runRefactoringInside(startOffset, endOffset, file, editor, expression, occurrences_, varName,
varType, replaceAllOccurrences, isVariable) //this for better debug
}
}
ScalaUtils.runWriteAction(runnable, editor.getProject, INTRODUCE_VARIABLE_REFACTORING_NAME)
editor.getSelectionModel.removeSelection()
}
protected def introduceVariable(startOffset: Int, endOffset: Int, file: PsiFile, editor: Editor, expression: ScExpression,
occurrences_ : Array[TextRange], varName: String, varType: ScType,
replaceAllOccurrences: Boolean, isVariable: Boolean): Computable[SmartPsiElementPointer[PsiElement]] = {
new Computable[SmartPsiElementPointer[PsiElement]]() {
def compute() = runRefactoringInside(startOffset, endOffset, file, editor, expression, occurrences_, varName,
varType, replaceAllOccurrences, isVariable)
}
}
protected def getDialog(project: Project, editor: Editor, expr: ScExpression, typez: Array[ScType],
occurrences: Array[TextRange], declareVariable: Boolean,
validator: ScalaVariableValidator): ScalaIntroduceVariableDialog = {
// Add occurrences highlighting
if (occurrences.length > 1)
occurrenceHighlighters = ScalaRefactoringUtil.highlightOccurrences(project, occurrences, editor)
val possibleNames = NameSuggester.suggestNames(expr, validator)
val dialog = new ScalaIntroduceVariableDialog(project, typez, occurrences.length, validator, possibleNames)
dialog.show()
if (!dialog.isOK) {
if (occurrences.length > 1) {
WindowManager.getInstance.getStatusBar(project).
setInfo(ScalaBundle.message("press.escape.to.remove.the.highlighting"))
}
}
dialog
}
def runTest(project: Project, editor: Editor, file: PsiFile, startOffset: Int, endOffset: Int, replaceAll: Boolean) {
PsiDocumentManager.getInstance(project).commitAllDocuments()
ScalaRefactoringUtil.checkFile(file, project, editor, INTRODUCE_VARIABLE_REFACTORING_NAME)
val (expr: ScExpression, types: Array[ScType]) = ScalaRefactoringUtil.getExpression(project, editor, file, startOffset, endOffset).
getOrElse(showErrorMessageWithException(ScalaBundle.message("cannot.refactor.not.expression"), project, editor, INTRODUCE_VARIABLE_REFACTORING_NAME))
ScalaRefactoringUtil.checkCanBeIntroduced(expr, showErrorMessageWithException(_, project, editor, INTRODUCE_VARIABLE_REFACTORING_NAME))
val fileEncloser = ScalaRefactoringUtil.fileEncloser(startOffset, file)
val occurrences: Array[TextRange] = ScalaRefactoringUtil.getOccurrenceRanges(ScalaRefactoringUtil.unparExpr(expr), fileEncloser)
runRefactoring(startOffset, endOffset, file, editor, expr, occurrences, "value", types(0), replaceAll, isVariable = false)
}
}
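// A minimal, self-contained sketch (not the IntelliJ plugin API) of the core text transformation
// performed above: every selected occurrence range is replaced by the chosen name, and a
// declaration is inserted at the line of the first occurrence. All names here are illustrative.
object IntroduceVariableSketch {
  final case class Span(start: Int, end: Int)
  def introduce(text: String, occurrences: Seq[Span], name: String, isVar: Boolean): String = {
    val sorted = occurrences.sortBy(_.start)
    val expr = text.substring(sorted.head.start, sorted.head.end)
    // Replace from the last occurrence backwards so earlier offsets stay valid.
    val replaced = sorted.reverse.foldLeft(text) { (acc, s) =>
      acc.substring(0, s.start) + name + acc.substring(s.end)
    }
    val keyword = if (isVar) "var" else "val"
    // Insert the declaration at the start of the line holding the first occurrence.
    val lineStart = replaced.lastIndexOf('\n', sorted.head.start - 1) + 1
    replaced.substring(0, lineStart) + s"$keyword $name = $expr\n" + replaced.substring(lineStart)
  }
  def main(args: Array[String]): Unit = {
    val code = "println(1 + 2)\nprintln(1 + 2)\n"
    // Prints the code with `val sum = 1 + 2` introduced and both occurrences replaced by `sum`.
    println(introduce(code, Seq(Span(8, 13), Span(23, 28)), "sum", isVar = false))
  }
}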
|
whorbowicz/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/refactoring/introduceVariable/IntroduceExpressions.scala
|
Scala
|
apache-2.0
| 19,761
|
package a17
trait 嫁衣神功 {
type Tail <: 嫁衣神功
type Head
}
class 第八重 extends 嫁衣神功 {
override type Tail = 开始
override type Head = 我
class 开始 extends 嫁衣神功 {
override type Tail = 继续
override type Head = 叼
class 继续 extends 嫁衣神功 {
override type Tail = 余根
override type Head = 你
class 余根 extends 嫁衣神功 {
override type Tail = 开始辛苦
override type Head = 老
class 开始辛苦 extends 嫁衣神功 {
override type Tail = 顶唔顺
override type Head = 味
class 顶唔顺 extends 嫁衣神功 {
override type Tail = 每次都重构类型系统_裸命咩
override type Head = 个
class 每次都重构类型系统_裸命咩 extends 嫁衣神功 {
override type Tail = 入咗恶人谷扑街
override type Head = 西
class 入咗恶人谷扑街 extends 嫁衣神功 {
override type Tail = 开始辛苦
override type Head = 啊
}
}
}
}
}
}
}
}
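// A minimal sketch of the same technique demonstrated above: a type-level list encoded with
// abstract type members (Head / Tail). The names below are illustrative and independent of the
// classes in this file.
trait TList { type Head; type Tail <: TList }
class TNil extends TList { type Head = Nothing; type Tail = TNil }
class TCons[H, T <: TList] extends TList { type Head = H; type Tail = T }
object TListDemo {
  type Xs = TCons[Int, TCons[String, TNil]]
  // Type projections walk the list at compile time; these vals only need to type-check.
  val headIsInt = implicitly[Xs#Head =:= Int]
  val secondIsString = implicitly[Xs#Tail#Head =:= String]
}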
|
djx314/ubw
|
a17-432/src/main/scala/第八重.scala
|
Scala
|
bsd-3-clause
| 1,180
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.kafka010
import java.{ util => ju }
import scala.collection.mutable.ArrayBuffer
import org.apache.kafka.clients.consumer.{ ConsumerConfig, ConsumerRecord }
import org.apache.kafka.common.TopicPartition
import org.apache.spark.{Logging, Partition, SparkContext, TaskContext}
import org.apache.spark.partial.{BoundedDouble, PartialResult}
import org.apache.spark.rdd.RDD
import org.apache.spark.scheduler.ExecutorCacheTaskLocation
import org.apache.spark.storage.StorageLevel
/**
* A batch-oriented interface for consuming from Kafka.
* Starting and ending offsets are specified in advance,
* so that you can control exactly-once semantics.
* @param kafkaParams Kafka
* <a href="http://kafka.apache.org/documentation.html#newconsumerconfigs">
* configuration parameters</a>. Requires "bootstrap.servers" to be set
* with Kafka broker(s) specified in host1:port1,host2:port2 form.
* @param offsetRanges offset ranges that define the Kafka data belonging to this RDD
* @param preferredHosts map from TopicPartition to preferred host for processing that partition.
* In most cases, use [[DirectKafkaInputDStream.preferConsistent]]
* Use [[DirectKafkaInputDStream.preferBrokers]] if your executors are on the same nodes as your brokers.
* @param useConsumerCache whether to use a consumer from a per-jvm cache
* @tparam K type of Kafka message key
* @tparam V type of Kafka message value
*/
private[spark] class KafkaRDD[K, V](
sc: SparkContext,
val kafkaParams: ju.Map[String, Object],
val offsetRanges: Array[OffsetRange],
val preferredHosts: ju.Map[TopicPartition, String],
useConsumerCache: Boolean
) extends RDD[ConsumerRecord[K, V]](sc, Nil) with Logging with HasOffsetRanges {
assert("none" ==
kafkaParams.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG).asInstanceOf[String],
ConsumerConfig.AUTO_OFFSET_RESET_CONFIG +
" must be set to none for executor kafka params, else messages may not match offsetRange")
assert(false ==
kafkaParams.get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG).asInstanceOf[Boolean],
ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG +
" must be set to false for executor kafka params, else offsets may commit before processing")
// TODO is it necessary to have separate configs for initial poll time vs ongoing poll time?
private val pollTimeout = conf.getLong("spark.streaming.kafka.consumer.poll.ms",
conf.getTimeAsMs("spark.network.timeout", "120s"))
private val cacheInitialCapacity =
conf.getInt("spark.streaming.kafka.consumer.cache.initialCapacity", 16)
private val cacheMaxCapacity =
conf.getInt("spark.streaming.kafka.consumer.cache.maxCapacity", 64)
private val cacheLoadFactor =
conf.getDouble("spark.streaming.kafka.consumer.cache.loadFactor", 0.75).toFloat
override def persist(newLevel: StorageLevel): this.type = {
logError("Kafka ConsumerRecord is not serializable. " +
"Use .map to extract fields before calling .persist or .window")
super.persist(newLevel)
}
override def getPartitions: Array[Partition] = {
offsetRanges.zipWithIndex.map { case (o, i) =>
new KafkaRDDPartition(i, o.topic, o.partition, o.fromOffset, o.untilOffset)
}.toArray
}
override def count(): Long = offsetRanges.map(_.count).sum
override def countApprox(
timeout: Long,
confidence: Double = 0.95
): PartialResult[BoundedDouble] = {
val c = count
new PartialResult(new BoundedDouble(c, 1.0, c, c), true)
}
override def isEmpty(): Boolean = count == 0L
override def take(num: Int): Array[ConsumerRecord[K, V]] = {
val nonEmptyPartitions = this.partitions
.map(_.asInstanceOf[KafkaRDDPartition])
.filter(_.count > 0)
if (num < 1 || nonEmptyPartitions.isEmpty) {
return new Array[ConsumerRecord[K, V]](0)
}
// Determine in advance how many messages need to be taken from each partition
val parts = nonEmptyPartitions.foldLeft(Map[Int, Int]()) { (result, part) =>
val remain = num - result.values.sum
if (remain > 0) {
val taken = Math.min(remain, part.count)
result + (part.index -> taken.toInt)
} else {
result
}
}
val buf = new ArrayBuffer[ConsumerRecord[K, V]]
val res = context.runJob(
this,
(tc: TaskContext, it: Iterator[ConsumerRecord[K, V]]) =>
it.take(parts(tc.partitionId)).toArray, parts.keys.toArray
)
res.foreach(buf ++= _)
buf.toArray
}
private def executors(): Array[ExecutorCacheTaskLocation] = {
val bm = sparkContext.env.blockManager
bm.master.getPeers(bm.blockManagerId).toArray
.map(x => ExecutorCacheTaskLocation(x.host, x.executorId))
.sortWith(compareExecutors)
}
protected[kafka010] def compareExecutors(
a: ExecutorCacheTaskLocation,
b: ExecutorCacheTaskLocation): Boolean =
if (a.host == b.host) {
a.executorId > b.executorId
} else {
a.host > b.host
}
/**
* Non-negative modulus, from java 8 math
*/
private def floorMod(a: Int, b: Int): Int = ((a % b) + b) % b
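  // For example, floorMod(-3, 5) == 2 whereas -3 % 5 == -3 in Scala/Java, so the index computed
  // in getPreferredLocations below is always a valid position in the executor array.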
override def getPreferredLocations(thePart: Partition): Seq[String] = {
// The intention is best-effort consistent executor for a given topicpartition,
// so that caching consumers can be effective.
// TODO what about hosts specified by ip vs name
val part = thePart.asInstanceOf[KafkaRDDPartition]
val allExecs = executors()
val tp = part.topicPartition
val prefHost = preferredHosts.get(tp)
val prefExecs = if (null == prefHost) allExecs else allExecs.filter(_.host == prefHost)
val execs = if (prefExecs.isEmpty) allExecs else prefExecs
if (execs.isEmpty) {
Seq()
} else {
// execs is sorted, tp.hashCode depends only on topic and partition, so consistent index
val index = this.floorMod(tp.hashCode, execs.length)
val chosen = execs(index)
Seq(chosen.toString)
}
}
private def errBeginAfterEnd(part: KafkaRDDPartition): String =
s"Beginning offset ${part.fromOffset} is after the ending offset ${part.untilOffset} " +
s"for topic ${part.topic} partition ${part.partition}. " +
"You either provided an invalid fromOffset, or the Kafka topic has been damaged"
override def compute(thePart: Partition, context: TaskContext): Iterator[ConsumerRecord[K, V]] = {
val part = thePart.asInstanceOf[KafkaRDDPartition]
assert(part.fromOffset <= part.untilOffset, errBeginAfterEnd(part))
if (part.fromOffset == part.untilOffset) {
logInfo(s"Beginning offset ${part.fromOffset} is the same as ending offset " +
s"skipping ${part.topic} ${part.partition}")
Iterator.empty
} else {
new KafkaRDDIterator(part, context)
}
}
/**
* An iterator that fetches messages directly from Kafka for the offsets in partition.
* Uses a cached consumer where possible to take advantage of prefetching
*/
private class KafkaRDDIterator(
part: KafkaRDDPartition,
context: TaskContext) extends Iterator[ConsumerRecord[K, V]] {
logInfo(s"Computing topic ${part.topic}, partition ${part.partition} " +
s"offsets ${part.fromOffset} -> ${part.untilOffset}")
val groupId = kafkaParams.get(ConsumerConfig.GROUP_ID_CONFIG).asInstanceOf[String]
context.addTaskCompletionListener{ context => closeIfNeeded() }
val consumer = if (useConsumerCache) {
CachedKafkaConsumer.init(cacheInitialCapacity, cacheMaxCapacity, cacheLoadFactor)
if (context.attemptNumber > 1) {
// just in case the prior attempt failures were cache related
CachedKafkaConsumer.remove(groupId, part.topic, part.partition)
}
CachedKafkaConsumer.get[K, V](groupId, part.topic, part.partition, kafkaParams)
} else {
CachedKafkaConsumer.getUncached[K, V](groupId, part.topic, part.partition, kafkaParams)
}
var requestOffset = part.fromOffset
def closeIfNeeded(): Unit = {
if (!useConsumerCache && consumer != null) {
consumer.close
}
}
override def hasNext(): Boolean = requestOffset < part.untilOffset
override def next(): ConsumerRecord[K, V] = {
assert(hasNext(), "Can't call getNext() once untilOffset has been reached")
val r = consumer.get(requestOffset, pollTimeout)
requestOffset += 1
r
}
}
}
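// A minimal sketch (illustrative values only) of executor-side Kafka parameters that satisfy the
// assertions in KafkaRDD above: auto.offset.reset must be "none" and enable.auto.commit must be
// false, in addition to the bootstrap.servers documented in the class comment. The broker list,
// group id and deserializers below are placeholders, not values required by this file.
object ExecutorKafkaParamsSketch {
  import java.{ util => jutil }
  def executorKafkaParams(): jutil.Map[String, Object] = {
    val params = new jutil.HashMap[String, Object]()
    params.put("bootstrap.servers", "host1:9092,host2:9092")
    params.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    params.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    params.put("group.id", "example-group")
    params.put("auto.offset.reset", "none")                   // required by the first assert
    params.put("enable.auto.commit", java.lang.Boolean.FALSE) // required by the second assert
    params
  }
}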
|
hortonworks-spark/skc
|
external/kafka-0-10/src/main/scala/org/apache/spark/streaming/kafka010/KafkaRDD.scala
|
Scala
|
apache-2.0
| 9,161
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import scala.collection.mutable
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.catalog.{InMemoryCatalog, SessionCatalog}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.connector.catalog.CatalogManager
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
* Abstract class all optimizers should inherit from; it contains the standard batches (extending
* optimizers can override this).
*/
abstract class Optimizer(catalogManager: CatalogManager)
extends RuleExecutor[LogicalPlan] {
// Check for structural integrity of the plan in test mode.
// Currently we check after the execution of each rule if a plan:
// - is still resolved
// - only host special expressions in supported operators
override protected def isPlanIntegral(plan: LogicalPlan): Boolean = {
!Utils.isTesting || (plan.resolved &&
plan.find(PlanHelper.specialExpressionsInUnsupportedOperator(_).nonEmpty).isEmpty)
}
override protected val excludedOnceBatches: Set[String] =
Set(
"PartitionPruning",
"Extract Python UDFs")
protected def fixedPoint =
FixedPoint(
SQLConf.get.optimizerMaxIterations,
maxIterationsSetting = SQLConf.OPTIMIZER_MAX_ITERATIONS.key)
/**
* Defines the default rule batches in the Optimizer.
*
* Implementations of this class should override this method, and [[nonExcludableRules]] if
* necessary, instead of [[batches]]. The rule batches that eventually run in the Optimizer,
* i.e., returned by [[batches]], will be (defaultBatches - (excludedRules - nonExcludableRules)).
*/
def defaultBatches: Seq[Batch] = {
val operatorOptimizationRuleSet =
Seq(
// Operator push down
PushProjectionThroughUnion,
ReorderJoin,
EliminateOuterJoin,
PushDownPredicates,
PushDownLeftSemiAntiJoin,
PushLeftSemiLeftAntiThroughJoin,
LimitPushDown,
ColumnPruning,
InferFiltersFromConstraints,
// Operator combine
CollapseRepartition,
CollapseProject,
CollapseWindow,
CombineFilters,
CombineLimits,
CombineUnions,
// Constant folding and strength reduction
TransposeWindow,
NullPropagation,
ConstantPropagation,
FoldablePropagation,
OptimizeIn,
ConstantFolding,
EliminateAggregateFilter,
ReorderAssociativeOperator,
LikeSimplification,
BooleanSimplification,
SimplifyConditionals,
RemoveDispensableExpressions,
SimplifyBinaryComparison,
ReplaceNullWithFalseInPredicate,
PruneFilters,
SimplifyCasts,
SimplifyCaseConversionExpressions,
RewriteCorrelatedScalarSubquery,
EliminateSerialization,
RemoveRedundantAliases,
RemoveNoopOperators,
CombineWithFields,
SimplifyExtractValueOps,
CombineConcats) ++
extendedOperatorOptimizationRules
val operatorOptimizationBatch: Seq[Batch] = {
val rulesWithoutInferFiltersFromConstraints =
operatorOptimizationRuleSet.filterNot(_ == InferFiltersFromConstraints)
Batch("Operator Optimization before Inferring Filters", fixedPoint,
rulesWithoutInferFiltersFromConstraints: _*) ::
Batch("Infer Filters", Once,
InferFiltersFromConstraints) ::
Batch("Operator Optimization after Inferring Filters", fixedPoint,
rulesWithoutInferFiltersFromConstraints: _*) ::
// Set strategy to Once to avoid pushing filter every time because we do not change the
// join condition.
Batch("Push extra predicate through join", fixedPoint,
PushExtraPredicateThroughJoin,
PushDownPredicates) :: Nil
}
val batches = (Batch("Eliminate Distinct", Once, EliminateDistinct) ::
// Technically some of the rules in Finish Analysis are not optimizer rules and belong more
// in the analyzer, because they are needed for correctness (e.g. ComputeCurrentTime).
// However, because we also use the analyzer to canonicalize queries (for view definition),
// we do not eliminate subqueries or compute current time in the analyzer.
Batch("Finish Analysis", Once,
EliminateResolvedHint,
EliminateSubqueryAliases,
EliminateView,
ReplaceExpressions,
RewriteNonCorrelatedExists,
ComputeCurrentTime,
GetCurrentDatabaseAndCatalog(catalogManager),
RewriteDistinctAggregates,
ReplaceDeduplicateWithAggregate) ::
//////////////////////////////////////////////////////////////////////////////////////////
// Optimizer rules start here
//////////////////////////////////////////////////////////////////////////////////////////
// - Do the first call of CombineUnions before starting the major Optimizer rules,
// since it can reduce the number of iterations and the other rules could add/move
// extra operators between two adjacent Union operators.
// - Call CombineUnions again in Batch("Operator Optimizations"),
// since the other rules might make two separate Unions operators adjacent.
Batch("Union", Once,
CombineUnions) ::
Batch("OptimizeLimitZero", Once,
OptimizeLimitZero) ::
// Run this once earlier. This might simplify the plan and reduce cost of optimizer.
// For example, a query such as Filter(LocalRelation) would go through all the heavy
// optimizer rules that are triggered when there is a filter
// (e.g. InferFiltersFromConstraints). If we run this batch earlier, the query becomes just
// LocalRelation and does not trigger many rules.
Batch("LocalRelation early", fixedPoint,
ConvertToLocalRelation,
PropagateEmptyRelation,
// PropagateEmptyRelation can change the nullability of an attribute from nullable to
// non-nullable when an empty relation child of a Union is removed
UpdateAttributeNullability) ::
Batch("Pullup Correlated Expressions", Once,
PullupCorrelatedPredicates) ::
// Subquery batch applies the optimizer rules recursively. Therefore, it makes no sense
// to enforce idempotence on it and we change this batch from Once to FixedPoint(1).
Batch("Subquery", FixedPoint(1),
OptimizeSubqueries) ::
Batch("Replace Operators", fixedPoint,
RewriteExceptAll,
RewriteIntersectAll,
ReplaceIntersectWithSemiJoin,
ReplaceExceptWithFilter,
ReplaceExceptWithAntiJoin,
ReplaceDistinctWithAggregate) ::
Batch("Aggregate", fixedPoint,
RemoveLiteralFromGroupExpressions,
RemoveRepetitionFromGroupExpressions) :: Nil ++
operatorOptimizationBatch) :+
// This batch pushes filters and projections into scan nodes. Before this batch, the logical
// plan may contain nodes that do not report stats. Anything that uses stats must run after
// this batch.
Batch("Early Filter and Projection Push-Down", Once, earlyScanPushDownRules: _*) :+
// Since join costs in AQP can change between multiple runs, there is no reason that we have an
// idempotence enforcement on this batch. We thus make it FixedPoint(1) instead of Once.
Batch("Join Reorder", FixedPoint(1),
CostBasedJoinReorder) :+
Batch("Eliminate Sorts", Once,
EliminateSorts) :+
Batch("Decimal Optimizations", fixedPoint,
DecimalAggregates) :+
Batch("Object Expressions Optimization", fixedPoint,
EliminateMapObjects,
CombineTypedFilters,
ObjectSerializerPruning,
ReassignLambdaVariableID) :+
Batch("LocalRelation", fixedPoint,
ConvertToLocalRelation,
PropagateEmptyRelation,
// PropagateEmptyRelation can change the nullability of an attribute from nullable to
// non-nullable when an empty relation child of a Union is removed
UpdateAttributeNullability) :+
// The following batch should be executed after batch "Join Reorder" and "LocalRelation".
Batch("Check Cartesian Products", Once,
CheckCartesianProducts) :+
Batch("RewriteSubquery", Once,
RewritePredicateSubquery,
ColumnPruning,
CollapseProject,
RemoveNoopOperators) :+
// This batch must be executed after the `RewriteSubquery` batch, which creates joins.
Batch("NormalizeFloatingNumbers", Once, NormalizeFloatingNumbers) :+
Batch("ReplaceWithFieldsExpression", Once, ReplaceWithFieldsExpression)
// remove any batches with no rules. this may happen when subclasses do not add optional rules.
batches.filter(_.rules.nonEmpty)
}
/**
* Defines rules that cannot be excluded from the Optimizer even if they are specified in
* SQL config "excludedRules".
*
* Implementations of this class can override this method if necessary. The rule batches
* that eventually run in the Optimizer, i.e., returned by [[batches]], will be
* (defaultBatches - (excludedRules - nonExcludableRules)).
*/
def nonExcludableRules: Seq[String] =
EliminateDistinct.ruleName ::
EliminateResolvedHint.ruleName ::
EliminateSubqueryAliases.ruleName ::
EliminateView.ruleName ::
ReplaceExpressions.ruleName ::
ComputeCurrentTime.ruleName ::
GetCurrentDatabaseAndCatalog(catalogManager).ruleName ::
RewriteDistinctAggregates.ruleName ::
ReplaceDeduplicateWithAggregate.ruleName ::
ReplaceIntersectWithSemiJoin.ruleName ::
ReplaceExceptWithFilter.ruleName ::
ReplaceExceptWithAntiJoin.ruleName ::
RewriteExceptAll.ruleName ::
RewriteIntersectAll.ruleName ::
ReplaceDistinctWithAggregate.ruleName ::
PullupCorrelatedPredicates.ruleName ::
RewriteCorrelatedScalarSubquery.ruleName ::
RewritePredicateSubquery.ruleName ::
NormalizeFloatingNumbers.ruleName ::
ReplaceWithFieldsExpression.ruleName :: Nil
/**
* Optimize all the subqueries inside expression.
*/
object OptimizeSubqueries extends Rule[LogicalPlan] {
private def removeTopLevelSort(plan: LogicalPlan): LogicalPlan = {
plan match {
case Sort(_, _, child) => child
case Project(fields, child) => Project(fields, removeTopLevelSort(child))
case other => other
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
case s: SubqueryExpression =>
val Subquery(newPlan, _) = Optimizer.this.execute(Subquery.fromExpression(s))
// At this point we have an optimized subquery plan that we are going to attach
// to this subquery expression. Here we can safely remove any top level sort
// in the plan as tuples produced by a subquery are un-ordered.
s.withNewPlan(removeTopLevelSort(newPlan))
}
}
/**
* Override to provide additional rules for the operator optimization batch.
*/
def extendedOperatorOptimizationRules: Seq[Rule[LogicalPlan]] = Nil
/**
* Override to provide additional rules for early projection and filter pushdown to scans.
*/
def earlyScanPushDownRules: Seq[Rule[LogicalPlan]] = Nil
/**
* Returns (defaultBatches - (excludedRules - nonExcludableRules)), the rule batches that
* eventually run in the Optimizer.
*
* Implementations of this class should override [[defaultBatches]], and [[nonExcludableRules]]
* if necessary, instead of this method.
*/
final override def batches: Seq[Batch] = {
val excludedRulesConf =
SQLConf.get.optimizerExcludedRules.toSeq.flatMap(Utils.stringToSeq)
val excludedRules = excludedRulesConf.filter { ruleName =>
val nonExcludable = nonExcludableRules.contains(ruleName)
if (nonExcludable) {
logWarning(s"Optimization rule '${ruleName}' was not excluded from the optimizer " +
s"because this rule is a non-excludable rule.")
}
!nonExcludable
}
if (excludedRules.isEmpty) {
defaultBatches
} else {
defaultBatches.flatMap { batch =>
val filteredRules = batch.rules.filter { rule =>
val exclude = excludedRules.contains(rule.ruleName)
if (exclude) {
logInfo(s"Optimization rule '${rule.ruleName}' is excluded from the optimizer.")
}
!exclude
}
if (batch.rules == filteredRules) {
Some(batch)
} else if (filteredRules.nonEmpty) {
Some(Batch(batch.name, batch.strategy, filteredRules: _*))
} else {
logInfo(s"Optimization batch '${batch.name}' is excluded from the optimizer " +
s"as all enclosed rules have been excluded.")
None
}
}
}
}
}
/**
* Remove useless DISTINCT for MAX and MIN.
* This rule should be applied before RewriteDistinctAggregates.
*/
object EliminateDistinct extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan transformExpressions {
case ae: AggregateExpression if ae.isDistinct =>
ae.aggregateFunction match {
case _: Max | _: Min => ae.copy(isDistinct = false)
case _ => ae
}
}
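  // For example, max(DISTINCT a) is rewritten to max(a): MAX and MIN are insensitive to
  // duplicate input values, so the DISTINCT qualifier adds nothing.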
}
/**
* Remove useless FILTER clause for aggregate expressions.
* This rule should be applied before RewriteDistinctAggregates.
*/
object EliminateAggregateFilter extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan transformExpressions {
case ae @ AggregateExpression(_, _, _, Some(Literal.TrueLiteral), _) =>
ae.copy(filter = None)
case AggregateExpression(af: DeclarativeAggregate, _, _, Some(Literal.FalseLiteral), _) =>
val initialProject = SafeProjection.create(af.initialValues)
val evalProject = SafeProjection.create(af.evaluateExpression :: Nil, af.aggBufferAttributes)
val initialBuffer = initialProject(EmptyRow)
val internalRow = evalProject(initialBuffer)
Literal.create(internalRow.get(0, af.dataType), af.dataType)
case AggregateExpression(af: ImperativeAggregate, _, _, Some(Literal.FalseLiteral), _) =>
val buffer = new SpecificInternalRow(af.aggBufferAttributes.map(_.dataType))
af.initialize(buffer)
Literal.create(af.eval(buffer), af.dataType)
}
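  // For example, count(a) FILTER (WHERE false) folds to 0 and sum(a) FILTER (WHERE false) folds
  // to NULL (the aggregate evaluated over its initial, empty buffer), while a FILTER (WHERE true)
  // clause is simply dropped.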
}
/**
* An optimizer used in test code.
*
* To ensure extendability, we leave the standard rules in the abstract optimizer rules, while
* specific rules go to the subclasses.
*/
object SimpleTestOptimizer extends SimpleTestOptimizer
class SimpleTestOptimizer extends Optimizer(
new CatalogManager(
new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true),
FakeV2SessionCatalog,
new SessionCatalog(new InMemoryCatalog, EmptyFunctionRegistry, new SQLConf())))
/**
* Remove redundant aliases from a query plan. A redundant alias is an alias that does not change
* the name or metadata of a column, and does not deduplicate it.
*/
object RemoveRedundantAliases extends Rule[LogicalPlan] {
/**
* Create an attribute mapping from the old to the new attributes. This function will only
* return the attribute pairs that have changed.
*/
private def createAttributeMapping(current: LogicalPlan, next: LogicalPlan)
: Seq[(Attribute, Attribute)] = {
current.output.zip(next.output).filterNot {
case (a1, a2) => a1.semanticEquals(a2)
}
}
/**
* Remove the top-level alias from an expression when it is redundant.
*/
private def removeRedundantAlias(e: Expression, excludeList: AttributeSet): Expression = e match {
// Alias with metadata can not be stripped, or the metadata will be lost.
// If the alias name is different from attribute name, we can't strip it either, or we
// may accidentally change the output schema name of the root plan.
case a @ Alias(attr: Attribute, name)
if a.metadata == Metadata.empty &&
name == attr.name &&
!excludeList.contains(attr) &&
!excludeList.contains(a) =>
attr
case a => a
}
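  // For example, Alias(a, "a") with empty metadata collapses to the attribute a itself, whereas a
  // rename such as Alias(a, "b"), or an alias carrying metadata, is preserved.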
/**
* Remove redundant alias expression from a LogicalPlan and its subtree. A set of excludes is used
* to prevent the removal of seemingly redundant aliases used to deduplicate the input for a
* (self) join or to prevent the removal of top-level subquery attributes.
*/
private def removeRedundantAliases(plan: LogicalPlan, excluded: AttributeSet): LogicalPlan = {
plan match {
// We want to keep the same output attributes for subqueries. This means we cannot remove
// the aliases that produce these attributes
case Subquery(child, correlated) =>
Subquery(removeRedundantAliases(child, excluded ++ child.outputSet), correlated)
// A join has to be treated differently, because the left and the right side of the join are
// not allowed to use the same attributes. We use an exclude list to prevent us from creating
// a situation in which this happens; the rule will only remove an alias if its child
// attribute is not on the exclude list.
case Join(left, right, joinType, condition, hint) =>
val newLeft = removeRedundantAliases(left, excluded ++ right.outputSet)
val newRight = removeRedundantAliases(right, excluded ++ newLeft.outputSet)
val mapping = AttributeMap(
createAttributeMapping(left, newLeft) ++
createAttributeMapping(right, newRight))
val newCondition = condition.map(_.transform {
case a: Attribute => mapping.getOrElse(a, a)
})
Join(newLeft, newRight, joinType, newCondition, hint)
case _ =>
// Remove redundant aliases in the subtree(s).
val currentNextAttrPairs = mutable.Buffer.empty[(Attribute, Attribute)]
val newNode = plan.mapChildren { child =>
val newChild = removeRedundantAliases(child, excluded)
currentNextAttrPairs ++= createAttributeMapping(child, newChild)
newChild
}
// Create the attribute mapping. Note that the currentNextAttrPairs can contain duplicate
// keys in case of Union (this is caused by the PushProjectionThroughUnion rule); in this
// case we use the first mapping (which should be provided by the first child).
val mapping = AttributeMap(currentNextAttrPairs.toSeq)
// Create an expression cleaning function for nodes that can actually produce redundant
// aliases, use identity otherwise.
val clean: Expression => Expression = plan match {
case _: Project => removeRedundantAlias(_, excluded)
case _: Aggregate => removeRedundantAlias(_, excluded)
case _: Window => removeRedundantAlias(_, excluded)
case _ => identity[Expression]
}
// Transform the expressions.
newNode.mapExpressions { expr =>
clean(expr.transform {
case a: Attribute => mapping.getOrElse(a, a)
})
}
}
}
def apply(plan: LogicalPlan): LogicalPlan = removeRedundantAliases(plan, AttributeSet.empty)
}
/**
* Remove no-op operators from the query plan that do not make any modifications.
*/
object RemoveNoopOperators extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
// Eliminate no-op Projects
case p @ Project(_, child) if child.sameOutput(p) => child
// Eliminate no-op Window
case w: Window if w.windowExpressions.isEmpty => w.child
}
}
/**
* Pushes down [[LocalLimit]] beneath UNION ALL and beneath the streamed inputs of outer joins.
*/
object LimitPushDown extends Rule[LogicalPlan] {
private def stripGlobalLimitIfPresent(plan: LogicalPlan): LogicalPlan = {
plan match {
case GlobalLimit(_, child) => child
case _ => plan
}
}
private def maybePushLocalLimit(limitExp: Expression, plan: LogicalPlan): LogicalPlan = {
(limitExp, plan.maxRowsPerPartition) match {
case (IntegerLiteral(newLimit), Some(childMaxRows)) if newLimit < childMaxRows =>
// If the child has a cap on max rows per partition and the cap is larger than
// the new limit, put a new LocalLimit there.
LocalLimit(limitExp, stripGlobalLimitIfPresent(plan))
case (_, None) =>
// If the child has no cap, put the new LocalLimit.
LocalLimit(limitExp, stripGlobalLimitIfPresent(plan))
case _ =>
// Otherwise, don't put a new LocalLimit.
plan
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
// Adding extra Limits below UNION ALL for children which are not Limit or do not have Limit
// descendants whose maxRow is larger. This heuristic is valid assuming there does not exist any
// Limit push-down rule that is unable to infer the value of maxRows.
// Note: right now Union means UNION ALL, which does not de-duplicate rows, so it is safe to
// pushdown Limit through it. Once we add UNION DISTINCT, however, we will not be able to
// pushdown Limit.
case LocalLimit(exp, u: Union) =>
LocalLimit(exp, u.copy(children = u.children.map(maybePushLocalLimit(exp, _))))
// Add extra limits below OUTER JOIN. For LEFT OUTER and RIGHT OUTER JOIN we push limits to
// the left and right sides, respectively. It's not safe to push limits below FULL OUTER
// JOIN in the general case without a more invasive rewrite.
// We also need to ensure that this limit pushdown rule will not eventually introduce limits
// on both sides if it is applied multiple times. Therefore:
// - If one side is already limited, stack another limit on top if the new limit is smaller.
// The redundant limit will be collapsed by the CombineLimits rule.
case LocalLimit(exp, join @ Join(left, right, joinType, _, _)) =>
val newJoin = joinType match {
case RightOuter => join.copy(right = maybePushLocalLimit(exp, right))
case LeftOuter => join.copy(left = maybePushLocalLimit(exp, left))
case _ => join
}
LocalLimit(exp, newJoin)
}
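  // For example, LocalLimit(5, Union(t1, t2)) becomes
  // LocalLimit(5, Union(LocalLimit(5, t1), LocalLimit(5, t2))), and a LIMIT above a LEFT OUTER
  // JOIN pushes a LocalLimit onto the left side only; redundant limits are later collapsed by
  // CombineLimits.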
}
/**
* Pushes Project operator to both sides of a Union operator.
* Operations that are safe to pushdown are listed as follows.
* Union:
* Right now, Union means UNION ALL, which does not de-duplicate rows. So, it is
* safe to pushdown Filters and Projections through it. Filter pushdown is handled by another
* rule PushDownPredicates. Once we add UNION DISTINCT, we will not be able to pushdown Projections.
*/
object PushProjectionThroughUnion extends Rule[LogicalPlan] with PredicateHelper {
/**
* Maps Attributes from the left side to the corresponding Attribute on the right side.
*/
private def buildRewrites(left: LogicalPlan, right: LogicalPlan): AttributeMap[Attribute] = {
assert(left.output.size == right.output.size)
AttributeMap(left.output.zip(right.output))
}
/**
* Rewrites an expression so that it can be pushed to the right side of a
* Union or Except operator. This method relies on the fact that the output attributes
* of a union/intersect/except are always equal to the left child's output.
*/
private def pushToRight[A <: Expression](e: A, rewrites: AttributeMap[Attribute]) = {
val result = e transform {
case a: Attribute => rewrites(a)
} match {
// Make sure exprId is unique in each child of Union.
case Alias(child, alias) => Alias(child, alias)()
case other => other
}
// We must promise the compiler that we did not discard the names in the case of project
// expressions. This is safe since the only transformation is from Attribute => Attribute.
result.asInstanceOf[A]
}
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
// Push down deterministic projection through UNION ALL
case p @ Project(projectList, u: Union) =>
assert(u.children.nonEmpty)
if (projectList.forall(_.deterministic)) {
val newFirstChild = Project(projectList, u.children.head)
val newOtherChildren = u.children.tail.map { child =>
val rewrites = buildRewrites(u.children.head, child)
Project(projectList.map(pushToRight(_, rewrites)), child)
}
u.copy(children = newFirstChild +: newOtherChildren)
} else {
p
}
}
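  // For example, Project(a + 1 AS x, Union(t1, t2)) becomes
  // Union(Project(a + 1 AS x, t1), Project(a' + 1 AS x, t2)), where a' is the attribute of t2
  // corresponding to t1's a and the alias receives a fresh exprId in each child.
  // Non-deterministic project lists are left untouched.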
}
/**
* Attempts to eliminate the reading of unneeded columns from the query plan.
*
* Since adding Project before Filter conflicts with PushPredicatesThroughProject, this rule will
* remove the Project p2 in the following pattern:
*
* p1 @ Project(_, Filter(_, p2 @ Project(_, child))) if p2.outputSet.subsetOf(p2.inputSet)
*
* p2 is usually inserted by this rule and is useless; p1 can prune the columns anyway.
*/
object ColumnPruning extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = removeProjectBeforeFilter(plan transform {
// Prunes the unused columns from project list of Project/Aggregate/Expand
case p @ Project(_, p2: Project) if !p2.outputSet.subsetOf(p.references) =>
p.copy(child = p2.copy(projectList = p2.projectList.filter(p.references.contains)))
case p @ Project(_, a: Aggregate) if !a.outputSet.subsetOf(p.references) =>
p.copy(
child = a.copy(aggregateExpressions = a.aggregateExpressions.filter(p.references.contains)))
case a @ Project(_, e @ Expand(_, _, grandChild)) if !e.outputSet.subsetOf(a.references) =>
val newOutput = e.output.filter(a.references.contains(_))
val newProjects = e.projections.map { proj =>
proj.zip(e.output).filter { case (_, a) =>
newOutput.contains(a)
}.unzip._1
}
a.copy(child = Expand(newProjects, newOutput, grandChild))
// Prunes the unused columns from child of `DeserializeToObject`
case d @ DeserializeToObject(_, _, child) if !child.outputSet.subsetOf(d.references) =>
d.copy(child = prunedChild(child, d.references))
// Prunes the unused columns from child of Aggregate/Expand/Generate/ScriptTransformation
case a @ Aggregate(_, _, child) if !child.outputSet.subsetOf(a.references) =>
a.copy(child = prunedChild(child, a.references))
case f @ FlatMapGroupsInPandas(_, _, _, child) if !child.outputSet.subsetOf(f.references) =>
f.copy(child = prunedChild(child, f.references))
case e @ Expand(_, _, child) if !child.outputSet.subsetOf(e.references) =>
e.copy(child = prunedChild(child, e.references))
case s @ ScriptTransformation(_, _, _, child, _)
if !child.outputSet.subsetOf(s.references) =>
s.copy(child = prunedChild(child, s.references))
// prune unrequired references
case p @ Project(_, g: Generate) if p.references != g.outputSet =>
val requiredAttrs = p.references -- g.producedAttributes ++ g.generator.references
val newChild = prunedChild(g.child, requiredAttrs)
val unrequired = g.generator.references -- p.references
val unrequiredIndices = newChild.output.zipWithIndex.filter(t => unrequired.contains(t._1))
.map(_._2)
p.copy(child = g.copy(child = newChild, unrequiredChildIndex = unrequiredIndices))
// prune unrequired nested fields from `Generate`.
case GeneratorNestedColumnAliasing(p) => p
// Eliminate unneeded attributes from right side of a Left Existence Join.
case j @ Join(_, right, LeftExistence(_), _, _) =>
j.copy(right = prunedChild(right, j.references))
// all the columns will be used to compare, so we can't prune them
case p @ Project(_, _: SetOperation) => p
case p @ Project(_, _: Distinct) => p
// Eliminate unneeded attributes from children of Union.
case p @ Project(_, u: Union) =>
if (!u.outputSet.subsetOf(p.references)) {
val firstChild = u.children.head
val newOutput = prunedChild(firstChild, p.references).output
// pruning the columns of all children based on the pruned first child.
val newChildren = u.children.map { p =>
val selected = p.output.zipWithIndex.filter { case (a, i) =>
newOutput.contains(firstChild.output(i))
}.map(_._1)
Project(selected, p)
}
p.copy(child = u.withNewChildren(newChildren))
} else {
p
}
// Prune unnecessary window expressions
case p @ Project(_, w: Window) if !w.windowOutputSet.subsetOf(p.references) =>
p.copy(child = w.copy(
windowExpressions = w.windowExpressions.filter(p.references.contains)))
// Can't prune the columns on LeafNode
case p @ Project(_, _: LeafNode) => p
case NestedColumnAliasing(p) => p
// for all other logical plans that inherits the output from it's children
// Project over project is handled by the first case, skip it here.
case p @ Project(_, child) if !child.isInstanceOf[Project] =>
val required = child.references ++ p.references
if (!child.inputSet.subsetOf(required)) {
val newChildren = child.children.map(c => prunedChild(c, required))
p.copy(child = child.withNewChildren(newChildren))
} else {
p
}
})
/** Applies a projection only when the child is producing unnecessary attributes */
private def prunedChild(c: LogicalPlan, allReferences: AttributeSet) =
if (!c.outputSet.subsetOf(allReferences)) {
Project(c.output.filter(allReferences.contains), c)
} else {
c
}
/**
* The Project before Filter is not necessary but conflicts with PushPredicatesThroughProject,
* so remove it. Since the Projects have been added top-down, we need to remove in bottom-up
* order, otherwise lower Projects can be missed.
*/
private def removeProjectBeforeFilter(plan: LogicalPlan): LogicalPlan = plan transformUp {
case p1 @ Project(_, f @ Filter(_, p2 @ Project(_, child)))
if p2.outputSet.subsetOf(child.outputSet) &&
// We only remove attribute-only project.
p2.projectList.forall(_.isInstanceOf[AttributeReference]) =>
p1.copy(child = f.copy(child = child))
}
}
/**
* Combines two [[Project]] operators into one and perform alias substitution,
* merging the expressions into one single expression for the following cases.
* 1. When two [[Project]] operators are adjacent.
* 2. When two [[Project]] operators have LocalLimit/Sample/Repartition operator between them
* and the upper project consists of the same number of columns which is equal or aliasing.
* `GlobalLimit(LocalLimit)` pattern is also considered.
*/
object CollapseProject extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
case p1 @ Project(_, p2: Project) =>
if (haveCommonNonDeterministicOutput(p1.projectList, p2.projectList)) {
p1
} else {
p2.copy(projectList = buildCleanedProjectList(p1.projectList, p2.projectList))
}
case p @ Project(_, agg: Aggregate) =>
if (haveCommonNonDeterministicOutput(p.projectList, agg.aggregateExpressions)) {
p
} else {
agg.copy(aggregateExpressions = buildCleanedProjectList(
p.projectList, agg.aggregateExpressions))
}
case Project(l1, g @ GlobalLimit(_, limit @ LocalLimit(_, p2 @ Project(l2, _))))
if isRenaming(l1, l2) =>
val newProjectList = buildCleanedProjectList(l1, l2)
g.copy(child = limit.copy(child = p2.copy(projectList = newProjectList)))
case Project(l1, limit @ LocalLimit(_, p2 @ Project(l2, _))) if isRenaming(l1, l2) =>
val newProjectList = buildCleanedProjectList(l1, l2)
limit.copy(child = p2.copy(projectList = newProjectList))
case Project(l1, r @ Repartition(_, _, p @ Project(l2, _))) if isRenaming(l1, l2) =>
r.copy(child = p.copy(projectList = buildCleanedProjectList(l1, p.projectList)))
case Project(l1, s @ Sample(_, _, _, _, p2 @ Project(l2, _))) if isRenaming(l1, l2) =>
s.copy(child = p2.copy(projectList = buildCleanedProjectList(l1, p2.projectList)))
}
private def collectAliases(projectList: Seq[NamedExpression]): AttributeMap[Alias] = {
AttributeMap(projectList.collect {
case a: Alias => a.toAttribute -> a
})
}
private def haveCommonNonDeterministicOutput(
upper: Seq[NamedExpression], lower: Seq[NamedExpression]): Boolean = {
// Create a map of Aliases to their values from the lower projection.
// e.g., 'SELECT ... FROM (SELECT a + b AS c, d ...)' produces Map(c -> Alias(a + b, c)).
val aliases = collectAliases(lower)
// Collapse upper and lower Projects if and only if their overlapped expressions are all
// deterministic.
upper.exists(_.collect {
case a: Attribute if aliases.contains(a) => aliases(a).child
}.exists(!_.deterministic))
}
private def buildCleanedProjectList(
upper: Seq[NamedExpression],
lower: Seq[NamedExpression]): Seq[NamedExpression] = {
// Create a map of Aliases to their values from the lower projection.
// e.g., 'SELECT ... FROM (SELECT a + b AS c, d ...)' produces Map(c -> Alias(a + b, c)).
val aliases = collectAliases(lower)
// Substitute any attributes that are produced by the lower projection, so that we safely
// eliminate it.
// e.g., 'SELECT c + 1 FROM (SELECT a + b AS C ...' produces 'SELECT a + b + 1 ...'
// Use transformUp to prevent infinite recursion.
val rewrittenUpper = upper.map(_.transformUp {
case a: Attribute => aliases.getOrElse(a, a)
})
// collapse upper and lower Projects may introduce unnecessary Aliases, trim them here.
rewrittenUpper.map { p =>
CleanupAliases.trimNonTopLevelAliases(p).asInstanceOf[NamedExpression]
}
}
private def isRenaming(list1: Seq[NamedExpression], list2: Seq[NamedExpression]): Boolean = {
list1.length == list2.length && list1.zip(list2).forall {
case (e1, e2) if e1.semanticEquals(e2) => true
case (Alias(a: Attribute, _), b) if a.metadata == Metadata.empty && a.name == b.name => true
case _ => false
}
}
}
/**
* Combines adjacent [[RepartitionOperation]] operators
*/
object CollapseRepartition extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
// Case 1: When a Repartition has a child of Repartition or RepartitionByExpression,
// 1) When the top node does not enable the shuffle (i.e., coalesce API), but the child
// enables the shuffle. Returns the child node if the last numPartitions is bigger;
// otherwise, keep unchanged.
// 2) In the other cases, returns the top node with the child's child
case r @ Repartition(_, _, child: RepartitionOperation) => (r.shuffle, child.shuffle) match {
case (false, true) => if (r.numPartitions >= child.numPartitions) child else r
case _ => r.copy(child = child.child)
}
// Case 2: When a RepartitionByExpression has a child of Repartition or RepartitionByExpression
// we can remove the child.
case r @ RepartitionByExpression(_, child: RepartitionOperation, _) =>
r.copy(child = child.child)
}
}
/**
* Collapse adjacent Window expressions.
* - If the partition specs and order specs are the same and the window expressions are
* independent and of the same window function type, collapse into the parent.
*/
object CollapseWindow extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
case w1 @ Window(we1, ps1, os1, w2 @ Window(we2, ps2, os2, grandChild))
if ps1 == ps2 && os1 == os2 && w1.references.intersect(w2.windowOutputSet).isEmpty &&
we1.nonEmpty && we2.nonEmpty &&
// This assumes Window contains the same type of window expressions. This is ensured
// by ExtractWindowFunctions.
WindowFunctionType.functionType(we1.head) == WindowFunctionType.functionType(we2.head) =>
w1.copy(windowExpressions = we2 ++ we1, child = grandChild)
}
}
/**
* Transpose Adjacent Window Expressions.
* - If the partition spec of the parent Window expression is compatible with the partition spec
* of the child window expression, transpose them.
*/
object TransposeWindow extends Rule[LogicalPlan] {
private def compatiblePartitions(ps1: Seq[Expression], ps2: Seq[Expression]): Boolean = {
ps1.length < ps2.length && ps2.take(ps1.length).permutations.exists(ps1.zip(_).forall {
case (l, r) => l.semanticEquals(r)
})
}
def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
case w1 @ Window(we1, ps1, os1, w2 @ Window(we2, ps2, os2, grandChild))
if w1.references.intersect(w2.windowOutputSet).isEmpty &&
w1.expressions.forall(_.deterministic) &&
w2.expressions.forall(_.deterministic) &&
        compatiblePartitions(ps1, ps2) =>
Project(w1.output, Window(we2, ps2, os2, Window(we1, ps1, os1, grandChild)))
}
}
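// Illustrative sketch of the TransposeWindow rule above (not part of the original source): when
// the parent partitions by a compatible subset of the child's partition keys, the two Window
// nodes are swapped and a Project restores the original output:
//   Window(we1, [a], os, Window(we2, [a, b], os, child))
//     => Project(output, Window(we2, [a, b], os, Window(we1, [a], os, child)))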
/**
* Generate a list of additional filters from an operator's existing constraint but remove those
* that are either already part of the operator's condition or are part of the operator's child
* constraints. These filters are currently inserted to the existing conditions in the Filter
* operators and on either side of Join operators.
*
* Note: While this optimization is applicable to a lot of types of join, it primarily benefits
* Inner and LeftSemi joins.
*/
object InferFiltersFromConstraints extends Rule[LogicalPlan]
with PredicateHelper with ConstraintHelper {
def apply(plan: LogicalPlan): LogicalPlan = {
if (SQLConf.get.constraintPropagationEnabled) {
inferFilters(plan)
} else {
plan
}
}
private def inferFilters(plan: LogicalPlan): LogicalPlan = plan transform {
case filter @ Filter(condition, child) =>
val newFilters = filter.constraints --
(child.constraints ++ splitConjunctivePredicates(condition))
if (newFilters.nonEmpty) {
Filter(And(newFilters.reduce(And), condition), child)
} else {
filter
}
case join @ Join(left, right, joinType, conditionOpt, _) =>
joinType match {
        // For inner join, we can infer additional filters for both sides. LeftSemi is a kind of
        // inner join; it just drops the right side from the final output.
case _: InnerLike | LeftSemi =>
val allConstraints = getAllConstraints(left, right, conditionOpt)
val newLeft = inferNewFilter(left, allConstraints)
val newRight = inferNewFilter(right, allConstraints)
join.copy(left = newLeft, right = newRight)
// For right outer join, we can only infer additional filters for left side.
case RightOuter =>
val allConstraints = getAllConstraints(left, right, conditionOpt)
val newLeft = inferNewFilter(left, allConstraints)
join.copy(left = newLeft)
// For left join, we can only infer additional filters for right side.
case LeftOuter | LeftAnti =>
val allConstraints = getAllConstraints(left, right, conditionOpt)
val newRight = inferNewFilter(right, allConstraints)
join.copy(right = newRight)
case _ => join
}
}
private def getAllConstraints(
left: LogicalPlan,
right: LogicalPlan,
conditionOpt: Option[Expression]): Set[Expression] = {
val baseConstraints = left.constraints.union(right.constraints)
.union(conditionOpt.map(splitConjunctivePredicates).getOrElse(Nil).toSet)
baseConstraints.union(inferAdditionalConstraints(baseConstraints))
}
private def inferNewFilter(plan: LogicalPlan, constraints: Set[Expression]): LogicalPlan = {
val newPredicates = constraints
.union(constructIsNotNullConstraints(constraints, plan.output))
.filter { c =>
c.references.nonEmpty && c.references.subsetOf(plan.outputSet) && c.deterministic
} -- plan.constraints
if (newPredicates.isEmpty) {
plan
} else {
Filter(newPredicates.reduce(And), plan)
}
}
}
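// Illustrative sketch of InferFiltersFromConstraints (not part of the original source; names are
// schematic). For an inner join on a = b where the left side already filters a = 1, the combined
// constraints let the rule infer a new filter for the right side:
//   Join(Filter(a = 1, t1), t2, Inner, Some(a = b))
//     => Join(Filter(a = 1, t1), Filter(isnotnull(b) AND b = 1, t2), Inner, Some(a = b))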
/**
* Combines all adjacent [[Union]] operators into a single [[Union]].
*/
object CombineUnions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformDown {
case u: Union => flattenUnion(u, false)
case Distinct(u: Union) => Distinct(flattenUnion(u, true))
}
private def flattenUnion(union: Union, flattenDistinct: Boolean): Union = {
val topByName = union.byName
val topAllowMissingCol = union.allowMissingCol
val stack = mutable.Stack[LogicalPlan](union)
val flattened = mutable.ArrayBuffer.empty[LogicalPlan]
// Note that we should only flatten the unions with same byName and allowMissingCol.
// Although we do `UnionCoercion` at analysis phase, we manually run `CombineUnions`
// in some places like `Dataset.union`. Flattening unions with different resolution
// rules (by position and by name) could cause incorrect results.
while (stack.nonEmpty) {
stack.pop() match {
case Distinct(Union(children, byName, allowMissingCol))
if flattenDistinct && byName == topByName && allowMissingCol == topAllowMissingCol =>
stack.pushAll(children.reverse)
case Union(children, byName, allowMissingCol)
if byName == topByName && allowMissingCol == topAllowMissingCol =>
stack.pushAll(children.reverse)
case child =>
flattened += child
}
}
union.copy(children = flattened.toSeq)
}
}
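// Illustrative sketch of CombineUnions (not part of the original source): nested unions with the
// same byName/allowMissingCol settings are flattened into a single Union over all leaves:
//   Union(Union(a, b), Union(c, d))  =>  Union(a, b, c, d)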
/**
* Combines two adjacent [[Filter]] operators into one, merging the non-redundant conditions into
* one conjunctive predicate.
*/
object CombineFilters extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan transform applyLocally
val applyLocally: PartialFunction[LogicalPlan, LogicalPlan] = {
// The query execution/optimization does not guarantee the expressions are evaluated in order.
    // We can only combine them if both filters are deterministic.
case Filter(fc, nf @ Filter(nc, grandChild)) if fc.deterministic && nc.deterministic =>
(ExpressionSet(splitConjunctivePredicates(fc)) --
ExpressionSet(splitConjunctivePredicates(nc))).reduceOption(And) match {
case Some(ac) =>
Filter(And(nc, ac), grandChild)
case None =>
nf
}
}
}
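// Illustrative sketch of CombineFilters (not part of the original source): conjuncts already
// present in the lower filter are dropped and the rest is merged into a single Filter:
//   Filter(a > 1 AND b > 2, Filter(a > 1, child))  =>  Filter(a > 1 AND b > 2, child)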
/**
* Removes Sort operations if they don't affect the final output ordering.
* Note that changes in the final output ordering may affect the file size (SPARK-32318).
* This rule handles the following cases:
* 1) if the sort order is empty or the sort order does not have any reference
* 2) if the child is already sorted
* 3) if there is another Sort operator separated by 0...n Project, Filter, Repartition or
* RepartitionByExpression (with deterministic expressions) operators
* 4) if the Sort operator is within Join separated by 0...n Project, Filter, Repartition or
* RepartitionByExpression (with deterministic expressions) operators only and the Join condition
* is deterministic
* 5) if the Sort operator is within GroupBy separated by 0...n Project, Filter, Repartition or
* RepartitionByExpression (with deterministic expressions) operators only and the aggregate
* function is order irrelevant
*/
object EliminateSorts extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case s @ Sort(orders, _, child) if orders.isEmpty || orders.exists(_.child.foldable) =>
val newOrders = orders.filterNot(_.child.foldable)
if (newOrders.isEmpty) child else s.copy(order = newOrders)
case Sort(orders, true, child) if SortOrder.orderingSatisfies(child.outputOrdering, orders) =>
child
case s @ Sort(_, _, child) => s.copy(child = recursiveRemoveSort(child))
case j @ Join(originLeft, originRight, _, cond, _) if cond.forall(_.deterministic) =>
j.copy(left = recursiveRemoveSort(originLeft), right = recursiveRemoveSort(originRight))
case g @ Aggregate(_, aggs, originChild) if isOrderIrrelevantAggs(aggs) =>
g.copy(child = recursiveRemoveSort(originChild))
}
private def recursiveRemoveSort(plan: LogicalPlan): LogicalPlan = plan match {
case Sort(_, _, child) => recursiveRemoveSort(child)
case other if canEliminateSort(other) =>
other.withNewChildren(other.children.map(recursiveRemoveSort))
case _ => plan
}
private def canEliminateSort(plan: LogicalPlan): Boolean = plan match {
case p: Project => p.projectList.forall(_.deterministic)
case f: Filter => f.condition.deterministic
case r: RepartitionByExpression => r.partitionExpressions.forall(_.deterministic)
case _: Repartition => true
case _ => false
}
private def isOrderIrrelevantAggs(aggs: Seq[NamedExpression]): Boolean = {
def isOrderIrrelevantAggFunction(func: AggregateFunction): Boolean = func match {
case _: Min | _: Max | _: Count => true
// Arithmetic operations for floating-point values are order-sensitive
// (they are not associative).
case _: Sum | _: Average | _: CentralMomentAgg =>
!Seq(FloatType, DoubleType).exists(_.sameType(func.children.head.dataType))
case _ => false
}
def checkValidAggregateExpression(expr: Expression): Boolean = expr match {
case _: AttributeReference => true
case ae: AggregateExpression => isOrderIrrelevantAggFunction(ae.aggregateFunction)
case _: UserDefinedExpression => false
case e => e.children.forall(checkValidAggregateExpression)
}
aggs.forall(checkValidAggregateExpression)
}
}
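// Illustrative sketch of EliminateSorts (not part of the original source): a Sort below an
// order-irrelevant aggregate (min/max/count, or sum/avg on non-floating-point input) is removed:
//   Aggregate([a], [a, max(b)], Sort(b ASC, global = true, child))
//     => Aggregate([a], [a, max(b)], child)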
/**
* Removes filters that can be evaluated trivially. This can be done through the following ways:
* 1) by eliding the filter for cases where it will always evaluate to `true`.
* 2) by substituting a dummy empty relation when the filter will always evaluate to `false`.
* 3) by eliminating the always-true conditions given the constraints on the child's output.
*/
object PruneFilters extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    // If the filter condition always evaluates to true, remove the filter.
case Filter(Literal(true, BooleanType), child) => child
    // If the filter condition always evaluates to null or false,
// replace the input with an empty relation.
case Filter(Literal(null, _), child) =>
LocalRelation(child.output, data = Seq.empty, isStreaming = plan.isStreaming)
case Filter(Literal(false, BooleanType), child) =>
LocalRelation(child.output, data = Seq.empty, isStreaming = plan.isStreaming)
// If any deterministic condition is guaranteed to be true given the constraints on the child's
// output, remove the condition
case f @ Filter(fc, p: LogicalPlan) =>
val (prunedPredicates, remainingPredicates) =
splitConjunctivePredicates(fc).partition { cond =>
cond.deterministic && p.constraints.contains(cond)
}
if (prunedPredicates.isEmpty) {
f
} else if (remainingPredicates.isEmpty) {
p
} else {
val newCond = remainingPredicates.reduce(And)
Filter(newCond, p)
}
}
}
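// Illustrative sketch of PruneFilters (not part of the original source): a condition that is
// already guaranteed by the child's constraints is pruned:
//   Filter(a > 1, Filter(a > 1 AND b > 2, child))  =>  Filter(a > 1 AND b > 2, child)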
/**
* The unified version for predicate pushdown of normal operators and joins.
* This rule improves performance of predicate pushdown for cascading joins such as:
* Filter-Join-Join-Join. Most predicates can be pushed down in a single pass.
*/
object PushDownPredicates extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
CombineFilters.applyLocally
.orElse(PushPredicateThroughNonJoin.applyLocally)
.orElse(PushPredicateThroughJoin.applyLocally)
}
}
/**
* Pushes [[Filter]] operators through many operators iff:
* 1) the operator is deterministic
 * 2) the predicate is deterministic and the operator will not change any rows.
*
* This heuristic is valid assuming the expression evaluation cost is minimal.
*/
object PushPredicateThroughNonJoin extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan transform applyLocally
val applyLocally: PartialFunction[LogicalPlan, LogicalPlan] = {
    // SPARK-13473: We can't push the predicate down when the underlying projection outputs non-
    // deterministic field(s). Non-deterministic expressions are essentially stateful. This
    // implies that, for a given input row, the output is determined by the expression's initial
    // state and all the input rows processed before. In other words, the order of input rows
    // matters for non-deterministic expressions, while pushing down predicates changes the order.
    // This also applies to Aggregate.
case Filter(condition, project @ Project(fields, grandChild))
if fields.forall(_.deterministic) && canPushThroughCondition(grandChild, condition) =>
val aliasMap = getAliasMap(project)
project.copy(child = Filter(replaceAlias(condition, aliasMap), grandChild))
case filter @ Filter(condition, aggregate: Aggregate)
if aggregate.aggregateExpressions.forall(_.deterministic)
&& aggregate.groupingExpressions.nonEmpty =>
val aliasMap = getAliasMap(aggregate)
// For each filter, expand the alias and check if the filter can be evaluated using
// attributes produced by the aggregate operator's child operator.
val (candidates, nonDeterministic) =
splitConjunctivePredicates(condition).partition(_.deterministic)
val (pushDown, rest) = candidates.partition { cond =>
val replaced = replaceAlias(cond, aliasMap)
cond.references.nonEmpty && replaced.references.subsetOf(aggregate.child.outputSet)
}
val stayUp = rest ++ nonDeterministic
if (pushDown.nonEmpty) {
val pushDownPredicate = pushDown.reduce(And)
val replaced = replaceAlias(pushDownPredicate, aliasMap)
val newAggregate = aggregate.copy(child = Filter(replaced, aggregate.child))
// If there is no more filter to stay up, just eliminate the filter.
// Otherwise, create "Filter(stayUp) <- Aggregate <- Filter(pushDownPredicate)".
if (stayUp.isEmpty) newAggregate else Filter(stayUp.reduce(And), newAggregate)
} else {
filter
}
// Push [[Filter]] operators through [[Window]] operators. Parts of the predicate that can be
// pushed beneath must satisfy the following conditions:
    // 1. All the expressions are part of the window partitioning key. The expressions can be compound.
// 2. Deterministic.
// 3. Placed before any non-deterministic predicates.
case filter @ Filter(condition, w: Window)
if w.partitionSpec.forall(_.isInstanceOf[AttributeReference]) =>
val partitionAttrs = AttributeSet(w.partitionSpec.flatMap(_.references))
val (candidates, nonDeterministic) =
splitConjunctivePredicates(condition).partition(_.deterministic)
val (pushDown, rest) = candidates.partition { cond =>
cond.references.subsetOf(partitionAttrs)
}
val stayUp = rest ++ nonDeterministic
if (pushDown.nonEmpty) {
val pushDownPredicate = pushDown.reduce(And)
val newWindow = w.copy(child = Filter(pushDownPredicate, w.child))
if (stayUp.isEmpty) newWindow else Filter(stayUp.reduce(And), newWindow)
} else {
filter
}
case filter @ Filter(condition, union: Union) =>
// Union could change the rows, so non-deterministic predicate can't be pushed down
val (pushDown, stayUp) = splitConjunctivePredicates(condition).partition(_.deterministic)
if (pushDown.nonEmpty) {
val pushDownCond = pushDown.reduceLeft(And)
val output = union.output
val newGrandChildren = union.children.map { grandchild =>
val newCond = pushDownCond transform {
case e if output.exists(_.semanticEquals(e)) =>
grandchild.output(output.indexWhere(_.semanticEquals(e)))
}
assert(newCond.references.subsetOf(grandchild.outputSet))
Filter(newCond, grandchild)
}
val newUnion = union.withNewChildren(newGrandChildren)
if (stayUp.nonEmpty) {
Filter(stayUp.reduceLeft(And), newUnion)
} else {
newUnion
}
} else {
filter
}
case filter @ Filter(condition, watermark: EventTimeWatermark) =>
val (pushDown, stayUp) = splitConjunctivePredicates(condition).partition { p =>
p.deterministic && !p.references.contains(watermark.eventTime)
}
if (pushDown.nonEmpty) {
val pushDownPredicate = pushDown.reduceLeft(And)
val newWatermark = watermark.copy(child = Filter(pushDownPredicate, watermark.child))
// If there is no more filter to stay up, just eliminate the filter.
// Otherwise, create "Filter(stayUp) <- watermark <- Filter(pushDownPredicate)".
if (stayUp.isEmpty) newWatermark else Filter(stayUp.reduceLeft(And), newWatermark)
} else {
filter
}
case filter @ Filter(_, u: UnaryNode)
if canPushThrough(u) && u.expressions.forall(_.deterministic) =>
pushDownPredicate(filter, u.child) { predicate =>
u.withNewChildren(Seq(Filter(predicate, u.child)))
}
}
def getAliasMap(plan: Project): AttributeMap[Expression] = {
// Create a map of Aliases to their values from the child projection.
// e.g., 'SELECT a + b AS c, d ...' produces Map(c -> a + b).
AttributeMap(plan.projectList.collect { case a: Alias => (a.toAttribute, a.child) })
}
def getAliasMap(plan: Aggregate): AttributeMap[Expression] = {
// Find all the aliased expressions in the aggregate list that don't include any actual
// AggregateExpression or PythonUDF, and create a map from the alias to the expression
val aliasMap = plan.aggregateExpressions.collect {
case a: Alias if a.child.find(e => e.isInstanceOf[AggregateExpression] ||
PythonUDF.isGroupedAggPandasUDF(e)).isEmpty =>
(a.toAttribute, a.child)
}
AttributeMap(aliasMap)
}
def canPushThrough(p: UnaryNode): Boolean = p match {
// Note that some operators (e.g. project, aggregate, union) are being handled separately
// (earlier in this rule).
case _: AppendColumns => true
case _: Distinct => true
case _: Generate => true
case _: Pivot => true
case _: RepartitionByExpression => true
case _: Repartition => true
case _: ScriptTransformation => true
case _: Sort => true
case _: BatchEvalPython => true
case _: ArrowEvalPython => true
case _ => false
}
private def pushDownPredicate(
filter: Filter,
grandchild: LogicalPlan)(insertFilter: Expression => LogicalPlan): LogicalPlan = {
    // Only push down predicates that are deterministic and whose referenced attributes all
    // come from the grandchild.
// TODO: non-deterministic predicates could be pushed through some operators that do not change
// the rows.
val (candidates, nonDeterministic) =
splitConjunctivePredicates(filter.condition).partition(_.deterministic)
val (pushDown, rest) = candidates.partition { cond =>
cond.references.subsetOf(grandchild.outputSet)
}
val stayUp = rest ++ nonDeterministic
if (pushDown.nonEmpty) {
val newChild = insertFilter(pushDown.reduceLeft(And))
if (stayUp.nonEmpty) {
Filter(stayUp.reduceLeft(And), newChild)
} else {
newChild
}
} else {
filter
}
}
/**
* Check if we can safely push a filter through a projection, by making sure that predicate
* subqueries in the condition do not contain the same attributes as the plan they are moved
* into. This can happen when the plan and predicate subquery have the same source.
*/
private def canPushThroughCondition(plan: LogicalPlan, condition: Expression): Boolean = {
val attributes = plan.outputSet
val matched = condition.find {
case s: SubqueryExpression => s.plan.outputSet.intersect(attributes).nonEmpty
case _ => false
}
matched.isEmpty
}
}
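// Illustrative sketch of PushPredicateThroughNonJoin (not part of the original source): a
// deterministic predicate over an aliased expression is rewritten against the child's attributes
// and pushed below the Project:
//   Filter(c > 1, Project([a, b, a + b AS c], child))
//     => Project([a, b, a + b AS c], Filter(a + b > 1, child))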
/**
* Pushes down [[Filter]] operators where the `condition` can be
* evaluated using only the attributes of the left or right side of a join. Other
* [[Filter]] conditions are moved into the `condition` of the [[Join]].
*
* And also pushes down the join filter, where the `condition` can be evaluated using only the
 * attributes of the left or right side of the subquery when applicable.
*
* Check https://cwiki.apache.org/confluence/display/Hive/OuterJoinBehavior for more details
*/
object PushPredicateThroughJoin extends Rule[LogicalPlan] with PredicateHelper {
/**
* Splits join condition expressions or filter predicates (on a given join's output) into three
* categories based on the attributes required to evaluate them. Note that we explicitly exclude
* non-deterministic (i.e., stateful) condition expressions in canEvaluateInLeft or
* canEvaluateInRight to prevent pushing these predicates on either side of the join.
*
* @return (canEvaluateInLeft, canEvaluateInRight, haveToEvaluateInBoth)
*/
private def split(condition: Seq[Expression], left: LogicalPlan, right: LogicalPlan) = {
val (pushDownCandidates, nonDeterministic) = condition.partition(_.deterministic)
val (leftEvaluateCondition, rest) =
pushDownCandidates.partition(_.references.subsetOf(left.outputSet))
val (rightEvaluateCondition, commonCondition) =
rest.partition(expr => expr.references.subsetOf(right.outputSet))
(leftEvaluateCondition, rightEvaluateCondition, commonCondition ++ nonDeterministic)
}
private def canPushThrough(joinType: JoinType): Boolean = joinType match {
case _: InnerLike | LeftSemi | RightOuter | LeftOuter | LeftAnti | ExistenceJoin(_) => true
case _ => false
}
def apply(plan: LogicalPlan): LogicalPlan = plan transform applyLocally
val applyLocally: PartialFunction[LogicalPlan, LogicalPlan] = {
// push the where condition down into join filter
case f @ Filter(filterCondition, Join(left, right, joinType, joinCondition, hint))
if canPushThrough(joinType) =>
val (leftFilterConditions, rightFilterConditions, commonFilterCondition) =
split(splitConjunctivePredicates(filterCondition), left, right)
joinType match {
case _: InnerLike =>
// push down the single side `where` condition into respective sides
val newLeft = leftFilterConditions.
reduceLeftOption(And).map(Filter(_, left)).getOrElse(left)
val newRight = rightFilterConditions.
reduceLeftOption(And).map(Filter(_, right)).getOrElse(right)
val (newJoinConditions, others) =
commonFilterCondition.partition(canEvaluateWithinJoin)
val newJoinCond = (newJoinConditions ++ joinCondition).reduceLeftOption(And)
val join = Join(newLeft, newRight, joinType, newJoinCond, hint)
if (others.nonEmpty) {
Filter(others.reduceLeft(And), join)
} else {
join
}
case RightOuter =>
// push down the right side only `where` condition
val newLeft = left
val newRight = rightFilterConditions.
reduceLeftOption(And).map(Filter(_, right)).getOrElse(right)
val newJoinCond = joinCondition
val newJoin = Join(newLeft, newRight, RightOuter, newJoinCond, hint)
(leftFilterConditions ++ commonFilterCondition).
reduceLeftOption(And).map(Filter(_, newJoin)).getOrElse(newJoin)
case LeftOuter | LeftExistence(_) =>
// push down the left side only `where` condition
val newLeft = leftFilterConditions.
reduceLeftOption(And).map(Filter(_, left)).getOrElse(left)
val newRight = right
val newJoinCond = joinCondition
val newJoin = Join(newLeft, newRight, joinType, newJoinCond, hint)
(rightFilterConditions ++ commonFilterCondition).
reduceLeftOption(And).map(Filter(_, newJoin)).getOrElse(newJoin)
case other =>
throw new IllegalStateException(s"Unexpected join type: $other")
}
// push down the join filter into sub query scanning if applicable
case j @ Join(left, right, joinType, joinCondition, hint) if canPushThrough(joinType) =>
val (leftJoinConditions, rightJoinConditions, commonJoinCondition) =
split(joinCondition.map(splitConjunctivePredicates).getOrElse(Nil), left, right)
joinType match {
case _: InnerLike | LeftSemi =>
// push down the single side only join filter for both sides sub queries
val newLeft = leftJoinConditions.
reduceLeftOption(And).map(Filter(_, left)).getOrElse(left)
val newRight = rightJoinConditions.
reduceLeftOption(And).map(Filter(_, right)).getOrElse(right)
val newJoinCond = commonJoinCondition.reduceLeftOption(And)
Join(newLeft, newRight, joinType, newJoinCond, hint)
case RightOuter =>
// push down the left side only join filter for left side sub query
val newLeft = leftJoinConditions.
reduceLeftOption(And).map(Filter(_, left)).getOrElse(left)
val newRight = right
val newJoinCond = (rightJoinConditions ++ commonJoinCondition).reduceLeftOption(And)
Join(newLeft, newRight, RightOuter, newJoinCond, hint)
case LeftOuter | LeftAnti | ExistenceJoin(_) =>
// push down the right side only join filter for right sub query
val newLeft = left
val newRight = rightJoinConditions.
reduceLeftOption(And).map(Filter(_, right)).getOrElse(right)
val newJoinCond = (leftJoinConditions ++ commonJoinCondition).reduceLeftOption(And)
Join(newLeft, newRight, joinType, newJoinCond, hint)
case other =>
throw new IllegalStateException(s"Unexpected join type: $other")
}
}
}
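// Illustrative sketch of PushPredicateThroughJoin (not part of the original source): for an inner
// join, single-sided WHERE conjuncts move below the join and the cross-side conjunct becomes part
// of the join condition:
//   Filter(t1.a > 1 AND t1.a = t2.b, Join(t1, t2, Inner, None))
//     => Join(Filter(t1.a > 1, t1), t2, Inner, Some(t1.a = t2.b))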
/**
* Combines two adjacent [[Limit]] operators into one, merging the
* expressions into one single expression.
*/
object CombineLimits extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case GlobalLimit(le, GlobalLimit(ne, grandChild)) =>
GlobalLimit(Least(Seq(ne, le)), grandChild)
case LocalLimit(le, LocalLimit(ne, grandChild)) =>
LocalLimit(Least(Seq(ne, le)), grandChild)
case Limit(le, Limit(ne, grandChild)) =>
Limit(Least(Seq(ne, le)), grandChild)
}
}
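// Illustrative sketch of CombineLimits (not part of the original source): adjacent limits are
// merged by taking the smaller value, which constant folding can later reduce to a literal:
//   GlobalLimit(5, GlobalLimit(3, child))  =>  GlobalLimit(Least(Seq(3, 5)), child)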
/**
 * Check if there are any cartesian products between joins of any type in the optimized plan tree.
* Throw an error if a cartesian product is found without an explicit cross join specified.
* This rule is effectively disabled if the CROSS_JOINS_ENABLED flag is true.
*
* This rule must be run AFTER the ReorderJoin rule since the join conditions for each join must be
* collected before checking if it is a cartesian product. If you have
* SELECT * from R, S where R.r = S.s,
* the join between R and S is not a cartesian product and therefore should be allowed.
* The predicate R.r = S.s is not recognized as a join condition until the ReorderJoin rule.
*
* This rule must be run AFTER the batch "LocalRelation", since a join with empty relation should
* not be a cartesian product.
*/
object CheckCartesianProducts extends Rule[LogicalPlan] with PredicateHelper {
/**
* Check if a join is a cartesian product. Returns true if
* there are no join conditions involving references from both left and right.
*/
def isCartesianProduct(join: Join): Boolean = {
val conditions = join.condition.map(splitConjunctivePredicates).getOrElse(Nil)
conditions match {
case Seq(Literal.FalseLiteral) | Seq(Literal(null, BooleanType)) => false
case _ => !conditions.map(_.references).exists(refs =>
refs.exists(join.left.outputSet.contains) && refs.exists(join.right.outputSet.contains))
}
}
def apply(plan: LogicalPlan): LogicalPlan =
if (SQLConf.get.crossJoinEnabled) {
plan
} else plan transform {
case j @ Join(left, right, Inner | LeftOuter | RightOuter | FullOuter, _, _)
if isCartesianProduct(j) =>
throw new AnalysisException(
s"""Detected implicit cartesian product for ${j.joinType.sql} join between logical plans
|${left.treeString(false).trim}
|and
|${right.treeString(false).trim}
|Join condition is missing or trivial.
|Either: use the CROSS JOIN syntax to allow cartesian products between these
|relations, or: enable implicit cartesian products by setting the configuration
|variable spark.sql.crossJoin.enabled=true"""
.stripMargin)
}
}
/**
* Speeds up aggregates on fixed-precision decimals by executing them on unscaled Long values.
*
* This uses the same rules for increasing the precision and scale of the output as
* [[org.apache.spark.sql.catalyst.analysis.DecimalPrecision]].
*/
object DecimalAggregates extends Rule[LogicalPlan] {
import Decimal.MAX_LONG_DIGITS
/** Maximum number of decimal digits representable precisely in a Double */
private val MAX_DOUBLE_DIGITS = 15
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsDown {
case we @ WindowExpression(ae @ AggregateExpression(af, _, _, _, _), _) => af match {
case Sum(e @ DecimalType.Expression(prec, scale)) if prec + 10 <= MAX_LONG_DIGITS =>
MakeDecimal(we.copy(windowFunction = ae.copy(aggregateFunction = Sum(UnscaledValue(e)))),
prec + 10, scale)
case Average(e @ DecimalType.Expression(prec, scale)) if prec + 4 <= MAX_DOUBLE_DIGITS =>
val newAggExpr =
we.copy(windowFunction = ae.copy(aggregateFunction = Average(UnscaledValue(e))))
Cast(
Divide(newAggExpr, Literal.create(math.pow(10.0, scale), DoubleType)),
DecimalType(prec + 4, scale + 4), Option(SQLConf.get.sessionLocalTimeZone))
case _ => we
}
case ae @ AggregateExpression(af, _, _, _, _) => af match {
case Sum(e @ DecimalType.Expression(prec, scale)) if prec + 10 <= MAX_LONG_DIGITS =>
MakeDecimal(ae.copy(aggregateFunction = Sum(UnscaledValue(e))), prec + 10, scale)
case Average(e @ DecimalType.Expression(prec, scale)) if prec + 4 <= MAX_DOUBLE_DIGITS =>
val newAggExpr = ae.copy(aggregateFunction = Average(UnscaledValue(e)))
Cast(
Divide(newAggExpr, Literal.create(math.pow(10.0, scale), DoubleType)),
DecimalType(prec + 4, scale + 4), Option(SQLConf.get.sessionLocalTimeZone))
case _ => ae
}
}
}
}
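// Illustrative sketch of DecimalAggregates (not part of the original source): a sum over
// decimal(5, 2) values (precision 5 + 10 <= 18 Long digits) is computed on unscaled Longs:
//   sum(d)  =>  MakeDecimal(sum(UnscaledValue(d)), 15, 2)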
/**
* Converts local operations (i.e. ones that don't require data exchange) on `LocalRelation` to
* another `LocalRelation`.
*/
object ConvertToLocalRelation extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case Project(projectList, LocalRelation(output, data, isStreaming))
if !projectList.exists(hasUnevaluableExpr) =>
val projection = new InterpretedMutableProjection(projectList, output)
projection.initialize(0)
LocalRelation(projectList.map(_.toAttribute), data.map(projection(_).copy()), isStreaming)
case Limit(IntegerLiteral(limit), LocalRelation(output, data, isStreaming)) =>
LocalRelation(output, data.take(limit), isStreaming)
case Filter(condition, LocalRelation(output, data, isStreaming))
if !hasUnevaluableExpr(condition) =>
val predicate = Predicate.create(condition, output)
predicate.initialize(0)
LocalRelation(output, data.filter(row => predicate.eval(row)), isStreaming)
}
private def hasUnevaluableExpr(expr: Expression): Boolean = {
expr.find(e => e.isInstanceOf[Unevaluable] && !e.isInstanceOf[AttributeReference]).isDefined
}
}
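// Illustrative sketch of ConvertToLocalRelation (not part of the original source): projections,
// limits and filters over a LocalRelation are evaluated eagerly at optimization time:
//   Project([a + 1 AS b], LocalRelation(a, [1, 2, 3]))  =>  LocalRelation(b, [2, 3, 4])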
/**
* Replaces logical [[Distinct]] operator with an [[Aggregate]] operator.
* {{{
* SELECT DISTINCT f1, f2 FROM t ==> SELECT f1, f2 FROM t GROUP BY f1, f2
* }}}
*/
object ReplaceDistinctWithAggregate extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case Distinct(child) => Aggregate(child.output, child.output, child)
}
}
/**
* Replaces logical [[Deduplicate]] operator with an [[Aggregate]] operator.
*/
object ReplaceDeduplicateWithAggregate extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case Deduplicate(keys, child) if !child.isStreaming =>
val keyExprIds = keys.map(_.exprId)
val aggCols = child.output.map { attr =>
if (keyExprIds.contains(attr.exprId)) {
attr
} else {
Alias(new First(attr).toAggregateExpression(), attr.name)(attr.exprId)
}
}
      // SPARK-22951: Physical aggregate operators distinguish global aggregation and grouping
// aggregations by checking the number of grouping keys. The key difference here is that a
// global aggregation always returns at least one row even if there are no input rows. Here
// we append a literal when the grouping key list is empty so that the result aggregate
// operator is properly treated as a grouping aggregation.
val nonemptyKeys = if (keys.isEmpty) Literal(1) :: Nil else keys
Aggregate(nonemptyKeys, aggCols, child)
}
}
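// Illustrative sketch of ReplaceDeduplicateWithAggregate (not part of the original source): on a
// batch (non-streaming) child with output [a, b], deduplication on key a becomes:
//   Deduplicate([a], child)  =>  Aggregate([a], [a, first(b) AS b], child)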
/**
* Replaces logical [[Intersect]] operator with a left-semi [[Join]] operator.
* {{{
* SELECT a1, a2 FROM Tab1 INTERSECT SELECT b1, b2 FROM Tab2
* ==> SELECT DISTINCT a1, a2 FROM Tab1 LEFT SEMI JOIN Tab2 ON a1<=>b1 AND a2<=>b2
* }}}
*
* Note:
* 1. This rule is only applicable to INTERSECT DISTINCT. Do not use it for INTERSECT ALL.
* 2. This rule has to be done after de-duplicating the attributes; otherwise, the generated
* join conditions will be incorrect.
*/
object ReplaceIntersectWithSemiJoin extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case Intersect(left, right, false) =>
assert(left.output.size == right.output.size)
val joinCond = left.output.zip(right.output).map { case (l, r) => EqualNullSafe(l, r) }
Distinct(Join(left, right, LeftSemi, joinCond.reduceLeftOption(And), JoinHint.NONE))
}
}
/**
* Replaces logical [[Except]] operator with a left-anti [[Join]] operator.
* {{{
* SELECT a1, a2 FROM Tab1 EXCEPT SELECT b1, b2 FROM Tab2
* ==> SELECT DISTINCT a1, a2 FROM Tab1 LEFT ANTI JOIN Tab2 ON a1<=>b1 AND a2<=>b2
* }}}
*
* Note:
* 1. This rule is only applicable to EXCEPT DISTINCT. Do not use it for EXCEPT ALL.
* 2. This rule has to be done after de-duplicating the attributes; otherwise, the generated
* join conditions will be incorrect.
*/
object ReplaceExceptWithAntiJoin extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case Except(left, right, false) =>
assert(left.output.size == right.output.size)
val joinCond = left.output.zip(right.output).map { case (l, r) => EqualNullSafe(l, r) }
Distinct(Join(left, right, LeftAnti, joinCond.reduceLeftOption(And), JoinHint.NONE))
}
}
/**
* Replaces logical [[Except]] operator using a combination of Union, Aggregate
 * and Generate operators.
*
* Input Query :
* {{{
* SELECT c1 FROM ut1 EXCEPT ALL SELECT c1 FROM ut2
* }}}
*
* Rewritten Query:
* {{{
* SELECT c1
* FROM (
* SELECT replicate_rows(sum_val, c1)
* FROM (
* SELECT c1, sum_val
* FROM (
* SELECT c1, sum(vcol) AS sum_val
* FROM (
* SELECT 1L as vcol, c1 FROM ut1
* UNION ALL
* SELECT -1L as vcol, c1 FROM ut2
* ) AS union_all
* GROUP BY union_all.c1
* )
* WHERE sum_val > 0
* )
* )
* }}}
*/
object RewriteExceptAll extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case Except(left, right, true) =>
assert(left.output.size == right.output.size)
val newColumnLeft = Alias(Literal(1L), "vcol")()
val newColumnRight = Alias(Literal(-1L), "vcol")()
val modifiedLeftPlan = Project(Seq(newColumnLeft) ++ left.output, left)
val modifiedRightPlan = Project(Seq(newColumnRight) ++ right.output, right)
val unionPlan = Union(modifiedLeftPlan, modifiedRightPlan)
val aggSumCol =
Alias(AggregateExpression(Sum(unionPlan.output.head.toAttribute), Complete, false), "sum")()
val aggOutputColumns = left.output ++ Seq(aggSumCol)
val aggregatePlan = Aggregate(left.output, aggOutputColumns, unionPlan)
val filteredAggPlan = Filter(GreaterThan(aggSumCol.toAttribute, Literal(0L)), aggregatePlan)
val genRowPlan = Generate(
ReplicateRows(Seq(aggSumCol.toAttribute) ++ left.output),
unrequiredChildIndex = Nil,
outer = false,
qualifier = None,
left.output,
filteredAggPlan
)
Project(left.output, genRowPlan)
}
}
/**
* Replaces logical [[Intersect]] operator using a combination of Union, Aggregate
 * and Generate operators.
*
* Input Query :
* {{{
* SELECT c1 FROM ut1 INTERSECT ALL SELECT c1 FROM ut2
* }}}
*
* Rewritten Query:
* {{{
* SELECT c1
* FROM (
* SELECT replicate_row(min_count, c1)
* FROM (
* SELECT c1, If (vcol1_cnt > vcol2_cnt, vcol2_cnt, vcol1_cnt) AS min_count
* FROM (
* SELECT c1, count(vcol1) as vcol1_cnt, count(vcol2) as vcol2_cnt
* FROM (
 *             SELECT true as vcol1, null as vcol2, c1 FROM ut1
* UNION ALL
* SELECT null as vcol1, true as vcol2, c1 FROM ut2
* ) AS union_all
* GROUP BY c1
* HAVING vcol1_cnt >= 1 AND vcol2_cnt >= 1
* )
* )
* )
* }}}
*/
object RewriteIntersectAll extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case Intersect(left, right, true) =>
assert(left.output.size == right.output.size)
val trueVcol1 = Alias(Literal(true), "vcol1")()
val nullVcol1 = Alias(Literal(null, BooleanType), "vcol1")()
val trueVcol2 = Alias(Literal(true), "vcol2")()
val nullVcol2 = Alias(Literal(null, BooleanType), "vcol2")()
// Add a projection on the top of left and right plans to project out
// the additional virtual columns.
val leftPlanWithAddedVirtualCols = Project(Seq(trueVcol1, nullVcol2) ++ left.output, left)
val rightPlanWithAddedVirtualCols = Project(Seq(nullVcol1, trueVcol2) ++ right.output, right)
val unionPlan = Union(leftPlanWithAddedVirtualCols, rightPlanWithAddedVirtualCols)
// Expressions to compute count and minimum of both the counts.
val vCol1AggrExpr =
Alias(AggregateExpression(Count(unionPlan.output(0)), Complete, false), "vcol1_count")()
val vCol2AggrExpr =
Alias(AggregateExpression(Count(unionPlan.output(1)), Complete, false), "vcol2_count")()
val ifExpression = Alias(If(
GreaterThan(vCol1AggrExpr.toAttribute, vCol2AggrExpr.toAttribute),
vCol2AggrExpr.toAttribute,
vCol1AggrExpr.toAttribute
), "min_count")()
val aggregatePlan = Aggregate(left.output,
Seq(vCol1AggrExpr, vCol2AggrExpr) ++ left.output, unionPlan)
val filterPlan = Filter(And(GreaterThanOrEqual(vCol1AggrExpr.toAttribute, Literal(1L)),
GreaterThanOrEqual(vCol2AggrExpr.toAttribute, Literal(1L))), aggregatePlan)
val projectMinPlan = Project(left.output ++ Seq(ifExpression), filterPlan)
// Apply the replicator to replicate rows based on min_count
val genRowPlan = Generate(
ReplicateRows(Seq(ifExpression.toAttribute) ++ left.output),
unrequiredChildIndex = Nil,
outer = false,
qualifier = None,
left.output,
projectMinPlan
)
Project(left.output, genRowPlan)
}
}
/**
 * Removes literals from group expressions in [[Aggregate]], as they have no effect on the result
 * and only make the grouping key bigger.
*/
object RemoveLiteralFromGroupExpressions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case a @ Aggregate(grouping, _, _) if grouping.nonEmpty =>
val newGrouping = grouping.filter(!_.foldable)
if (newGrouping.nonEmpty) {
a.copy(groupingExpressions = newGrouping)
} else {
// All grouping expressions are literals. We should not drop them all, because this can
// change the return semantics when the input of the Aggregate is empty (SPARK-17114). We
        // instead replace them with a single, easy-to-hash/sort literal expression.
a.copy(groupingExpressions = Seq(Literal(0, IntegerType)))
}
}
}
/**
 * Removes repetition from group expressions in [[Aggregate]], as they have no effect on the result
 * and only make the grouping key bigger.
*/
object RemoveRepetitionFromGroupExpressions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case a @ Aggregate(grouping, _, _) if grouping.size > 1 =>
val newGrouping = ExpressionSet(grouping).toSeq
if (newGrouping.size == grouping.size) {
a
} else {
a.copy(groupingExpressions = newGrouping)
}
}
}
/**
* Replaces GlobalLimit 0 and LocalLimit 0 nodes (subtree) with empty Local Relation, as they don't
* return any rows.
*/
object OptimizeLimitZero extends Rule[LogicalPlan] {
  // returns an empty LocalRelation corresponding to the given plan
private def empty(plan: LogicalPlan) =
LocalRelation(plan.output, data = Seq.empty, isStreaming = plan.isStreaming)
def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
// Nodes below GlobalLimit or LocalLimit can be pruned if the limit value is zero (0).
// Any subtree in the logical plan that has GlobalLimit 0 or LocalLimit 0 as its root is
// semantically equivalent to an empty relation.
//
// In such cases, the effects of Limit 0 can be propagated through the Logical Plan by replacing
// the (Global/Local) Limit subtree with an empty LocalRelation, thereby pruning the subtree
// below and triggering other optimization rules of PropagateEmptyRelation to propagate the
// changes up the Logical Plan.
//
// Replace Global Limit 0 nodes with empty Local Relation
case gl @ GlobalLimit(IntegerLiteral(0), _) =>
empty(gl)
// Note: For all SQL queries, if a LocalLimit 0 node exists in the Logical Plan, then a
// GlobalLimit 0 node would also exist. Thus, the above case would be sufficient to handle
// almost all cases. However, if a user explicitly creates a Logical Plan with LocalLimit 0 node
// then the following rule will handle that case as well.
//
// Replace Local Limit 0 nodes with empty Local Relation
case ll @ LocalLimit(IntegerLiteral(0), _) =>
empty(ll)
}
}
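// Illustrative sketch of OptimizeLimitZero (not part of the original source): a zero limit
// collapses its whole subtree into an empty LocalRelation:
//   GlobalLimit(0, Join(t1, t2, Inner, cond))  =>  LocalRelation(<join output>, data = Seq.empty)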
rednaxelafx/apache-spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala | Scala | apache-2.0 | 80,217
package beam.integration
import akka.actor._
import akka.testkit.TestKitBase
import beam.agentsim.agents.PersonTestUtil
import beam.agentsim.agents.ridehail.{RideHailIterationHistory, RideHailSurgePricingManager}
import beam.agentsim.events.PathTraversalEvent
import beam.router.Modes.BeamMode
import beam.router.{BeamSkimmer, RouteHistory, TravelTimeObserved}
import beam.sflight.RouterForTest
import beam.sim.common.GeoUtilsImpl
import beam.sim.{BeamHelper, BeamMobsim}
import beam.utils.SimRunnerForTest
import beam.utils.TestConfigUtils.testConfig
import com.typesafe.config.ConfigFactory
import org.matsim.api.core.v01.events.{ActivityEndEvent, Event, PersonDepartureEvent, PersonEntersVehicleEvent}
import org.matsim.api.core.v01.population.{Activity, Leg}
import org.matsim.core.events.handler.BasicEventHandler
import org.scalatest._
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.language.postfixOps
class SingleModeSpec
extends WordSpecLike
with TestKitBase
with SimRunnerForTest
with RouterForTest
with BeamHelper
with Matchers {
def config: com.typesafe.config.Config =
ConfigFactory
.parseString("""akka.test.timefactor = 10""")
.withFallback(testConfig("test/input/sf-light/sf-light.conf").resolve())
def outputDirPath: String = basePath + "/" + testOutputDir + "single-mode-test"
lazy implicit val system: ActorSystem = ActorSystem("SingleModeSpec", config)
"The agentsim" must {
"let everybody walk when their plan says so" in {
scenario.getPopulation.getPersons.values.asScala
.foreach(p => PersonTestUtil.putDefaultBeamAttributes(p, BeamMode.allModes))
scenario.getPopulation.getPersons
.values()
.forEach { person =>
{
person.getSelectedPlan.getPlanElements.asScala.collect {
case leg: Leg =>
leg.setMode("walk")
}
}
}
val events = mutable.ListBuffer[Event]()
services.matsimServices.getEvents.addHandler(
new BasicEventHandler {
override def handleEvent(event: Event): Unit = {
event match {
case event: PersonDepartureEvent =>
events += event
case _ =>
}
}
}
)
val mobsim = new BeamMobsim(
services,
beamScenario,
beamScenario.transportNetwork,
services.tollCalculator,
scenario,
services.matsimServices.getEvents,
system,
new RideHailSurgePricingManager(services),
new RideHailIterationHistory(),
new RouteHistory(services.beamConfig),
new BeamSkimmer(beamScenario, services.geo),
new TravelTimeObserved(beamScenario, services.geo),
new GeoUtilsImpl(services.beamConfig),
services.networkHelper
)
mobsim.run()
assert(events.nonEmpty)
var seenEvent = false
events.foreach {
case event: PersonDepartureEvent =>
assert(
event.getLegMode == "walk" || event.getLegMode == "be_a_tnc_driver" || event.getLegMode == "be_a_household_cav_driver" || event.getLegMode == "be_a_transit_driver" || event.getLegMode == "cav"
)
seenEvent = true
}
assert(seenEvent, "Have not seend `PersonDepartureEvent`")
}
"let everybody take transit when their plan says so" in {
scenario.getPopulation.getPersons.values.asScala
.foreach(p => PersonTestUtil.putDefaultBeamAttributes(p, BeamMode.allModes))
scenario.getPopulation.getPersons
.values()
.forEach { person =>
person.getSelectedPlan.getPlanElements.asScala.collect {
case leg: Leg =>
leg.setMode("walk_transit")
}
}
val events = mutable.ListBuffer[Event]()
services.matsimServices.getEvents.addHandler(
new BasicEventHandler {
override def handleEvent(event: Event): Unit = {
event match {
case event: PersonDepartureEvent =>
events += event
case _ =>
}
}
}
)
val mobsim = new BeamMobsim(
services,
beamScenario,
beamScenario.transportNetwork,
services.tollCalculator,
scenario,
services.matsimServices.getEvents,
system,
new RideHailSurgePricingManager(services),
new RideHailIterationHistory(),
new RouteHistory(services.beamConfig),
new BeamSkimmer(beamScenario, services.geo),
new TravelTimeObserved(beamScenario, services.geo),
new GeoUtilsImpl(services.beamConfig),
services.networkHelper
)
mobsim.run()
assert(events.nonEmpty)
var seenEvent = false
events.foreach {
case event: PersonDepartureEvent =>
assert(
event.getLegMode == "walk" || event.getLegMode == "walk_transit" || event.getLegMode == "be_a_tnc_driver" || event.getLegMode == "be_a_household_cav_driver" || event.getLegMode == "be_a_transit_driver" || event.getLegMode == "cav"
)
seenEvent = true
}
assert(seenEvent, "Have not seend `PersonDepartureEvent`")
}
"let everybody take drive_transit when their plan says so" in {
scenario.getPopulation.getPersons.values.asScala
.foreach(p => PersonTestUtil.putDefaultBeamAttributes(p, BeamMode.allModes))
// Here, we only set the mode for the first leg of each tour -- prescribing a mode for the tour,
// but not for individual legs except the first one.
// We want to make sure that our car is returned home.
scenario.getPopulation.getPersons
.values()
.forEach { person =>
{
val newPlanElements = person.getSelectedPlan.getPlanElements.asScala.collect {
case activity: Activity if activity.getType == "Home" =>
Seq(activity, scenario.getPopulation.getFactory.createLeg("drive_transit"))
case activity: Activity =>
Seq(activity)
case leg: Leg =>
Nil
}.flatten
if (newPlanElements.last.isInstanceOf[Leg]) {
newPlanElements.remove(newPlanElements.size - 1)
}
person.getSelectedPlan.getPlanElements.clear()
newPlanElements.foreach {
case activity: Activity =>
person.getSelectedPlan.addActivity(activity)
case leg: Leg =>
person.getSelectedPlan.addLeg(leg)
}
}
}
val events = mutable.ListBuffer[Event]()
services.matsimServices.getEvents.addHandler(
new BasicEventHandler {
override def handleEvent(event: Event): Unit = {
event match {
case event @ (_: PersonDepartureEvent | _: ActivityEndEvent) =>
events += event
case _ =>
}
}
}
)
val mobsim = new BeamMobsim(
services,
beamScenario,
beamScenario.transportNetwork,
services.tollCalculator,
scenario,
services.matsimServices.getEvents,
system,
new RideHailSurgePricingManager(services),
new RideHailIterationHistory(),
new RouteHistory(services.beamConfig),
new BeamSkimmer(beamScenario, services.geo),
new TravelTimeObserved(beamScenario, services.geo),
new GeoUtilsImpl(services.beamConfig),
services.networkHelper
)
mobsim.run()
assert(events.nonEmpty)
var seenEvent = false
events.collect {
case event: PersonDepartureEvent =>
// drive_transit can fail -- maybe I don't have a car
assert(
event.getLegMode == "walk" || event.getLegMode == "walk_transit" || event.getLegMode == "drive_transit" || event.getLegMode == "be_a_tnc_driver" || event.getLegMode == "be_a_household_cav_driver" || event.getLegMode == "be_a_transit_driver" || event.getLegMode == "cav"
)
seenEvent = true
}
assert(seenEvent, "Have not seen `PersonDepartureEvent`")
val eventsByPerson = events.groupBy(_.getAttributes.get("person"))
val filteredEventsByPerson = eventsByPerson.filter {
_._2
.filter(_.isInstanceOf[ActivityEndEvent])
.sliding(2)
.exists(
pair => pair.forall(activity => activity.asInstanceOf[ActivityEndEvent].getActType != "Home")
)
}
eventsByPerson.map {
_._2.span {
case event: ActivityEndEvent if event.getActType == "Home" =>
true
case _ =>
false
}
}
// TODO: Test that what can be printed with the line below makes sense (chains of modes)
      // filteredEventsByPerson.map(_._2.mkString("--\n","\n","--\n")).foreach(print(_))
}
"let everybody drive when their plan says so" in {
scenario.getPopulation.getPersons.values.asScala
.foreach(p => PersonTestUtil.putDefaultBeamAttributes(p, BeamMode.allModes))
scenario.getPopulation.getPersons
.values()
.forEach { person =>
{
person.getSelectedPlan.getPlanElements.asScala.collect {
case leg: Leg =>
leg.setMode("car")
}
}
}
val events = mutable.ListBuffer[Event]()
services.matsimServices.getEvents.addHandler(
new BasicEventHandler {
override def handleEvent(event: Event): Unit = {
event match {
case event @ (_: PersonDepartureEvent | _: ActivityEndEvent | _: PathTraversalEvent |
_: PersonEntersVehicleEvent) =>
events += event
case _ =>
}
}
}
)
val mobsim = new BeamMobsim(
services,
beamScenario,
beamScenario.transportNetwork,
services.tollCalculator,
scenario,
services.matsimServices.getEvents,
system,
new RideHailSurgePricingManager(services),
new RideHailIterationHistory(),
new RouteHistory(services.beamConfig),
new BeamSkimmer(beamScenario, services.geo),
new TravelTimeObserved(beamScenario, services.geo),
new GeoUtilsImpl(services.beamConfig),
services.networkHelper
)
mobsim.run()
assert(events.nonEmpty)
var seenEvent = false
events.collect {
case event: PersonDepartureEvent =>
          // We still get some failing car routes.
// TODO: Find root cause, fix, and remove "walk" here.
// See SfLightRouterSpec.
assert(
event.getLegMode == "walk" || event.getLegMode == "car" || event.getLegMode == "be_a_tnc_driver" || event.getLegMode == "be_a_household_cav_driver" || event.getLegMode == "be_a_transit_driver" || event.getLegMode == "cav"
)
seenEvent = true
}
assert(seenEvent, "Have not seen `PersonDepartureEvent`")
}
}
}
colinsheppard/beam | src/test/scala/beam/integration/SingleModeSpec.scala | Scala | gpl-3.0 | 11,142
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.contrib.scalaz
import _root_.scalaz._, Scalaz._
trait bind {
implicit final class MoreBindOps[F[_], A](val self: F[A])(implicit val F: Bind[F]) extends _root_.scalaz.syntax.Ops[F[A]] {
def <<[B](b: => F[B]): F[A] = F.bind(self)(a => b.as(a))
}
}
object bind extends bind
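// Illustrative usage sketch (not part of the original source; assumes `import bind._` and the
// usual scalaz instances are in scope). `fa << fb` sequences the effect of `fb` but keeps the
// result of `fa`:
//   Option(1) << Option("ignored")        // Some(1)
//   Option(1) << (None: Option[String])   // None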
slamdata/quasar | foundation/src/main/scala/quasar/contrib/scalaz/bind.scala | Scala | apache-2.0 | 903
package foil
import java.util.ArrayList
import java.io._
@SerialVersionUID(100L)
class Term(var name:String) extends Serializable{
override def toString() : String = {
name
}
def canEqual(that: Any) = that.isInstanceOf[Term]
def apply_bindings(bindings: Map[String, List[List[String]]]) : Term = {
this
}
}
object Term {
/*
* Finds target variable position in the right-side predicate variables list
*/
  def findTargetVariablePosition(targetVariable: Term, rightSideRuleVariables: List[Term]) = {
// find target variable position in the variables list of right-side predicate
val result = (rightSideRuleVariables.indexOf(targetVariable), targetVariable)
//debug(targetVariable + " " + position)
result
}
/*
* we store target position and body variable position
* term object can be variable or atom
*/
def positionList(target: (String, ArrayList[Term]), predicateVars: List[Term]) = {
val targetVars = target._2 // get variables list for the target predicate
val positionList = new ArrayList[(Int, (Int, Term))]
for (index <- 0 until targetVars.size()) {
val targetVar = targetVars.get(index)
      val varPosition = Term.findTargetVariablePosition(targetVar, predicateVars)
if (varPosition._1 > -1) { // target variable exists on the right side
// they both must match in body predicate and target predicate tuples
positionList.add((index, varPosition))
}
}
positionList
}
}
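// Illustrative sketch (not part of the original source; data is hypothetical): for a target
// predicate p(X, Y) and a body predicate with variables (Y, Z), positionList pairs each target
// variable's index with its position in the body predicate:
//   positionList(("p", [X, Y]), List(Y, Z))  =>  [(1, (0, Y))]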
@SerialVersionUID(100L)
class Atom(name:String) extends Term(name:String) with Serializable {
override def hashCode = {
val hash = if (name.isEmpty()) 0 else name.hashCode
super.hashCode + hash
}
override def equals(other: Any): Boolean = other match {
case that: Atom => that.canEqual(this) && this.name == that.name
case _ => false
}
// TODO: check
override def apply_bindings(bindings: Map[String, List[List[String]]]) : Term = {
//TODO: check
//if bindings .has_key(self): return bindings[self]
//else: return self
this
}
}
@SerialVersionUID(100L)
class Var(name:String) extends Term(name:String) with Serializable {
var scope: List[String] = null
def this(name: String, scope: List[String]) {
this(name)
this.scope = scope
}
override def equals(other: Any): Boolean = other match {
case other: Var => other.canEqual(this) && this.name == other.name
case _ => false
}
// TODO: check
override def apply_bindings(bindings: Map[String, List[List[String]]]) : Term = {
/*if bindings.has_key(self): return bindings[self]
else: return self*/
this
}
}
object Var {
var unique_count: Int = 0
def get_unique(variable: Var) : Var = {
Var.unique_count += 1
new Var("@_" + Var.unique_count + "_" + variable.name)
}
}
@SerialVersionUID(100L)
class Rule (predicates: ArrayList[Predicate]) extends Serializable{
def getPredicates = {predicates}
}
shurkhovetskyy/foil | src/main/scala/foil/Terms.scala | Scala | gpl-3.0 | 2,992
package java.util.regex
import scala.annotation.switch
import scala.scalajs.js
final class Pattern private (jsRegExp: js.RegExp, _pattern: String, _flags: Int)
extends Serializable {
import Pattern._
def pattern(): String = _pattern
def flags(): Int = _flags
override def toString(): String = pattern
private[regex] def newJSRegExp(): js.RegExp = {
val r = new js.RegExp(jsRegExp)
if (r ne jsRegExp) {
r
} else {
/* Workaround for the PhantomJS 1.x bug
* https://github.com/ariya/phantomjs/issues/11494
* which causes new js.RegExp(jsRegExp) to return the same object,
* rather than a new one.
* We therefore reconstruct the pattern and flags used to create
* jsRegExp and create a new one from there.
*/
val jsFlags = {
(if (jsRegExp.global) "g" else "") +
(if (jsRegExp.ignoreCase) "i" else "") +
(if (jsRegExp.multiline) "m" else "")
}
new js.RegExp(jsRegExp.source, jsFlags)
}
}
def matcher(input: CharSequence): Matcher =
new Matcher(this, input, 0, input.length)
def split(input: CharSequence): Array[String] =
split(input, 0)
def split(input: CharSequence, limit: Int): Array[String] = {
val lim = if (limit > 0) limit else Int.MaxValue
val result = js.Array[String]()
val inputStr = input.toString
val matcher = this.matcher(inputStr)
var prevEnd = 0
// Actually split original string
while ((result.length < lim-1) && matcher.find()) {
result.push(inputStr.substring(prevEnd, matcher.start))
prevEnd = matcher.end
}
result.push(inputStr.substring(prevEnd))
// Remove a leading empty element iff the first match was zero-length
// and there is no other place the regex matches
if (prevEnd == 0 && result.length == 2 && (lim > 2 || !matcher.find())) {
Array(inputStr)
} else {
var len = result.length
if (limit == 0) {
while (len > 1 && result(len-1).isEmpty)
len -= 1
}
val actualResult = new Array[String](len)
result.copyToArray(actualResult)
actualResult
}
}
}
object Pattern {
final val UNIX_LINES = 0x01
final val CASE_INSENSITIVE = 0x02
final val COMMENTS = 0x04
final val MULTILINE = 0x08
final val LITERAL = 0x10
final val DOTALL = 0x20
final val UNICODE_CASE = 0x40
final val CANON_EQ = 0x80
final val UNICODE_CHARACTER_CLASS = 0x100
def compile(regex: String, flags: Int): Pattern = {
val (jsPattern, flags1) = {
if ((flags & LITERAL) != 0) {
(quote(regex), flags)
} else {
trySplitHack(regex, flags) orElse
tryFlagHack(regex, flags) getOrElse
(regex, flags)
}
}
val jsFlags = {
"g" +
(if ((flags1 & CASE_INSENSITIVE) != 0) "i" else "") +
(if ((flags1 & MULTILINE) != 0) "m" else "")
}
val jsRegExp = new js.RegExp(jsPattern, jsFlags)
new Pattern(jsRegExp, regex, flags1)
}
def compile(regex: String): Pattern =
compile(regex, 0)
def matches(regex: String, input: CharSequence): Boolean =
compile(regex).matcher(input).matches()
def quote(s: String): String = {
var result = ""
var i = 0
while (i < s.length) {
val c = s.charAt(i)
result += ((c: @switch) match {
      case '\\' | '.' | '(' | ')' | '[' | ']' | '{' | '}' | '|'
          | '?' | '*' | '+' | '^' | '$' => "\\"+c
case _ => c
})
i += 1
}
result
}
/** This is a hack to support StringLike.split().
   * It replaces occurrences of \Q<char>\E by quoted(<char>)
*/
@inline
private def trySplitHack(pat: String, flags: Int) = {
val m = splitHackPat.exec(pat)
if (m != null)
Some((quote(m(1).get), flags))
else
None
}
@inline
private def tryFlagHack(pat: String, flags0: Int) = {
val m = flagHackPat.exec(pat)
if (m != null) {
val newPat = pat.substring(m(0).get.length) // cut off the flag specifiers
val flags1 = m(1).fold(flags0) { chars =>
chars.foldLeft(flags0) { (f, c) => f | charToFlag(c) }
}
val flags2 = m(2).fold(flags1) { chars =>
chars.foldLeft(flags1) { (f, c) => f & ~charToFlag(c) }
}
Some((newPat, flags2))
} else
None
}
private def charToFlag(c: Char) = (c: @switch) match {
case 'i' => CASE_INSENSITIVE
case 'd' => UNIX_LINES
case 'm' => MULTILINE
case 's' => DOTALL
case 'u' => UNICODE_CASE
case 'x' => COMMENTS
case 'U' => UNICODE_CHARACTER_CLASS
case _ => sys.error("bad in-pattern flag")
}
  /** matches \Q<char>\E to support StringLike.split */
  private val splitHackPat = new js.RegExp("^\\\\Q(.|\\n|\\r)\\\\E$")
/** regex to match flag specifiers in regex. E.g. (?u), (?-i), (?U-i) */
private val flagHackPat =
new js.RegExp("^\\\\(\\\\?([idmsuxU]*)(?:-([idmsuxU]*))?\\\\)")
}
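// Illustrative usage sketch (not part of the original source): the public API mirrors
// java.util.regex on top of js.RegExp, e.g.
//   Pattern.compile(",").split("a,b,c")   // Array("a", "b", "c")
//   Pattern.matches("a+b", "aaab")        // true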
jmnarloch/scala-js | javalib/src/main/scala/java/util/regex/Pattern.scala | Scala | bsd-3-clause | 4,914
/*
* This file is part of Apparat.
*
* Copyright (C) 2010 Joa Ebert
* http://www.joa-ebert.com/
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
package apparat.tools.asmifier
import apparat.tools.{ApparatConfiguration, ApparatConfigurationFactory}
import java.io.{File => JFile}
/**
* @author Joa Ebert
*/
object ASMifierConfigurationFactory extends ApparatConfigurationFactory[ASMifierConfiguration] {
override def fromConfiguration(config: ApparatConfiguration): ASMifierConfiguration = {
val input = config("-i") map { path => new JFile(path) } getOrElse error("Input is required.")
val output = config("-o") map { path => new JFile(path) }
if(!input.exists) {
error("Input "+input+" does not exist.")
}
if(output.isDefined) {
val outputDir = output.get
if(outputDir.exists) {
if(!outputDir.isDirectory) {
error("Output must point to a directory.")
}
} else {
output.get.mkdirs()
}
}
new ASMifierConfigurationImpl(input, output)
}
}
|
joa/apparat
|
apparat-core/src/main/scala/apparat/tools/asmifier/ASMifierConfigurationFactory.scala
|
Scala
|
lgpl-2.1
| 1,696
|
package com.seanshubin.todo.sample.server
object ConsoleApplication extends App with LauncherWiring {
lazy val commandLineArguments = args.toSeq
launcher.launch()
}
|
SeanShubin/javascript-todo-samples
|
server/src/main/scala/com/seanshubin/todo/sample/server/ConsoleApplication.scala
|
Scala
|
unlicense
| 170
|
package net.warpgame.engine.core.context.loader
import java.lang.invoke.MethodHandle
import org.scalamock.scalatest.MockFactory
import org.scalatest.matchers.{MatchResult, Matcher}
import org.scalatest.{Matchers, WordSpecLike}
import net.warpgame.engine.core.context.loader.ServiceGraphBuilderSpec.{GraphMatchers, ServiceBuilders}
import net.warpgame.engine.core.context.loader.service.{DependencyInfo, ServiceGraphBuilder, ServiceInfo}
import scala.reflect.ClassTag
import ServiceGraphBuilderSpec._
import net.warpgame.engine.core.context.loader.service.ServiceGraphBuilder.AmbiguousServiceDependencyException
import net.warpgame.engine.core.graph.{CycleFoundException, DAG}
/**
* @author Jaca777
* Created 2017-09-04 at 10
*/
class ServiceGraphBuilderSpec extends WordSpecLike with Matchers with MockFactory with GraphMatchers with ServiceBuilders {
"ServiceGraphBuilder" should {
"build 1-elem service graph" in {
//given
val graphBuilder = new ServiceGraphBuilder
val services = List(service[A]())
//when
val graph = graphBuilder.build(services)
//then
val rootNodes = graph.rootNodes
rootNodes.size should be(1)
rootNodes.map(_.value).head should be(service[A]())
}
"build graph with dependent nodes" in {
//given
val graphBuilder = new ServiceGraphBuilder
val services = List(
service[A](),
service[B](dependencies = List(dep[A]())),
service[C](dependencies = List(dep[A](), dep[B]()))
)
//when
val graph = graphBuilder.build(services)
//then
val rootNodes = graph.rootNodes
rootNodes.size should be(1)
rootNodes.map(_.value).head.`type` should be(classOf[C])
graph should containDependency[B -> A]
graph should containDependency[C -> A]
}
"use qualifiers to resolve services" in {
//given
val graphBuilder = new ServiceGraphBuilder
val services = List(
service[B](dependencies = List(dep[A](qualifier = Some("test")))),
service[D](qualifier = Some("test")),
service[E](qualifier = None)
)
//when
val graph = graphBuilder.build(services)
//then
graph should containDependency[(B -> D)]
}
"throw exception when a cyclic dependency is present" in {
//given
val graphBuilder = new ServiceGraphBuilder
val services = List(
service[A](dependencies = List(dep[C]())),
service[B](dependencies = List(dep[A]())),
service[C](dependencies = List(dep[B]()))
)
//then
intercept[CycleFoundException[ServiceInfo]] {
graphBuilder.build(services)
}
}
"throw exception when a dependency is ambiguous" in {
//given
val graphBuilder = new ServiceGraphBuilder
val services = List(
service[A](dependencies = List(dep[A]())),
service[D](),
service[E]()
)
//then
intercept[AmbiguousServiceDependencyException] {
graphBuilder.build(services)
}
}
}
}
object ServiceGraphBuilderSpec {
class A
class B
class C
class D extends A
class E extends A
trait GraphMatchers {
class FileEndsWithExtensionMatcher(from: Class[_], to: Class[_]) extends Matcher[DAG[ServiceInfo]] {
def apply(graph: DAG[ServiceInfo]) = {
val node = graph.resolveNode(_.`type` == from)
val matches = node match {
case Some(b) => b.leaves.exists(_.value.`type` == to)
case None => false
}
MatchResult(
matches,
s"""Graph did not contain dependency from ${from.getName} to ${to.getName}""",
s"""Graph contains dependency from ${from.getName} to ${to.getName}"""
)
}
}
trait DependencyLike[T] {
def from: Class[_]
def to: Class[_]
}
case class DependencyLikeApply[T](from: Class[_], to: Class[_]) extends DependencyLike[T]
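// `->` is a phantom type: `containDependency[B -> A]` reads as "the graph contains an edge from
// service B to its dependency A"; the implicit below materialises the two classes at runtime.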
type ->[A, B]
implicit def arrowDependencyLike[A: ClassTag, B: ClassTag, ->[A, B]]: DependencyLike[A -> B] = {
DependencyLikeApply[A -> B](
implicitly[ClassTag[A]].runtimeClass,
implicitly[ClassTag[B]].runtimeClass
)
}
def containDependency[C: DependencyLike] = {
val dependency = implicitly[DependencyLike[C]]
new FileEndsWithExtensionMatcher(
dependency.from,
dependency.to
)
}
}
trait ServiceBuilders {
def service[T: ClassTag](
qualifier: Option[String] = None,
builder: MethodHandle = null,
dependencies: List[DependencyInfo] = List.empty
): ServiceInfo = {
ServiceInfo(implicitly[ClassTag[T]].runtimeClass, qualifier, builder, dependencies)
}
def dep[T: ClassTag](qualifier: Option[String] = None): DependencyInfo = {
DependencyInfo(implicitly[ClassTag[T]].runtimeClass, qualifier)
}
}
}
|
WarpOrganization/warp
|
core/src/test/scala/net/warpgame/engine/core/context/loader/ServiceGraphBuilderSpec.scala
|
Scala
|
lgpl-3.0
| 5,008
|
package io.argos.agent.sentinels
import java.util
import javax.management.Notification
import io.argos.agent.{Constants, SentinelConfiguration}
import io.argos.agent.bean.{CheckMetrics, JmxNotification}
import io.argos.agent.util.{CassandraVersion, CommonLoggerFactory, HostnameProvider}
import CommonLoggerFactory._
import org.apache.cassandra.streaming.{StreamEvent, StreamManagerMBean}
import scala.collection.JavaConverters._
/**
* Created by eric on 24/02/16.
*
* This class adds a notification listener to the JMX MBeanServer in order to follow the progress of some actions (like repair).
*
*/
class InternalNotificationsSentinel(override val conf: SentinelConfiguration) extends Sentinel {
// TODO use the ProgressEventType enum once v2.1 is deprecated
val ERROR_STATUS = 2
val ABORT_STATUS = 3
this.context.system.eventStream.subscribe(this.self, classOf[JmxNotification])
override def processProtocolElement: Receive = {
case CheckMetrics() => {} // nothing to do on checkMetrics
case JmxNotification(notification) if notification.getSource().equals(StreamManagerMBean.OBJECT_NAME) =>
manageSteamNotification(notification)
case JmxNotification(notification) if notification.getSource().toString.startsWith("bootstrap") =>
manageNotificationWithHashMap(notification)
case JmxNotification(notification) if notification.getSource().toString.startsWith("repair") =>
manageRepairNotification(notification)
case JmxNotification(unknownNotif) => log.debug("Unknown JMXNotification : source={}, type={}", unknownNotif.getSource, unknownNotif.getType);
}
private def manageSteamNotification(notification: Notification): Unit = {
if ((classOf[StreamEvent].getCanonicalName + ".failure").equals(notification.getType())) {
val message =
s"""Stream process failed on Cassandra Node ${HostnameProvider.hostname}.
|
|stacktrace : ${notification.getUserData}
|
|notification : ${notification}
|
|""".stripMargin
context.system.eventStream.publish(buildNotification(message))
}
}
private def manageRepairNotification(notification: Notification): Unit = {
if (notification.getUserData.isInstanceOf[util.HashMap[String, Int]]) {
manageNotificationWithHashMap(notification);
} else if (notification.getUserData.isInstanceOf[Array[Int]]) {
// array of integer (LegacyJmxNotification)
val data = notification.getUserData.asInstanceOf[Array[Int]]
if (commonLogger.isDebugEnabled) {
commonLogger.debug(this, "Receive JMX notification=<{}> with userData = <{}>", notification.getType, data.toString())
}
val status: Int = data(1)
if (hasFailed(status)) {
val msg = if (status == ERROR_STATUS) s"${notification.getSource} has failed "
else if (status == ABORT_STATUS && CassandraVersion.version > 2.1) s"${notification.getSource} was aborted "
val action = notification.getSource
val message =
s"""${msg} for Cassandra Node ${HostnameProvider.hostname}
|
|action : ${action}
|notification : ${notification}
|
|""".stripMargin
context.system.eventStream.publish(buildNotification(conf.messageHeader.map(h => h + " \n\n--####--\n\n" + message).getOrElse(message)))
}
}
}
private def hasFailed(status: Int) : Boolean = (status == ERROR_STATUS || (status == ABORT_STATUS && CassandraVersion.version > 2.1))
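// Expects the userData HashMap documented in the comment at the end of this file:
// keys "total", "progressCount" and "type" (the numeric ProgressEventType value).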
private def manageNotificationWithHashMap(notification: Notification): Unit = {
val data = notification.getUserData.asInstanceOf[util.HashMap[String, Int]].asScala
if (commonLogger.isDebugEnabled) {
commonLogger.debug(this, "Receive JMX notification=<{}> with userData = <{}>", notification.getType, data.toString())
}
val status = data("type")
if (hasFailed(status)) {
val msg = if (status == ERROR_STATUS) s"${notification.getSource} has failed "
else if (status == ABORT_STATUS && CassandraVersion.version > 2.1) s"${notification.getSource} was aborted "
val percent = 100 * data("progressCount") / data("total")
val action = notification.getSource
val message =
s"""${msg} for Cassandra Node ${HostnameProvider.hostname}.
|
|action : ${action}
|progress : ${percent}%
|notification : ${notification}
|
|""".stripMargin
context.system.eventStream.publish(buildNotification(conf.messageHeader.map(h => h + " \n\n--####--\n\n" + message).getOrElse(message)))
}
}
}
/*
Here is the list of notifications for a repair command.
"data" is a Java HashMap with:
- total : the total number of progress steps
- progressCount : the current progress count
- type : the numerical value of the ProgressEventType enum (org.apache.cassandra.utils.progress.ProgressEventType)
"type" : the type of notification
Notifs : javax.management.Notification[source=repair:8][type=progress][message=Starting repair command #8, repairing keyspace excelsior with repair options (parallelism: parallel, primary range: false, incremental: true, job threads: 1, ColumnFamilies: [], dataCenters: [], hosts: [], # of ranges: 3)]
.msg : Starting repair command #8, repairing keyspace excelsior with repair options (parallelism: parallel, primary range: false, incremental: true, job threads: 1, ColumnFamilies: [], dataCenters: [], hosts: [], # of ranges: 3)
.snum : 40
.time : 1456261572578
.type : progress
.data : {progressCount=0, total=100, type=0}
Notifs : javax.management.Notification[source=repair:8][type=progress][message=Repair session 44d8d9d0-da71-11e5-bfcc-8f82886fa28e for range (3074457345618258602,-9223372036854775808] finished]
.msg : Repair session 44d8d9d0-da71-11e5-bfcc-8f82886fa28e for range (3074457345618258602,-9223372036854775808] finished
.snum : 41
.time : 1456261572607
.type : progress
.data : {progressCount=4, total=6, type=1}
Notifs : javax.management.Notification[source=repair:8][type=progress][message=Repair session 44d927f1-da71-11e5-bfcc-8f82886fa28e for range (-9223372036854775808,-3074457345618258603] finished]
.msg : Repair session 44d927f1-da71-11e5-bfcc-8f82886fa28e for range (-9223372036854775808,-3074457345618258603] finished
.snum : 42
.time : 1456261572617
.type : progress
.data : {progressCount=5, total=6, type=1}
Notifs : javax.management.Notification[source=repair:8][type=progress][message=Repair session 44da3960-da71-11e5-bfcc-8f82886fa28e for range (-3074457345618258603,3074457345618258602] finished]
.msg : Repair session 44da3960-da71-11e5-bfcc-8f82886fa28e for range (-3074457345618258603,3074457345618258602] finished
.snum : 43
.time : 1456261572639
.type : progress
.data : {progressCount=6, total=6, type=1}
Notifs : javax.management.Notification[source=repair:8][type=progress][message=Repair completed successfully]
.msg : Repair completed successfully
.snum : 44
.time : 1456261572644
.type : progress
.data : {progressCount=6, total=6, type=4}
Notifs : javax.management.Notification[source=repair:8][type=progress][message=Repair command #8 finished in 0 seconds]
.msg : Repair command #8 finished in 0 seconds
.snum : 45
.time : 1456261572644
.type : progress
.data : {progressCount=6, total=6, type=5}
*/
|
leleueri/argos
|
argos-agent/src/main/scala/io/argos/agent/sentinels/InternalNotificationsSentinel.scala
|
Scala
|
apache-2.0
| 7,474
|
package org.typedsolutions.aws.kinesis.model
case class CreateStreamRequest(streamName: String, shardCount: Int) extends Command
|
mattroberts297/akka-kinesis
|
src/main/scala/org/typedsolutions/aws/kinesis/model/CreateStreamRequest.scala
|
Scala
|
mit
| 130
|
package net.itadinanta.rnkr.core.arbiter
import akka.actor.{ ActorRef, Actor, FSM }
import scala.concurrent.Future
import akka.actor.ActorSystem
import akka.actor.Props
import akka.util.Timeout
import scala.annotation.tailrec
import scala.concurrent.duration._
import scala.concurrent.duration.FiniteDuration._
import akka.pattern.{ ask, pipe }
import akka.actor.PoisonPill
import scala.reflect.ClassTag
import akka.actor.ActorContext
import akka.actor.ActorRefFactory
import grizzled.slf4j.Logging
import scala.language.postfixOps
object Arbiter {
def apply[T](t: T, name: String)(implicit context: ActorRefFactory) = new ActorArbiter(t, name)
}
trait Arbiter[T] {
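// wqueue schedules an exclusive (write) operation on the wrapped target and rqueue a shared
// (read) operation; both return a Future completed with the operation's result (see Gate below).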
def wqueue[R](f: T => R)(implicit t: ClassTag[R]): Future[R]
def rqueue[R](f: T => R)(implicit t: ClassTag[R]): Future[R]
def shutdown()
}
object Gate {
def props[T](target: T) = Props(new Gate(target))
sealed trait Response
case object ReadResponse extends Response
case object WriteResponse extends Response
sealed trait Request[T, R] { val f: T => R; val replyTo: Option[ActorRef] }
case class ReadRequest[T, R](f: T => R, replyTo: Option[ActorRef] = None) extends Request[T, R]
case class WriteRequest[T, R](f: T => R, replyTo: Option[ActorRef] = None) extends Request[T, R]
}
class Gate[T](val target: T) extends Actor with Logging {
implicit lazy val executionContext = context.system.dispatcher
import Gate._
import scala.collection.immutable.Queue
case class State(val rc: Int, val wc: Int, val q: Queue[Request[T, _]])
var state = State(0, 0, Queue[Request[T, _]]())
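// Reads may run concurrently while no write is in flight (wc == 0); a write needs exclusive
// access (wc == 0 && rc == 0) and blocks further dispatch until its WriteResponse arrives.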
@tailrec private def flush(s: State): State = if (s.q.isEmpty) s else
s.q.dequeue match {
case (ReadRequest(f, Some(replyTo)), tail) if (s.wc == 0) => {
Future { replyTo ! f(target); ReadResponse } pipeTo self
flush(State(s.rc + 1, s.wc, tail))
}
case (WriteRequest(f, Some(replyTo)), tail) if (s.wc == 0 && s.rc == 0) => {
Future { replyTo ! f(target); WriteResponse } pipeTo self
State(s.rc, s.wc + 1, tail)
}
case _ => s
}
def next(s: State) { state = flush(s) }
def receive() = {
case r: ReadRequest[T, _] if r.replyTo == None => next(state.copy(q = state.q.enqueue(ReadRequest(r.f, Some(sender)))))
case w: WriteRequest[T, _] if w.replyTo == None => next(state.copy(q = state.q.enqueue(WriteRequest(w.f, Some(sender)))))
case ReadResponse => next(state.copy(rc = state.rc - 1))
case WriteResponse => next(state.copy(wc = state.wc - 1))
}
}
trait GateWrapper[T] extends Arbiter[T] {
val gate: ActorRef
implicit val timeout = Timeout(1 day)
import Gate._
override def wqueue[R](f: T => R)(implicit t: ClassTag[R]): Future[R] = ask(gate, WriteRequest(f)).mapTo[R]
override def rqueue[R](f: T => R)(implicit t: ClassTag[R]): Future[R] = ask(gate, ReadRequest(f)).mapTo[R]
override def shutdown() { gate ! PoisonPill }
}
class ActorGateWrapper[T](override val gate: ActorRef) extends GateWrapper[T]
class ActorArbiter[T](val target: T, name: String)(implicit factory: ActorRefFactory)
extends ActorGateWrapper[T](factory.actorOf(Gate.props(target), name))
|
itadinanta/rnkr
|
rnkr-support/src/main/scala/net/itadinanta/rnkr/core/arbiter/Arbiter.scala
|
Scala
|
gpl-2.0
| 3,083
|
package io.getquill
import io.getquill.Spec
import io.getquill.ast._
import io.getquill.EntityQuery
import io.getquill.testContext.InfixInterpolator
import io.getquill.Query
import io.getquill.quat._
import io.getquill.testContext._
import io.getquill.testContext.qr1
import io.getquill.testContext.query
import io.getquill.testContext.quote
import io.getquill.testContext.unquote
import io.getquill.Quoted
class OpsSpec extends Spec {
"quotes asts" - {
"explicitly" in {
val q = quote {
query[TestEntity]
}
q.ast mustEqual Entity("TestEntity", Nil, TestEntityQuat)
}
"implicitly" in {
val q: Quoted[Query[TestEntity]] =
query[TestEntity]
q.ast mustEqual Entity("TestEntity", Nil, TestEntityQuat)
}
}
"unquotes asts" - {
"explicitly" in {
val q = quote {
unquote(qr1).map(t => t)
}
val quat = TestEntityQuat
q.ast mustEqual Map(Entity("TestEntity", Nil, quat), Ident("t", quat), Ident("t", quat))
}
"implicitly" in {
val q = quote {
qr1.map(t => t)
}
val quat = TestEntityQuat
q.ast mustEqual Map(Entity("TestEntity", Nil, quat), Ident("t", quat), Ident("t", quat))
}
}
"provides the infix interpolator" - {
"boolean values" - {
"with `as`" in {
val q = quote {
infix"true".as[Boolean]
}
q.ast mustEqual Infix(List("true"), Nil, false, Quat.BooleanValue)
}
}
"other values" - {
"with `as`" in {
val q = quote {
infix"1".as[Int]
}
q.ast mustEqual Infix(List("1"), Nil, false, Quat.Value)
}
"without `as`" in {
val q = quote {
infix"1"
}
q.ast mustEqual Infix(List("1"), Nil, false, Quat.Value)
}
}
}
"unquotes duble quotations" in {
val q: Quoted[EntityQuery[TestEntity]] = quote {
quote(query[TestEntity])
}
val n = quote {
query[TestEntity]
}
q.ast mustEqual n.ast
}
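// Test-only extension: appends Cassandra's "ALLOW FILTERING" to a query via an infix quotation;
// used by the quoted-function-body tests below.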
implicit class QueryOps[Q <: Query[_]](q: Q) {
def allowFiltering = quote(infix"$q ALLOW FILTERING".as[Q])
}
"unquotes quoted function bodies automatically" - {
"one param" in {
val q: Quoted[Int => EntityQuery[TestEntity]] = quote {
(i: Int) =>
query[TestEntity].allowFiltering
}
val n = quote {
(i: Int) =>
unquote(query[TestEntity].allowFiltering)
}
q.ast mustEqual n.ast
}
"multiple params" in {
val q: Quoted[(Int, Int, Int) => EntityQuery[TestEntity]] = quote {
(i: Int, j: Int, k: Int) =>
query[TestEntity].allowFiltering
}
val n = quote {
(i: Int, j: Int, k: Int) =>
unquote(query[TestEntity].allowFiltering)
}
q.ast mustEqual n.ast
}
}
}
|
getquill/quill
|
quill-core/src/test/scala/io/getquill/OpsSpec.scala
|
Scala
|
apache-2.0
| 2,827
|
package io.apibuilder.validation
import io.apibuilder.spec.v0.models.Method
import io.apibuilder.validation.helpers.Helpers
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpec
class ApiBuilderServiceSpec extends AnyFunSpec with Matchers with Helpers {
private[this] lazy val service = loadService("flow-api-service.json")
it("fromUrl") {
ApiBuilderService.fromUrl("file://non-existent-tmp").left.getOrElse {
sys.error("Expected error from invalid url")
}
rightOrErrors {
ApiBuilderService.toService(readFile("apibuilder-common-service.json"))
}.service.name should be("apibuilder common")
}
it("operation") {
service.findOperation(Method.Post, "/foo") should be(None)
val op = service.findOperation(Method.Post, "/users").get
op.method should equal(Method.Post)
op.path should equal("/users")
op.parameters should be(Nil)
service.findOperation(Method.Get, "/users").get.parameters.map(_.name) should be(
Seq("id", "email", "status", "limit", "offset", "sort")
)
}
it("findType can resolve a scalar") {
service.findType("string").get should equal(ScalarType.StringType)
service.findType("STRING").get should equal(ScalarType.StringType)
ScalarType.all.forall { t =>
service.findType(t.name).isDefined
} should be(true)
}
}
|
flowcommerce/lib-apidoc-json-validation
|
src/test/scala/io/apibuilder/validation/ApiBuilderServiceSpec.scala
|
Scala
|
mit
| 1,363
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.nn.abstractnn.AbstractModule
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.{T, Table}
import scala.reflect.ClassTag
/**
* Module to perform matrix multiplication on two mini-batch inputs,
* producing a mini-batch.
*
* @param transA whether or not to transpose the first input matrix
* @param transB whether or not to transpose the second input matrix
*/
@SerialVersionUID(8315388141765786231L)
class MM[T: ClassTag](
val transA: Boolean = false,
val transB: Boolean = false)
(implicit ev: TensorNumeric[T]) extends AbstractModule[Table, Tensor[T], T] {
gradInput = T(Tensor[T], Tensor[T]())
private def checkInputFormat(input: Table): (Tensor[T], Tensor[T]) = {
require(input.length() == 2 && input(1).isInstanceOf[Tensor[T]] &&
input(2).isInstanceOf[Tensor[T]], "Input must be two tensors")
val m1: Tensor[T] = input(1)
val m2: Tensor[T] = input(2)
require(m1.dim() == 2 || m1.dim() == 3, "input matrix must be 2D or 3D" +
s"input dim ${m1.dim()}")
require(m2.dim() == 2 || m2.dim() == 3, "input matrix must be 2D or 3D" +
s"input dim ${m2.dim()}")
(m1, m2)
}
override def updateOutput(input: Table): Tensor[T] = {
var (ma, mb) = checkInputFormat(input)
if (ma.dim() == 2) {
require(mb.dim() == 2, "second input tensor must be 2D" +
s"second input dim ${mb.dim()}")
if (transA) {
ma = ma.t()
}
if (transB) {
mb = mb.t()
}
require(ma.size(2) == mb.size(1), "matrix sizes do not match" +
s"The sizes are ${ma.size(2)} and ${mb.size(1)}")
output.resize(ma.size(1), mb.size(2))
output.mm(ma, mb)
} else {
require(mb.dim() == 3, "second input tensor must be 3D" +
s"second input dim ${mb.dim()}")
require(ma.size(1) == mb.size(1), "inputs must contain the same number of minibatches. " +
s"The minibatches of each are ${ma.size(1)} and ${mb.size(1)}")
if (transA) {
ma = ma.transpose(2, 3)
}
if (transB) {
mb = mb.transpose(2, 3)
}
require(ma.size(3) == mb.size(2), "matrix sizes do not match" +
s"the matrix sizes are ${ma.size(3)} and ${mb.size(2)}")
output.resize(ma.size(1), ma.size(2), mb.size(3))
output.bmm(ma, mb)
}
output
}
override def updateGradInput(input: Table, gradOutput: Tensor[T]): Table = {
var (ma, mb) = checkInputFormat(input)
gradInput[Tensor[T]](1).resizeAs(ma)
gradInput[Tensor[T]](2).resizeAs(mb)
require(gradOutput.dim() == 2 || gradOutput.dim() == 3,
"arguments must be a 2D or 3D Tensor" +
s"arguments dim ${gradOutput.dim()}")
val (hDim, wDim, f): (Int, Int, Tensor[T] => Tensor[T] => Tensor[T] => Tensor[T]) =
if (gradOutput.dim() == 2) {
require(ma.dim() == 2, "first input tensor must be 2D" +
s"first input dim ${ma.dim()}")
require(mb.dim() == 2, "second input tensor must be 2D" +
s"second input dim ${mb.dim()}")
(1, 2, t => m1 => m2 => t.mm(m1, m2))
} else {
require(ma.dim() == 3, "first input tensor must be 3D" +
s"first input dim ${ma.dim()}")
require(mb.dim() == 3, "second input tensor must be 3D" +
s"second input dim ${mb.dim()}")
(2, 3, t => m1 => m2 => t.bmm(m1, m2))
}
if (transA == transB) {
ma = ma.transpose(hDim, wDim)
mb = mb.transpose(hDim, wDim)
}
if (transA) {
f (gradInput[Tensor[T]](1)) (mb) (gradOutput.clone().transpose(hDim, wDim))
} else {
f (gradInput[Tensor[T]](1)) (gradOutput) (mb)
}
if (transB) {
f (gradInput[Tensor[T]](2)) (gradOutput.clone().transpose(hDim, wDim)) (ma)
} else {
f (gradInput[Tensor[T]](2)) (ma) (gradOutput)
}
gradInput
}
override def toString: String = s"MM()"
override def canEqual(other: Any): Boolean = other.isInstanceOf[MM[T]]
override def equals(other: Any): Boolean = other match {
case that: MM[T] =>
super.equals(that) &&
(that canEqual this) &&
transA == that.transA &&
transB == that.transB
case _ => false
}
override def hashCode(): Int = {
val state = Seq(super.hashCode(), transA, transB)
state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
}
override def clearState(): MM.this.type = {
super.clearState()
gradInput[Tensor[T]](1).set()
gradInput[Tensor[T]](2).set()
this
}
}
object MM {
def apply[@specialized(Float, Double) T: ClassTag](
transA: Boolean = false,
transB: Boolean = false)(implicit ev: TensorNumeric[T]) : MM[T] = {
new MM[T](transA, transB)
}
}
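// A minimal usage sketch, assuming the conventional BigDL numeric implicit for Float
// (com.intel.analytics.bigdl.numeric.NumericFloat) is available:
private object MMUsageSketch {
import com.intel.analytics.bigdl.numeric.NumericFloat
def example(): Tensor[Float] = {
val mm = MM[Float]()
// (2 x 3) x (3 x 4) => 2 x 4, per the shape checks in updateOutput above
mm.forward(T(Tensor[Float](2, 3).rand(), Tensor[Float](3, 4).rand()))
}
}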
|
jenniew/BigDL
|
spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/MM.scala
|
Scala
|
apache-2.0
| 5,469
|
import language.`3.0`
import language.`3.0` // error
class C:
import language.`3.0-migration` // error
|
som-snytt/dotty
|
tests/neg/source-import.scala
|
Scala
|
apache-2.0
| 107
|
import org.scalatest.{Matchers, FlatSpec}
import com.ntk.euler.utils._
/**
* Created by kypreos on 7/5/15.
*/
class stampySpec extends FlatSpec with Matchers {
"stampy" should "just freakin be" in {
val st = new stampy
st.gwar() should equal(s"stampy")
}
}
|
doomsuckle/ntk
|
src/test/scala/com/ntk/euler/stampySpec.scala
|
Scala
|
mit
| 273
|
package com.example
import org.apache.spark._
import org.apache.spark.SparkContext._
object Hello {
def main(args: Array[String]): Unit = {
val conf = new SparkConf().setMaster("local").setAppName("cc-nsm")
val sc = new SparkContext(conf)
val file = sc.textFile("/usr/local/bro/logs/current/conn.log")
println(file.count())
sc.stop()
println("Hello, world!")
}
}
|
classcat/cc-nsm2-core
|
src/main/scala/com/example/Hello.scala
|
Scala
|
gpl-3.0
| 397
|
package org.jetbrains.plugins.scala
package lang
package structureView
package itemsPresentations
package impl
import psi.api.ScalaFile
/**
* @author Alexander Podkhalyuzin
* Date: 04.05.2008
*/
class ScalaFileItemPresentation(private val element: ScalaFile) extends ScalaItemPresentation(element) {
def getPresentableText: String = {
ScalaElementPresentation.getFilePresentableText(myElement.asInstanceOf[ScalaFile])
}
}
|
consulo/consulo-scala
|
src/org/jetbrains/plugins/scala/lang/structureView/itemsPresentations/impl/ScalaFileItemPresentation.scala
|
Scala
|
apache-2.0
| 432
|
import org.sbuild._
import org.sbuild.ant._
import org.sbuild.ant.tasks._
@version("0.7.9010")
@include("FeatureBuilder.scala")
@classpath("mvn:org.apache.ant:ant:1.8.4",
"http://sbuild.org/uploads/sbuild/0.7.9010.0-8-0-M1/org.sbuild.ant-0.7.9010.0-8-0-M1.jar")
class SBuild(implicit _project: Project) {
val namespace = "de.tototec.sbuild.eclipse.plugin"
val version = "0.4.3.9000-" + java.text.MessageFormat.format("{0,date,yyyy-MM-dd-HH-mm-ss}", new java.util.Date())
// val version = "0.4.3"
val featureXml = "target/feature/feature.xml"
val featureProperties = "target/feature/feature.properties"
val featureJar = s"target/${namespace}.feature_${version}.jar"
val pluginModules = Modules("../org.sbuild.eclipse.resolver",
"../org.sbuild.eclipse.resolver.sbuild07",
"../org.sbuild.eclipse.resolver.sbuild08",
"../de.tototec.sbuild.eclipse.plugin"
)
val siteJars = pluginModules.map(_.targetRef("jar-main"))
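// Splits a bundle jar name into (symbolic name, version), e.g.
// "org.sbuild.eclipse.resolver_0.4.3.jar" -> ("org.sbuild.eclipse.resolver", "0.4.3").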
def extractNameVersion(jar: java.io.File): (String, String) = {
val parts = jar.getName.split("[-_]", 2)
val name = parts(0)
// version part minus the .jar suffix
val version = parts(1).substring(0, parts(1).length - 4)
(name, version)
}
val scalaLibBundleId = "org.scala-ide.scala.library"
val scalaLibBundleVersion = "2.10.1.v20130302-092018-VFINAL-33e32179fd"
val scalaLibBundleName = s"${scalaLibBundleId}_${scalaLibBundleVersion}.jar"
val scalaLibBundle = s"http://download.scala-ide.org/sdk/e37/scala210/stable/site/plugins/${scalaLibBundleName}"
val scalaLibFeatureXml = "target/scala-feature/feature.xml"
val scalaLibFeatureJar = s"target/${namespace}.scala-library.feature_${scalaLibBundleVersion}.jar"
val updateSiteZip = s"target/${namespace}-update-site-${version}.zip"
val scalaVersion = "2.10.1"
Target("phony:all") dependsOn "update-site" ~ updateSiteZip
Target("phony:clean").evictCache exec {
AntDelete(dir = Path("target"))
}
Target("phony:deep-clean") dependsOn "clean" ~ pluginModules.map(_.targetRef("clean"))
Target(featureProperties) exec { ctx: TargetContext =>
// Eclipse Update assumes the first line is the title, so drop leading empty lines
val license = io.Source.fromFile(Path("LICENSE.txt")).getLines.dropWhile(l => l.trim.isEmpty)
val props = new java.util.Properties()
props.put("description", "Eclipse Integration for SBuild Buildsystem.")
props.put("license", license.mkString("\\n"))
ctx.targetFile.get.getParentFile.mkdirs
props.store(new java.io.FileWriter(ctx.targetFile.get), null)
}
Target(featureXml) dependsOn siteJars exec { ctx: TargetContext =>
val updateSiteUrl = "http://sbuild.tototec.de/svn/eclipse-update-site/stable"
val featureXml = FeatureBuilder.createFeatureXml(
id = s"${namespace}.feature",
version = version,
label = "SBuild Eclipse Plugin Feature",
providerName = "ToToTec GbR",
brandingPlugin = namespace,
license = "%license",
licenseUrl = "http://www.apache.org/licenses/LICENSE-2.0",
copyright = "Copyright © 2012, 2013, ToToTec GbR, Tobias Roeser",
description = "%description",
descriptionUrl = "http://sbuild.tototec.de/sbuild/projects/sbuild/wiki/SBuildEclipsePlugin",
featureUrls = Seq(
FeatureUrl(kind = "update", label = "SBuild Eclipse Update Site", url = updateSiteUrl),
FeatureUrl(kind = "discovery", label = "SBuild Eclipse Update Site", url = updateSiteUrl)),
requirements = Seq(
Requirement(plugin = "org.eclipse.core.runtime", version = "3.4.0", versionMatch = "compatible"),
Requirement(plugin = "org.eclipse.jdt.core", version = "3.4.0", versionMatch = "compatible"),
Requirement(plugin = "org.eclipse.jdt.ui", version = "3.4.0", versionMatch = "compatible"),
Requirement(plugin = "org.eclipse.equinox.preferences", version = "3.2.100", versionMatch = "compatible"),
Requirement(plugin = "org.eclipse.core.resources", version = "3.3.0", versionMatch = "compatible"),
Requirement(plugin = "org.eclipse.jface", version = "3.4.0", versionMatch = "compatible"),
Requirement(plugin = "org.scala-ide.scala.library", version = "2.10.0", versionMatch = "compatible")),
plugins = siteJars.files.map { jar =>
val (name, version) = extractNameVersion(jar)
Plugin(id = name, version = version, file = jar)
},
featureFileHeader = """ Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
""")
AntEcho(message = featureXml, file = ctx.targetFile.get)
}
Target(scalaLibFeatureXml) dependsOn scalaLibBundle exec { ctx: TargetContext =>
val scalaLibBundle = this.scalaLibBundle.files.head
val updateSiteUrl = "http://sbuild.tototec.de/svn/eclipse-update-site/stable"
val featureXml = FeatureBuilder.createFeatureXml(
id = s"${namespace}.scala-library.feature",
version = scalaLibBundleVersion,
label = "Scala Library for SBuild Eclipse Plugin Feature",
providerName = "ToToTec GbR",
brandingPlugin = scalaLibBundleId,
license = """ SCALA LICENSE
Copyright (c) 2002-2010 EPFL, Lausanne, unless otherwise specified.
All rights reserved.
This software was developed by the Programming Methods Laboratory of the
Swiss Federal Institute of Technology (EPFL), Lausanne, Switzerland.
Permission to use, copy, modify, and distribute this software in source
or binary form for any purpose with or without fee is hereby granted,
provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the EPFL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
""",
licenseUrl = "http://scala-lang.org/downloads/license.html",
copyright = "Copyright © 2012, 2013, ToToTec GbR, Tobias Roeser",
description = "Scala Library",
descriptionUrl = "http://sbuild.tototec.de/sbuild/projects/sbuild/wiki/SBuildEclipsePlugin",
featureUrls = Seq(
FeatureUrl(kind = "update", label = "SBuild Eclipse Update Site", url = updateSiteUrl),
FeatureUrl(kind = "discovery", label = "SBuild Eclipse Update Site", url = updateSiteUrl)),
plugins = Seq(
Plugin(id = scalaLibBundleId, version = scalaLibBundleVersion, file = scalaLibBundle)))
AntEcho(message = featureXml, file = ctx.targetFile.get)
}
Target(featureJar) dependsOn featureXml ~ featureProperties exec { ctx: TargetContext =>
AntJar(destFile = ctx.targetFile.get, baseDir = Path("target/feature"))
}
Target(scalaLibFeatureJar) dependsOn scalaLibFeatureXml exec { ctx: TargetContext =>
AntJar(destFile = ctx.targetFile.get, baseDir = Path("target/scala-feature"))
}
Target("phony:update-site") dependsOn featureJar ~ scalaLibFeatureJar ~ siteJars ~ scalaLibBundle exec { ctx: TargetContext =>
val scalaLibBundle = this.scalaLibBundle.files.head
AntDelete(dir = Path("target/update-site"))
AntMkdir(dir = Path("target/update-site/features"))
AntMkdir(dir = Path("target/update-site/plugins"))
AntCopy(file = Path(featureJar), toDir = Path("target/update-site/features"))
AntCopy(file = Path(scalaLibFeatureJar), toDir = Path("target/update-site/features"))
siteJars.files.map { jar =>
val (name, version) = extractNameVersion(jar)
AntCopy(file = jar, toFile = Path(s"target/update-site/plugins/${name}_${version}.jar"))
}
AntCopy(file = scalaLibBundle, toDir = Path("target/update-site/plugins"))
val siteXml = s"""<?xml version="1.0" encoding="UTF-8"?>
<site>
<description>Update-Site for SBuild Eclipse Plugin.</description>
<feature
url="features/${namespace}.feature_${version}.jar"
id="${namespace}.feature"
version="${version}">
<category name="SBuild"/>
</feature>
<feature
url="features/${namespace}.scala-library.feature_${scalaLibBundleVersion}.jar"
id="${namespace}.scala-library.feature"
version="${scalaLibBundleVersion}">
<category name="Scala"/>
</feature>
<category-def name="SBuild" label="SBuild Eclipse Plugin" />
<category-def name="Scala" label="Scala Runtime" />
</site>"""
AntEcho(message = siteXml, file = Path("target/update-site/site.xml"))
}
Target(updateSiteZip) dependsOn "update-site" exec { ctx: TargetContext =>
AntZip(destFile = ctx.targetFile.get, baseDir = Path("target"), includes = "update-site/**")
}
}
|
SBuild-org/sbuild-eclipse-plugin
|
update-site/SBuild.scala
|
Scala
|
apache-2.0
| 10,310
|
package org.atnos.site
package lib
import java.util.concurrent.Executors
import org.atnos.eff.syntax.all._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
object TaskEffectPage extends UserGuidePage { def is = "Task".title ^ s2"""
The `Task` effect is a thin shim on top of Monix's `Task`. This effect is not bundled in core Eff and requires
the `eff-monix` extension to use.
Now, let's create some `Task` effects:${snippet{
import org.atnos.eff._
import org.atnos.eff.addon.monix.task._
import org.atnos.eff.syntax.addon.monix.task._
import monix.eval.Task
type R = Fx.fx2[Task, Option]
val action: Eff[R, Int] =
for {
// create a value now
a <- Eff.pure[R, Int](1)
// evaluate a value later, on the thread pool specified by a Monix `Scheduler`, and continue when it's finished
b <- taskDelay[R, Int](1)
} yield b
/*p
Then we need to pass a Monix `Scheduler` in to begin the computation.
*/
implicit val scheduler =
monix.execution.Scheduler(ExecutionContext.fromExecutorService(Executors.newScheduledThreadPool(10)): ExecutionContext)
/*p
Monix doesn't natively offer an Await API to block on a Task result.
Instead it advises converting to a Scala `Future` and using `Await.result`.
See https://monix.io/docs/2x/eval/task.html#blocking-for-a-result
*/
import scala.concurrent.Await
Await.result(action.runOption.runAsync.runToFuture, 1 second)
}.eval}
"""
}
|
atnos-org/eff-cats
|
src/test/scala/org/atnos/site/lib/TaskEffectPage.scala
|
Scala
|
mit
| 1,426
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.parser
import java.util.Locale
import javax.xml.bind.DatatypeConverter
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import org.antlr.v4.runtime.{ParserRuleContext, Token}
import org.antlr.v4.runtime.tree.{ParseTree, RuleNode, TerminalNode}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.{FunctionIdentifier, SQLConfHelper, TableIdentifier}
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, FunctionResource, FunctionResourceType}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.{First, Last}
import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.trees.CurrentOrigin
import org.apache.spark.sql.catalyst.util.{CharVarcharUtils, IntervalUtils}
import org.apache.spark.sql.catalyst.util.DateTimeUtils.{getZoneId, stringToDate, stringToTimestamp}
import org.apache.spark.sql.catalyst.util.IntervalUtils.IntervalUnit
import org.apache.spark.sql.connector.catalog.{SupportsNamespaces, TableCatalog}
import org.apache.spark.sql.connector.catalog.TableChange.ColumnPosition
import org.apache.spark.sql.connector.expressions.{ApplyTransform, BucketTransform, DaysTransform, Expression => V2Expression, FieldReference, HoursTransform, IdentityTransform, LiteralValue, MonthsTransform, Transform, YearsTransform}
import org.apache.spark.sql.errors.QueryParsingErrors
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}
import org.apache.spark.util.random.RandomSampler
/**
* The AstBuilder converts an ANTLR4 ParseTree into a catalyst Expression, LogicalPlan or
* TableIdentifier.
*/
class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with SQLConfHelper with Logging {
import ParserUtils._
protected def typedVisit[T](ctx: ParseTree): T = {
ctx.accept(this).asInstanceOf[T]
}
/**
* Override the default behavior for all visit methods. This will only return a non-null result
* when the context has only one child. This is done because there is no generic method to
* combine the results of the context children. In all other cases null is returned.
*/
override def visitChildren(node: RuleNode): AnyRef = {
if (node.getChildCount == 1) {
node.getChild(0).accept(this)
} else {
null
}
}
override def visitSingleStatement(ctx: SingleStatementContext): LogicalPlan = withOrigin(ctx) {
visit(ctx.statement).asInstanceOf[LogicalPlan]
}
override def visitSingleExpression(ctx: SingleExpressionContext): Expression = withOrigin(ctx) {
visitNamedExpression(ctx.namedExpression)
}
override def visitSingleTableIdentifier(
ctx: SingleTableIdentifierContext): TableIdentifier = withOrigin(ctx) {
visitTableIdentifier(ctx.tableIdentifier)
}
override def visitSingleFunctionIdentifier(
ctx: SingleFunctionIdentifierContext): FunctionIdentifier = withOrigin(ctx) {
visitFunctionIdentifier(ctx.functionIdentifier)
}
override def visitSingleMultipartIdentifier(
ctx: SingleMultipartIdentifierContext): Seq[String] = withOrigin(ctx) {
visitMultipartIdentifier(ctx.multipartIdentifier)
}
override def visitSingleDataType(ctx: SingleDataTypeContext): DataType = withOrigin(ctx) {
typedVisit[DataType](ctx.dataType)
}
override def visitSingleTableSchema(ctx: SingleTableSchemaContext): StructType = {
val schema = StructType(visitColTypeList(ctx.colTypeList))
withOrigin(ctx)(schema)
}
/* ********************************************************************************************
* Plan parsing
* ******************************************************************************************** */
protected def plan(tree: ParserRuleContext): LogicalPlan = typedVisit(tree)
/**
* Create a top-level plan with Common Table Expressions.
*/
override def visitQuery(ctx: QueryContext): LogicalPlan = withOrigin(ctx) {
val query = plan(ctx.queryTerm).optionalMap(ctx.queryOrganization)(withQueryResultClauses)
// Apply CTEs
query.optionalMap(ctx.ctes)(withCTE)
}
override def visitDmlStatement(ctx: DmlStatementContext): AnyRef = withOrigin(ctx) {
val dmlStmt = plan(ctx.dmlStatementNoWith)
// Apply CTEs
dmlStmt.optionalMap(ctx.ctes)(withCTE)
}
private def withCTE(ctx: CtesContext, plan: LogicalPlan): LogicalPlan = {
val ctes = ctx.namedQuery.asScala.map { nCtx =>
val namedQuery = visitNamedQuery(nCtx)
(namedQuery.alias, namedQuery)
}
// Check for duplicate names.
val duplicates = ctes.groupBy(_._1).filter(_._2.size > 1).keys
if (duplicates.nonEmpty) {
throw QueryParsingErrors.duplicateCteDefinitionNamesError(
duplicates.mkString("'", "', '", "'"), ctx)
}
With(plan, ctes.toSeq)
}
/**
* Create a logical query plan for a hive-style FROM statement body.
*/
private def withFromStatementBody(
ctx: FromStatementBodyContext, plan: LogicalPlan): LogicalPlan = withOrigin(ctx) {
// two cases for transforms and selects
if (ctx.transformClause != null) {
withTransformQuerySpecification(
ctx,
ctx.transformClause,
ctx.whereClause,
plan
)
} else {
withSelectQuerySpecification(
ctx,
ctx.selectClause,
ctx.lateralView,
ctx.whereClause,
ctx.aggregationClause,
ctx.havingClause,
ctx.windowClause,
plan
)
}
}
override def visitFromStatement(ctx: FromStatementContext): LogicalPlan = withOrigin(ctx) {
val from = visitFromClause(ctx.fromClause)
val selects = ctx.fromStatementBody.asScala.map { body =>
withFromStatementBody(body, from).
// Add organization statements.
optionalMap(body.queryOrganization)(withQueryResultClauses)
}
// If there are multiple SELECT just UNION them together into one query.
if (selects.length == 1) {
selects.head
} else {
Union(selects.toSeq)
}
}
/**
* Create a named logical plan.
*
* This is only used for Common Table Expressions.
*/
override def visitNamedQuery(ctx: NamedQueryContext): SubqueryAlias = withOrigin(ctx) {
val subQuery: LogicalPlan = plan(ctx.query).optionalMap(ctx.columnAliases)(
(columnAliases, plan) =>
UnresolvedSubqueryColumnAliases(visitIdentifierList(columnAliases), plan)
)
SubqueryAlias(ctx.name.getText, subQuery)
}
/**
* Create a logical plan which allows for multiple inserts using one 'from' statement. These
* queries have the following SQL form:
* {{{
* [WITH cte...]?
* FROM src
* [INSERT INTO tbl1 SELECT *]+
* }}}
* For example:
* {{{
* FROM db.tbl1 A
* INSERT INTO dbo.tbl1 SELECT * WHERE A.value = 10 LIMIT 5
* INSERT INTO dbo.tbl2 SELECT * WHERE A.value = 12
* }}}
* This (Hive) feature cannot be combined with set-operators.
*/
override def visitMultiInsertQuery(ctx: MultiInsertQueryContext): LogicalPlan = withOrigin(ctx) {
val from = visitFromClause(ctx.fromClause)
// Build the insert clauses.
val inserts = ctx.multiInsertQueryBody.asScala.map { body =>
withInsertInto(body.insertInto,
withFromStatementBody(body.fromStatementBody, from).
optionalMap(body.fromStatementBody.queryOrganization)(withQueryResultClauses))
}
// If there are multiple INSERTS just UNION them together into one query.
if (inserts.length == 1) {
inserts.head
} else {
Union(inserts.toSeq)
}
}
/**
* Create a logical plan for a regular (single-insert) query.
*/
override def visitSingleInsertQuery(
ctx: SingleInsertQueryContext): LogicalPlan = withOrigin(ctx) {
withInsertInto(
ctx.insertInto(),
plan(ctx.queryTerm).optionalMap(ctx.queryOrganization)(withQueryResultClauses))
}
/**
* Parameters used for writing query to a table:
* (UnresolvedRelation, tableColumnList, partitionKeys, ifPartitionNotExists).
*/
type InsertTableParams = (UnresolvedRelation, Seq[String], Map[String, Option[String]], Boolean)
/**
* Parameters used for writing query to a directory: (isLocal, CatalogStorageFormat, provider).
*/
type InsertDirParams = (Boolean, CatalogStorageFormat, Option[String])
/**
* Add an
* {{{
* INSERT OVERWRITE TABLE tableIdentifier [partitionSpec [IF NOT EXISTS]]? [identifierList]
* INSERT INTO [TABLE] tableIdentifier [partitionSpec] [identifierList]
* INSERT OVERWRITE [LOCAL] DIRECTORY STRING [rowFormat] [createFileFormat]
* INSERT OVERWRITE [LOCAL] DIRECTORY [STRING] tableProvider [OPTIONS tablePropertyList]
* }}}
* operation to logical plan
*/
private def withInsertInto(
ctx: InsertIntoContext,
query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
ctx match {
case table: InsertIntoTableContext =>
val (relation, cols, partition, ifPartitionNotExists) = visitInsertIntoTable(table)
InsertIntoStatement(
relation,
partition,
cols,
query,
overwrite = false,
ifPartitionNotExists)
case table: InsertOverwriteTableContext =>
val (relation, cols, partition, ifPartitionNotExists) = visitInsertOverwriteTable(table)
InsertIntoStatement(
relation,
partition,
cols,
query,
overwrite = true,
ifPartitionNotExists)
case dir: InsertOverwriteDirContext =>
val (isLocal, storage, provider) = visitInsertOverwriteDir(dir)
InsertIntoDir(isLocal, storage, provider, query, overwrite = true)
case hiveDir: InsertOverwriteHiveDirContext =>
val (isLocal, storage, provider) = visitInsertOverwriteHiveDir(hiveDir)
InsertIntoDir(isLocal, storage, provider, query, overwrite = true)
case _ =>
throw QueryParsingErrors.invalidInsertIntoError(ctx)
}
}
/**
* Add an INSERT INTO TABLE operation to the logical plan.
*/
override def visitInsertIntoTable(
ctx: InsertIntoTableContext): InsertTableParams = withOrigin(ctx) {
val cols = Option(ctx.identifierList()).map(visitIdentifierList).getOrElse(Nil)
val partitionKeys = Option(ctx.partitionSpec).map(visitPartitionSpec).getOrElse(Map.empty)
if (ctx.EXISTS != null) {
operationNotAllowed("INSERT INTO ... IF NOT EXISTS", ctx)
}
(createUnresolvedRelation(ctx.multipartIdentifier), cols, partitionKeys, false)
}
/**
* Add an INSERT OVERWRITE TABLE operation to the logical plan.
*/
override def visitInsertOverwriteTable(
ctx: InsertOverwriteTableContext): InsertTableParams = withOrigin(ctx) {
assert(ctx.OVERWRITE() != null)
val cols = Option(ctx.identifierList()).map(visitIdentifierList).getOrElse(Nil)
val partitionKeys = Option(ctx.partitionSpec).map(visitPartitionSpec).getOrElse(Map.empty)
val dynamicPartitionKeys: Map[String, Option[String]] = partitionKeys.filter(_._2.isEmpty)
if (ctx.EXISTS != null && dynamicPartitionKeys.nonEmpty) {
operationNotAllowed("IF NOT EXISTS with dynamic partitions: " +
dynamicPartitionKeys.keys.mkString(", "), ctx)
}
(createUnresolvedRelation(ctx.multipartIdentifier), cols, partitionKeys, ctx.EXISTS() != null)
}
/**
* Write to a directory, returning a [[InsertIntoDir]] logical plan.
*/
override def visitInsertOverwriteDir(
ctx: InsertOverwriteDirContext): InsertDirParams = withOrigin(ctx) {
throw QueryParsingErrors.insertOverwriteDirectoryUnsupportedError(ctx)
}
/**
* Write to a directory, returning a [[InsertIntoDir]] logical plan.
*/
override def visitInsertOverwriteHiveDir(
ctx: InsertOverwriteHiveDirContext): InsertDirParams = withOrigin(ctx) {
throw QueryParsingErrors.insertOverwriteDirectoryUnsupportedError(ctx)
}
private def getTableAliasWithoutColumnAlias(
ctx: TableAliasContext, op: String): Option[String] = {
if (ctx == null) {
None
} else {
val ident = ctx.strictIdentifier()
if (ctx.identifierList() != null) {
throw QueryParsingErrors.columnAliasInOperationNotAllowedError(op, ctx)
}
if (ident != null) Some(ident.getText) else None
}
}
override def visitDeleteFromTable(
ctx: DeleteFromTableContext): LogicalPlan = withOrigin(ctx) {
val table = createUnresolvedRelation(ctx.multipartIdentifier())
val tableAlias = getTableAliasWithoutColumnAlias(ctx.tableAlias(), "DELETE")
val aliasedTable = tableAlias.map(SubqueryAlias(_, table)).getOrElse(table)
val predicate = if (ctx.whereClause() != null) {
Some(expression(ctx.whereClause().booleanExpression()))
} else {
None
}
DeleteFromTable(aliasedTable, predicate)
}
override def visitUpdateTable(ctx: UpdateTableContext): LogicalPlan = withOrigin(ctx) {
val table = createUnresolvedRelation(ctx.multipartIdentifier())
val tableAlias = getTableAliasWithoutColumnAlias(ctx.tableAlias(), "UPDATE")
val aliasedTable = tableAlias.map(SubqueryAlias(_, table)).getOrElse(table)
val assignments = withAssignments(ctx.setClause().assignmentList())
val predicate = if (ctx.whereClause() != null) {
Some(expression(ctx.whereClause().booleanExpression()))
} else {
None
}
UpdateTable(aliasedTable, assignments, predicate)
}
private def withAssignments(assignCtx: SqlBaseParser.AssignmentListContext): Seq[Assignment] =
withOrigin(assignCtx) {
assignCtx.assignment().asScala.map { assign =>
Assignment(UnresolvedAttribute(visitMultipartIdentifier(assign.key)),
expression(assign.value))
}.toSeq
}
override def visitMergeIntoTable(ctx: MergeIntoTableContext): LogicalPlan = withOrigin(ctx) {
val targetTable = createUnresolvedRelation(ctx.target)
val targetTableAlias = getTableAliasWithoutColumnAlias(ctx.targetAlias, "MERGE")
val aliasedTarget = targetTableAlias.map(SubqueryAlias(_, targetTable)).getOrElse(targetTable)
val sourceTableOrQuery = if (ctx.source != null) {
createUnresolvedRelation(ctx.source)
} else if (ctx.sourceQuery != null) {
visitQuery(ctx.sourceQuery)
} else {
throw QueryParsingErrors.emptySourceForMergeError(ctx)
}
val sourceTableAlias = getTableAliasWithoutColumnAlias(ctx.sourceAlias, "MERGE")
val aliasedSource =
sourceTableAlias.map(SubqueryAlias(_, sourceTableOrQuery)).getOrElse(sourceTableOrQuery)
val mergeCondition = expression(ctx.mergeCondition)
val matchedActions = ctx.matchedClause().asScala.map {
clause => {
if (clause.matchedAction().DELETE() != null) {
DeleteAction(Option(clause.matchedCond).map(expression))
} else if (clause.matchedAction().UPDATE() != null) {
val condition = Option(clause.matchedCond).map(expression)
if (clause.matchedAction().ASTERISK() != null) {
UpdateAction(condition, Seq())
} else {
UpdateAction(condition, withAssignments(clause.matchedAction().assignmentList()))
}
} else {
// It should not be here.
throw QueryParsingErrors.unrecognizedMatchedActionError(clause)
}
}
}
val notMatchedActions = ctx.notMatchedClause().asScala.map {
clause => {
if (clause.notMatchedAction().INSERT() != null) {
val condition = Option(clause.notMatchedCond).map(expression)
if (clause.notMatchedAction().ASTERISK() != null) {
InsertAction(condition, Seq())
} else {
val columns = clause.notMatchedAction().columns.multipartIdentifier()
.asScala.map(attr => UnresolvedAttribute(visitMultipartIdentifier(attr)))
val values = clause.notMatchedAction().expression().asScala.map(expression)
if (columns.size != values.size) {
throw QueryParsingErrors.insertedValueNumberNotMatchFieldNumberError(clause)
}
InsertAction(condition, columns.zip(values).map(kv => Assignment(kv._1, kv._2)).toSeq)
}
} else {
// It should not be here.
throw QueryParsingErrors.unrecognizedNotMatchedActionError(clause)
}
}
}
if (matchedActions.isEmpty && notMatchedActions.isEmpty) {
throw QueryParsingErrors.mergeStatementWithoutWhenClauseError(ctx)
}
// children being empty means that the condition is not set
val matchedActionSize = matchedActions.length
if (matchedActionSize >= 2 && !matchedActions.init.forall(_.condition.nonEmpty)) {
throw QueryParsingErrors.nonLastMatchedClauseOmitConditionError(ctx)
}
val notMatchedActionSize = notMatchedActions.length
if (notMatchedActionSize >= 2 && !notMatchedActions.init.forall(_.condition.nonEmpty)) {
throw QueryParsingErrors.nonLastNotMatchedClauseOmitConditionError(ctx)
}
MergeIntoTable(
aliasedTarget,
aliasedSource,
mergeCondition,
matchedActions.toSeq,
notMatchedActions.toSeq)
}
/**
* Create a partition specification map.
*/
override def visitPartitionSpec(
ctx: PartitionSpecContext): Map[String, Option[String]] = withOrigin(ctx) {
val legacyNullAsString =
conf.getConf(SQLConf.LEGACY_PARSE_NULL_PARTITION_SPEC_AS_STRING_LITERAL)
val parts = ctx.partitionVal.asScala.map { pVal =>
val name = pVal.identifier.getText
val value = Option(pVal.constant).map(v => visitStringConstant(v, legacyNullAsString))
name -> value
}
// Before calling `toMap`, we check for duplicated keys to avoid silently ignoring partition
// values in partition specs like PARTITION(a='1', b='2', a='3'). The real semantic check for
// partition columns is done in the analyzer.
checkDuplicateKeys(parts.toSeq, ctx)
parts.toMap
}
/**
* Create a partition specification map without optional values.
*/
protected def visitNonOptionalPartitionSpec(
ctx: PartitionSpecContext): Map[String, String] = withOrigin(ctx) {
visitPartitionSpec(ctx).map {
case (key, None) => throw QueryParsingErrors.emptyPartitionKeyError(key, ctx)
case (key, Some(value)) => key -> value
}
}
/**
* Convert a constant of any type into a string. This is typically used in DDL commands, and its
* main purpose is to prevent slight differences due to back to back conversions i.e.:
* String -> Literal -> String.
*/
protected def visitStringConstant(
ctx: ConstantContext,
legacyNullAsString: Boolean): String = withOrigin(ctx) {
ctx match {
case _: NullLiteralContext if !legacyNullAsString => null
case s: StringLiteralContext => createString(s)
case o => o.getText
}
}
/**
* Add ORDER BY/SORT BY/CLUSTER BY/DISTRIBUTE BY/LIMIT/WINDOWS clauses to the logical plan. These
* clauses determine the shape (ordering/partitioning/rows) of the query result.
*/
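// For example, "... ORDER BY a" becomes Sort(Seq(a ASC), global = true, query), while
// "... SORT BY a" produces the same Sort with global = false.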
private def withQueryResultClauses(
ctx: QueryOrganizationContext,
query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
import ctx._
// Handle ORDER BY, SORT BY, DISTRIBUTE BY, and CLUSTER BY clause.
val withOrder = if (
!order.isEmpty && sort.isEmpty && distributeBy.isEmpty && clusterBy.isEmpty) {
// ORDER BY ...
Sort(order.asScala.map(visitSortItem).toSeq, global = true, query)
} else if (order.isEmpty && !sort.isEmpty && distributeBy.isEmpty && clusterBy.isEmpty) {
// SORT BY ...
Sort(sort.asScala.map(visitSortItem).toSeq, global = false, query)
} else if (order.isEmpty && sort.isEmpty && !distributeBy.isEmpty && clusterBy.isEmpty) {
// DISTRIBUTE BY ...
withRepartitionByExpression(ctx, expressionList(distributeBy), query)
} else if (order.isEmpty && !sort.isEmpty && !distributeBy.isEmpty && clusterBy.isEmpty) {
// SORT BY ... DISTRIBUTE BY ...
Sort(
sort.asScala.map(visitSortItem).toSeq,
global = false,
withRepartitionByExpression(ctx, expressionList(distributeBy), query))
} else if (order.isEmpty && sort.isEmpty && distributeBy.isEmpty && !clusterBy.isEmpty) {
// CLUSTER BY ...
val expressions = expressionList(clusterBy)
Sort(
expressions.map(SortOrder(_, Ascending)),
global = false,
withRepartitionByExpression(ctx, expressions, query))
} else if (order.isEmpty && sort.isEmpty && distributeBy.isEmpty && clusterBy.isEmpty) {
// [EMPTY]
query
} else {
throw QueryParsingErrors.combinationQueryResultClausesUnsupportedError(ctx)
}
// WINDOWS
val withWindow = withOrder.optionalMap(windowClause)(withWindowClause)
// LIMIT
// - LIMIT ALL is the same as omitting the LIMIT clause
withWindow.optional(limit) {
Limit(typedVisit(limit), withWindow)
}
}
/**
* Create a clause for DISTRIBUTE BY.
*/
protected def withRepartitionByExpression(
ctx: QueryOrganizationContext,
expressions: Seq[Expression],
query: LogicalPlan): LogicalPlan = {
throw QueryParsingErrors.distributeByUnsupportedError(ctx)
}
override def visitTransformQuerySpecification(
ctx: TransformQuerySpecificationContext): LogicalPlan = withOrigin(ctx) {
val from = OneRowRelation().optional(ctx.fromClause) {
visitFromClause(ctx.fromClause)
}
withTransformQuerySpecification(ctx, ctx.transformClause, ctx.whereClause, from)
}
override def visitRegularQuerySpecification(
ctx: RegularQuerySpecificationContext): LogicalPlan = withOrigin(ctx) {
val from = OneRowRelation().optional(ctx.fromClause) {
visitFromClause(ctx.fromClause)
}
withSelectQuerySpecification(
ctx,
ctx.selectClause,
ctx.lateralView,
ctx.whereClause,
ctx.aggregationClause,
ctx.havingClause,
ctx.windowClause,
from
)
}
override def visitNamedExpressionSeq(
ctx: NamedExpressionSeqContext): Seq[Expression] = {
Option(ctx).toSeq
.flatMap(_.namedExpression.asScala)
.map(typedVisit[Expression])
}
/**
* Create a logical plan using a having clause.
*/
private def withHavingClause(
ctx: HavingClauseContext, plan: LogicalPlan): LogicalPlan = {
// Note that we add a cast to non-predicate expressions. If the expression itself is
// already boolean, the optimizer will get rid of the unnecessary cast.
val predicate = expression(ctx.booleanExpression) match {
case p: Predicate => p
case e => Cast(e, BooleanType)
}
UnresolvedHaving(predicate, plan)
}
/**
* Create a logical plan using a where clause.
*/
private def withWhereClause(ctx: WhereClauseContext, plan: LogicalPlan): LogicalPlan = {
Filter(expression(ctx.booleanExpression), plan)
}
/**
 * Add a Hive-style transform (SELECT TRANSFORM/MAP/REDUCE) query specification to a logical plan.
*/
private def withTransformQuerySpecification(
ctx: ParserRuleContext,
transformClause: TransformClauseContext,
whereClause: WhereClauseContext,
relation: LogicalPlan): LogicalPlan = withOrigin(ctx) {
// Add where.
val withFilter = relation.optionalMap(whereClause)(withWhereClause)
// Create the transform.
val expressions = visitNamedExpressionSeq(transformClause.namedExpressionSeq)
// Create the attributes.
val (attributes, schemaLess) = if (transformClause.colTypeList != null) {
// Typed return columns.
(createSchema(transformClause.colTypeList).toAttributes, false)
} else if (transformClause.identifierSeq != null) {
// Untyped return columns.
val attrs = visitIdentifierSeq(transformClause.identifierSeq).map { name =>
AttributeReference(name, StringType, nullable = true)()
}
(attrs, false)
} else {
(Seq(AttributeReference("key", StringType)(),
AttributeReference("value", StringType)()), true)
}
// Create the transform.
ScriptTransformation(
expressions,
string(transformClause.script),
attributes,
withFilter,
withScriptIOSchema(
ctx,
transformClause.inRowFormat,
transformClause.recordWriter,
transformClause.outRowFormat,
transformClause.recordReader,
schemaLess
)
)
}
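// Illustrative transform query handled by the method above (assumed Hive/Spark SQL syntax):
//   SELECT TRANSFORM(a, b) USING 'cat' AS (x STRING, y STRING) FROM t
// Without an AS clause the output falls back to the schema-less (key, value) string columns.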
/**
* Add a regular (SELECT) query specification to a logical plan. The query specification
 * is the core of the logical plan; this is where sourcing (FROM clause), projection (SELECT),
 * aggregation (GROUP BY ... HAVING ...) and filtering (WHERE) take place.
 *
 * Note that query hints from the SELECT clause are applied last (see the withHints fold below).
*/
private def withSelectQuerySpecification(
ctx: ParserRuleContext,
selectClause: SelectClauseContext,
lateralView: java.util.List[LateralViewContext],
whereClause: WhereClauseContext,
aggregationClause: AggregationClauseContext,
havingClause: HavingClauseContext,
windowClause: WindowClauseContext,
relation: LogicalPlan): LogicalPlan = withOrigin(ctx) {
// Add lateral views.
val withLateralView = lateralView.asScala.foldLeft(relation)(withGenerate)
// Add where.
val withFilter = withLateralView.optionalMap(whereClause)(withWhereClause)
val expressions = visitNamedExpressionSeq(selectClause.namedExpressionSeq)
// Add aggregation or a project.
val namedExpressions = expressions.map {
case e: NamedExpression => e
case e: Expression => UnresolvedAlias(e)
}
def createProject() = if (namedExpressions.nonEmpty) {
Project(namedExpressions, withFilter)
} else {
withFilter
}
val withProject = if (aggregationClause == null && havingClause != null) {
if (conf.getConf(SQLConf.LEGACY_HAVING_WITHOUT_GROUP_BY_AS_WHERE)) {
// If the legacy conf is set, treat HAVING without GROUP BY as WHERE.
val predicate = expression(havingClause.booleanExpression) match {
case p: Predicate => p
case e => Cast(e, BooleanType)
}
Filter(predicate, createProject())
} else {
// According to SQL standard, HAVING without GROUP BY means global aggregate.
withHavingClause(havingClause, Aggregate(Nil, namedExpressions, withFilter))
}
} else if (aggregationClause != null) {
val aggregate = withAggregationClause(aggregationClause, namedExpressions, withFilter)
aggregate.optionalMap(havingClause)(withHavingClause)
} else {
// When hitting this branch, `having` must be null.
createProject()
}
// Distinct
val withDistinct = if (
selectClause.setQuantifier() != null &&
selectClause.setQuantifier().DISTINCT() != null) {
Distinct(withProject)
} else {
withProject
}
// Window
val withWindow = withDistinct.optionalMap(windowClause)(withWindowClause)
// Hint
selectClause.hints.asScala.foldRight(withWindow)(withHints)
}
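// For illustration, a query exercising the assembly order above (assumed standard syntax):
//   SELECT /*+ BROADCAST(t) */ DISTINCT dept, count(*) AS c
//   FROM t LATERAL VIEW explode(tags) x AS tag
//   WHERE active GROUP BY dept HAVING count(*) > 1
// i.e. lateral views and WHERE are applied first, then aggregation/HAVING, DISTINCT, window
// definitions, and finally hints.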
// Script Transform's input/output format.
type ScriptIOFormat =
(Seq[(String, String)], Option[String], Seq[(String, String)], Option[String])
protected def getRowFormatDelimited(ctx: RowFormatDelimitedContext): ScriptIOFormat = {
// TODO we should use the visitRowFormatDelimited function here. However HiveScriptIOSchema
// expects a seq of pairs in which the old parsers' token names are used as keys.
// Transforming the result of visitRowFormatDelimited would be quite a bit messier than
// retrieving the key value pairs ourselves.
val entries = entry("TOK_TABLEROWFORMATFIELD", ctx.fieldsTerminatedBy) ++
entry("TOK_TABLEROWFORMATCOLLITEMS", ctx.collectionItemsTerminatedBy) ++
entry("TOK_TABLEROWFORMATMAPKEYS", ctx.keysTerminatedBy) ++
entry("TOK_TABLEROWFORMATNULL", ctx.nullDefinedAs) ++
Option(ctx.linesSeparatedBy).toSeq.map { token =>
val value = string(token)
validate(
value == "\n",
s"LINES TERMINATED BY only supports newline '\\n' right now: $value",
ctx)
"TOK_TABLEROWFORMATLINES" -> value
}
(entries, None, Seq.empty, None)
}
/**
* Create a [[ScriptInputOutputSchema]].
*/
protected def withScriptIOSchema(
ctx: ParserRuleContext,
inRowFormat: RowFormatContext,
recordWriter: Token,
outRowFormat: RowFormatContext,
recordReader: Token,
schemaLess: Boolean): ScriptInputOutputSchema = {
def format(fmt: RowFormatContext): ScriptIOFormat = fmt match {
case c: RowFormatDelimitedContext =>
getRowFormatDelimited(c)
case c: RowFormatSerdeContext =>
throw QueryParsingErrors.transformWithSerdeUnsupportedError(ctx)
// SPARK-32106: When no row format is defined, we return an empty result so that a
// built-in default SerDe is used in SparkScriptTransformationExec.
case null =>
(Nil, None, Seq.empty, None)
}
val (inFormat, inSerdeClass, inSerdeProps, reader) = format(inRowFormat)
val (outFormat, outSerdeClass, outSerdeProps, writer) = format(outRowFormat)
ScriptInputOutputSchema(
inFormat, outFormat,
inSerdeClass, outSerdeClass,
inSerdeProps, outSerdeProps,
reader, writer,
schemaLess)
}
/**
 * Create a logical plan for a given 'FROM' clause. Note that we support multiple (comma
 * separated) relations here; these are converted into a single plan by a condition-less inner join.
*/
override def visitFromClause(ctx: FromClauseContext): LogicalPlan = withOrigin(ctx) {
val from = ctx.relation.asScala.foldLeft(null: LogicalPlan) { (left, relation) =>
val right = plan(relation.relationPrimary)
val join = right.optionalMap(left)(Join(_, _, Inner, None, JoinHint.NONE))
withJoinRelations(join, relation)
}
if (ctx.pivotClause() != null) {
if (!ctx.lateralView.isEmpty) {
throw QueryParsingErrors.lateralWithPivotInFromClauseNotAllowedError(ctx)
}
withPivot(ctx.pivotClause, from)
} else {
ctx.lateralView.asScala.foldLeft(from)(withGenerate)
}
}
/**
* Connect two queries by a Set operator.
*
* Supported Set operators are:
* - UNION [ DISTINCT | ALL ]
* - EXCEPT [ DISTINCT | ALL ]
* - MINUS [ DISTINCT | ALL ]
* - INTERSECT [DISTINCT | ALL]
*/
override def visitSetOperation(ctx: SetOperationContext): LogicalPlan = withOrigin(ctx) {
val left = plan(ctx.left)
val right = plan(ctx.right)
val all = Option(ctx.setQuantifier()).exists(_.ALL != null)
ctx.operator.getType match {
case SqlBaseParser.UNION if all =>
Union(left, right)
case SqlBaseParser.UNION =>
Distinct(Union(left, right))
case SqlBaseParser.INTERSECT if all =>
Intersect(left, right, isAll = true)
case SqlBaseParser.INTERSECT =>
Intersect(left, right, isAll = false)
case SqlBaseParser.EXCEPT if all =>
Except(left, right, isAll = true)
case SqlBaseParser.EXCEPT =>
Except(left, right, isAll = false)
case SqlBaseParser.SETMINUS if all =>
Except(left, right, isAll = true)
case SqlBaseParser.SETMINUS =>
Except(left, right, isAll = false)
}
}
/**
* Add a [[WithWindowDefinition]] operator to a logical plan.
*/
private def withWindowClause(
ctx: WindowClauseContext,
query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
// Collect all window specifications defined in the WINDOW clause.
val baseWindowTuples = ctx.namedWindow.asScala.map {
wCtx =>
(wCtx.name.getText, typedVisit[WindowSpec](wCtx.windowSpec))
}
baseWindowTuples.groupBy(_._1).foreach { kv =>
if (kv._2.size > 1) {
throw QueryParsingErrors.repetitiveWindowDefinitionError(kv._1, ctx)
}
}
val baseWindowMap = baseWindowTuples.toMap
// Handle cases like
// window w1 as (partition by p_mfgr order by p_name
// range between 2 preceding and 2 following),
// w2 as w1
val windowMapView = baseWindowMap.mapValues {
case WindowSpecReference(name) =>
baseWindowMap.get(name) match {
case Some(spec: WindowSpecDefinition) =>
spec
case Some(ref) =>
throw QueryParsingErrors.invalidWindowReferenceError(name, ctx)
case None =>
throw QueryParsingErrors.cannotResolveWindowReferenceError(name, ctx)
}
case spec: WindowSpecDefinition => spec
}
// Note that mapValues creates a view instead of a materialized map. We force materialization by
// mapping over identity.
WithWindowDefinition(windowMapView.map(identity).toMap, query)
}
/**
* Add an [[Aggregate]] or [[GroupingSets]] to a logical plan.
*/
private def withAggregationClause(
ctx: AggregationClauseContext,
selectExpressions: Seq[NamedExpression],
query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
val groupByExpressions = expressionList(ctx.groupingExpressions)
if (ctx.GROUPING != null) {
// GROUP BY .... GROUPING SETS (...)
val selectedGroupByExprs =
ctx.groupingSet.asScala.map(_.expression.asScala.map(e => expression(e)).toSeq)
GroupingSets(selectedGroupByExprs.toSeq, groupByExpressions, query, selectExpressions)
} else {
// GROUP BY .... (WITH CUBE | WITH ROLLUP)?
val mappedGroupByExpressions = if (ctx.CUBE != null) {
Seq(Cube(groupByExpressions))
} else if (ctx.ROLLUP != null) {
Seq(Rollup(groupByExpressions))
} else {
groupByExpressions
}
Aggregate(mappedGroupByExpressions, selectExpressions, query)
}
}
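// Illustrative GROUP BY variants routed through the method above (assumed syntax):
//   ... GROUP BY a, b GROUPING SETS ((a), (b))   -- GroupingSets
//   ... GROUP BY a, b WITH CUBE                  -- Aggregate over Cube(a, b)
//   ... GROUP BY a, b WITH ROLLUP                -- Aggregate over Rollup(a, b)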
/**
* Add [[UnresolvedHint]]s to a logical plan.
*/
private def withHints(
ctx: HintContext,
query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
var plan = query
ctx.hintStatements.asScala.reverse.foreach { stmt =>
plan = UnresolvedHint(stmt.hintName.getText,
stmt.parameters.asScala.map(expression).toSeq, plan)
}
plan
}
/**
* Add a [[Pivot]] to a logical plan.
*/
private def withPivot(
ctx: PivotClauseContext,
query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
val aggregates = Option(ctx.aggregates).toSeq
.flatMap(_.namedExpression.asScala)
.map(typedVisit[Expression])
val pivotColumn = if (ctx.pivotColumn.identifiers.size == 1) {
UnresolvedAttribute.quoted(ctx.pivotColumn.identifier.getText)
} else {
CreateStruct(
ctx.pivotColumn.identifiers.asScala.map(
identifier => UnresolvedAttribute.quoted(identifier.getText)).toSeq)
}
val pivotValues = ctx.pivotValues.asScala.map(visitPivotValue)
Pivot(None, pivotColumn, pivotValues.toSeq, aggregates, query)
}
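// Illustrative PIVOT clause handled above (assumed syntax):
//   SELECT * FROM sales PIVOT (sum(amount) FOR quarter IN ('Q1', 'Q2'))
// A multi-column pivot column such as FOR (year, quarter) IN ((2020, 'Q1')) becomes a struct.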
/**
* Create a Pivot column value with or without an alias.
*/
override def visitPivotValue(ctx: PivotValueContext): Expression = withOrigin(ctx) {
val e = expression(ctx.expression)
if (ctx.identifier != null) {
Alias(e, ctx.identifier.getText)()
} else {
e
}
}
/**
* Add a [[Generate]] (Lateral View) to a logical plan.
*/
private def withGenerate(
query: LogicalPlan,
ctx: LateralViewContext): LogicalPlan = withOrigin(ctx) {
val expressions = expressionList(ctx.expression)
Generate(
UnresolvedGenerator(visitFunctionName(ctx.qualifiedName), expressions),
unrequiredChildIndex = Nil,
outer = ctx.OUTER != null,
// scalastyle:off caselocale
Some(ctx.tblName.getText.toLowerCase),
// scalastyle:on caselocale
ctx.colName.asScala.map(_.getText).map(UnresolvedAttribute.apply).toSeq,
query)
}
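// Illustrative LATERAL VIEW handled above (assumed syntax):
//   SELECT name, hobby FROM people LATERAL VIEW OUTER explode(hobbies) h AS hobby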
/**
 * Create a single relation referenced in a FROM clause. This method is used when a part of the
 * join relation is nested, for example:
* {{{
* select * from t1 join (t2 cross join t3) on col1 = col2
* }}}
*/
override def visitRelation(ctx: RelationContext): LogicalPlan = withOrigin(ctx) {
withJoinRelations(plan(ctx.relationPrimary), ctx)
}
/**
 * Join one or more [[LogicalPlan]]s to the current logical plan.
*/
private def withJoinRelations(base: LogicalPlan, ctx: RelationContext): LogicalPlan = {
ctx.joinRelation.asScala.foldLeft(base) { (left, join) =>
withOrigin(join) {
val baseJoinType = join.joinType match {
case null => Inner
case jt if jt.CROSS != null => Cross
case jt if jt.FULL != null => FullOuter
case jt if jt.SEMI != null => LeftSemi
case jt if jt.ANTI != null => LeftAnti
case jt if jt.LEFT != null => LeftOuter
case jt if jt.RIGHT != null => RightOuter
case _ => Inner
}
// Resolve the join type and join condition
val (joinType, condition) = Option(join.joinCriteria) match {
case Some(c) if c.USING != null =>
(UsingJoin(baseJoinType, visitIdentifierList(c.identifierList)), None)
case Some(c) if c.booleanExpression != null =>
(baseJoinType, Option(expression(c.booleanExpression)))
case Some(c) =>
throw QueryParsingErrors.joinCriteriaUnimplementedError(c, ctx)
case None if join.NATURAL != null =>
if (baseJoinType == Cross) {
throw QueryParsingErrors.naturalCrossJoinUnsupportedError(ctx)
}
(NaturalJoin(baseJoinType), None)
case None =>
(baseJoinType, None)
}
Join(left, plan(join.right), joinType, condition, JoinHint.NONE)
}
}
}
/**
* Add a [[Sample]] to a logical plan.
*
* This currently supports the following sampling methods:
* - TABLESAMPLE(x ROWS): Sample the table down to the given number of rows.
* - TABLESAMPLE(x PERCENT): Sample the table down to the given percentage. Note that percentages
* are defined as a number between 0 and 100.
 * - TABLESAMPLE(BUCKET x OUT OF y): Sample the table down to a fraction of 'x' divided by 'y'.
*/
private def withSample(ctx: SampleContext, query: LogicalPlan): LogicalPlan = withOrigin(ctx) {
// Create a sampled plan if we need one.
def sample(fraction: Double): Sample = {
// The range of fraction accepted by Sample is [0, 1]. Because Hive's block sampling
// function takes X PERCENT as the input and the range of X is [0, 100], we need to
// adjust the fraction.
val eps = RandomSampler.roundingEpsilon
validate(fraction >= 0.0 - eps && fraction <= 1.0 + eps,
s"Sampling fraction ($fraction) must be on interval [0, 1]",
ctx)
Sample(0.0, fraction, withReplacement = false, (math.random * 1000).toInt, query)
}
if (ctx.sampleMethod() == null) {
throw QueryParsingErrors.emptyInputForTableSampleError(ctx)
}
ctx.sampleMethod() match {
case ctx: SampleByRowsContext =>
Limit(expression(ctx.expression), query)
case ctx: SampleByPercentileContext =>
val fraction = ctx.percentage.getText.toDouble
val sign = if (ctx.negativeSign == null) 1 else -1
sample(sign * fraction / 100.0d)
case ctx: SampleByBytesContext =>
val bytesStr = ctx.bytes.getText
if (bytesStr.matches("[0-9]+[bBkKmMgG]")) {
throw QueryParsingErrors.tableSampleByBytesUnsupportedError("byteLengthLiteral", ctx)
} else {
throw QueryParsingErrors.invalidByteLengthLiteralError(bytesStr, ctx)
}
case ctx: SampleByBucketContext if ctx.ON() != null =>
if (ctx.identifier != null) {
throw QueryParsingErrors.tableSampleByBytesUnsupportedError(
"BUCKET x OUT OF y ON colname", ctx)
} else {
throw QueryParsingErrors.tableSampleByBytesUnsupportedError(
"BUCKET x OUT OF y ON function", ctx)
}
case ctx: SampleByBucketContext =>
sample(ctx.numerator.getText.toDouble / ctx.denominator.getText.toDouble)
}
}
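// Illustrative TABLESAMPLE clauses handled above (assumed syntax):
//   SELECT * FROM t TABLESAMPLE (5 ROWS)            -- becomes a Limit
//   SELECT * FROM t TABLESAMPLE (10 PERCENT)        -- becomes a Sample with fraction 0.1
//   SELECT * FROM t TABLESAMPLE (BUCKET 1 OUT OF 4) -- becomes a Sample with fraction 0.25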
/**
* Create a logical plan for a sub-query.
*/
override def visitSubquery(ctx: SubqueryContext): LogicalPlan = withOrigin(ctx) {
plan(ctx.query)
}
/**
* Create an un-aliased table reference. This is typically used for top-level table references,
* for example:
* {{{
* INSERT INTO db.tbl2
* TABLE db.tbl1
* }}}
*/
override def visitTable(ctx: TableContext): LogicalPlan = withOrigin(ctx) {
UnresolvedRelation(visitMultipartIdentifier(ctx.multipartIdentifier))
}
/**
* Create an aliased table reference. This is typically used in FROM clauses.
*/
override def visitTableName(ctx: TableNameContext): LogicalPlan = withOrigin(ctx) {
val tableId = visitMultipartIdentifier(ctx.multipartIdentifier)
val table = mayApplyAliasPlan(ctx.tableAlias, UnresolvedRelation(tableId))
table.optionalMap(ctx.sample)(withSample)
}
/**
* Create a table-valued function call with arguments, e.g. range(1000)
*/
override def visitTableValuedFunction(ctx: TableValuedFunctionContext)
: LogicalPlan = withOrigin(ctx) {
val func = ctx.functionTable
val aliases = if (func.tableAlias.identifierList != null) {
visitIdentifierList(func.tableAlias.identifierList)
} else {
Seq.empty
}
val tvf = UnresolvedTableValuedFunction(
func.funcName.getText, func.expression.asScala.map(expression).toSeq, aliases)
tvf.optionalMap(func.tableAlias.strictIdentifier)(aliasPlan)
}
/**
* Create an inline table (a virtual table in Hive parlance).
*/
override def visitInlineTable(ctx: InlineTableContext): LogicalPlan = withOrigin(ctx) {
// Get the backing expressions.
val rows = ctx.expression.asScala.map { e =>
expression(e) match {
// inline table comes in two styles:
// style 1: values (1), (2), (3) -- multiple columns are supported
// style 2: values 1, 2, 3 -- only a single column is supported here
case struct: CreateNamedStruct => struct.valExprs // style 1
case child => Seq(child) // style 2
}
}
val aliases = if (ctx.tableAlias.identifierList != null) {
visitIdentifierList(ctx.tableAlias.identifierList)
} else {
Seq.tabulate(rows.head.size)(i => s"col${i + 1}")
}
val table = UnresolvedInlineTable(aliases, rows.toSeq)
table.optionalMap(ctx.tableAlias.strictIdentifier)(aliasPlan)
}
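// Illustrative inline tables handled above (assumed syntax):
//   SELECT * FROM VALUES (1, 'a'), (2, 'b') AS data(id, name)
//   SELECT * FROM VALUES 1, 2, 3   -- single unnamed column, defaulted to col1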
/**
 * Create an alias (SubqueryAlias) for a join relation. This is practically the same as
 * visitAliasedQuery and visitNamedExpression; ANTLR4, however, requires us to use 3 different
 * hooks. We could add alias names for output columns, for example:
* {{{
* SELECT a, b, c, d FROM (src1 s1 INNER JOIN src2 s2 ON s1.id = s2.id) dst(a, b, c, d)
* }}}
*/
override def visitAliasedRelation(ctx: AliasedRelationContext): LogicalPlan = withOrigin(ctx) {
val relation = plan(ctx.relation).optionalMap(ctx.sample)(withSample)
mayApplyAliasPlan(ctx.tableAlias, relation)
}
/**
 * Create an alias (SubqueryAlias) for a sub-query. This is practically the same as
 * visitAliasedRelation and visitNamedExpression; ANTLR4, however, requires us to use 3 different
 * hooks. We could add alias names for output columns, for example:
* {{{
* SELECT col1, col2 FROM testData AS t(col1, col2)
* }}}
*/
override def visitAliasedQuery(ctx: AliasedQueryContext): LogicalPlan = withOrigin(ctx) {
val relation = plan(ctx.query).optionalMap(ctx.sample)(withSample)
if (ctx.tableAlias.strictIdentifier == null) {
// For un-aliased subqueries, use a default alias name that is not likely to conflict with
// normal subquery names, so that parent operators can only access the columns in subquery by
// unqualified names. Users can still use this special qualifier to access columns if they
// know it, but that's not recommended.
SubqueryAlias("__auto_generated_subquery_name", relation)
} else {
mayApplyAliasPlan(ctx.tableAlias, relation)
}
}
/**
* Create an alias ([[SubqueryAlias]]) for a [[LogicalPlan]].
*/
private def aliasPlan(alias: ParserRuleContext, plan: LogicalPlan): LogicalPlan = {
SubqueryAlias(alias.getText, plan)
}
/**
* If aliases specified in a FROM clause, create a subquery alias ([[SubqueryAlias]]) and
* column aliases for a [[LogicalPlan]].
*/
private def mayApplyAliasPlan(tableAlias: TableAliasContext, plan: LogicalPlan): LogicalPlan = {
if (tableAlias.strictIdentifier != null) {
val alias = tableAlias.strictIdentifier.getText
if (tableAlias.identifierList != null) {
val columnNames = visitIdentifierList(tableAlias.identifierList)
SubqueryAlias(alias, UnresolvedSubqueryColumnAliases(columnNames, plan))
} else {
SubqueryAlias(alias, plan)
}
} else {
plan
}
}
/**
* Create a Sequence of Strings for a parenthesis enclosed alias list.
*/
override def visitIdentifierList(ctx: IdentifierListContext): Seq[String] = withOrigin(ctx) {
visitIdentifierSeq(ctx.identifierSeq)
}
/**
* Create a Sequence of Strings for an identifier list.
*/
override def visitIdentifierSeq(ctx: IdentifierSeqContext): Seq[String] = withOrigin(ctx) {
ctx.ident.asScala.map(_.getText).toSeq
}
/* ********************************************************************************************
* Table Identifier parsing
* ******************************************************************************************** */
/**
* Create a [[TableIdentifier]] from a 'tableName' or 'databaseName'.'tableName' pattern.
*/
override def visitTableIdentifier(
ctx: TableIdentifierContext): TableIdentifier = withOrigin(ctx) {
TableIdentifier(ctx.table.getText, Option(ctx.db).map(_.getText))
}
/**
* Create a [[FunctionIdentifier]] from a 'functionName' or 'databaseName'.'functionName' pattern.
*/
override def visitFunctionIdentifier(
ctx: FunctionIdentifierContext): FunctionIdentifier = withOrigin(ctx) {
FunctionIdentifier(ctx.function.getText, Option(ctx.db).map(_.getText))
}
/**
* Create a multi-part identifier.
*/
override def visitMultipartIdentifier(ctx: MultipartIdentifierContext): Seq[String] =
withOrigin(ctx) {
ctx.parts.asScala.map(_.getText).toSeq
}
/* ********************************************************************************************
* Expression parsing
* ******************************************************************************************** */
/**
* Create an expression from the given context. This method just passes the context on to the
* visitor and only takes care of typing (We assume that the visitor returns an Expression here).
*/
protected def expression(ctx: ParserRuleContext): Expression = typedVisit(ctx)
/**
* Create sequence of expressions from the given sequence of contexts.
*/
private def expressionList(trees: java.util.List[ExpressionContext]): Seq[Expression] = {
trees.asScala.map(expression).toSeq
}
/**
* Create a star (i.e. all) expression; this selects all elements (in the specified object).
* Both un-targeted (global) and targeted aliases are supported.
*/
override def visitStar(ctx: StarContext): Expression = withOrigin(ctx) {
UnresolvedStar(Option(ctx.qualifiedName()).map(_.identifier.asScala.map(_.getText).toSeq))
}
/**
* Create an aliased expression if an alias is specified. Both single and multi-aliases are
* supported.
*/
override def visitNamedExpression(ctx: NamedExpressionContext): Expression = withOrigin(ctx) {
val e = expression(ctx.expression)
if (ctx.name != null) {
Alias(e, ctx.name.getText)()
} else if (ctx.identifierList != null) {
MultiAlias(e, visitIdentifierList(ctx.identifierList))
} else {
e
}
}
/**
* Combine a number of boolean expressions into a balanced expression tree. These expressions are
* either combined by a logical [[And]] or a logical [[Or]].
*
* A balanced binary tree is created because regular left recursive trees cause considerable
* performance degradations and can cause stack overflows.
*/
override def visitLogicalBinary(ctx: LogicalBinaryContext): Expression = withOrigin(ctx) {
val expressionType = ctx.operator.getType
val expressionCombiner = expressionType match {
case SqlBaseParser.AND => And.apply _
case SqlBaseParser.OR => Or.apply _
}
// Collect all similar left hand contexts.
val contexts = ArrayBuffer(ctx.right)
var current = ctx.left
def collectContexts: Boolean = current match {
case lbc: LogicalBinaryContext if lbc.operator.getType == expressionType =>
contexts += lbc.right
current = lbc.left
true
case _ =>
contexts += current
false
}
while (collectContexts) {
// No body - all updates take place in the collectContexts.
}
// Reverse the contexts to have them in the same sequence as in the SQL statement & turn them
// into expressions.
val expressions = contexts.reverseMap(expression)
// Create a balanced tree.
def reduceToExpressionTree(low: Int, high: Int): Expression = high - low match {
case 0 =>
expressions(low)
case 1 =>
expressionCombiner(expressions(low), expressions(high))
case x =>
val mid = low + x / 2
expressionCombiner(
reduceToExpressionTree(low, mid),
reduceToExpressionTree(mid + 1, high))
}
reduceToExpressionTree(0, expressions.size - 1)
}
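// For example (illustrative), `a AND b AND c AND d` is collected into [a, b, c, d] and reduced
// to And(And(a, b), And(c, d)) rather than the left-deep And(And(And(a, b), c), d).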
/**
* Invert a boolean expression.
*/
override def visitLogicalNot(ctx: LogicalNotContext): Expression = withOrigin(ctx) {
Not(expression(ctx.booleanExpression()))
}
/**
* Create a filtering correlated sub-query (EXISTS).
*/
override def visitExists(ctx: ExistsContext): Expression = {
Exists(plan(ctx.query))
}
/**
* Create a comparison expression. This compares two expressions. The following comparison
* operators are supported:
* - Equal: '=' or '=='
* - Null-safe Equal: '<=>'
* - Not Equal: '<>' or '!='
* - Less than: '<'
 * - Less than or Equal: '<='
 * - Greater than: '>'
 * - Greater than or Equal: '>='
*/
override def visitComparison(ctx: ComparisonContext): Expression = withOrigin(ctx) {
val left = expression(ctx.left)
val right = expression(ctx.right)
val operator = ctx.comparisonOperator().getChild(0).asInstanceOf[TerminalNode]
operator.getSymbol.getType match {
case SqlBaseParser.EQ =>
EqualTo(left, right)
case SqlBaseParser.NSEQ =>
EqualNullSafe(left, right)
case SqlBaseParser.NEQ | SqlBaseParser.NEQJ =>
Not(EqualTo(left, right))
case SqlBaseParser.LT =>
LessThan(left, right)
case SqlBaseParser.LTE =>
LessThanOrEqual(left, right)
case SqlBaseParser.GT =>
GreaterThan(left, right)
case SqlBaseParser.GTE =>
GreaterThanOrEqual(left, right)
}
}
/**
* Create a predicated expression. A predicated expression is a normal expression with a
* predicate attached to it, for example:
* {{{
* a + 1 IS NULL
* }}}
*/
override def visitPredicated(ctx: PredicatedContext): Expression = withOrigin(ctx) {
val e = expression(ctx.valueExpression)
if (ctx.predicate != null) {
withPredicate(e, ctx.predicate)
} else {
e
}
}
/**
* Add a predicate to the given expression. Supported expressions are:
* - (NOT) BETWEEN
* - (NOT) IN
* - (NOT) LIKE (ANY | SOME | ALL)
* - (NOT) RLIKE
* - IS (NOT) NULL.
* - IS (NOT) (TRUE | FALSE | UNKNOWN)
* - IS (NOT) DISTINCT FROM
*/
private def withPredicate(e: Expression, ctx: PredicateContext): Expression = withOrigin(ctx) {
// Invert a predicate if it has a valid NOT clause.
def invertIfNotDefined(e: Expression): Expression = ctx.NOT match {
case null => e
case not => Not(e)
}
def getValueExpressions(e: Expression): Seq[Expression] = e match {
case c: CreateNamedStruct => c.valExprs
case other => Seq(other)
}
// Create the predicate.
ctx.kind.getType match {
case SqlBaseParser.BETWEEN =>
// BETWEEN is translated to lower <= e && e <= upper
invertIfNotDefined(And(
GreaterThanOrEqual(e, expression(ctx.lower)),
LessThanOrEqual(e, expression(ctx.upper))))
case SqlBaseParser.IN if ctx.query != null =>
invertIfNotDefined(InSubquery(getValueExpressions(e), ListQuery(plan(ctx.query))))
case SqlBaseParser.IN =>
invertIfNotDefined(In(e, ctx.expression.asScala.map(expression).toSeq))
case SqlBaseParser.LIKE =>
Option(ctx.quantifier).map(_.getType) match {
case Some(SqlBaseParser.ANY) | Some(SqlBaseParser.SOME) =>
validate(!ctx.expression.isEmpty, "Expected something between '(' and ')'.", ctx)
val expressions = expressionList(ctx.expression)
if (expressions.forall(_.foldable) && expressions.forall(_.dataType == StringType)) {
// If there are many pattern expressions, a chained Or of Like expressions can throw a
// StackOverflowError, so we use LikeAny or NotLikeAny instead.
val patterns = expressions.map(_.eval(EmptyRow).asInstanceOf[UTF8String])
ctx.NOT match {
case null => LikeAny(e, patterns)
case _ => NotLikeAny(e, patterns)
}
} else {
ctx.expression.asScala.map(expression)
.map(p => invertIfNotDefined(new Like(e, p))).toSeq.reduceLeft(Or)
}
case Some(SqlBaseParser.ALL) =>
validate(!ctx.expression.isEmpty, "Expected something between '(' and ')'.", ctx)
val expressions = expressionList(ctx.expression)
if (expressions.forall(_.foldable) && expressions.forall(_.dataType == StringType)) {
// If there are many pattern expressions, a chained And of Like expressions can throw a
// StackOverflowError, so we use LikeAll or NotLikeAll instead.
val patterns = expressions.map(_.eval(EmptyRow).asInstanceOf[UTF8String])
ctx.NOT match {
case null => LikeAll(e, patterns)
case _ => NotLikeAll(e, patterns)
}
} else {
ctx.expression.asScala.map(expression)
.map(p => invertIfNotDefined(new Like(e, p))).toSeq.reduceLeft(And)
}
case _ =>
val escapeChar = Option(ctx.escapeChar).map(string).map { str =>
if (str.length != 1) {
throw QueryParsingErrors.invalidEscapeStringError(ctx)
}
str.charAt(0)
}.getOrElse('\\')
invertIfNotDefined(Like(e, expression(ctx.pattern), escapeChar))
}
case SqlBaseParser.RLIKE =>
invertIfNotDefined(RLike(e, expression(ctx.pattern)))
case SqlBaseParser.NULL if ctx.NOT != null =>
IsNotNull(e)
case SqlBaseParser.NULL =>
IsNull(e)
case SqlBaseParser.TRUE => ctx.NOT match {
case null => EqualNullSafe(e, Literal(true))
case _ => Not(EqualNullSafe(e, Literal(true)))
}
case SqlBaseParser.FALSE => ctx.NOT match {
case null => EqualNullSafe(e, Literal(false))
case _ => Not(EqualNullSafe(e, Literal(false)))
}
case SqlBaseParser.UNKNOWN => ctx.NOT match {
case null => IsUnknown(e)
case _ => IsNotUnknown(e)
}
case SqlBaseParser.DISTINCT if ctx.NOT != null =>
EqualNullSafe(e, expression(ctx.right))
case SqlBaseParser.DISTINCT =>
Not(EqualNullSafe(e, expression(ctx.right)))
}
}
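// Illustrative rewrites performed above (assuming the translations in this method):
//   a BETWEEN 1 AND 10        =>  (a >= 1) AND (a <= 10)
//   a NOT IN (1, 2)           =>  NOT (a IN (1, 2))
//   a IS NOT DISTINCT FROM b  =>  a <=> b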
/**
* Create a binary arithmetic expression. The following arithmetic operators are supported:
* - Multiplication: '*'
* - Division: '/'
* - Hive Long Division: 'DIV'
* - Modulo: '%'
* - Addition: '+'
* - Subtraction: '-'
* - Binary AND: '&'
 * - Binary XOR: '^'
 * - Binary OR: '|'
 * - String Concatenation: '||'
*/
override def visitArithmeticBinary(ctx: ArithmeticBinaryContext): Expression = withOrigin(ctx) {
val left = expression(ctx.left)
val right = expression(ctx.right)
ctx.operator.getType match {
case SqlBaseParser.ASTERISK =>
Multiply(left, right)
case SqlBaseParser.SLASH =>
Divide(left, right)
case SqlBaseParser.PERCENT =>
Remainder(left, right)
case SqlBaseParser.DIV =>
IntegralDivide(left, right)
case SqlBaseParser.PLUS =>
Add(left, right)
case SqlBaseParser.MINUS =>
Subtract(left, right)
case SqlBaseParser.CONCAT_PIPE =>
Concat(left :: right :: Nil)
case SqlBaseParser.AMPERSAND =>
BitwiseAnd(left, right)
case SqlBaseParser.HAT =>
BitwiseXor(left, right)
case SqlBaseParser.PIPE =>
BitwiseOr(left, right)
}
}
/**
* Create a unary arithmetic expression. The following arithmetic operators are supported:
* - Plus: '+'
* - Minus: '-'
* - Bitwise Not: '~'
*/
override def visitArithmeticUnary(ctx: ArithmeticUnaryContext): Expression = withOrigin(ctx) {
val value = expression(ctx.valueExpression)
ctx.operator.getType match {
case SqlBaseParser.PLUS =>
UnaryPositive(value)
case SqlBaseParser.MINUS =>
UnaryMinus(value)
case SqlBaseParser.TILDE =>
BitwiseNot(value)
}
}
override def visitCurrentDatetime(ctx: CurrentDatetimeContext): Expression = withOrigin(ctx) {
if (conf.ansiEnabled) {
ctx.name.getType match {
case SqlBaseParser.CURRENT_DATE =>
CurrentDate()
case SqlBaseParser.CURRENT_TIMESTAMP =>
CurrentTimestamp()
}
} else {
// If the parser is not in ansi mode, we should return `UnresolvedAttribute`, in case there
// are columns named `CURRENT_DATE` or `CURRENT_TIMESTAMP`.
UnresolvedAttribute.quoted(ctx.name.getText)
}
}
/**
* Create a [[Cast]] expression.
*/
override def visitCast(ctx: CastContext): Expression = withOrigin(ctx) {
val rawDataType = typedVisit[DataType](ctx.dataType())
val dataType = CharVarcharUtils.replaceCharVarcharWithStringForCast(rawDataType)
val cast = Cast(expression(ctx.expression), dataType)
cast.setTagValue(Cast.USER_SPECIFIED_CAST, true)
cast
}
/**
* Create a [[CreateStruct]] expression.
*/
override def visitStruct(ctx: StructContext): Expression = withOrigin(ctx) {
CreateStruct.create(ctx.argument.asScala.map(expression).toSeq)
}
/**
* Create a [[First]] expression.
*/
override def visitFirst(ctx: FirstContext): Expression = withOrigin(ctx) {
val ignoreNullsExpr = ctx.IGNORE != null
First(expression(ctx.expression), ignoreNullsExpr).toAggregateExpression()
}
/**
* Create a [[Last]] expression.
*/
override def visitLast(ctx: LastContext): Expression = withOrigin(ctx) {
val ignoreNullsExpr = ctx.IGNORE != null
Last(expression(ctx.expression), ignoreNullsExpr).toAggregateExpression()
}
/**
* Create a Position expression.
*/
override def visitPosition(ctx: PositionContext): Expression = withOrigin(ctx) {
new StringLocate(expression(ctx.substr), expression(ctx.str))
}
/**
 * Create an Extract expression.
*/
override def visitExtract(ctx: ExtractContext): Expression = withOrigin(ctx) {
val arguments = Seq(Literal(ctx.field.getText), expression(ctx.source))
UnresolvedFunction("extract", arguments, isDistinct = false)
}
/**
* Create a Substring/Substr expression.
*/
override def visitSubstring(ctx: SubstringContext): Expression = withOrigin(ctx) {
if (ctx.len != null) {
Substring(expression(ctx.str), expression(ctx.pos), expression(ctx.len))
} else {
new Substring(expression(ctx.str), expression(ctx.pos))
}
}
/**
* Create a Trim expression.
*/
override def visitTrim(ctx: TrimContext): Expression = withOrigin(ctx) {
val srcStr = expression(ctx.srcStr)
val trimStr = Option(ctx.trimStr).map(expression)
Option(ctx.trimOption).map(_.getType).getOrElse(SqlBaseParser.BOTH) match {
case SqlBaseParser.BOTH =>
StringTrim(srcStr, trimStr)
case SqlBaseParser.LEADING =>
StringTrimLeft(srcStr, trimStr)
case SqlBaseParser.TRAILING =>
StringTrimRight(srcStr, trimStr)
case other =>
throw QueryParsingErrors.trimOptionUnsupportedError(other, ctx)
}
}
/**
 * Create an Overlay expression.
*/
override def visitOverlay(ctx: OverlayContext): Expression = withOrigin(ctx) {
val input = expression(ctx.input)
val replace = expression(ctx.replace)
val position = expression(ctx.position)
val lengthOpt = Option(ctx.length).map(expression)
lengthOpt match {
case Some(length) => Overlay(input, replace, position, length)
case None => new Overlay(input, replace, position)
}
}
/**
* Create a (windowed) Function expression.
*/
override def visitFunctionCall(ctx: FunctionCallContext): Expression = withOrigin(ctx) {
// Create the function call.
val name = ctx.functionName.getText
val isDistinct = Option(ctx.setQuantifier()).exists(_.DISTINCT != null)
// Call `toSeq`, otherwise `ctx.argument.asScala.map(expression)` is `Buffer` in Scala 2.13
val arguments = ctx.argument.asScala.map(expression).toSeq match {
case Seq(UnresolvedStar(None))
if name.toLowerCase(Locale.ROOT) == "count" && !isDistinct =>
// Transform COUNT(*) into COUNT(1).
Seq(Literal(1))
case expressions =>
expressions
}
val filter = Option(ctx.where).map(expression(_))
val ignoreNulls =
Option(ctx.nullsOption).map(_.getType == SqlBaseParser.IGNORE).getOrElse(false)
val function = UnresolvedFunction(
getFunctionIdentifier(ctx.functionName), arguments, isDistinct, filter, ignoreNulls)
// Check if the function is evaluated in a windowed context.
ctx.windowSpec match {
case spec: WindowRefContext =>
UnresolvedWindowExpression(function, visitWindowRef(spec))
case spec: WindowDefContext =>
WindowExpression(function, visitWindowDef(spec))
case _ => function
}
}
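// Illustrative function calls handled above (assumed syntax):
//   count(*)                                      -- rewritten to count(1)
//   sum(x) FILTER (WHERE x > 0)                   -- aggregate with a filter clause
//   last_value(x) IGNORE NULLS OVER (ORDER BY y)  -- windowed call with null handling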
/**
* Create a function database (optional) and name pair, for multipartIdentifier.
 * This is used in CREATE FUNCTION, DROP FUNCTION, SHOW FUNCTIONS.
*/
protected def visitFunctionName(ctx: MultipartIdentifierContext): FunctionIdentifier = {
visitFunctionName(ctx, ctx.parts.asScala.map(_.getText).toSeq)
}
/**
* Create a function database (optional) and name pair.
*/
protected def visitFunctionName(ctx: QualifiedNameContext): FunctionIdentifier = {
visitFunctionName(ctx, ctx.identifier().asScala.map(_.getText).toSeq)
}
/**
* Create a function database (optional) and name pair.
*/
private def visitFunctionName(ctx: ParserRuleContext, texts: Seq[String]): FunctionIdentifier = {
texts match {
case Seq(db, fn) => FunctionIdentifier(fn, Option(db))
case Seq(fn) => FunctionIdentifier(fn, None)
case other =>
throw QueryParsingErrors.functionNameUnsupportedError(texts.mkString("."), ctx)
}
}
/**
 * Get a function identifier consisting of an optional database name and a function name.
*/
protected def getFunctionIdentifier(ctx: FunctionNameContext): FunctionIdentifier = {
if (ctx.qualifiedName != null) {
visitFunctionName(ctx.qualifiedName)
} else {
FunctionIdentifier(ctx.getText, None)
}
}
/**
 * Create a [[LambdaFunction]].
*/
override def visitLambda(ctx: LambdaContext): Expression = withOrigin(ctx) {
val arguments = ctx.identifier().asScala.map { name =>
UnresolvedNamedLambdaVariable(UnresolvedAttribute.quoted(name.getText).nameParts)
}
val function = expression(ctx.expression).transformUp {
case a: UnresolvedAttribute => UnresolvedNamedLambdaVariable(a.nameParts)
}
LambdaFunction(function, arguments.toSeq)
}
/**
* Create a reference to a window frame, i.e. [[WindowSpecReference]].
*/
override def visitWindowRef(ctx: WindowRefContext): WindowSpecReference = withOrigin(ctx) {
WindowSpecReference(ctx.name.getText)
}
/**
* Create a window definition, i.e. [[WindowSpecDefinition]].
*/
override def visitWindowDef(ctx: WindowDefContext): WindowSpecDefinition = withOrigin(ctx) {
// CLUSTER BY ... | PARTITION BY ... ORDER BY ...
val partition = ctx.partition.asScala.map(expression)
val order = ctx.sortItem.asScala.map(visitSortItem)
// RANGE/ROWS BETWEEN ...
val frameSpecOption = Option(ctx.windowFrame).map { frame =>
val frameType = frame.frameType.getType match {
case SqlBaseParser.RANGE => RangeFrame
case SqlBaseParser.ROWS => RowFrame
}
SpecifiedWindowFrame(
frameType,
visitFrameBound(frame.start),
Option(frame.end).map(visitFrameBound).getOrElse(CurrentRow))
}
WindowSpecDefinition(
partition.toSeq,
order.toSeq,
frameSpecOption.getOrElse(UnspecifiedFrame))
}
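// Illustrative window definition handled above (assumed syntax):
//   OVER (PARTITION BY dept ORDER BY salary ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
// An omitted frame end defaults to CURRENT ROW; an omitted frame yields UnspecifiedFrame.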
/**
 * Create or resolve a frame boundary expression.
*/
override def visitFrameBound(ctx: FrameBoundContext): Expression = withOrigin(ctx) {
def value: Expression = {
val e = expression(ctx.expression)
validate(e.resolved && e.foldable, "Frame bound value must be a literal.", ctx)
e
}
ctx.boundType.getType match {
case SqlBaseParser.PRECEDING if ctx.UNBOUNDED != null =>
UnboundedPreceding
case SqlBaseParser.PRECEDING =>
UnaryMinus(value)
case SqlBaseParser.CURRENT =>
CurrentRow
case SqlBaseParser.FOLLOWING if ctx.UNBOUNDED != null =>
UnboundedFollowing
case SqlBaseParser.FOLLOWING =>
value
}
}
/**
* Create a [[CreateStruct]] expression.
*/
override def visitRowConstructor(ctx: RowConstructorContext): Expression = withOrigin(ctx) {
CreateStruct(ctx.namedExpression().asScala.map(expression).toSeq)
}
/**
* Create a [[ScalarSubquery]] expression.
*/
override def visitSubqueryExpression(
ctx: SubqueryExpressionContext): Expression = withOrigin(ctx) {
ScalarSubquery(plan(ctx.query))
}
/**
* Create a value based [[CaseWhen]] expression. This has the following SQL form:
* {{{
* CASE [expression]
* WHEN [value] THEN [expression]
* ...
* ELSE [expression]
* END
* }}}
*/
override def visitSimpleCase(ctx: SimpleCaseContext): Expression = withOrigin(ctx) {
val e = expression(ctx.value)
val branches = ctx.whenClause.asScala.map { wCtx =>
(EqualTo(e, expression(wCtx.condition)), expression(wCtx.result))
}
CaseWhen(branches.toSeq, Option(ctx.elseExpression).map(expression))
}
/**
* Create a condition based [[CaseWhen]] expression. This has the following SQL syntax:
* {{{
* CASE
* WHEN [predicate] THEN [expression]
* ...
* ELSE [expression]
* END
* }}}
*
* @param ctx the parse tree
* */
override def visitSearchedCase(ctx: SearchedCaseContext): Expression = withOrigin(ctx) {
val branches = ctx.whenClause.asScala.map { wCtx =>
(expression(wCtx.condition), expression(wCtx.result))
}
CaseWhen(branches.toSeq, Option(ctx.elseExpression).map(expression))
}
/**
 * Currently, regexes are only supported in expressions of SELECT statements; in other
 * places, e.g. WHERE `(a)?+.+` = 2, a regex is not meaningful.
*/
private def canApplyRegex(ctx: ParserRuleContext): Boolean = withOrigin(ctx) {
var parent = ctx.getParent
while (parent != null) {
if (parent.isInstanceOf[NamedExpressionContext]) return true
parent = parent.getParent
}
return false
}
/**
* Create a dereference expression. The return type depends on the type of the parent.
 * If the parent is an [[UnresolvedAttribute]], the result can be an [[UnresolvedAttribute]] or
 * an [[UnresolvedRegex]] for a regex quoted in backticks; if the parent is some other expression,
 * it can be an [[UnresolvedExtractValue]].
*/
override def visitDereference(ctx: DereferenceContext): Expression = withOrigin(ctx) {
val attr = ctx.fieldName.getText
expression(ctx.base) match {
case unresolved_attr @ UnresolvedAttribute(nameParts) =>
ctx.fieldName.getStart.getText match {
case escapedIdentifier(columnNameRegex)
if conf.supportQuotedRegexColumnName && canApplyRegex(ctx) =>
UnresolvedRegex(columnNameRegex, Some(unresolved_attr.name),
conf.caseSensitiveAnalysis)
case _ =>
UnresolvedAttribute(nameParts :+ attr)
}
case e =>
UnresolvedExtractValue(e, Literal(attr))
}
}
/**
 * Create an [[UnresolvedAttribute]] expression, or an [[UnresolvedRegex]] if it is a regex
 * quoted in backticks.
*/
override def visitColumnReference(ctx: ColumnReferenceContext): Expression = withOrigin(ctx) {
ctx.getStart.getText match {
case escapedIdentifier(columnNameRegex)
if conf.supportQuotedRegexColumnName && canApplyRegex(ctx) =>
UnresolvedRegex(columnNameRegex, None, conf.caseSensitiveAnalysis)
case _ =>
UnresolvedAttribute.quoted(ctx.getText)
}
}
/**
 * Create an [[UnresolvedExtractValue]] expression; this is used for subscript access to an array.
*/
override def visitSubscript(ctx: SubscriptContext): Expression = withOrigin(ctx) {
UnresolvedExtractValue(expression(ctx.value), expression(ctx.index))
}
/**
 * Create an expression for an expression between parentheses. This is needed because the ANTLR
* visitor cannot automatically convert the nested context into an expression.
*/
override def visitParenthesizedExpression(
ctx: ParenthesizedExpressionContext): Expression = withOrigin(ctx) {
expression(ctx.expression)
}
/**
* Create a [[SortOrder]] expression.
*/
override def visitSortItem(ctx: SortItemContext): SortOrder = withOrigin(ctx) {
val direction = if (ctx.DESC != null) {
Descending
} else {
Ascending
}
val nullOrdering = if (ctx.FIRST != null) {
NullsFirst
} else if (ctx.LAST != null) {
NullsLast
} else {
direction.defaultNullOrdering
}
SortOrder(expression(ctx.expression), direction, nullOrdering, Seq.empty)
}
/**
* Create a typed Literal expression. A typed literal has the following SQL syntax:
* {{{
* [TYPE] '[VALUE]'
* }}}
* Currently Date, Timestamp, Interval and Binary typed literals are supported.
*/
override def visitTypeConstructor(ctx: TypeConstructorContext): Literal = withOrigin(ctx) {
val value = string(ctx.STRING)
val valueType = ctx.identifier.getText.toUpperCase(Locale.ROOT)
def toLiteral[T](f: UTF8String => Option[T], t: DataType): Literal = {
f(UTF8String.fromString(value)).map(Literal(_, t)).getOrElse {
throw QueryParsingErrors.cannotParseValueTypeError(valueType, value, ctx)
}
}
try {
valueType match {
case "DATE" =>
toLiteral(stringToDate(_, getZoneId(SQLConf.get.sessionLocalTimeZone)), DateType)
case "TIMESTAMP" =>
val zoneId = getZoneId(SQLConf.get.sessionLocalTimeZone)
toLiteral(stringToTimestamp(_, zoneId), TimestampType)
case "INTERVAL" =>
val interval = try {
IntervalUtils.stringToInterval(UTF8String.fromString(value))
} catch {
case e: IllegalArgumentException =>
val ex = QueryParsingErrors.cannotParseIntervalValueError(value, ctx)
ex.setStackTrace(e.getStackTrace)
throw ex
}
Literal(interval, CalendarIntervalType)
case "X" =>
val padding = if (value.length % 2 != 0) "0" else ""
Literal(DatatypeConverter.parseHexBinary(padding + value))
case other =>
throw QueryParsingErrors.literalValueTypeUnsupportedError(other, ctx)
}
} catch {
case e: IllegalArgumentException =>
throw QueryParsingErrors.parsingValueTypeError(e, valueType, ctx)
}
}
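// Illustrative typed literals handled above (assumed syntax):
//   DATE '2020-01-01', TIMESTAMP '2020-01-01 00:00:00', INTERVAL '1 day', X'1C'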
/**
* Create a NULL literal expression.
*/
override def visitNullLiteral(ctx: NullLiteralContext): Literal = withOrigin(ctx) {
Literal(null)
}
/**
* Create a Boolean literal expression.
*/
override def visitBooleanLiteral(ctx: BooleanLiteralContext): Literal = withOrigin(ctx) {
if (ctx.getText.toBoolean) {
Literal.TrueLiteral
} else {
Literal.FalseLiteral
}
}
/**
 * Create an integral literal expression. The code selects the narrowest integral type
 * possible; either a BigDecimal, a Long or an Integer is returned.
*/
override def visitIntegerLiteral(ctx: IntegerLiteralContext): Literal = withOrigin(ctx) {
BigDecimal(ctx.getText) match {
case v if v.isValidInt =>
Literal(v.intValue)
case v if v.isValidLong =>
Literal(v.longValue)
case v => Literal(v.underlying())
}
}
/**
* Create a decimal literal for a regular decimal number.
*/
override def visitDecimalLiteral(ctx: DecimalLiteralContext): Literal = withOrigin(ctx) {
Literal(BigDecimal(ctx.getText).underlying())
}
/**
* Create a decimal literal for a regular decimal number or a scientific decimal number.
*/
override def visitLegacyDecimalLiteral(
ctx: LegacyDecimalLiteralContext): Literal = withOrigin(ctx) {
Literal(BigDecimal(ctx.getText).underlying())
}
/**
 * Create a double literal for a number with an exponent, e.g. 1E-30.
*/
override def visitExponentLiteral(ctx: ExponentLiteralContext): Literal = {
numericLiteral(ctx, ctx.getText, /* exponent values don't have a suffix */
Double.MinValue, Double.MaxValue, DoubleType.simpleString)(_.toDouble)
}
/** Create a numeric literal expression. */
private def numericLiteral(
ctx: NumberContext,
rawStrippedQualifier: String,
minValue: BigDecimal,
maxValue: BigDecimal,
typeName: String)(converter: String => Any): Literal = withOrigin(ctx) {
try {
val rawBigDecimal = BigDecimal(rawStrippedQualifier)
if (rawBigDecimal < minValue || rawBigDecimal > maxValue) {
throw QueryParsingErrors.invalidNumericLiteralRangeError(
rawStrippedQualifier, minValue, maxValue, typeName, ctx)
}
Literal(converter(rawStrippedQualifier))
} catch {
case e: NumberFormatException =>
throw new ParseException(e.getMessage, ctx)
}
}
/**
* Create a Byte Literal expression.
*/
override def visitTinyIntLiteral(ctx: TinyIntLiteralContext): Literal = {
val rawStrippedQualifier = ctx.getText.substring(0, ctx.getText.length - 1)
numericLiteral(ctx, rawStrippedQualifier,
Byte.MinValue, Byte.MaxValue, ByteType.simpleString)(_.toByte)
}
/**
* Create a Short Literal expression.
*/
override def visitSmallIntLiteral(ctx: SmallIntLiteralContext): Literal = {
val rawStrippedQualifier = ctx.getText.substring(0, ctx.getText.length - 1)
numericLiteral(ctx, rawStrippedQualifier,
Short.MinValue, Short.MaxValue, ShortType.simpleString)(_.toShort)
}
/**
* Create a Long Literal expression.
*/
override def visitBigIntLiteral(ctx: BigIntLiteralContext): Literal = {
val rawStrippedQualifier = ctx.getText.substring(0, ctx.getText.length - 1)
numericLiteral(ctx, rawStrippedQualifier,
Long.MinValue, Long.MaxValue, LongType.simpleString)(_.toLong)
}
/**
* Create a Float Literal expression.
*/
override def visitFloatLiteral(ctx: FloatLiteralContext): Literal = {
val rawStrippedQualifier = ctx.getText.substring(0, ctx.getText.length - 1)
numericLiteral(ctx, rawStrippedQualifier,
Float.MinValue, Float.MaxValue, FloatType.simpleString)(_.toFloat)
}
/**
* Create a Double Literal expression.
*/
override def visitDoubleLiteral(ctx: DoubleLiteralContext): Literal = {
val rawStrippedQualifier = ctx.getText.substring(0, ctx.getText.length - 1)
numericLiteral(ctx, rawStrippedQualifier,
Double.MinValue, Double.MaxValue, DoubleType.simpleString)(_.toDouble)
}
/**
* Create a BigDecimal Literal expression.
*/
override def visitBigDecimalLiteral(ctx: BigDecimalLiteralContext): Literal = {
val raw = ctx.getText.substring(0, ctx.getText.length - 2)
try {
Literal(BigDecimal(raw).underlying())
} catch {
case e: AnalysisException =>
throw new ParseException(e.message, ctx)
}
}
/**
* Create a String literal expression.
*/
override def visitStringLiteral(ctx: StringLiteralContext): Literal = withOrigin(ctx) {
Literal(createString(ctx))
}
/**
* Create a String from a string literal context. This supports multiple consecutive string
 * literals, which are concatenated; for example, the expression "'hello' 'world'" will be
 * converted into "helloworld".
*
* Special characters can be escaped by using Hive/C-style escaping.
*/
private def createString(ctx: StringLiteralContext): String = {
if (conf.escapedStringLiterals) {
ctx.STRING().asScala.map(stringWithoutUnescape).mkString
} else {
ctx.STRING().asScala.map(string).mkString
}
}
/**
* Create an [[UnresolvedRelation]] from a multi-part identifier context.
*/
private def createUnresolvedRelation(
ctx: MultipartIdentifierContext): UnresolvedRelation = withOrigin(ctx) {
UnresolvedRelation(visitMultipartIdentifier(ctx))
}
/**
* Create an [[UnresolvedTable]] from a multi-part identifier context.
*/
private def createUnresolvedTable(
ctx: MultipartIdentifierContext,
commandName: String,
relationTypeMismatchHint: Option[String] = None): UnresolvedTable = withOrigin(ctx) {
UnresolvedTable(visitMultipartIdentifier(ctx), commandName, relationTypeMismatchHint)
}
/**
* Create an [[UnresolvedView]] from a multi-part identifier context.
*/
private def createUnresolvedView(
ctx: MultipartIdentifierContext,
commandName: String,
allowTemp: Boolean = true,
relationTypeMismatchHint: Option[String] = None): UnresolvedView = withOrigin(ctx) {
UnresolvedView(visitMultipartIdentifier(ctx), commandName, allowTemp, relationTypeMismatchHint)
}
/**
* Create an [[UnresolvedTableOrView]] from a multi-part identifier context.
*/
private def createUnresolvedTableOrView(
ctx: MultipartIdentifierContext,
commandName: String,
allowTempView: Boolean = true): UnresolvedTableOrView = withOrigin(ctx) {
UnresolvedTableOrView(visitMultipartIdentifier(ctx), commandName, allowTempView)
}
/**
* Create a [[CalendarInterval]] literal expression. Two syntaxes are supported:
* - multiple unit value pairs, for instance: interval 2 months 2 days.
* - from-to unit, for instance: interval '1-2' year to month.
*/
override def visitInterval(ctx: IntervalContext): Literal = withOrigin(ctx) {
Literal(parseIntervalLiteral(ctx), CalendarIntervalType)
}
/**
* Create a [[CalendarInterval]] object
*/
protected def parseIntervalLiteral(ctx: IntervalContext): CalendarInterval = withOrigin(ctx) {
if (ctx.errorCapturingMultiUnitsInterval != null) {
val innerCtx = ctx.errorCapturingMultiUnitsInterval
if (innerCtx.unitToUnitInterval != null) {
throw QueryParsingErrors.moreThanOneFromToUnitInIntervalLiteralError(
innerCtx.unitToUnitInterval)
}
visitMultiUnitsInterval(innerCtx.multiUnitsInterval)
} else if (ctx.errorCapturingUnitToUnitInterval != null) {
val innerCtx = ctx.errorCapturingUnitToUnitInterval
if (innerCtx.error1 != null || innerCtx.error2 != null) {
val errorCtx = if (innerCtx.error1 != null) innerCtx.error1 else innerCtx.error2
throw QueryParsingErrors.moreThanOneFromToUnitInIntervalLiteralError(errorCtx)
}
visitUnitToUnitInterval(innerCtx.body)
} else {
throw QueryParsingErrors.invalidIntervalLiteralError(ctx)
}
}
/**
* Creates a [[CalendarInterval]] with multiple unit value pairs, e.g. 1 YEAR 2 DAYS.
*/
override def visitMultiUnitsInterval(ctx: MultiUnitsIntervalContext): CalendarInterval = {
withOrigin(ctx) {
val units = ctx.unit.asScala
val values = ctx.intervalValue().asScala
try {
assert(units.length == values.length)
val kvs = units.indices.map { i =>
val u = units(i).getText
val v = if (values(i).STRING() != null) {
val value = string(values(i).STRING())
// SPARK-32840: For invalid cases, e.g. INTERVAL '1 day 2' hour,
// INTERVAL 'interval 1' day, we need to check ahead before they are concatenated with
// units and become valid ones, e.g. '1 day 2 hour'.
// Ideally, we only ensure the value parts don't contain any units here.
if (value.exists(Character.isLetter)) {
throw QueryParsingErrors.invalidIntervalFormError(value, ctx)
}
value
} else {
values(i).getText
}
UTF8String.fromString(" " + v + " " + u)
}
IntervalUtils.stringToInterval(UTF8String.concat(kvs: _*))
} catch {
case i: IllegalArgumentException =>
val e = new ParseException(i.getMessage, ctx)
e.setStackTrace(i.getStackTrace)
throw e
}
}
}
/**
* Creates a [[CalendarInterval]] with from-to unit, e.g. '2-1' YEAR TO MONTH.
*/
override def visitUnitToUnitInterval(ctx: UnitToUnitIntervalContext): CalendarInterval = {
withOrigin(ctx) {
val value = Option(ctx.intervalValue.STRING).map(string).getOrElse {
throw QueryParsingErrors.invalidFromToUnitValueError(ctx.intervalValue)
}
try {
val from = ctx.from.getText.toLowerCase(Locale.ROOT)
val to = ctx.to.getText.toLowerCase(Locale.ROOT)
(from, to) match {
case ("year", "month") =>
IntervalUtils.fromYearMonthString(value)
case ("day", "hour") =>
IntervalUtils.fromDayTimeString(value, IntervalUnit.DAY, IntervalUnit.HOUR)
case ("day", "minute") =>
IntervalUtils.fromDayTimeString(value, IntervalUnit.DAY, IntervalUnit.MINUTE)
case ("day", "second") =>
IntervalUtils.fromDayTimeString(value, IntervalUnit.DAY, IntervalUnit.SECOND)
case ("hour", "minute") =>
IntervalUtils.fromDayTimeString(value, IntervalUnit.HOUR, IntervalUnit.MINUTE)
case ("hour", "second") =>
IntervalUtils.fromDayTimeString(value, IntervalUnit.HOUR, IntervalUnit.SECOND)
case ("minute", "second") =>
IntervalUtils.fromDayTimeString(value, IntervalUnit.MINUTE, IntervalUnit.SECOND)
case _ =>
throw QueryParsingErrors.fromToIntervalUnsupportedError(from, to, ctx)
}
} catch {
// Handle Exceptions thrown by CalendarInterval
case e: IllegalArgumentException =>
val pe = new ParseException(e.getMessage, ctx)
pe.setStackTrace(e.getStackTrace)
throw pe
}
}
}
/* ********************************************************************************************
* DataType parsing
* ******************************************************************************************** */
/**
* Resolve/create a primitive type.
*/
override def visitPrimitiveDataType(ctx: PrimitiveDataTypeContext): DataType = withOrigin(ctx) {
val dataType = ctx.identifier.getText.toLowerCase(Locale.ROOT)
(dataType, ctx.INTEGER_VALUE().asScala.toList) match {
case ("boolean", Nil) => BooleanType
case ("tinyint" | "byte", Nil) => ByteType
case ("smallint" | "short", Nil) => ShortType
case ("int" | "integer", Nil) => IntegerType
case ("bigint" | "long", Nil) => LongType
case ("float" | "real", Nil) => FloatType
case ("double", Nil) => DoubleType
case ("date", Nil) => DateType
case ("timestamp", Nil) => TimestampType
case ("string", Nil) => StringType
case ("character" | "char", length :: Nil) => CharType(length.getText.toInt)
case ("varchar", length :: Nil) => VarcharType(length.getText.toInt)
case ("binary", Nil) => BinaryType
case ("decimal" | "dec" | "numeric", Nil) => DecimalType.USER_DEFAULT
case ("decimal" | "dec" | "numeric", precision :: Nil) =>
DecimalType(precision.getText.toInt, 0)
case ("decimal" | "dec" | "numeric", precision :: scale :: Nil) =>
DecimalType(precision.getText.toInt, scale.getText.toInt)
case ("void", Nil) => NullType
case ("interval", Nil) => CalendarIntervalType
case (dt, params) =>
val dtStr = if (params.nonEmpty) s"$dt(${params.mkString(",")})" else dt
throw QueryParsingErrors.dataTypeUnsupportedError(dtStr, ctx)
}
}
/**
* Create a complex DataType. Arrays, Maps and Structures are supported.
*/
override def visitComplexDataType(ctx: ComplexDataTypeContext): DataType = withOrigin(ctx) {
ctx.complex.getType match {
case SqlBaseParser.ARRAY =>
ArrayType(typedVisit(ctx.dataType(0)))
case SqlBaseParser.MAP =>
MapType(typedVisit(ctx.dataType(0)), typedVisit(ctx.dataType(1)))
case SqlBaseParser.STRUCT =>
StructType(Option(ctx.complexColTypeList).toSeq.flatMap(visitComplexColTypeList))
}
}
/**
* Create top level table schema.
*/
protected def createSchema(ctx: ColTypeListContext): StructType = {
StructType(Option(ctx).toSeq.flatMap(visitColTypeList))
}
/**
* Create a [[StructType]] from a number of column definitions.
*/
override def visitColTypeList(ctx: ColTypeListContext): Seq[StructField] = withOrigin(ctx) {
ctx.colType().asScala.map(visitColType).toSeq
}
/**
* Create a top level [[StructField]] from a column definition.
*/
override def visitColType(ctx: ColTypeContext): StructField = withOrigin(ctx) {
import ctx._
val builder = new MetadataBuilder
// Add comment to metadata
Option(commentSpec()).map(visitCommentSpec).foreach {
builder.putString("comment", _)
}
StructField(
name = colName.getText,
dataType = typedVisit[DataType](ctx.dataType),
nullable = NULL == null,
metadata = builder.build())
}
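  // Illustrative example (added; not part of the original Spark source). A column definition such
  // as `id BIGINT NOT NULL COMMENT 'primary key'` becomes:
  //   StructField("id", LongType, nullable = false,
  //     metadata = new MetadataBuilder().putString("comment", "primary key").build())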
/**
* Create a [[StructType]] from a sequence of [[StructField]]s.
*/
protected def createStructType(ctx: ComplexColTypeListContext): StructType = {
StructType(Option(ctx).toSeq.flatMap(visitComplexColTypeList))
}
/**
* Create a [[StructType]] from a number of column definitions.
*/
override def visitComplexColTypeList(
ctx: ComplexColTypeListContext): Seq[StructField] = withOrigin(ctx) {
ctx.complexColType().asScala.map(visitComplexColType).toSeq
}
/**
* Create a [[StructField]] from a column definition.
*/
override def visitComplexColType(ctx: ComplexColTypeContext): StructField = withOrigin(ctx) {
import ctx._
val structField = StructField(
name = identifier.getText,
dataType = typedVisit(dataType()),
nullable = NULL == null)
Option(commentSpec).map(visitCommentSpec).map(structField.withComment).getOrElse(structField)
}
/**
* Create a location string.
*/
override def visitLocationSpec(ctx: LocationSpecContext): String = withOrigin(ctx) {
string(ctx.STRING)
}
/**
* Create an optional location string.
*/
protected def visitLocationSpecList(ctx: java.util.List[LocationSpecContext]): Option[String] = {
ctx.asScala.headOption.map(visitLocationSpec)
}
/**
* Create a comment string.
*/
override def visitCommentSpec(ctx: CommentSpecContext): String = withOrigin(ctx) {
string(ctx.STRING)
}
/**
* Create an optional comment string.
*/
protected def visitCommentSpecList(ctx: java.util.List[CommentSpecContext]): Option[String] = {
ctx.asScala.headOption.map(visitCommentSpec)
}
/**
* Create a [[BucketSpec]].
*/
override def visitBucketSpec(ctx: BucketSpecContext): BucketSpec = withOrigin(ctx) {
BucketSpec(
ctx.INTEGER_VALUE.getText.toInt,
visitIdentifierList(ctx.identifierList),
Option(ctx.orderedIdentifierList)
.toSeq
.flatMap(_.orderedIdentifier.asScala)
.map { orderedIdCtx =>
Option(orderedIdCtx.ordering).map(_.getText).foreach { dir =>
if (dir.toLowerCase(Locale.ROOT) != "asc") {
operationNotAllowed(s"Column ordering must be ASC, was '$dir'", ctx)
}
}
orderedIdCtx.ident.getText
})
}
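  // Illustrative example (added; not part of the original Spark source). A clause like
  //   CLUSTERED BY (a, b) SORTED BY (c ASC) INTO 8 BUCKETS
  // yields BucketSpec(8, Seq("a", "b"), Seq("c")), while `SORTED BY (c DESC)` is rejected above
  // with "Column ordering must be ASC".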
/**
* Convert a table property list into a key-value map.
* This should be called through [[visitPropertyKeyValues]] or [[visitPropertyKeys]].
*/
override def visitTablePropertyList(
ctx: TablePropertyListContext): Map[String, String] = withOrigin(ctx) {
val properties = ctx.tableProperty.asScala.map { property =>
val key = visitTablePropertyKey(property.key)
val value = visitTablePropertyValue(property.value)
key -> value
}
// Check for duplicate property names.
checkDuplicateKeys(properties.toSeq, ctx)
properties.toMap
}
/**
* Parse a key-value map from a [[TablePropertyListContext]], assuming all values are specified.
*/
def visitPropertyKeyValues(ctx: TablePropertyListContext): Map[String, String] = {
val props = visitTablePropertyList(ctx)
val badKeys = props.collect { case (key, null) => key }
if (badKeys.nonEmpty) {
operationNotAllowed(
s"Values must be specified for key(s): ${badKeys.mkString("[", ",", "]")}", ctx)
}
props
}
/**
* Parse a list of keys from a [[TablePropertyListContext]], assuming no values are specified.
*/
def visitPropertyKeys(ctx: TablePropertyListContext): Seq[String] = {
val props = visitTablePropertyList(ctx)
val badKeys = props.filter { case (_, v) => v != null }.keys
if (badKeys.nonEmpty) {
operationNotAllowed(
s"Values should not be specified for key(s): ${badKeys.mkString("[", ",", "]")}", ctx)
}
props.keys.toSeq
}
/**
 * A table property key can either be a String or a collection of dot-separated elements. This
 * function extracts the property key based on whether it's a string literal or a table property
 * identifier.
*/
override def visitTablePropertyKey(key: TablePropertyKeyContext): String = {
if (key.STRING != null) {
string(key.STRING)
} else {
key.getText
}
}
/**
 * A table property value can be a String, Integer, Boolean or Decimal. This function extracts
 * the property value based on whether it's a string, integer, boolean or decimal literal.
*/
override def visitTablePropertyValue(value: TablePropertyValueContext): String = {
if (value == null) {
null
} else if (value.STRING != null) {
string(value.STRING)
} else if (value.booleanValue != null) {
value.getText.toLowerCase(Locale.ROOT)
} else {
value.getText
}
}
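  // Illustrative example (added; not part of the original Spark source). Together with
  // visitTablePropertyList, a clause like
  //   TBLPROPERTIES ('owner' = 'alice', retention = 30, 'active' = TRUE)
  // becomes Map("owner" -> "alice", "retention" -> "30", "active" -> "true"): string values are
  // unquoted, numeric literals keep their text, and boolean literals are lower-cased.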
/**
* Type to keep track of a table header: (identifier, isTemporary, ifNotExists, isExternal).
*/
type TableHeader = (Seq[String], Boolean, Boolean, Boolean)
/**
* Type to keep track of table clauses:
* - partition transforms
* - partition columns
* - bucketSpec
* - properties
* - options
* - location
* - comment
* - serde
*
* Note: Partition transforms are based on existing table schema definition. It can be simple
* column names, or functions like `year(date_col)`. Partition columns are column names with data
* types like `i INT`, which should be appended to the existing table schema.
*/
type TableClauses = (
Seq[Transform], Seq[StructField], Option[BucketSpec], Map[String, String],
Map[String, String], Option[String], Option[String], Option[SerdeInfo])
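  // Illustrative note (added; not part of the original Spark source). For the partition pieces of
  // TableClauses, `PARTITIONED BY (years(ts))` produces a partition transform, while
  // `PARTITIONED BY (region STRING)` produces a partition column that is later appended to the
  // table schema; mixing the two forms in one statement is rejected by partitionExpressions below.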
/**
* Validate a create table statement and return the [[TableIdentifier]].
*/
override def visitCreateTableHeader(
ctx: CreateTableHeaderContext): TableHeader = withOrigin(ctx) {
val temporary = ctx.TEMPORARY != null
val ifNotExists = ctx.EXISTS != null
if (temporary && ifNotExists) {
operationNotAllowed("CREATE TEMPORARY TABLE ... IF NOT EXISTS", ctx)
}
val multipartIdentifier = ctx.multipartIdentifier.parts.asScala.map(_.getText).toSeq
(multipartIdentifier, temporary, ifNotExists, ctx.EXTERNAL != null)
}
/**
* Validate a replace table statement and return the [[TableIdentifier]].
*/
override def visitReplaceTableHeader(
ctx: ReplaceTableHeaderContext): TableHeader = withOrigin(ctx) {
val multipartIdentifier = ctx.multipartIdentifier.parts.asScala.map(_.getText).toSeq
(multipartIdentifier, false, false, false)
}
/**
* Parse a qualified name to a multipart name.
*/
override def visitQualifiedName(ctx: QualifiedNameContext): Seq[String] = withOrigin(ctx) {
ctx.identifier.asScala.map(_.getText).toSeq
}
/**
* Parse a list of transforms or columns.
*/
override def visitPartitionFieldList(
ctx: PartitionFieldListContext): (Seq[Transform], Seq[StructField]) = withOrigin(ctx) {
val (transforms, columns) = ctx.fields.asScala.map {
case transform: PartitionTransformContext =>
(Some(visitPartitionTransform(transform)), None)
case field: PartitionColumnContext =>
(None, Some(visitColType(field.colType)))
}.unzip
(transforms.flatten.toSeq, columns.flatten.toSeq)
}
override def visitPartitionTransform(
ctx: PartitionTransformContext): Transform = withOrigin(ctx) {
def getFieldReference(
ctx: ApplyTransformContext,
arg: V2Expression): FieldReference = {
lazy val name: String = ctx.identifier.getText
arg match {
case ref: FieldReference =>
ref
case nonRef =>
throw QueryParsingErrors.partitionTransformNotExpectedError(name, nonRef.describe, ctx)
}
}
def getSingleFieldReference(
ctx: ApplyTransformContext,
arguments: Seq[V2Expression]): FieldReference = {
lazy val name: String = ctx.identifier.getText
if (arguments.size > 1) {
throw QueryParsingErrors.tooManyArgumentsForTransformError(name, ctx)
} else if (arguments.isEmpty) {
throw QueryParsingErrors.notEnoughArgumentsForTransformError(name, ctx)
} else {
getFieldReference(ctx, arguments.head)
}
}
ctx.transform match {
case identityCtx: IdentityTransformContext =>
IdentityTransform(FieldReference(typedVisit[Seq[String]](identityCtx.qualifiedName)))
case applyCtx: ApplyTransformContext =>
val arguments = applyCtx.argument.asScala.map(visitTransformArgument).toSeq
applyCtx.identifier.getText match {
case "bucket" =>
val numBuckets: Int = arguments.head match {
case LiteralValue(shortValue, ShortType) =>
shortValue.asInstanceOf[Short].toInt
case LiteralValue(intValue, IntegerType) =>
intValue.asInstanceOf[Int]
case LiteralValue(longValue, LongType) =>
longValue.asInstanceOf[Long].toInt
case lit =>
throw QueryParsingErrors.invalidBucketsNumberError(lit.describe, applyCtx)
}
val fields = arguments.tail.map(arg => getFieldReference(applyCtx, arg))
BucketTransform(LiteralValue(numBuckets, IntegerType), fields)
case "years" =>
YearsTransform(getSingleFieldReference(applyCtx, arguments))
case "months" =>
MonthsTransform(getSingleFieldReference(applyCtx, arguments))
case "days" =>
DaysTransform(getSingleFieldReference(applyCtx, arguments))
case "hours" =>
HoursTransform(getSingleFieldReference(applyCtx, arguments))
case name =>
ApplyTransform(name, arguments)
}
}
}
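  // Illustrative examples (added; not part of the original Spark source). Transforms in a
  // PARTITIONED BY clause map as follows:
  //   col            -> IdentityTransform(FieldReference(Seq("col")))
  //   years(ts)      -> YearsTransform(FieldReference(Seq("ts")))
  //   bucket(16, id) -> BucketTransform(LiteralValue(16, IntegerType), Seq(FieldReference(Seq("id"))))
  //   f(a, b)        -> ApplyTransform("f", <literal or field-reference arguments>)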
/**
* Parse an argument to a transform. An argument may be a field reference (qualified name) or
* a value literal.
*/
override def visitTransformArgument(ctx: TransformArgumentContext): V2Expression = {
withOrigin(ctx) {
val reference = Option(ctx.qualifiedName)
.map(typedVisit[Seq[String]])
.map(FieldReference(_))
val literal = Option(ctx.constant)
.map(typedVisit[Literal])
.map(lit => LiteralValue(lit.value, lit.dataType))
reference.orElse(literal)
.getOrElse(throw QueryParsingErrors.invalidTransformArgumentError(ctx))
}
}
private def cleanNamespaceProperties(
properties: Map[String, String],
ctx: ParserRuleContext): Map[String, String] = withOrigin(ctx) {
import SupportsNamespaces._
val legacyOn = conf.getConf(SQLConf.LEGACY_PROPERTY_NON_RESERVED)
properties.filter {
case (PROP_LOCATION, _) if !legacyOn =>
throw QueryParsingErrors.cannotCleanReservedNamespacePropertyError(
PROP_LOCATION, ctx, "please use the LOCATION clause to specify it")
case (PROP_LOCATION, _) => false
case (PROP_OWNER, _) if !legacyOn =>
throw QueryParsingErrors.cannotCleanReservedNamespacePropertyError(
PROP_OWNER, ctx, "it will be set to the current user")
case (PROP_OWNER, _) => false
case _ => true
}
}
/**
* Create a [[CreateNamespaceStatement]] command.
*
* For example:
* {{{
* CREATE NAMESPACE [IF NOT EXISTS] ns1.ns2.ns3
* create_namespace_clauses;
*
* create_namespace_clauses (order insensitive):
* [COMMENT namespace_comment]
* [LOCATION path]
* [WITH PROPERTIES (key1=val1, key2=val2, ...)]
* }}}
*/
override def visitCreateNamespace(ctx: CreateNamespaceContext): LogicalPlan = withOrigin(ctx) {
import SupportsNamespaces._
checkDuplicateClauses(ctx.commentSpec(), "COMMENT", ctx)
checkDuplicateClauses(ctx.locationSpec, "LOCATION", ctx)
checkDuplicateClauses(ctx.PROPERTIES, "WITH PROPERTIES", ctx)
checkDuplicateClauses(ctx.DBPROPERTIES, "WITH DBPROPERTIES", ctx)
if (!ctx.PROPERTIES.isEmpty && !ctx.DBPROPERTIES.isEmpty) {
throw QueryParsingErrors.propertiesAndDbPropertiesBothSpecifiedError(ctx)
}
var properties = ctx.tablePropertyList.asScala.headOption
.map(visitPropertyKeyValues)
.getOrElse(Map.empty)
properties = cleanNamespaceProperties(properties, ctx)
visitCommentSpecList(ctx.commentSpec()).foreach {
properties += PROP_COMMENT -> _
}
visitLocationSpecList(ctx.locationSpec()).foreach {
properties += PROP_LOCATION -> _
}
CreateNamespaceStatement(
visitMultipartIdentifier(ctx.multipartIdentifier),
ctx.EXISTS != null,
properties)
}
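  // Illustrative example (added; not part of the original Spark source). Assuming the reserved
  // property keys from SupportsNamespaces are "comment" and "location", a statement such as
  //   CREATE NAMESPACE IF NOT EXISTS ns1 COMMENT 'test' LOCATION '/tmp/ns1'
  // produces CreateNamespaceStatement(Seq("ns1"), ifNotExists = true,
  //   Map("comment" -> "test", "location" -> "/tmp/ns1")).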
/**
* Create a [[DropNamespace]] command.
*
* For example:
* {{{
* DROP (DATABASE|SCHEMA|NAMESPACE) [IF EXISTS] ns1.ns2 [RESTRICT|CASCADE];
* }}}
*/
override def visitDropNamespace(ctx: DropNamespaceContext): LogicalPlan = withOrigin(ctx) {
DropNamespace(
UnresolvedNamespace(visitMultipartIdentifier(ctx.multipartIdentifier)),
ctx.EXISTS != null,
ctx.CASCADE != null)
}
/**
* Create an [[AlterNamespaceSetProperties]] logical plan.
*
* For example:
* {{{
* ALTER (DATABASE|SCHEMA|NAMESPACE) database
* SET (DBPROPERTIES|PROPERTIES) (property_name=property_value, ...);
* }}}
*/
override def visitSetNamespaceProperties(ctx: SetNamespacePropertiesContext): LogicalPlan = {
withOrigin(ctx) {
val properties = cleanNamespaceProperties(visitPropertyKeyValues(ctx.tablePropertyList), ctx)
AlterNamespaceSetProperties(
UnresolvedNamespace(visitMultipartIdentifier(ctx.multipartIdentifier)),
properties)
}
}
/**
* Create an [[AlterNamespaceSetLocation]] logical plan.
*
* For example:
* {{{
* ALTER (DATABASE|SCHEMA|NAMESPACE) namespace SET LOCATION path;
* }}}
*/
override def visitSetNamespaceLocation(ctx: SetNamespaceLocationContext): LogicalPlan = {
withOrigin(ctx) {
AlterNamespaceSetLocation(
UnresolvedNamespace(visitMultipartIdentifier(ctx.multipartIdentifier)),
visitLocationSpec(ctx.locationSpec))
}
}
/**
* Create a [[ShowNamespaces]] command.
*/
override def visitShowNamespaces(ctx: ShowNamespacesContext): LogicalPlan = withOrigin(ctx) {
if (ctx.DATABASES != null && ctx.multipartIdentifier != null) {
throw QueryParsingErrors.fromOrInNotAllowedInShowDatabasesError(ctx)
}
val multiPart = Option(ctx.multipartIdentifier).map(visitMultipartIdentifier)
ShowNamespaces(
UnresolvedNamespace(multiPart.getOrElse(Seq.empty[String])),
Option(ctx.pattern).map(string))
}
/**
* Create a [[DescribeNamespace]].
*
* For example:
* {{{
* DESCRIBE (DATABASE|SCHEMA|NAMESPACE) [EXTENDED] database;
* }}}
*/
override def visitDescribeNamespace(ctx: DescribeNamespaceContext): LogicalPlan =
withOrigin(ctx) {
DescribeNamespace(
UnresolvedNamespace(visitMultipartIdentifier(ctx.multipartIdentifier())),
ctx.EXTENDED != null)
}
def cleanTableProperties(
ctx: ParserRuleContext, properties: Map[String, String]): Map[String, String] = {
import TableCatalog._
val legacyOn = conf.getConf(SQLConf.LEGACY_PROPERTY_NON_RESERVED)
properties.filter {
case (PROP_PROVIDER, _) if !legacyOn =>
throw QueryParsingErrors.cannotCleanReservedTablePropertyError(
PROP_PROVIDER, ctx, "please use the USING clause to specify it")
case (PROP_PROVIDER, _) => false
case (PROP_LOCATION, _) if !legacyOn =>
throw QueryParsingErrors.cannotCleanReservedTablePropertyError(
PROP_LOCATION, ctx, "please use the LOCATION clause to specify it")
case (PROP_LOCATION, _) => false
case (PROP_OWNER, _) if !legacyOn =>
throw QueryParsingErrors.cannotCleanReservedTablePropertyError(
PROP_OWNER, ctx, "it will be set to the current user")
case (PROP_OWNER, _) => false
case _ => true
}
}
def cleanTableOptions(
ctx: ParserRuleContext,
options: Map[String, String],
location: Option[String]): (Map[String, String], Option[String]) = {
var path = location
val filtered = cleanTableProperties(ctx, options).filter {
case (k, v) if k.equalsIgnoreCase("path") && path.nonEmpty =>
throw QueryParsingErrors.duplicatedTablePathsFoundError(path.get, v, ctx)
case (k, v) if k.equalsIgnoreCase("path") =>
path = Some(v)
false
case _ => true
}
(filtered, path)
}
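  // Illustrative example (added; not part of the original Spark source). For
  //   OPTIONS (path '/tmp/t', compression 'snappy')
  // with no LOCATION clause, the returned pair is
  //   (Map("compression" -> "snappy"), Some("/tmp/t"))
  // whereas specifying both `path` in OPTIONS and a LOCATION clause raises
  // duplicatedTablePathsFoundError above.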
/**
* Create a [[SerdeInfo]] for creating tables.
*
* Format: STORED AS (name | INPUTFORMAT input_format OUTPUTFORMAT output_format)
*/
override def visitCreateFileFormat(ctx: CreateFileFormatContext): SerdeInfo = withOrigin(ctx) {
(ctx.fileFormat, ctx.storageHandler) match {
// Expected format: INPUTFORMAT input_format OUTPUTFORMAT output_format
case (c: TableFileFormatContext, null) =>
SerdeInfo(formatClasses = Some(FormatClasses(string(c.inFmt), string(c.outFmt))))
// Expected format: SEQUENCEFILE | TEXTFILE | RCFILE | ORC | PARQUET | AVRO
case (c: GenericFileFormatContext, null) =>
SerdeInfo(storedAs = Some(c.identifier.getText))
case (null, storageHandler) =>
operationNotAllowed("STORED BY", ctx)
case _ =>
throw QueryParsingErrors.storedAsAndStoredByBothSpecifiedError(ctx)
}
}
/**
* Create a [[SerdeInfo]] used for creating tables.
*
* Example format:
* {{{
* SERDE serde_name [WITH SERDEPROPERTIES (k1=v1, k2=v2, ...)]
* }}}
*
* OR
*
* {{{
* DELIMITED [FIELDS TERMINATED BY char [ESCAPED BY char]]
* [COLLECTION ITEMS TERMINATED BY char]
* [MAP KEYS TERMINATED BY char]
* [LINES TERMINATED BY char]
* [NULL DEFINED AS char]
* }}}
*/
def visitRowFormat(ctx: RowFormatContext): SerdeInfo = withOrigin(ctx) {
ctx match {
case serde: RowFormatSerdeContext => visitRowFormatSerde(serde)
case delimited: RowFormatDelimitedContext => visitRowFormatDelimited(delimited)
}
}
/**
* Create SERDE row format name and properties pair.
*/
override def visitRowFormatSerde(ctx: RowFormatSerdeContext): SerdeInfo = withOrigin(ctx) {
import ctx._
SerdeInfo(
serde = Some(string(name)),
serdeProperties = Option(tablePropertyList).map(visitPropertyKeyValues).getOrElse(Map.empty))
}
/**
* Create a delimited row format properties object.
*/
override def visitRowFormatDelimited(
ctx: RowFormatDelimitedContext): SerdeInfo = withOrigin(ctx) {
// Collect the entries if any.
def entry(key: String, value: Token): Seq[(String, String)] = {
Option(value).toSeq.map(x => key -> string(x))
}
// TODO we need proper support for the NULL format.
val entries =
entry("field.delim", ctx.fieldsTerminatedBy) ++
entry("serialization.format", ctx.fieldsTerminatedBy) ++
entry("escape.delim", ctx.escapedBy) ++
// The following typo is inherited from Hive...
entry("colelction.delim", ctx.collectionItemsTerminatedBy) ++
entry("mapkey.delim", ctx.keysTerminatedBy) ++
Option(ctx.linesSeparatedBy).toSeq.map { token =>
val value = string(token)
validate(
value == "\\n",
s"LINES TERMINATED BY only supports newline '\\\\n' right now: $value",
ctx)
"line.delim" -> value
}
SerdeInfo(serdeProperties = entries.toMap)
}
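  // Illustrative example (added; not part of the original Spark source). A clause like
  //   ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' COLLECTION ITEMS TERMINATED BY '|'
  // yields SerdeInfo(serdeProperties = Map(
  //   "field.delim" -> ",", "serialization.format" -> ",", "colelction.delim" -> "|"))
  // (the field delimiter is recorded under both "field.delim" and "serialization.format", and the
  // misspelled "colelction.delim" key is intentional, inherited from Hive).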
/**
* Throw a [[ParseException]] if the user specified incompatible SerDes through ROW FORMAT
* and STORED AS.
*
* The following are allowed. Anything else is not:
* ROW FORMAT SERDE ... STORED AS [SEQUENCEFILE | RCFILE | TEXTFILE]
* ROW FORMAT DELIMITED ... STORED AS TEXTFILE
* ROW FORMAT ... STORED AS INPUTFORMAT ... OUTPUTFORMAT ...
*/
protected def validateRowFormatFileFormat(
rowFormatCtx: RowFormatContext,
createFileFormatCtx: CreateFileFormatContext,
parentCtx: ParserRuleContext): Unit = {
if (rowFormatCtx == null || createFileFormatCtx == null) {
return
}
(rowFormatCtx, createFileFormatCtx.fileFormat) match {
case (_, ffTable: TableFileFormatContext) => // OK
case (rfSerde: RowFormatSerdeContext, ffGeneric: GenericFileFormatContext) =>
ffGeneric.identifier.getText.toLowerCase(Locale.ROOT) match {
case ("sequencefile" | "textfile" | "rcfile") => // OK
case fmt =>
operationNotAllowed(
s"ROW FORMAT SERDE is incompatible with format '$fmt', which also specifies a serde",
parentCtx)
}
case (rfDelimited: RowFormatDelimitedContext, ffGeneric: GenericFileFormatContext) =>
ffGeneric.identifier.getText.toLowerCase(Locale.ROOT) match {
case "textfile" => // OK
case fmt => operationNotAllowed(
s"ROW FORMAT DELIMITED is only compatible with 'textfile', not '$fmt'", parentCtx)
}
case _ =>
// should never happen
def str(ctx: ParserRuleContext): String = {
(0 until ctx.getChildCount).map { i => ctx.getChild(i).getText }.mkString(" ")
}
operationNotAllowed(
s"Unexpected combination of ${str(rowFormatCtx)} and ${str(createFileFormatCtx)}",
parentCtx)
}
}
protected def validateRowFormatFileFormat(
rowFormatCtx: Seq[RowFormatContext],
createFileFormatCtx: Seq[CreateFileFormatContext],
parentCtx: ParserRuleContext): Unit = {
if (rowFormatCtx.size == 1 && createFileFormatCtx.size == 1) {
validateRowFormatFileFormat(rowFormatCtx.head, createFileFormatCtx.head, parentCtx)
}
}
override def visitCreateTableClauses(ctx: CreateTableClausesContext): TableClauses = {
checkDuplicateClauses(ctx.TBLPROPERTIES, "TBLPROPERTIES", ctx)
checkDuplicateClauses(ctx.OPTIONS, "OPTIONS", ctx)
checkDuplicateClauses(ctx.PARTITIONED, "PARTITIONED BY", ctx)
checkDuplicateClauses(ctx.createFileFormat, "STORED AS/BY", ctx)
checkDuplicateClauses(ctx.rowFormat, "ROW FORMAT", ctx)
checkDuplicateClauses(ctx.commentSpec(), "COMMENT", ctx)
checkDuplicateClauses(ctx.bucketSpec(), "CLUSTERED BY", ctx)
checkDuplicateClauses(ctx.locationSpec, "LOCATION", ctx)
if (ctx.skewSpec.size > 0) {
operationNotAllowed("CREATE TABLE ... SKEWED BY", ctx)
}
val (partTransforms, partCols) =
Option(ctx.partitioning).map(visitPartitionFieldList).getOrElse((Nil, Nil))
val bucketSpec = ctx.bucketSpec().asScala.headOption.map(visitBucketSpec)
val properties = Option(ctx.tableProps).map(visitPropertyKeyValues).getOrElse(Map.empty)
val cleanedProperties = cleanTableProperties(ctx, properties)
val options = Option(ctx.options).map(visitPropertyKeyValues).getOrElse(Map.empty)
val location = visitLocationSpecList(ctx.locationSpec())
val (cleanedOptions, newLocation) = cleanTableOptions(ctx, options, location)
val comment = visitCommentSpecList(ctx.commentSpec())
val serdeInfo =
getSerdeInfo(ctx.rowFormat.asScala.toSeq, ctx.createFileFormat.asScala.toSeq, ctx)
(partTransforms, partCols, bucketSpec, cleanedProperties, cleanedOptions, newLocation, comment,
serdeInfo)
}
protected def getSerdeInfo(
rowFormatCtx: Seq[RowFormatContext],
createFileFormatCtx: Seq[CreateFileFormatContext],
ctx: ParserRuleContext): Option[SerdeInfo] = {
validateRowFormatFileFormat(rowFormatCtx, createFileFormatCtx, ctx)
val rowFormatSerdeInfo = rowFormatCtx.map(visitRowFormat)
val fileFormatSerdeInfo = createFileFormatCtx.map(visitCreateFileFormat)
(fileFormatSerdeInfo ++ rowFormatSerdeInfo).reduceLeftOption((l, r) => l.merge(r))
}
private def partitionExpressions(
partTransforms: Seq[Transform],
partCols: Seq[StructField],
ctx: ParserRuleContext): Seq[Transform] = {
if (partTransforms.nonEmpty) {
if (partCols.nonEmpty) {
val references = partTransforms.map(_.describe()).mkString(", ")
val columns = partCols
.map(field => s"${field.name} ${field.dataType.simpleString}")
.mkString(", ")
operationNotAllowed(
s"""PARTITION BY: Cannot mix partition expressions and partition columns:
|Expressions: $references
|Columns: $columns""".stripMargin, ctx)
}
partTransforms
} else {
      // Columns were added to create the schema; convert them to column references.
partCols.map { column =>
IdentityTransform(FieldReference(Seq(column.name)))
}
}
}
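  // Illustrative example (added; not part of the original Spark source). With no transforms and
  // partition columns `region STRING, dt DATE`, this returns
  //   Seq(IdentityTransform(FieldReference(Seq("region"))), IdentityTransform(FieldReference(Seq("dt"))))
  // while supplying both transforms and typed partition columns fails with the
  // "Cannot mix partition expressions and partition columns" error above.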
/**
* Create a table, returning a [[CreateTableStatement]] logical plan.
*
* Expected format:
* {{{
* CREATE [TEMPORARY] TABLE [IF NOT EXISTS] [db_name.]table_name
* [USING table_provider]
* create_table_clauses
* [[AS] select_statement];
*
* create_table_clauses (order insensitive):
* [PARTITIONED BY (partition_fields)]
* [OPTIONS table_property_list]
* [ROW FORMAT row_format]
* [STORED AS file_format]
* [CLUSTERED BY (col_name, col_name, ...)
* [SORTED BY (col_name [ASC|DESC], ...)]
* INTO num_buckets BUCKETS
* ]
* [LOCATION path]
* [COMMENT table_comment]
* [TBLPROPERTIES (property_name=property_value, ...)]
*
* partition_fields:
* col_name, transform(col_name), transform(constant, col_name), ... |
* col_name data_type [NOT NULL] [COMMENT col_comment], ...
* }}}
*/
override def visitCreateTable(ctx: CreateTableContext): LogicalPlan = withOrigin(ctx) {
val (table, temp, ifNotExists, external) = visitCreateTableHeader(ctx.createTableHeader)
val columns = Option(ctx.colTypeList()).map(visitColTypeList).getOrElse(Nil)
val provider = Option(ctx.tableProvider).map(_.multipartIdentifier.getText)
val (partTransforms, partCols, bucketSpec, properties, options, location, comment, serdeInfo) =
visitCreateTableClauses(ctx.createTableClauses())
if (provider.isDefined && serdeInfo.isDefined) {
operationNotAllowed(s"CREATE TABLE ... USING ... ${serdeInfo.get.describe}", ctx)
}
if (temp) {
val asSelect = if (ctx.query == null) "" else " AS ..."
operationNotAllowed(
s"CREATE TEMPORARY TABLE ...$asSelect, use CREATE TEMPORARY VIEW instead", ctx)
}
val partitioning = partitionExpressions(partTransforms, partCols, ctx)
Option(ctx.query).map(plan) match {
case Some(_) if columns.nonEmpty =>
operationNotAllowed(
"Schema may not be specified in a Create Table As Select (CTAS) statement",
ctx)
case Some(_) if partCols.nonEmpty =>
// non-reference partition columns are not allowed because schema can't be specified
operationNotAllowed(
"Partition column types may not be specified in Create Table As Select (CTAS)",
ctx)
case Some(query) =>
CreateTableAsSelectStatement(
table, query, partitioning, bucketSpec, properties, provider, options, location, comment,
writeOptions = Map.empty, serdeInfo, external = external, ifNotExists = ifNotExists)
case _ =>
// Note: table schema includes both the table columns list and the partition columns
// with data type.
val schema = StructType(columns ++ partCols)
CreateTableStatement(table, schema, partitioning, bucketSpec, properties, provider,
options, location, comment, serdeInfo, external = external, ifNotExists = ifNotExists)
}
}
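  // Illustrative examples (added; not part of the original Spark source). A sketch of the two
  // branches above:
  //   CREATE TABLE t (id INT, region STRING) USING parquet PARTITIONED BY (region)
  //     -> CreateTableStatement with schema `id INT, region STRING` and an identity transform on
  //        `region` (the column is referenced, not re-declared, so it stays in the schema).
  //   CREATE TABLE t USING parquet AS SELECT ...
  //     -> CreateTableAsSelectStatement; specifying a column list or typed partition columns
  //        together with AS SELECT is rejected above.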
/**
* Replace a table, returning a [[ReplaceTableStatement]] logical plan.
*
* Expected format:
* {{{
* [CREATE OR] REPLACE TABLE [db_name.]table_name
* [USING table_provider]
* replace_table_clauses
* [[AS] select_statement];
*
* replace_table_clauses (order insensitive):
* [OPTIONS table_property_list]
* [PARTITIONED BY (partition_fields)]
* [CLUSTERED BY (col_name, col_name, ...)
* [SORTED BY (col_name [ASC|DESC], ...)]
* INTO num_buckets BUCKETS
* ]
* [LOCATION path]
* [COMMENT table_comment]
* [TBLPROPERTIES (property_name=property_value, ...)]
*
* partition_fields:
* col_name, transform(col_name), transform(constant, col_name), ... |
* col_name data_type [NOT NULL] [COMMENT col_comment], ...
* }}}
*/
override def visitReplaceTable(ctx: ReplaceTableContext): LogicalPlan = withOrigin(ctx) {
val (table, temp, ifNotExists, external) = visitReplaceTableHeader(ctx.replaceTableHeader)
val orCreate = ctx.replaceTableHeader().CREATE() != null
if (temp) {
val action = if (orCreate) "CREATE OR REPLACE" else "REPLACE"
operationNotAllowed(s"$action TEMPORARY TABLE ..., use $action TEMPORARY VIEW instead.", ctx)
}
if (external) {
operationNotAllowed("REPLACE EXTERNAL TABLE ...", ctx)
}
if (ifNotExists) {
operationNotAllowed("REPLACE ... IF NOT EXISTS, use CREATE IF NOT EXISTS instead", ctx)
}
val (partTransforms, partCols, bucketSpec, properties, options, location, comment, serdeInfo) =
visitCreateTableClauses(ctx.createTableClauses())
val columns = Option(ctx.colTypeList()).map(visitColTypeList).getOrElse(Nil)
val provider = Option(ctx.tableProvider).map(_.multipartIdentifier.getText)
if (provider.isDefined && serdeInfo.isDefined) {
operationNotAllowed(s"CREATE TABLE ... USING ... ${serdeInfo.get.describe}", ctx)
}
val partitioning = partitionExpressions(partTransforms, partCols, ctx)
Option(ctx.query).map(plan) match {
case Some(_) if columns.nonEmpty =>
operationNotAllowed(
"Schema may not be specified in a Replace Table As Select (RTAS) statement",
ctx)
case Some(_) if partCols.nonEmpty =>
// non-reference partition columns are not allowed because schema can't be specified
operationNotAllowed(
"Partition column types may not be specified in Replace Table As Select (RTAS)",
ctx)
case Some(query) =>
ReplaceTableAsSelectStatement(table, query, partitioning, bucketSpec, properties,
provider, options, location, comment, writeOptions = Map.empty, serdeInfo,
orCreate = orCreate)
case _ =>
// Note: table schema includes both the table columns list and the partition columns
// with data type.
val schema = StructType(columns ++ partCols)
ReplaceTableStatement(table, schema, partitioning, bucketSpec, properties, provider,
options, location, comment, serdeInfo, orCreate = orCreate)
}
}
/**
* Create a [[DropTable]] command.
*/
override def visitDropTable(ctx: DropTableContext): LogicalPlan = withOrigin(ctx) {
// DROP TABLE works with either a table or a temporary view.
DropTable(
createUnresolvedTableOrView(ctx.multipartIdentifier(), "DROP TABLE"),
ctx.EXISTS != null,
ctx.PURGE != null)
}
/**
* Create a [[DropView]] command.
*/
override def visitDropView(ctx: DropViewContext): AnyRef = withOrigin(ctx) {
DropView(
createUnresolvedView(
ctx.multipartIdentifier(),
commandName = "DROP VIEW",
allowTemp = true,
relationTypeMismatchHint = Some("Please use DROP TABLE instead.")),
ctx.EXISTS != null)
}
/**
* Create a [[UseStatement]] logical plan.
*/
override def visitUse(ctx: UseContext): LogicalPlan = withOrigin(ctx) {
val nameParts = visitMultipartIdentifier(ctx.multipartIdentifier)
UseStatement(ctx.NAMESPACE != null, nameParts)
}
/**
* Create a [[ShowCurrentNamespaceStatement]].
*/
override def visitShowCurrentNamespace(
ctx: ShowCurrentNamespaceContext) : LogicalPlan = withOrigin(ctx) {
ShowCurrentNamespaceStatement()
}
/**
* Create a [[ShowTables]] command.
*/
override def visitShowTables(ctx: ShowTablesContext): LogicalPlan = withOrigin(ctx) {
val multiPart = Option(ctx.multipartIdentifier).map(visitMultipartIdentifier)
ShowTables(
UnresolvedNamespace(multiPart.getOrElse(Seq.empty[String])),
Option(ctx.pattern).map(string))
}
/**
* Create a [[ShowTableExtended]] command.
*/
override def visitShowTableExtended(
ctx: ShowTableExtendedContext): LogicalPlan = withOrigin(ctx) {
val multiPart = Option(ctx.multipartIdentifier).map(visitMultipartIdentifier)
val partitionKeys = Option(ctx.partitionSpec).map { specCtx =>
UnresolvedPartitionSpec(visitNonOptionalPartitionSpec(specCtx), None)
}
ShowTableExtended(
UnresolvedNamespace(multiPart.getOrElse(Seq.empty[String])),
string(ctx.pattern),
partitionKeys)
}
/**
* Create a [[ShowViews]] command.
*/
override def visitShowViews(ctx: ShowViewsContext): LogicalPlan = withOrigin(ctx) {
val multiPart = Option(ctx.multipartIdentifier).map(visitMultipartIdentifier)
ShowViews(
UnresolvedNamespace(multiPart.getOrElse(Seq.empty[String])),
Option(ctx.pattern).map(string))
}
override def visitColPosition(ctx: ColPositionContext): ColumnPosition = {
ctx.position.getType match {
case SqlBaseParser.FIRST => ColumnPosition.first()
case SqlBaseParser.AFTER => ColumnPosition.after(ctx.afterCol.getText)
}
}
/**
* Parse new column info from ADD COLUMN into a QualifiedColType.
*/
override def visitQualifiedColTypeWithPosition(
ctx: QualifiedColTypeWithPositionContext): QualifiedColType = withOrigin(ctx) {
QualifiedColType(
name = typedVisit[Seq[String]](ctx.name),
dataType = typedVisit[DataType](ctx.dataType),
nullable = ctx.NULL == null,
comment = Option(ctx.commentSpec()).map(visitCommentSpec),
position = Option(ctx.colPosition).map(typedVisit[ColumnPosition]))
}
/**
 * Parse an [[AlterTableAddColumnsStatement]] command.
*
* For example:
* {{{
* ALTER TABLE table1
* ADD COLUMNS (col_name data_type [COMMENT col_comment], ...);
* }}}
*/
override def visitAddTableColumns(ctx: AddTableColumnsContext): LogicalPlan = withOrigin(ctx) {
AlterTableAddColumnsStatement(
visitMultipartIdentifier(ctx.multipartIdentifier),
ctx.columns.qualifiedColTypeWithPosition.asScala.map(typedVisit[QualifiedColType]).toSeq
)
}
/**
 * Parse an [[AlterTableRenameColumnStatement]] command.
*
* For example:
* {{{
* ALTER TABLE table1 RENAME COLUMN a.b.c TO x
* }}}
*/
override def visitRenameTableColumn(
ctx: RenameTableColumnContext): LogicalPlan = withOrigin(ctx) {
AlterTableRenameColumnStatement(
visitMultipartIdentifier(ctx.table),
ctx.from.parts.asScala.map(_.getText).toSeq,
ctx.to.getText)
}
/**
 * Parse an [[AlterTableAlterColumnStatement]] command to alter a column's property.
*
* For example:
* {{{
* ALTER TABLE table1 ALTER COLUMN a.b.c TYPE bigint
* ALTER TABLE table1 ALTER COLUMN a.b.c SET NOT NULL
* ALTER TABLE table1 ALTER COLUMN a.b.c DROP NOT NULL
* ALTER TABLE table1 ALTER COLUMN a.b.c COMMENT 'new comment'
* ALTER TABLE table1 ALTER COLUMN a.b.c FIRST
* ALTER TABLE table1 ALTER COLUMN a.b.c AFTER x
* }}}
*/
override def visitAlterTableAlterColumn(
ctx: AlterTableAlterColumnContext): LogicalPlan = withOrigin(ctx) {
val action = ctx.alterColumnAction
if (action == null) {
val verb = if (ctx.CHANGE != null) "CHANGE" else "ALTER"
operationNotAllowed(
s"ALTER TABLE table $verb COLUMN requires a TYPE, a SET/DROP, a COMMENT, or a FIRST/AFTER",
ctx)
}
val dataType = if (action.dataType != null) {
Some(typedVisit[DataType](action.dataType))
} else {
None
}
val nullable = if (action.setOrDrop != null) {
action.setOrDrop.getType match {
case SqlBaseParser.SET => Some(false)
case SqlBaseParser.DROP => Some(true)
}
} else {
None
}
val comment = if (action.commentSpec != null) {
Some(visitCommentSpec(action.commentSpec()))
} else {
None
}
val position = if (action.colPosition != null) {
Some(typedVisit[ColumnPosition](action.colPosition))
} else {
None
}
assert(Seq(dataType, nullable, comment, position).count(_.nonEmpty) == 1)
AlterTableAlterColumnStatement(
visitMultipartIdentifier(ctx.table),
typedVisit[Seq[String]](ctx.column),
dataType = dataType,
nullable = nullable,
comment = comment,
position = position)
}
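  // Illustrative examples (added; not part of the original Spark source). Exactly one action is
  // populated per statement, e.g.:
  //   ALTER TABLE t ALTER COLUMN a.b.c TYPE BIGINT
  //     -> AlterTableAlterColumnStatement(Seq("t"), Seq("a", "b", "c"),
  //          dataType = Some(LongType), nullable = None, comment = None, position = None)
  //   ALTER TABLE t ALTER COLUMN a.b.c DROP NOT NULL
  //     -> nullable = Some(true), with all other actions left as None.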
/**
 * Parse an [[AlterTableAlterColumnStatement]] command. This is Hive SQL syntax.
*
* For example:
* {{{
* ALTER TABLE table [PARTITION partition_spec]
* CHANGE [COLUMN] column_old_name column_new_name column_dataType [COMMENT column_comment]
* [FIRST | AFTER column_name];
* }}}
*/
override def visitHiveChangeColumn(ctx: HiveChangeColumnContext): LogicalPlan = withOrigin(ctx) {
if (ctx.partitionSpec != null) {
operationNotAllowed("ALTER TABLE table PARTITION partition_spec CHANGE COLUMN", ctx)
}
val columnNameParts = typedVisit[Seq[String]](ctx.colName)
if (!conf.resolver(columnNameParts.last, ctx.colType().colName.getText)) {
throw QueryParsingErrors.operationInHiveStyleCommandUnsupportedError("Renaming column",
"ALTER COLUMN", ctx, Some("please run RENAME COLUMN instead"))
}
if (ctx.colType.NULL != null) {
throw QueryParsingErrors.operationInHiveStyleCommandUnsupportedError(
"NOT NULL", "ALTER COLUMN", ctx,
Some("please run ALTER COLUMN ... SET/DROP NOT NULL instead"))
}
AlterTableAlterColumnStatement(
typedVisit[Seq[String]](ctx.table),
columnNameParts,
dataType = Option(ctx.colType().dataType()).map(typedVisit[DataType]),
nullable = None,
comment = Option(ctx.colType().commentSpec()).map(visitCommentSpec),
position = Option(ctx.colPosition).map(typedVisit[ColumnPosition]))
}
override def visitHiveReplaceColumns(
ctx: HiveReplaceColumnsContext): LogicalPlan = withOrigin(ctx) {
if (ctx.partitionSpec != null) {
operationNotAllowed("ALTER TABLE table PARTITION partition_spec REPLACE COLUMNS", ctx)
}
AlterTableReplaceColumnsStatement(
visitMultipartIdentifier(ctx.multipartIdentifier),
ctx.columns.qualifiedColTypeWithPosition.asScala.map { colType =>
if (colType.NULL != null) {
throw QueryParsingErrors.operationInHiveStyleCommandUnsupportedError(
"NOT NULL", "REPLACE COLUMNS", ctx)
}
if (colType.colPosition != null) {
throw QueryParsingErrors.operationInHiveStyleCommandUnsupportedError(
"Column position", "REPLACE COLUMNS", ctx)
}
typedVisit[QualifiedColType](colType)
}.toSeq
)
}
/**
 * Parse an [[AlterTableDropColumnsStatement]] command.
*
* For example:
* {{{
* ALTER TABLE table1 DROP COLUMN a.b.c
* ALTER TABLE table1 DROP COLUMNS a.b.c, x, y
* }}}
*/
override def visitDropTableColumns(
ctx: DropTableColumnsContext): LogicalPlan = withOrigin(ctx) {
val columnsToDrop = ctx.columns.multipartIdentifier.asScala.map(typedVisit[Seq[String]])
AlterTableDropColumnsStatement(
visitMultipartIdentifier(ctx.multipartIdentifier),
columnsToDrop.toSeq)
}
/**
* Parse [[AlterViewSetProperties]] or [[AlterTableSetProperties]] commands.
*
* For example:
* {{{
* ALTER TABLE table SET TBLPROPERTIES ('table_property' = 'property_value');
* ALTER VIEW view SET TBLPROPERTIES ('table_property' = 'property_value');
* }}}
*/
override def visitSetTableProperties(
ctx: SetTablePropertiesContext): LogicalPlan = withOrigin(ctx) {
val properties = visitPropertyKeyValues(ctx.tablePropertyList)
val cleanedTableProperties = cleanTableProperties(ctx, properties)
if (ctx.VIEW != null) {
AlterViewSetProperties(
createUnresolvedView(
ctx.multipartIdentifier,
commandName = "ALTER VIEW ... SET TBLPROPERTIES",
allowTemp = false,
relationTypeMismatchHint = alterViewTypeMismatchHint),
cleanedTableProperties)
} else {
AlterTableSetProperties(
createUnresolvedTable(
ctx.multipartIdentifier,
"ALTER TABLE ... SET TBLPROPERTIES",
alterTableTypeMismatchHint),
cleanedTableProperties)
}
}
/**
* Parse [[AlterViewUnsetProperties]] or [[AlterTableUnsetProperties]] commands.
*
* For example:
* {{{
* ALTER TABLE table UNSET TBLPROPERTIES [IF EXISTS] ('comment', 'key');
* ALTER VIEW view UNSET TBLPROPERTIES [IF EXISTS] ('comment', 'key');
* }}}
*/
override def visitUnsetTableProperties(
ctx: UnsetTablePropertiesContext): LogicalPlan = withOrigin(ctx) {
val properties = visitPropertyKeys(ctx.tablePropertyList)
val cleanedProperties = cleanTableProperties(ctx, properties.map(_ -> "").toMap).keys.toSeq
val ifExists = ctx.EXISTS != null
if (ctx.VIEW != null) {
AlterViewUnsetProperties(
createUnresolvedView(
ctx.multipartIdentifier,
commandName = "ALTER VIEW ... UNSET TBLPROPERTIES",
allowTemp = false,
relationTypeMismatchHint = alterViewTypeMismatchHint),
cleanedProperties,
ifExists)
} else {
AlterTableUnsetProperties(
createUnresolvedTable(
ctx.multipartIdentifier,
"ALTER TABLE ... UNSET TBLPROPERTIES",
alterTableTypeMismatchHint),
cleanedProperties,
ifExists)
}
}
/**
* Create an [[AlterTableSetLocation]] command.
*
* For example:
* {{{
* ALTER TABLE table_name [PARTITION partition_spec] SET LOCATION "loc";
* }}}
*/
override def visitSetTableLocation(ctx: SetTableLocationContext): LogicalPlan = withOrigin(ctx) {
AlterTableSetLocation(
createUnresolvedTable(
ctx.multipartIdentifier,
"ALTER TABLE ... SET LOCATION ...",
alterTableTypeMismatchHint),
Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec),
visitLocationSpec(ctx.locationSpec))
}
/**
 * Create a [[DescribeColumn]] or a [[DescribeRelation]] command.
*/
override def visitDescribeRelation(ctx: DescribeRelationContext): LogicalPlan = withOrigin(ctx) {
val isExtended = ctx.EXTENDED != null || ctx.FORMATTED != null
val relation = createUnresolvedTableOrView(
ctx.multipartIdentifier(),
"DESCRIBE TABLE")
if (ctx.describeColName != null) {
if (ctx.partitionSpec != null) {
throw QueryParsingErrors.descColumnForPartitionUnsupportedError(ctx)
} else {
DescribeColumn(
relation,
UnresolvedAttribute(ctx.describeColName.nameParts.asScala.map(_.getText).toSeq),
isExtended)
}
} else {
val partitionSpec = if (ctx.partitionSpec != null) {
// According to the syntax, visitPartitionSpec returns `Map[String, Option[String]]`.
visitPartitionSpec(ctx.partitionSpec).map {
case (key, Some(value)) => key -> value
case (key, _) =>
throw QueryParsingErrors.incompletePartitionSpecificationError(key, ctx)
}
} else {
Map.empty[String, String]
}
DescribeRelation(relation, partitionSpec, isExtended)
}
}
/**
* Create an [[AnalyzeTable]], or an [[AnalyzeColumn]].
* Example SQL for analyzing a table or a set of partitions :
* {{{
* ANALYZE TABLE multi_part_name [PARTITION (partcol1[=val1], partcol2[=val2], ...)]
* COMPUTE STATISTICS [NOSCAN];
* }}}
*
* Example SQL for analyzing columns :
* {{{
* ANALYZE TABLE multi_part_name COMPUTE STATISTICS FOR COLUMNS column1, column2;
* }}}
*
* Example SQL for analyzing all columns of a table:
* {{{
* ANALYZE TABLE multi_part_name COMPUTE STATISTICS FOR ALL COLUMNS;
* }}}
*/
override def visitAnalyze(ctx: AnalyzeContext): LogicalPlan = withOrigin(ctx) {
def checkPartitionSpec(): Unit = {
if (ctx.partitionSpec != null) {
logWarning("Partition specification is ignored when collecting column statistics: " +
ctx.partitionSpec.getText)
}
}
if (ctx.identifier != null &&
ctx.identifier.getText.toLowerCase(Locale.ROOT) != "noscan") {
throw QueryParsingErrors.computeStatisticsNotExpectedError(ctx.identifier())
}
if (ctx.ALL() != null) {
checkPartitionSpec()
AnalyzeColumn(
createUnresolvedTableOrView(
ctx.multipartIdentifier(),
"ANALYZE TABLE ... FOR ALL COLUMNS"),
None,
allColumns = true)
} else if (ctx.identifierSeq() == null) {
val partitionSpec = if (ctx.partitionSpec != null) {
visitPartitionSpec(ctx.partitionSpec)
} else {
Map.empty[String, Option[String]]
}
AnalyzeTable(
createUnresolvedTableOrView(
ctx.multipartIdentifier(),
"ANALYZE TABLE",
allowTempView = false),
partitionSpec,
noScan = ctx.identifier != null)
} else {
checkPartitionSpec()
AnalyzeColumn(
createUnresolvedTableOrView(
ctx.multipartIdentifier(),
"ANALYZE TABLE ... FOR COLUMNS ..."),
Option(visitIdentifierSeq(ctx.identifierSeq())),
allColumns = false)
}
}
/**
* Create a [[RepairTable]].
*
* For example:
* {{{
* MSCK REPAIR TABLE multi_part_name
* }}}
*/
override def visitRepairTable(ctx: RepairTableContext): LogicalPlan = withOrigin(ctx) {
RepairTable(createUnresolvedTable(ctx.multipartIdentifier, "MSCK REPAIR TABLE"))
}
/**
* Create a [[LoadData]].
*
* For example:
* {{{
* LOAD DATA [LOCAL] INPATH 'filepath' [OVERWRITE] INTO TABLE multi_part_name
* [PARTITION (partcol1=val1, partcol2=val2 ...)]
* }}}
*/
override def visitLoadData(ctx: LoadDataContext): LogicalPlan = withOrigin(ctx) {
LoadData(
child = createUnresolvedTable(ctx.multipartIdentifier, "LOAD DATA"),
path = string(ctx.path),
isLocal = ctx.LOCAL != null,
isOverwrite = ctx.OVERWRITE != null,
partition = Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec)
)
}
/**
 * Create a [[ShowCreateTable]] command.
*/
override def visitShowCreateTable(ctx: ShowCreateTableContext): LogicalPlan = withOrigin(ctx) {
ShowCreateTable(
createUnresolvedTableOrView(
ctx.multipartIdentifier(),
"SHOW CREATE TABLE",
allowTempView = false),
ctx.SERDE != null)
}
/**
* Create a [[CacheTable]] or [[CacheTableAsSelect]].
*
* For example:
* {{{
* CACHE [LAZY] TABLE multi_part_name
* [OPTIONS tablePropertyList] [[AS] query]
* }}}
*/
override def visitCacheTable(ctx: CacheTableContext): LogicalPlan = withOrigin(ctx) {
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
val query = Option(ctx.query).map(plan)
val relation = createUnresolvedRelation(ctx.multipartIdentifier)
val tableName = relation.multipartIdentifier
if (query.isDefined && tableName.length > 1) {
val catalogAndNamespace = tableName.init
throw QueryParsingErrors.addCatalogInCacheTableAsSelectNotAllowedError(
catalogAndNamespace.quoted, ctx)
}
val options = Option(ctx.options).map(visitPropertyKeyValues).getOrElse(Map.empty)
val isLazy = ctx.LAZY != null
if (query.isDefined) {
CacheTableAsSelect(tableName.head, query.get, source(ctx.query()), isLazy, options)
} else {
CacheTable(relation, tableName, isLazy, options)
}
}
/**
* Create an [[UncacheTable]] logical plan.
*/
override def visitUncacheTable(ctx: UncacheTableContext): LogicalPlan = withOrigin(ctx) {
UncacheTable(
createUnresolvedRelation(ctx.multipartIdentifier),
ctx.EXISTS != null)
}
/**
* Create a [[TruncateTable]] command.
*
* For example:
* {{{
* TRUNCATE TABLE multi_part_name [PARTITION (partcol1=val1, partcol2=val2 ...)]
* }}}
*/
override def visitTruncateTable(ctx: TruncateTableContext): LogicalPlan = withOrigin(ctx) {
TruncateTable(
createUnresolvedTable(ctx.multipartIdentifier, "TRUNCATE TABLE"),
Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec))
}
/**
 * A command for users to list the partition names of a table. If a partition spec is specified,
 * only the partitions that match the spec are returned; otherwise all partitions of the table
 * are returned.
 *
 * This function creates a [[ShowPartitions]] logical plan.
*
* The syntax of using this command in SQL is:
* {{{
* SHOW PARTITIONS multi_part_name [partition_spec];
* }}}
*/
override def visitShowPartitions(ctx: ShowPartitionsContext): LogicalPlan = withOrigin(ctx) {
val partitionKeys = Option(ctx.partitionSpec).map { specCtx =>
UnresolvedPartitionSpec(visitNonOptionalPartitionSpec(specCtx), None)
}
ShowPartitions(
createUnresolvedTable(ctx.multipartIdentifier(), "SHOW PARTITIONS"),
partitionKeys)
}
/**
* Create a [[RefreshTable]].
*
* For example:
* {{{
* REFRESH TABLE multi_part_name
* }}}
*/
override def visitRefreshTable(ctx: RefreshTableContext): LogicalPlan = withOrigin(ctx) {
RefreshTable(
createUnresolvedTableOrView(
ctx.multipartIdentifier(),
"REFRESH TABLE"))
}
/**
* A command for users to list the column names for a table.
* This function creates a [[ShowColumns]] logical plan.
*
* The syntax of using this command in SQL is:
* {{{
* SHOW COLUMNS (FROM | IN) tableName=multipartIdentifier
* ((FROM | IN) namespace=multipartIdentifier)?
* }}}
*/
override def visitShowColumns(ctx: ShowColumnsContext): LogicalPlan = withOrigin(ctx) {
val table = createUnresolvedTableOrView(ctx.table, "SHOW COLUMNS")
val namespace = Option(ctx.ns).map(visitMultipartIdentifier)
// Use namespace only if table name doesn't specify it. If namespace is already specified
// in the table name, it's checked against the given namespace after table/view is resolved.
val tableWithNamespace = if (namespace.isDefined && table.multipartIdentifier.length == 1) {
CurrentOrigin.withOrigin(table.origin) {
table.copy(multipartIdentifier = namespace.get ++ table.multipartIdentifier)
}
} else {
table
}
ShowColumns(tableWithNamespace, namespace)
}
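  // Illustrative example (added; not part of the original Spark source). For
  //   SHOW COLUMNS IN t FROM db
  // the unqualified table name `t` is rewritten to `db.t` before resolution, while for
  //   SHOW COLUMNS IN db1.t FROM db2
  // the table name is kept as `db1.t` and the namespace mismatch is checked after resolution.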
/**
* Create an [[AlterTableRecoverPartitions]]
*
* For example:
* {{{
* ALTER TABLE multi_part_name RECOVER PARTITIONS;
* }}}
*/
override def visitRecoverPartitions(
ctx: RecoverPartitionsContext): LogicalPlan = withOrigin(ctx) {
AlterTableRecoverPartitions(
createUnresolvedTable(
ctx.multipartIdentifier,
"ALTER TABLE ... RECOVER PARTITIONS",
alterTableTypeMismatchHint))
}
/**
* Create an [[AlterTableAddPartition]].
*
* For example:
* {{{
* ALTER TABLE multi_part_name ADD [IF NOT EXISTS] PARTITION spec [LOCATION 'loc1']
* ALTER VIEW multi_part_name ADD [IF NOT EXISTS] PARTITION spec
* }}}
*
* ALTER VIEW ... ADD PARTITION ... is not supported because the concept of partitioning
* is associated with physical tables
*/
override def visitAddTablePartition(
ctx: AddTablePartitionContext): LogicalPlan = withOrigin(ctx) {
if (ctx.VIEW != null) {
operationNotAllowed("ALTER VIEW ... ADD PARTITION", ctx)
}
// Create partition spec to location mapping.
val specsAndLocs = ctx.partitionSpecLocation.asScala.map { splCtx =>
val spec = visitNonOptionalPartitionSpec(splCtx.partitionSpec)
val location = Option(splCtx.locationSpec).map(visitLocationSpec)
UnresolvedPartitionSpec(spec, location)
}
AlterTableAddPartition(
createUnresolvedTable(
ctx.multipartIdentifier,
"ALTER TABLE ... ADD PARTITION ...",
alterTableTypeMismatchHint),
specsAndLocs.toSeq,
ctx.EXISTS != null)
}
/**
* Create an [[AlterTableRenamePartition]]
*
* For example:
* {{{
* ALTER TABLE multi_part_name PARTITION spec1 RENAME TO PARTITION spec2;
* }}}
*/
override def visitRenameTablePartition(
ctx: RenameTablePartitionContext): LogicalPlan = withOrigin(ctx) {
AlterTableRenamePartition(
createUnresolvedTable(
ctx.multipartIdentifier,
"ALTER TABLE ... RENAME TO PARTITION",
alterTableTypeMismatchHint),
UnresolvedPartitionSpec(visitNonOptionalPartitionSpec(ctx.from)),
UnresolvedPartitionSpec(visitNonOptionalPartitionSpec(ctx.to)))
}
/**
* Create an [[AlterTableDropPartition]]
*
* For example:
* {{{
* ALTER TABLE multi_part_name DROP [IF EXISTS] PARTITION spec1[, PARTITION spec2, ...]
* [PURGE];
* ALTER VIEW view DROP [IF EXISTS] PARTITION spec1[, PARTITION spec2, ...];
* }}}
*
* ALTER VIEW ... DROP PARTITION ... is not supported because the concept of partitioning
* is associated with physical tables
*/
override def visitDropTablePartitions(
ctx: DropTablePartitionsContext): LogicalPlan = withOrigin(ctx) {
if (ctx.VIEW != null) {
operationNotAllowed("ALTER VIEW ... DROP PARTITION", ctx)
}
val partSpecs = ctx.partitionSpec.asScala.map(visitNonOptionalPartitionSpec)
.map(spec => UnresolvedPartitionSpec(spec))
AlterTableDropPartition(
createUnresolvedTable(
ctx.multipartIdentifier,
"ALTER TABLE ... DROP PARTITION ...",
alterTableTypeMismatchHint),
partSpecs.toSeq,
ifExists = ctx.EXISTS != null,
purge = ctx.PURGE != null)
}
/**
* Create an [[AlterTableSerDeProperties]]
*
* For example:
* {{{
* ALTER TABLE multi_part_name [PARTITION spec] SET SERDE serde_name
* [WITH SERDEPROPERTIES props];
* ALTER TABLE multi_part_name [PARTITION spec] SET SERDEPROPERTIES serde_properties;
* }}}
*/
override def visitSetTableSerDe(ctx: SetTableSerDeContext): LogicalPlan = withOrigin(ctx) {
AlterTableSerDeProperties(
createUnresolvedTable(
ctx.multipartIdentifier,
"ALTER TABLE ... SET [SERDE|SERDEPROPERTIES]",
alterTableTypeMismatchHint),
Option(ctx.STRING).map(string),
Option(ctx.tablePropertyList).map(visitPropertyKeyValues),
// TODO a partition spec is allowed to have optional values. This is currently violated.
Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec))
}
/**
* Create or replace a view. This creates a [[CreateViewStatement]]
*
* For example:
* {{{
* CREATE [OR REPLACE] [[GLOBAL] TEMPORARY] VIEW [IF NOT EXISTS] multi_part_name
* [(column_name [COMMENT column_comment], ...) ]
* create_view_clauses
*
* AS SELECT ...;
*
* create_view_clauses (order insensitive):
* [COMMENT view_comment]
* [TBLPROPERTIES (property_name = property_value, ...)]
* }}}
*/
override def visitCreateView(ctx: CreateViewContext): LogicalPlan = withOrigin(ctx) {
if (!ctx.identifierList.isEmpty) {
operationNotAllowed("CREATE VIEW ... PARTITIONED ON", ctx)
}
checkDuplicateClauses(ctx.commentSpec(), "COMMENT", ctx)
checkDuplicateClauses(ctx.PARTITIONED, "PARTITIONED ON", ctx)
checkDuplicateClauses(ctx.TBLPROPERTIES, "TBLPROPERTIES", ctx)
val userSpecifiedColumns = Option(ctx.identifierCommentList).toSeq.flatMap { icl =>
icl.identifierComment.asScala.map { ic =>
ic.identifier.getText -> Option(ic.commentSpec()).map(visitCommentSpec)
}
}
val properties = ctx.tablePropertyList.asScala.headOption.map(visitPropertyKeyValues)
.getOrElse(Map.empty)
if (ctx.TEMPORARY != null && !properties.isEmpty) {
operationNotAllowed("TBLPROPERTIES can't coexist with CREATE TEMPORARY VIEW", ctx)
}
val viewType = if (ctx.TEMPORARY == null) {
PersistedView
} else if (ctx.GLOBAL != null) {
GlobalTempView
} else {
LocalTempView
}
CreateViewStatement(
visitMultipartIdentifier(ctx.multipartIdentifier),
userSpecifiedColumns,
visitCommentSpecList(ctx.commentSpec()),
properties,
Option(source(ctx.query)),
plan(ctx.query),
ctx.EXISTS != null,
ctx.REPLACE != null,
viewType)
}
/**
 * Alter the query of a view. This creates an [[AlterViewAs]] command.
*
* For example:
* {{{
* ALTER VIEW multi_part_name AS SELECT ...;
* }}}
*/
override def visitAlterViewQuery(ctx: AlterViewQueryContext): LogicalPlan = withOrigin(ctx) {
AlterViewAs(
createUnresolvedView(ctx.multipartIdentifier, "ALTER VIEW ... AS"),
originalText = source(ctx.query),
query = plan(ctx.query))
}
/**
* Create a [[RenameTable]] command.
*
* For example:
* {{{
* ALTER TABLE multi_part_name1 RENAME TO multi_part_name2;
* ALTER VIEW multi_part_name1 RENAME TO multi_part_name2;
* }}}
*/
override def visitRenameTable(ctx: RenameTableContext): LogicalPlan = withOrigin(ctx) {
val isView = ctx.VIEW != null
val relationStr = if (isView) "VIEW" else "TABLE"
RenameTable(
createUnresolvedTableOrView(ctx.from, s"ALTER $relationStr ... RENAME TO"),
visitMultipartIdentifier(ctx.to),
isView)
}
/**
* A command for users to list the properties for a table. If propertyKey is specified, the value
* for the propertyKey is returned. If propertyKey is not specified, all the keys and their
* corresponding values are returned.
* The syntax of using this command in SQL is:
* {{{
* SHOW TBLPROPERTIES multi_part_name[('propertyKey')];
* }}}
*/
override def visitShowTblProperties(
ctx: ShowTblPropertiesContext): LogicalPlan = withOrigin(ctx) {
ShowTableProperties(
createUnresolvedTableOrView(ctx.table, "SHOW TBLPROPERTIES"),
Option(ctx.key).map(visitTablePropertyKey))
}
/**
* Create a plan for a DESCRIBE FUNCTION statement.
*/
override def visitDescribeFunction(ctx: DescribeFunctionContext): LogicalPlan = withOrigin(ctx) {
import ctx._
val functionName =
if (describeFuncName.STRING() != null) {
Seq(string(describeFuncName.STRING()))
} else if (describeFuncName.qualifiedName() != null) {
visitQualifiedName(describeFuncName.qualifiedName)
} else {
Seq(describeFuncName.getText)
}
DescribeFunction(UnresolvedFunc(functionName), EXTENDED != null)
}
/**
* Create a plan for a SHOW FUNCTIONS command.
*/
override def visitShowFunctions(ctx: ShowFunctionsContext): LogicalPlan = withOrigin(ctx) {
val (userScope, systemScope) = Option(ctx.identifier)
.map(_.getText.toLowerCase(Locale.ROOT)) match {
case None | Some("all") => (true, true)
case Some("system") => (false, true)
case Some("user") => (true, false)
case Some(x) => throw QueryParsingErrors.showFunctionsUnsupportedError(x, ctx.identifier())
}
val pattern = Option(ctx.pattern).map(string(_))
val unresolvedFuncOpt = Option(ctx.multipartIdentifier)
.map(visitMultipartIdentifier)
.map(UnresolvedFunc(_))
ShowFunctions(unresolvedFuncOpt, userScope, systemScope, pattern)
}
/**
* Create a DROP FUNCTION statement.
*
* For example:
* {{{
* DROP [TEMPORARY] FUNCTION [IF EXISTS] function;
* }}}
*/
override def visitDropFunction(ctx: DropFunctionContext): LogicalPlan = withOrigin(ctx) {
val functionName = visitMultipartIdentifier(ctx.multipartIdentifier)
DropFunction(
UnresolvedFunc(functionName),
ctx.EXISTS != null,
ctx.TEMPORARY != null)
}
/**
* Create a CREATE FUNCTION statement.
*
* For example:
* {{{
* CREATE [OR REPLACE] [TEMPORARY] FUNCTION [IF NOT EXISTS] [db_name.]function_name
* AS class_name [USING JAR|FILE|ARCHIVE 'file_uri' [, JAR|FILE|ARCHIVE 'file_uri']];
* }}}
*/
override def visitCreateFunction(ctx: CreateFunctionContext): LogicalPlan = withOrigin(ctx) {
val resources = ctx.resource.asScala.map { resource =>
val resourceType = resource.identifier.getText.toLowerCase(Locale.ROOT)
resourceType match {
case "jar" | "file" | "archive" =>
FunctionResource(FunctionResourceType.fromString(resourceType), string(resource.STRING))
case other =>
operationNotAllowed(s"CREATE FUNCTION with resource type '$resourceType'", ctx)
}
}
val functionIdentifier = visitMultipartIdentifier(ctx.multipartIdentifier)
CreateFunctionStatement(
functionIdentifier,
string(ctx.className),
resources.toSeq,
ctx.TEMPORARY != null,
ctx.EXISTS != null,
ctx.REPLACE != null)
}
override def visitRefreshFunction(ctx: RefreshFunctionContext): LogicalPlan = withOrigin(ctx) {
val functionIdentifier = visitMultipartIdentifier(ctx.multipartIdentifier)
RefreshFunction(UnresolvedFunc(functionIdentifier))
}
override def visitCommentNamespace(ctx: CommentNamespaceContext): LogicalPlan = withOrigin(ctx) {
val comment = ctx.comment.getType match {
case SqlBaseParser.NULL => ""
case _ => string(ctx.STRING)
}
val nameParts = visitMultipartIdentifier(ctx.multipartIdentifier)
CommentOnNamespace(UnresolvedNamespace(nameParts), comment)
}
override def visitCommentTable(ctx: CommentTableContext): LogicalPlan = withOrigin(ctx) {
val comment = ctx.comment.getType match {
case SqlBaseParser.NULL => ""
case _ => string(ctx.STRING)
}
CommentOnTable(createUnresolvedTable(ctx.multipartIdentifier, "COMMENT ON TABLE"), comment)
}
private def alterViewTypeMismatchHint: Option[String] = Some("Please use ALTER TABLE instead.")
private def alterTableTypeMismatchHint: Option[String] = Some("Please use ALTER VIEW instead.")
}
witgo/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala | Scala | apache-2.0 | 150,203
package com.example.qa.vastservice.orm
import slick.collection.heterogeneous._
import slick.collection.heterogeneous.syntax._
import slick.lifted.Tag
import slick.jdbc.MySQLProfile.api._
trait UsersRecord {
this: ConfigurationTables.type =>
object Users {
type UHList = Int::Int::Int::String::String::String::HNil
}
class Users(tag: Tag) extends Table[Users.UHList](tag, "users") {
def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
def clientId = column[Int]("client_id")
def groupId = column[Int]("group_id")
def firstName = column[String]("first_name")
def lastName = column[String]("last_name")
def email = column[String]("email")
def * = id :: clientId :: groupId :: firstName :: lastName :: email :: HNil
}
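  // Illustrative usage sketch (added; not part of the original source). Assuming a configured
  // `db: Database` and a `TableQuery` over this table, a lookup by email could look like:
  //   val users = TableQuery[Users]
  //   val byEmail = users.filter(_.email === "alice@example.com").result
  //   db.run(byEmail) // Future[Seq[Users.UHList]]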
}
vanclist/xml-webservice-scalatest | src/main/scala/com/example/qa/vastservice/orm/UsersRecord.scala | Scala | mit | 771
/*
* Copyright 2011-2017 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.cdevreeze.yaidom.xpath.saxon
import java.io.File
import java.net.URI
import eu.cdevreeze.yaidom.core.EName
import eu.cdevreeze.yaidom.core.QName
import eu.cdevreeze.yaidom.core.Scope
import eu.cdevreeze.yaidom.core.jvm.JavaQNames
import eu.cdevreeze.yaidom.resolved
import eu.cdevreeze.yaidom.saxon.SaxonDocument
import eu.cdevreeze.yaidom.saxon.SaxonElem
import eu.cdevreeze.yaidom.saxon.SaxonNode
import eu.cdevreeze.yaidom.simple
import eu.cdevreeze.yaidom.utils.saxon.SaxonElemToSimpleElemConverter
import eu.cdevreeze.yaidom.utils.saxon.SimpleElemToSaxonElemConverter
import javax.xml.xpath.XPathFunction
import javax.xml.xpath.XPathFunctionResolver
import javax.xml.xpath.XPathVariableResolver
import net.sf.saxon.om.NodeInfo
import net.sf.saxon.s9api.DocumentBuilder
import net.sf.saxon.s9api.Processor
import org.scalatest.funsuite.AnyFunSuite
/**
* XPath test case using JAXP backed by Saxon.
*
* @author Chris de Vreeze
*/
class XPathTest extends AnyFunSuite {
private val processor = new Processor(false)
private val XbrliNamespace = "http://www.xbrl.org/2003/instance"
private val XLinkNamespace = "http://www.w3.org/1999/xlink"
private val DimensionEName = EName("dimension")
private val IdEName = EName("id")
private val XbrliItemEName = EName(XbrliNamespace, "item")
private val XLinkRoleEName = EName(XLinkNamespace, "role")
private val MyFuncNamespace = "http://example.com/xbrl-xpath-functions"
private val MyVarNamespace = "http://example.com/xbrl-xpath-variables"
private val docFile = new File(classOf[XPathTest].getResource("sample-xbrl-instance.xml").toURI)
private val docBuilder: DocumentBuilder = processor.newDocumentBuilder()
private val rootElem: SaxonElem =
SaxonDocument.wrapDocument(docBuilder.build(docFile).getUnderlyingNode.getTreeInfo).documentElement
.ensuring(_.baseUri.toString.contains("saxon"), s"Expected non-empty base URI containing the string 'saxon'")
private def useXbrliPrefix(e: SaxonElem): SaxonElem = {
require(
e.scope.filterNamespaces(Set(XbrliNamespace)).keySet == Set("", "xbrli"),
s"Expected namespace ${XbrliNamespace} as default namespace and having prefix 'xbrli' as well")
def convert(elm: simple.Elem): simple.Elem = {
if (elm.qname.prefixOption.isEmpty) elm.copy(qname = QName("xbrli", elm.qname.localPart)) else elm
}
val converterToSaxon = new SimpleElemToSaxonElemConverter(processor)
val simpleRootElem = SaxonElemToSimpleElemConverter.convertSaxonElem(e.rootElem)
val simpleResultRootElem = simpleRootElem.updateElemOrSelf(e.path)(_.transformElemsOrSelf(convert))
// The conversion above happens to leave the Path to the resulting element the same!
converterToSaxon.convertSimpleElem(simpleResultRootElem).getElemOrSelfByPath(e.path).ensuring(_.path == e.path)
}
private val xpathEvaluatorFactory =
SaxonJaxpXPathEvaluatorFactory(processor.getUnderlyingConfiguration)
.withExtraScope(rootElem.scope ++ Scope.from("myfun" -> MyFuncNamespace, "myvar" -> MyVarNamespace))
.withBaseUri(rootElem.baseUri)
xpathEvaluatorFactory.underlyingEvaluatorFactory.setXPathFunctionResolver(new XPathFunctionResolver {
def resolveFunction(functionName: javax.xml.namespace.QName, arity: Int): XPathFunction = {
if (arity == 1 && (functionName == JavaQNames.enameToJavaQName(EName(MyFuncNamespace, "contexts"), None))) {
new FindAllXbrliContexts
} else if (arity == 2 && (functionName == JavaQNames.enameToJavaQName(EName(MyFuncNamespace, "transform"), None))) {
new TransformElem
} else {
sys.error(s"Unknown function with name $functionName and arity $arity")
}
}
})
xpathEvaluatorFactory.underlyingEvaluatorFactory.setXPathVariableResolver(new XPathVariableResolver {
def resolveVariable(variableName: javax.xml.namespace.QName): AnyRef = {
if (variableName == JavaQNames.enameToJavaQName(EName("contextPosition"), None)) {
java.lang.Integer.valueOf(4)
} else if (variableName == JavaQNames.enameToJavaQName(EName(MyVarNamespace, "contextPosition"), None)) {
java.lang.Integer.valueOf(4)
} else if (variableName == JavaQNames.enameToJavaQName(EName(MyVarNamespace, "identity"), None)) {
{ (e: SaxonElem) => e }
} else if (variableName == JavaQNames.enameToJavaQName(EName(MyVarNamespace, "useXbrliPrefix"), None)) {
{ (e: SaxonElem) => useXbrliPrefix(e) }
} else {
sys.error(s"Unknown variable with name $variableName")
}
}
})
private val xpathEvaluator: SaxonJaxpXPathEvaluator =
xpathEvaluatorFactory.newXPathEvaluator()
test("testSimpleStringXPathWithoutContextItem") {
val exprString = "string(count((1, 2, 3, 4, 5)))"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsString(expr, None)
assertResult("5") {
result
}
}
test("testSimpleNumberXPathWithoutContextItem") {
val exprString = "count((1, 2, 3, 4, 5))"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsBigDecimal(expr, None)
assertResult(5) {
result.toInt
}
}
test("testSimpleBooleanXPathWithoutContextItem") {
val exprString = "empty((1, 2, 3, 4, 5))"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsBoolean(expr, None)
assertResult(false) {
result
}
}
test("testSimpleENameXPathWithoutContextItem") {
val exprString = "xs:QName('xbrli:item')"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result =
rootElem.scope.resolveQNameOption(
QName.parse(
xpathEvaluator.evaluateAsString(expr, None))).get
assertResult(XbrliItemEName) {
result
}
}
test("testLoopingXPathWithoutContextItem") {
val exprString = "max(for $i in (1 to 5) return $i * 2)"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsBigDecimal(expr, None)
assertResult(BigDecimal("10")) {
result
}
}
test("testSimpleNodeXPath") {
val exprString = "//xbrli:context[1]/xbrli:entity/xbrli:segment/xbrldi:explicitMember[1]"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsNode(expr, Some(rootElem.wrappedNode))
assertResult("gaap:ABCCompanyDomain") {
SaxonNode.wrapElement(result.asInstanceOf[NodeInfo]).text.trim
}
}
test("testSimpleNodeSeqXPath") {
val exprString = "//xbrli:context/xbrli:entity/xbrli:segment/xbrldi:explicitMember"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsNodeSeq(expr, Some(rootElem.wrappedNode))
assertResult(true) {
result.size > 100
}
}
test("testYaidomQueryOnXPathNodeResults") {
val exprString = "//xbrli:context/xbrli:entity/xbrli:segment/xbrldi:explicitMember"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsNodeSeq(expr, Some(rootElem.wrappedNode))
// Use yaidom query API on results
val resultElems = result.map(e => SaxonNode.wrapElement(e))
assertResult(true) {
val someDimQNames =
Set(QName("gaap:EntityAxis"), QName("gaap:VerificationAxis"), QName("gaap:PremiseAxis"), QName("gaap:ShareOwnershipPlanIdentifierAxis"))
val someDimENames = someDimQNames.map(qn => rootElem.scope.resolveQNameOption(qn).get)
val foundDimensions =
resultElems.flatMap(_.attributeAsResolvedQNameOption(DimensionEName)).toSet
someDimENames.subsetOf(foundDimensions)
}
// The Paths are not lost!
val resultElemPaths = resultElems.map(_.path)
assertResult(Set(List("context", "entity", "segment", "explicitMember"))) {
resultElemPaths.map(_.entries.map(_.elementName.localPart)).toSet
}
assertResult(Set(EName(XbrliNamespace, "xbrl"))) {
resultElems.map(_.rootElem.resolvedName).toSet
}
assertResult(resultElems) {
resultElems.map(e => e.rootElem.getElemOrSelfByPath(e.path))
}
}
test("testSimpleBackingElemXPath") {
val exprString = "//xbrli:context[1]/xbrli:entity/xbrli:segment/xbrldi:explicitMember[1]"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val resultElem = xpathEvaluator.evaluateAsBackingElem(expr, Some(rootElem.wrappedNode))
assertResult("gaap:ABCCompanyDomain") {
resultElem.text.trim
}
}
test("testSimpleBackingElemSeqXPath") {
val exprString = "//xbrli:context/xbrli:entity/xbrli:segment/xbrldi:explicitMember"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val resultElems = xpathEvaluator.evaluateAsBackingElemSeq(expr, Some(rootElem.wrappedNode))
assertResult(true) {
resultElems.size > 100
}
}
test("testYaidomQueryOnXPathBackingElemResults") {
val exprString = "//xbrli:context/xbrli:entity/xbrli:segment/xbrldi:explicitMember"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val resultElems = xpathEvaluator.evaluateAsBackingElemSeq(expr, Some(rootElem.wrappedNode))
// Use yaidom query API on results
assertResult(true) {
val someDimQNames =
Set(QName("gaap:EntityAxis"), QName("gaap:VerificationAxis"), QName("gaap:PremiseAxis"), QName("gaap:ShareOwnershipPlanIdentifierAxis"))
val someDimENames = someDimQNames.map(qn => rootElem.scope.resolveQNameOption(qn).get)
val foundDimensions =
resultElems.flatMap(_.attributeAsResolvedQNameOption(DimensionEName)).toSet
someDimENames.subsetOf(foundDimensions)
}
// The Paths are not lost!
val resultElemPaths = resultElems.map(_.path)
assertResult(Set(List("context", "entity", "segment", "explicitMember"))) {
resultElemPaths.map(_.entries.map(_.elementName.localPart)).toSet
}
assertResult(Set(EName(XbrliNamespace, "xbrl"))) {
resultElems.map(_.rootElem.resolvedName).toSet
}
assertResult(resultElems) {
resultElems.map(e => e.rootElem.getElemOrSelfByPath(e.path))
}
}
test("testBaseUri") {
val exprString = "base-uri(/xbrli:xbrl)"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val resultAsString = xpathEvaluator.evaluateAsString(expr, Some(rootElem.wrappedNode))
val result = URI.create(resultAsString)
assertResult(true) {
resultAsString.contains("sample-xbrl-instance.xml")
}
assertResult(true) {
resultAsString.contains("saxon")
}
assertResult(result) {
rootElem.baseUri
}
}
test("testDocFunction") {
val exprString =
"doc('http://www.nltaxonomie.nl/nt11/kvk/20170419/presentation/kvk-balance-sheet-education-pre.xml')//link:presentationLink[1]/link:loc[10]"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsNode(expr, Some(rootElem.wrappedNode))
val resultElem = SaxonNode.wrapElement(result.asInstanceOf[NodeInfo])
assertResult(Some("urn:kvk:linkrole:balance-sheet-education")) {
// Getting parent element, to make the example more exciting
resultElem.parent.attributeOption(XLinkRoleEName)
}
}
test("testCustomFunction") {
val exprString = "myfun:contexts(.)[4]"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsNode(expr, Some(rootElem.wrappedNode))
val resultElem = SaxonNode.wrapElement(result.asInstanceOf[NodeInfo])
assertResult(EName(XbrliNamespace, "context")) {
resultElem.resolvedName
}
assertResult(Some("I-2005")) {
resultElem.attributeOption(IdEName)
}
}
test("testCustomFunctionAndVariable") {
val exprString = "myfun:contexts(.)[$contextPosition]"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsNode(expr, Some(rootElem.wrappedNode))
val resultElem = SaxonNode.wrapElement(result.asInstanceOf[NodeInfo])
assertResult(EName(XbrliNamespace, "context")) {
resultElem.resolvedName
}
assertResult(Some("I-2005")) {
resultElem.attributeOption(IdEName)
}
}
test("testCustomFunctionAndPrefixedVariable") {
val exprString = "myfun:contexts(.)[$myvar:contextPosition]"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsNode(expr, Some(rootElem.wrappedNode))
val resultElem = SaxonNode.wrapElement(result.asInstanceOf[NodeInfo])
assertResult(EName(XbrliNamespace, "context")) {
resultElem.resolvedName
}
assertResult(Some("I-2005")) {
resultElem.attributeOption(IdEName)
}
}
test("testIdentityTransformation") {
val exprString = "myfun:transform(., $myvar:identity)"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsNode(expr, Some(rootElem.wrappedNode))
val resultElem = SaxonNode.wrapElement(result.asInstanceOf[NodeInfo])
assertResult(EName(XbrliNamespace, "xbrl")) {
resultElem.resolvedName
}
assertResult(rootElem.findAllElemsOrSelf.size) {
resultElem.findAllElemsOrSelf.size
}
}
test("testUseXbrliPrefixTransformation") {
val exprString = "myfun:transform(., $myvar:useXbrliPrefix)"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsNode(expr, Some(rootElem.wrappedNode))
val resultElem = SaxonNode.wrapElement(result.asInstanceOf[NodeInfo])
assertResult(EName(XbrliNamespace, "xbrl")) {
resultElem.resolvedName
}
assertResult(rootElem.findAllElemsOrSelf.size) {
resultElem.findAllElemsOrSelf.size
}
assertResult(List.empty) {
resultElem.filterElemsOrSelf(_.qname.prefixOption.isEmpty)
}
assertResult(resolved.Elem.from(rootElem)) {
resolved.Elem.from(resultElem)
}
}
test("testUseXbrliPrefixLocalTransformation") {
val exprString = "myfun:transform(., $myvar:useXbrliPrefix)"
val firstContext =
rootElem.findElem(e => e.resolvedName == EName(XbrliNamespace, "context") &&
e.attributeOption(IdEName).contains("I-2007")).head
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsNode(expr, Some(firstContext.wrappedNode))
val resultContextElem = SaxonNode.wrapElement(result.asInstanceOf[NodeInfo])
val resultRootElem = resultContextElem.rootElem
assertResult(EName(XbrliNamespace, "xbrl")) {
resultRootElem.resolvedName
}
assertResult(rootElem.findAllElemsOrSelf.size) {
resultRootElem.findAllElemsOrSelf.size
}
assertResult(List.empty) {
resultContextElem.filterElemsOrSelf(_.qname.prefixOption.isEmpty)
}
assertResult(false) {
resultRootElem.filterElemsOrSelf(_.qname.prefixOption.isEmpty).isEmpty
}
assertResult(resolved.Elem.from(firstContext)) {
resolved.Elem.from(resultContextElem)
}
assertResult(resolved.Elem.from(rootElem)) {
resolved.Elem.from(resultRootElem)
}
}
test("testInstanceOfElement") {
val exprString = ". instance of element()"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsBoolean(expr, Some(rootElem.wrappedNode))
assertResult(true) {
result
}
}
test("testNotInstanceOfElement") {
val exprString = "3 instance of element()"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsBoolean(expr, None)
assertResult(false) {
result
}
}
test("testInstanceOfElementSeq") {
val exprString = "myfun:contexts(.) instance of element()+"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsBoolean(expr, Some(rootElem.wrappedNode))
assertResult(true) {
result
}
}
test("testSumOfEmptySeq") {
val exprString = "sum(())"
val expr = xpathEvaluator.makeXPathExpression(exprString)
val result = xpathEvaluator.evaluateAsBigDecimal(expr, None)
assertResult(0) {
result.toInt
}
}
}
|
dvreeze/yaidom
|
jvm/src/test/scala/eu/cdevreeze/yaidom/xpath/saxon/XPathTest.scala
|
Scala
|
apache-2.0
| 16,861
|
package sorm.mappings
import embrace._
import sorm.driver.DriverConnection
import sorm.reflection._
class TupleMapping
( val reflection : Reflection,
val membership : Option[Membership],
val settings : Map[Reflection, EntitySettings] )
extends CompositeMapping {
@inline def mappings
= items.toStream
lazy val items
= reflection.generics.view.zipWithIndex.map { case (r, i) => Mapping(r, Membership.TupleItem(i, this), settings) }.toVector
def valueFromContainerRow ( row : String => Any, c : DriverConnection )
= reflection instantiate mappings.map(_.valueFromContainerRow(row, c))
def valuesForContainerTableRow ( value : Any )
= itemValues(value).flatMap{ case (m, v) => m.valuesForContainerTableRow(v) }
private def itemValues ( value : Any )
= mappings zip value.asInstanceOf[Product].productIterator.toIterable
override def update ( value : Any, masterKey : Stream[Any], connection : DriverConnection ) {
itemValues(value).foreach(_ $$ (_.update(_, masterKey, connection)))
}
override def insert ( value : Any, masterKey : Stream[Any], connection : DriverConnection ) {
itemValues(value).foreach(_ $$ (_.insert(_, masterKey, connection)))
}
}
|
cllu/sorm2
|
src/main/scala/sorm/mappings/TupleMapping.scala
|
Scala
|
mit
| 1,217
|
/*
* Copyright (c) <2015-2016>, see CONTRIBUTORS
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package ch.usi.inf.l3.sana.dcct.phases
import ch.usi.inf.l3.sana
import sana.tiny.dsl._
import sana.tiny.core._
import sana.tiny.core.Implicits._
import sana.tiny.ast.{Tree, NoTree}
import sana.tiny.symbols.Symbol
import sana.dcct.DCCTNodes
import sana.primj.typechecker.{ValDefTyperComponent => _, MethodDefTyperComponent => _, _}
import sana.dcct.typechecker._
import sana.ooj.typechecker.TemplateTyperComponent
import sana.calcj.typechecker.{UnaryTyperComponent => _, _}
trait DcctTyperFamilyApi extends
TransformationFamily[Tree, Tree] {
self =>
override def default = {case s => s}
def components: List[PartialFunction[Tree, Tree]] =
generateComponents[Tree, Tree](DCCTNodes.nodes,
"TyperComponent", "typed", "")
def typed: Tree => Tree = family
}
case class DcctTyperFamily(compiler: CompilerInterface) extends
DcctTyperFamilyApi
|
amanjpro/languages-a-la-carte
|
dcct/src/main/scala/phases/TyperFamily.scala
|
Scala
|
bsd-3-clause
| 2,438
|
package org.mms.patterns
import PatternDescriptions._;
object BOXAapi extends API("Box")(
simpleCollection("/folders","{folderId}")(
item("{id}")(
readOnlyCollection("/items", GET),
readOnlyCollection("/collaborations", GET),
action("/copy", POST),
action("/trash", GET),
action("/trash", DELETE)
),
readOnlyCollection("/trash/items")
)
,
collection("files", "Files collection(can not list)")(
item("{id}")(
)
)
)
|
petrochenko-pavel-a/mms.core
|
org.mms.core/src/main/scala/org/mms/patterns/BOX.scala
|
Scala
|
epl-1.0
| 510
|
import runtime.ScalaRunTime._
trait SuperS[@specialized(AnyRef) T] {
def arr: Array[T]
def foo() = arr(0)
def bar(b: Array[T]) = b(0) = arr(0)
}
class BaseS[@specialized(AnyRef) T](val arr: Array[T]) extends SuperS[T] { }
trait SuperG[T] {
def arr: Array[T]
def foo() = arr(0)
def bar(b: Array[T]) = b(0) = arr(0)
}
class BaseG[T](val arr: Array[T]) extends SuperG[T] { }
object Test {
def main(args: Array[String]): Unit = {
(new BaseS(new Array[String](1)): SuperS[String]).foo
println(arrayApplyCount)
(new BaseS(new Array[String](1)): SuperS[String]).bar(new Array[String](1))
println(arrayApplyCount)
println(arrayUpdateCount)
(new BaseG(new Array[String](1)): SuperG[String]).foo
println(arrayApplyCount)
(new BaseG(new Array[String](1)): SuperG[String]).bar(new Array[String](1))
println(arrayApplyCount)
println(arrayUpdateCount)
}
}
|
martijnhoekstra/scala
|
test/files/specialized/arrays-traits.scala
|
Scala
|
apache-2.0
| 904
|
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.accessibility
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.accessibility.AccessibilityReport._
import org.scalatest.prop.TableDrivenPropertyChecks._
import org.jsoup.Jsoup
import org.jsoup.nodes.Document
class AccessibilityReportSpec extends WordSpec with Matchers with MockitoSugar {
private val errorResult = AccessibilityResult("ERROR","standard","element","identifier","description", "context")
private val warningResult = AccessibilityResult("WARNING","standard","element","identifier","description", "context")
private def makeSeq(errors : Int, warnings : Int) : Seq[AccessibilityResult] = {
Seq.fill(errors)(errorResult) ++ Seq.fill(warnings)(warningResult)
}
val summaries = Table(
("errors", "warnings", "message"),
(0, 0, "There were no errors or warnings"),
(1, 0, "There was 1 error and no warnings"),
(0, 1, "There were no errors and 1 warning"),
(2, 0, "There were 2 errors and no warnings"),
(0, 2, "There were no errors and 2 warnings"),
(2, 2, "There were 2 errors and 2 warnings")
)
"makeScenarioSummary" should {
forAll(summaries) { (e, w, msg) =>
s"give the message '$msg' when there are $e errors and $w warnings" in {
makeScenarioSummary(makeSeq(e,w)) should include(msg)
}
}
}
private def checkTableHead(doc : Document): Unit = {
doc.getElementsByTag("table").size() shouldBe 1
doc.getElementsByTag("thead").size() shouldBe 1
doc.select("thead tr").size() shouldBe 1
}
private def checkTableRowCount(doc : Document, errors : Int, warnings : Int): Unit = {
doc.getElementsContainingOwnText("ERROR").size() shouldBe errors
doc.getElementsContainingOwnText("WARNING").size() shouldBe warnings
}
"makeTable" should {
"contain a header and no rows for empty result sequence" in {
val output: Document = Jsoup.parse(makeTable(Seq()))
checkTableHead(output)
checkTableRowCount(output, 0, 0)
}
"contain a header and one row for result sequence with 1 error" in {
println(makeSeq(1, 1))
val output: Document = Jsoup.parse(makeTable(makeSeq(1, 0)))
checkTableHead(output)
checkTableRowCount(output, 1, 0)
}
"contain a header and one row for result sequence with 1 warning" in {
val output: Document = Jsoup.parse(makeTable(makeSeq(0, 1)))
checkTableHead(output)
checkTableRowCount(output, 0, 1)
}
"contain a header and two rows for result sequence with 1 error and 1 warning" in {
val output: Document = Jsoup.parse(makeTable(makeSeq(1, 1)))
checkTableHead(output)
checkTableRowCount(output, 1, 1)
}
"contain a header and four rows for result sequence with 2 errors and 2 warnings" in {
val output: Document = Jsoup.parse(makeTable(makeSeq(2, 2)))
checkTableHead(output)
checkTableRowCount(output, 2, 2)
}
}
}
|
chrisjameswright/accessibility-testing-library
|
src/test/scala/uk/gov/hmrc/accessibility/AccessibilityReportSpec.scala
|
Scala
|
apache-2.0
| 3,562
|
package org.gtri.util.scala.xsdbuilder
import org.gtri.util.scala.statemachine._
import org.gtri.util.xsddatatypes.XsdCodes.AllOrNoneCode
import scala.collection.immutable.Seq
case class XsdAllOrNone[A](value : Either[AllOrNoneCode, Set[A]]) {
override def toString = {
value fold(
fa = { allOrNoneCode => allOrNoneCode.toString },
fb = { set => set.mkString(" ")}
)
}
}
object XsdAllOrNone {
def parser[A](subparser: Parser[String,A]) : Parser[String,XsdAllOrNone[A]] = {
case s : String if s == AllOrNoneCode.NONE.toString => Parser.Succeed(XsdAllOrNone(Left(AllOrNoneCode.NONE)))
case s : String if s == AllOrNoneCode.ALL.toString => Parser.Succeed(XsdAllOrNone(Left(AllOrNoneCode.ALL)))
case s : String =>
val r0 : Seq[Parser.Transition[A]] =
for {
            member <- s.split("\s+").distinct.toList
} yield subparser(member)
val r1 : Parser.Transition[Seq[A]] = r0.sequence
r1.flatMap { xs => Parser.Succeed(XsdAllOrNone(Right(xs.toSet))) }
}
}
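// Hypothetical usage sketch (added for illustration, not part of the original file).
// XsdAllOrNone.parser short-circuits on the literal ALL/NONE codes and otherwise splits
// the input on whitespace and runs the sub-parser on every distinct token:
//
//   parse(AllOrNoneCode.ALL.toString)   ~> XsdAllOrNone(Left(AllOrNoneCode.ALL))
//   parse("a b b c")                    ~> XsdAllOrNone(Right(Set(...)))   // via the sub-parser
//
// The Parser/Transition types come from org.gtri.util.scala.statemachine and are only
// referenced, not redefined, here.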
|
gtri-iead/org.gtri.util.scala
|
xsdbuilder/src/main/scala/org/gtri/util/scala/xsdbuilder/XsdAllOrNone.scala
|
Scala
|
gpl-3.0
| 1,027
|
package x7c1.wheat.modern.kinds
import x7c1.wheat.modern.callback.either.EitherTask
import scala.language.reflectiveCalls
trait Fate[X, +L, +R] {
def map[R2](f: R => R2): Fate[X, L, R2]
def flatMap[L2 >: L, R2](f: R => Fate[X, L2, R2]): Fate[X, L2, R2]
def run[L2 >: L, R2 >: R](x: X): FateRunner[L2, R2]
def transform[L2, R2](f: Either[L, R] => Either[L2, R2]): Fate[X, L2, R2]
def toEitherTask[L2 >: L, R2 >: R](x: X): EitherTask[L2, R2] = EitherTask(run(x))
}
object Fate {
def apply[X, L, R](underlying: X => (Either[L, R] => Unit) => Unit): Fate[X, L, R] = {
new FateImpl(underlying)
}
def apply[X, L, R](r: R): Fate[X, L, R] = Fate { x => g =>
g(Right(r))
}
def left[X, L, R](l: L): Fate[X, L, R] = Fate { x => g =>
g(Left(l))
}
implicit class Fates[X, L, R](fates: Seq[Fate[X, L, R]]) {
def toParallel: Fate[X, Seq[L], Seq[R]] = Fate { x => f =>
val dispatcher = new EitherCollector(f, fates.length)
fates foreach {
_ run x apply dispatcher.run
}
}
}
}
class FateRunner[L, R](
underlying: (Either[L, R] => Unit) => Unit) extends ((Either[L, R] => Unit) => Unit) {
override def apply(f: Either[L, R] => Unit): Unit = underlying(f)
def atLeft(f: L => Unit): Unit = apply {
case Right(r) => //nop
case Left(l) => f(l)
}
}
private class FateImpl[X, L, R](
underlying: X => (Either[L, R] => Unit) => Unit) extends Fate[X, L, R] {
override def map[R2](f: R => R2): Fate[X, L, R2] = new FateImpl[X, L, R2](
context => g => underlying(context) {
case Right(right) => g(Right(f(right)))
case Left(left) => g(Left(left))
}
)
override def flatMap[L2 >: L, R2](f: R => Fate[X, L2, R2]): Fate[X, L2, R2] = new FateImpl[X, L2, R2](
context => g => underlying(context) {
case Right(right) => f(right).run(context)(g)
case Left(left) => g(Left(left))
}
)
override def run[L2 >: L, R2 >: R](x: X) = {
new FateRunner[L2, R2](underlying(x))
}
override def transform[L2, R2](f: Either[L, R] => Either[L2, R2]): Fate[X, L2, R2] = {
new FateImpl[X, L2, R2](
context => g =>
underlying(context) {
g apply f(_)
}
)
}
}
private class EitherCollector[L, R](f: Either[Seq[L], Seq[R]] => Unit, threshold: Int) {
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.mutable.ArrayBuffer
private val lefts = ArrayBuffer[L]()
private val rights = ArrayBuffer[R]()
private val processed = new AtomicInteger(0)
def run(either: Either[L, R]): Unit = synchronized {
either match {
case Left(l) => lefts += l
case Right(r) => rights += r
}
if (processed.incrementAndGet() == threshold) {
if (lefts.nonEmpty) f(Left(lefts))
else f(Right(rights))
}
}
}
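// Hypothetical usage sketch (added for illustration, not part of the original file):
// two Fate values sharing an Int "context" are composed with flatMap/map and then run.
// The names below are invented for the example.
object FateUsageExample {
  def main(args: Array[String]): Unit = {
    // a Fate that derives its result from the shared context
    val readBase: Fate[Int, String, Int] = Fate { x => g =>
      g(Right(x * 10))
    }
    val composed = for {
      base  <- readBase
      total <- Fate[Int, String, Int](base + 1)
    } yield total
    // run(context) yields a FateRunner that feeds the Either result to the callback
    val runner: FateRunner[String, Int] = composed.run(3)
    runner {
      case Right(value) => println(s"result: $value") // prints "result: 31"
      case Left(error)  => println(s"error: $error")
    }
  }
}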
|
x7c1/Linen
|
wheat-modern/src/main/scala/x7c1/wheat/modern/kinds/Fate.scala
|
Scala
|
mit
| 2,812
|
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.kernel.protocol.v5.client.boot
import akka.actor.ActorSystem
import com.ibm.spark.comm.{CommRegistrar, CommStorage}
import com.ibm.spark.kernel.protocol.v5.client.boot.layers._
import com.ibm.spark.kernel.protocol.v5.client.socket.{SocketConfig, SocketFactory}
import com.ibm.spark.kernel.protocol.v5.client.{SimpleActorLoader, SparkKernelClient}
import com.ibm.spark.utils.LogLike
import com.typesafe.config.Config
import org.zeromq.ZMQ
object ClientBootstrap {
/**
* Generates a new unique name for a client actor system.
*
* @return The unique name as a string
*/
def newActorSystemName(): String =
"spark-client-actor-system-" + java.util.UUID.randomUUID().toString
}
class ClientBootstrap(config: Config) extends LogLike {
this: SystemInitialization with HandlerInitialization =>
/**
* Creates a new Spark Kernel client instance.
*
* @return The new client instance
*/
def createClient(
actorSystemName: String = ClientBootstrap.newActorSystemName()
): SparkKernelClient = {
logger.trace(s"Creating new kernel client actor system, '$actorSystemName'")
val actorSystem = ActorSystem(actorSystemName)
logger.trace(s"Creating actor loader for actor system, '$actorSystemName'")
val actorLoader = SimpleActorLoader(actorSystem)
logger.trace(s"Creating socket factory for actor system, '$actorSystemName")
val socketFactory = new SocketFactory(SocketConfig.fromConfig(config))
logger.trace(s"Initializing underlying system for, '$actorSystemName'")
val (_, _, _, _, commRegistrar, _) =
initializeSystem(config, actorSystem, actorLoader, socketFactory)
logger.trace(s"Initializing handlers for, '$actorSystemName'")
initializeHandlers(actorSystem, actorLoader)
logger.trace(s"ZeroMQ (JeroMQ) version: ${ZMQ.getVersionString}")
new SparkKernelClient(actorLoader, actorSystem, commRegistrar)
}
}
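// Hypothetical usage sketch (added for illustration, not part of the original file).
// ClientBootstrap declares a self-type of SystemInitialization with HandlerInitialization,
// so a concrete client is built by mixing in implementations from the `layers` package.
// The Standard* trait names below are assumptions about that package:
//
//   val client: SparkKernelClient =
//     (new ClientBootstrap(config)
//        with StandardSystemInitialization
//        with StandardHandlerInitialization).createClient()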
|
yeghishe/spark-kernel
|
client/src/main/scala/com/ibm/spark/kernel/protocol/v5/client/boot/ClientBootstrap.scala
|
Scala
|
apache-2.0
| 2,518
|
package com.anchortab.model
import scala.collection.immutable.HashMap
import net.liftweb._
import mongodb._
import json.ext._
import org.joda.time.DateTime
import org.bson.types.ObjectId
case class PlanTerm(description:String, abbreveation:String, stripeCode:String)
/**
* Model for a Plan that users can subscribe to on Anchor Tab.
*
* Special note: the quotas are all per month values regardless of
* the term of the plan.
**/
case class Plan(name:String,
description:String,
price:Double,
trialDays:Int,
features:Map[String,Boolean],
quotas:Map[String, Long],
isSpecial:Boolean = false,
visibleOnRegistration:Boolean = true,
starts:Option[DateTime] = None,
ends:Option[DateTime] = None,
term:PlanTerm = Plan.MonthlyTerm,
stripeId: Option[String] = None,
_id:ObjectId = ObjectId.get
) extends MongoDocument[Plan] {
val meta = Plan
val formattedPrice = "$" + ("%1.2f" format price)
val free_? = price == 0
val hasTrial_? = trialDays > 0
def hasFeature_?(feature:String) = {
features.get(feature) match {
case Some(true) => true
case _ => false
}
}
def quotaFor(quotaKey:String) = quotas.get(quotaKey)
lazy val registrationTitle = {
price match {
case 0 =>
name + " (Free)"
case _ =>
name + " (" + formattedPrice + "/" + term.abbreveation + ")"
}
}
}
object Plan extends MongoDocumentMeta[Plan] {
override def formats = allFormats ++ JodaTimeSerializers.all
val MonthlyTerm = PlanTerm("monthly", "mo", "month")
val YearlyTerm = PlanTerm("yearly", "yr", "year")
val terms = MonthlyTerm :: YearlyTerm :: Nil
object Quotas {
val NumberOfTabs = "number-of-tabs"
val EmailSubscriptions = "email-subscriptions"
val Views = "views"
def humanNameFor(quotaName: String) = {
quotaName match {
case NumberOfTabs => "Number of Tabs"
case EmailSubscriptions => "Email Subscriptions"
case Views => "Views"
case _ => ""
}
}
}
object Features {
val BasicAnalytics = "basic-analytics"
val WhitelabeledTabs = "whitelabeled-tabs"
val CustomColorSchemes = "custom-color-schemes"
val ApiAccess = "api-access"
val PardotIntegration = "pardot-integration"
}
// The DefaultPlan, or the plan you're on if you don't have a plan. If we decide to offer
// a free tier at some point in the future, we should change this plan to describe the
// free tier.
val DefaultPlan = Plan("Free Edition", "1 tab, 10 subscribes", 0, 0,
Map.empty, Map(Quotas.EmailSubscriptions -> 10, Quotas.NumberOfTabs -> 1))
}
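// Hypothetical usage sketch (added for illustration, not part of the original model);
// the "Starter" plan below is invented purely to exercise the helpers defined above.
object PlanUsageExample {
  def main(args: Array[String]): Unit = {
    val starter = Plan("Starter", "2 tabs, 100 subscribes", 9.99, 14,
      features = Map(Plan.Features.BasicAnalytics -> true),
      quotas = Map(Plan.Quotas.NumberOfTabs -> 2L, Plan.Quotas.EmailSubscriptions -> 100L))
    println(starter.formattedPrice)                         // $9.99
    println(starter.registrationTitle)                      // Starter ($9.99/mo)
    println(starter.hasFeature_?(Plan.Features.ApiAccess))  // false, feature not granted
    println(starter.quotaFor(Plan.Quotas.NumberOfTabs))     // Some(2)
  }
}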
|
farmdawgnation/anchortab
|
src/main/scala/com/anchortab/model/Plan.scala
|
Scala
|
apache-2.0
| 2,799
|
package is.solidninja
package openshift
package client
import fs2.Task
import fs2.async.immutable.Signal
import io.circe._
import gnieh.diffson.circe._
import org.http4s.{Credentials, Uri}
import org.http4s.client.Client
import is.solidninja.openshift.api.v1._
import is.solidninja.openshift.client.impl.HttpOpenshiftCluster
sealed trait ClusterToken
case class BearerToken(token: String)
case class ProjectId(id: String)
trait OpenshiftCluster {
def project(id: ProjectId): Task[OpenshiftProject with OpenshiftProjectRaw]
}
trait OpenshiftProject {
def pod(name: String): Task[Option[Pod]]
def pods(): Task[Seq[Pod]]
def deploymentConfig(name: String): Task[Option[DeploymentConfig]]
def deploymentConfigs(): Task[Seq[DeploymentConfig]]
def route(name: String): Task[Option[Route]]
def routes(): Task[Seq[Route]]
def services(): Task[Seq[Service]]
def service(name: String): Task[Option[Service]]
def createDeploymentConfig(dc: DeploymentConfig): Task[DeploymentConfig]
def createRoute(route: Route): Task[Route]
def createService(service: Service): Task[Service]
def patchDeploymentConfig(name: String, patch: JsonPatch): Task[DeploymentConfig]
def patchRoute(name: String, patch: JsonPatch): Task[Route]
def patchService(name: String, patch: JsonPatch): Task[Service]
}
// TODO: experimental?
trait OpenshiftProjectRaw {
def podRaw(name: String): Task[Option[Json]]
def routeRaw(name: String): Task[Option[Json]]
def deploymentConfigRaw(name: String): Task[Option[Json]]
def serviceRaw(name: String): Task[Option[Json]]
}
object OpenshiftCluster {
def apply(url: Uri, token: Signal[Task, Credentials.Token], httpClient: Client): Task[OpenshiftCluster] =
Task.now(new HttpOpenshiftCluster(url, token, httpClient))
}
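// Hypothetical usage sketch (added for illustration, not part of the original file);
// the URL, token signal and http4s Client below are placeholders supplied by the caller:
//
//   val clusterT: Task[OpenshiftCluster] =
//     OpenshiftCluster(Uri.unsafeFromString("https://openshift.example.com:8443"), tokenSignal, httpClient)
//   val pods: Task[Seq[Pod]] =
//     for {
//       cluster <- clusterT
//       project <- cluster.project(ProjectId("my-project"))
//       pods    <- project.pods()
//     } yield pods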
|
vladimir-lu/openshift-scala-api
|
src/main/scala/is/solidninja/openshift/client/OpenshiftClient.scala
|
Scala
|
mit
| 1,781
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.common
import java.io.File
import java.net.URI
import java.nio.charset.StandardCharsets.UTF_8
import org.apache.commons.io.FileUtils
import pureconfig.ConfigReader
import pureconfig.ConvertHelpers.catchReadError
class ConfigMapValue private (val value: String)
object ConfigMapValue {
/**
* Checks if the value is a file url like `file:/etc/config/foo.yaml` then treat it as a file reference
* and read its content otherwise consider it as a literal value
*/
def apply(config: String): ConfigMapValue = {
val value = if (config.startsWith("file:")) {
val uri = new URI(config)
val file = new File(uri)
FileUtils.readFileToString(file, UTF_8)
} else config
new ConfigMapValue(value)
}
implicit val reader: ConfigReader[ConfigMapValue] = ConfigReader.fromString[ConfigMapValue](catchReadError(apply))
}
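// Hypothetical usage sketch (added for illustration, not part of the original file):
// a plain string is kept as the literal value, while a "file:" URI (the path below is
// invented) would be resolved and its content read from disk instead.
object ConfigMapValueExample {
  def main(args: Array[String]): Unit = {
    val literal = ConfigMapValue("invoker: {count: 2}")
    println(literal.value) // prints the literal string unchanged
    // ConfigMapValue("file:/etc/config/runtimes.json") would instead yield the file's content
  }
}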
|
jeremiaswerner/openwhisk
|
common/scala/src/main/scala/org/apache/openwhisk/common/ConfigMapValue.scala
|
Scala
|
apache-2.0
| 1,686
|
package com.lukedeighton.play
import com.lukedeighton.play.validation.Forms._
import org.scalatest.FlatSpec
class BaseSpec extends FlatSpec {
lazy val test1Form = form[Test1]
lazy val test2Form = form[Test2]
lazy val test3Form = form[Test3]
lazy val test4Form = form[Test4]
lazy val test5Form = form[Test5]
lazy val test6Form = form[Test6]
lazy val test7Form = form[Test7]
lazy val test8Form = form[Test8]
lazy val emptyMap = Map.empty[String, String]
lazy val emptyForm1 = test1Form.bind(emptyMap)
lazy val emptyForm2 = test2Form.bind(emptyMap)
lazy val emptyForm3 = test3Form.bind(emptyMap)
lazy val invalidMap = Map("a" -> "x", "b" -> "x", "c" -> "x", "d" -> "x", "e" -> "x", "f" -> "x")
lazy val invalidForm1 = test1Form.bind(invalidMap)
lazy val invalidForm3 = test3Form.bind(invalidMap)
lazy val minMap = Map("a" -> "5", "b" -> "5", "c" -> "5", "d" -> "5", "e" -> "5", "f" -> "5", "g" -> "a" * 5)
lazy val minForm4 = test4Form.bind(minMap)
lazy val minForm5 = test5Form.bind(minMap)
lazy val maxMap = Map("a" -> "25", "b" -> "25", "c" -> "25", "d" -> "25", "e" -> "25", "f" -> "25", "g" -> "a" * 25)
lazy val maxForm4 = test4Form.bind(maxMap)
lazy val maxForm5 = test5Form.bind(maxMap)
}
|
LukeDeighton/play-validation
|
src/test/scala/com/lukedeighton/play/BaseSpec.scala
|
Scala
|
apache-2.0
| 1,242
|
package collins.permissions
case class Users(val assets: Map[String, Set[String]]) {
def this() = this(Map.empty)
lazy val aliases: Map[String, Set[String]] = invertedMap
// Turn values into keys, new values are sets of old keys
// Turns a map of Group -> Users into User -> Groups
def invertedMap: Map[String, Set[String]] = {
val map = PermissionsHelper.hashMapWithDefault
assets.foreach { case (groupName, users) =>
users.foreach { user =>
map.update(user, map(user) + groupName)
}
}
map.toMap
}
}
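// Hypothetical usage sketch (added for illustration, not part of the original file):
// invertedMap/aliases turns the Group -> Users mapping into User -> Groups.
object UsersInversionExample {
  def main(args: Array[String]): Unit = {
    val users = Users(Map(
      "admins"    -> Set("alice", "bob"),
      "operators" -> Set("bob")))
    println(users.aliases("alice")) // Set(admins) - alice is only in admins
    println(users.aliases("bob"))   // Set(admins, operators) - bob is in both groups
  }
}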
|
byxorna/collins
|
app/collins/permissions/Users.scala
|
Scala
|
apache-2.0
| 550
|
package de.uni_potsdam.hpi.coheel.ml
import java.util
import de.uni_potsdam.hpi.coheel.programs.DataClasses.{ClassificationInfo, FeatureLine}
import weka.classifiers.Classifier
import weka.core.Attribute
import weka.core.Instances
import weka.core.Instance
import weka.core.DenseInstance
object CoheelClassifier {
val NUMBER_OF_FEATURES = 16 // excluding class attribute
val POSITIVE_CLASS = 1.0
val POS_TAG_GROUPS = Array(
List("NN", "NNS"),
List("NNP", "NNPS"),
List("JJ", "JJR", "JJS"),
List("VB", "VBD", "VBG", "VBN", "VBP", "VBZ"),
List("CD"),
List("SYM"),
List("WDT", "WP", "WP$", "WRB")
)
val FEATURE_DEFINITION = {
val attrs = new util.ArrayList[Attribute](NUMBER_OF_FEATURES + 1)
// basic features
attrs.add(new Attribute("prom"))
attrs.add(new Attribute("promRank"))
attrs.add(new Attribute("promDeltaTop"))
attrs.add(new Attribute("promDeltaSucc"))
attrs.add(new Attribute("context"))
attrs.add(new Attribute("contextRank"))
attrs.add(new Attribute("contextDeltaTop"))
attrs.add(new Attribute("contextDeltaSucc"))
attrs.add(new Attribute("surfaceLinkProb"))
// pos tags
attrs.add(new Attribute("NN"))
attrs.add(new Attribute("NNP"))
attrs.add(new Attribute("JJ"))
attrs.add(new Attribute("VB"))
attrs.add(new Attribute("CD"))
attrs.add(new Attribute("SYM"))
attrs.add(new Attribute("W"))
val classAttrValues = new util.ArrayList[String](2)
classAttrValues.add("0.0")
classAttrValues.add("1.0")
val classAttr = new Attribute("class", classAttrValues)
attrs.add(classAttr)
attrs
}
}
class CoheelClassifier(classifier: Classifier) {
val instances = new Instances("Classification", CoheelClassifier.FEATURE_DEFINITION, 1)
instances.setClassIndex(CoheelClassifier.NUMBER_OF_FEATURES)
/**
* Classifies a given group of instances, which result from the same link/trie hit in the original text.
* Only if exactly one true prediction is given, the function returns a result.
* @param featureLine The features of all possible links.
* @return The predicted link or None, if no link is predicted.
*/
def classifyResultsWithSeedLogic(featureLine: Seq[FeatureLine[ClassificationInfo]]): Option[FeatureLine[ClassificationInfo]] = {
var positivePredictions = List[FeatureLine[ClassificationInfo]]()
featureLine.foreach { featureLine =>
assert(featureLine.features.size == CoheelClassifier.NUMBER_OF_FEATURES || featureLine.features.size == CoheelClassifier.NUMBER_OF_FEATURES + 1)
val instance = buildInstance(featureLine)
instance.setDataset(instances)
if (classifier.classifyInstance(instance) == CoheelClassifier.POSITIVE_CLASS) {
positivePredictions ::= featureLine
}
}
if (positivePredictions.size == 1)
positivePredictions.headOption
else
None
}
/**
* Classifies a given group of instances, which result from the same link/trie hit in the original text, using candidate logic.
*/
def classifyResultsWithCandidateLogic(featureLine: Seq[FeatureLine[ClassificationInfo]]): List[FeatureLine[ClassificationInfo]] = {
var positivePredictions = List[FeatureLine[ClassificationInfo]]()
featureLine.foreach { featureLine =>
assert(featureLine.features.size == CoheelClassifier.NUMBER_OF_FEATURES || featureLine.features.size == CoheelClassifier.NUMBER_OF_FEATURES + 1)
val instance = buildInstance(featureLine)
instance.setDataset(instances)
if (classifier.classifyInstance(instance) == CoheelClassifier.POSITIVE_CLASS) {
positivePredictions ::= featureLine
}
}
positivePredictions
}
private def buildInstance(featureLine: FeatureLine[ClassificationInfo]): Instance = {
val attValues = featureLine.features.toArray
val instance = new DenseInstance(1.0, attValues)
instance
}
}
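// Hypothetical usage sketch (added for illustration, not part of the original file);
// `trainedModel` (a weka.classifiers.Classifier) and `candidates` (the feature lines of
// one link/trie hit) are placeholders:
//
//   val coheel = new CoheelClassifier(trainedModel)
//   coheel.classifyResultsWithSeedLogic(candidates)      // Some(line) only if exactly one positive prediction
//   coheel.classifyResultsWithCandidateLogic(candidates) // all positively classified lines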
|
stratosphere/coheel
|
src/main/scala/de/uni_potsdam/hpi/coheel/ml/CoheelClassifier.scala
|
Scala
|
apache-2.0
| 3,744
|
/**
* © 2019 Refinitiv. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package trafficshaping
import akka.actor.Actor
import akka.actor.Actor.Receive
import cmwell.ws.Settings
import cmwell.ws.Settings._
import com.typesafe.scalalogging.LazyLogging
import k.grid.dmap.api.SettingsLong
import k.grid.dmap.impl.persistent.PersistentDMap
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
/**
* Created by michael on 6/29/16.
*/
case object GetTrafficData
case class TrafficData(requestors: Map[String, RequestorCounter])
object CongestionAnalyzer {
val name = "CongestionAnalyzer"
var penalizeTopUsers = 3
}
class CongestionAnalyzer extends Actor with LazyLogging {
import Settings._
import DMapKeys._
val numOfCpus = Runtime.getRuntime.availableProcessors()
def getThresholdFactor: Long = {
PersistentDMap
.get(THRESHOLD_FACTOR)
.map {
case SettingsLong(l) => l
case _ => 0L
}
.getOrElse(0L)
}
case object AnalyzeCongestion
@throws[Exception](classOf[Exception])
override def preStart(): Unit = {
context.system.scheduler.schedule(0.seconds, checkFrequency.seconds, self, AnalyzeCongestion)
}
override def receive: Receive = {
case AnalyzeCongestion =>
val thresholdFactor = getThresholdFactor
val threshold = checkFrequency.seconds.toMillis * thresholdFactor
TrafficShaper.lastRequests.toVector
.sortBy(_._2.requestsTime)
.takeRight(CongestionAnalyzer.penalizeTopUsers)
.foreach {
case (k, v) =>
if (v.requestsTime > threshold) {
v.penalty = v.penalty.next
logger.info(s"The user $k is getting ${v.penalty}.")
} else v.penalty = v.penalty.prev
v.reset
}
TrafficShaper.lastRequests = TrafficShaper.lastRequests.filter {
case (k, v) => v.penalty != NoPenalty || v.requestsTime > 0L
}
case GetTrafficData =>
sender ! TrafficData(TrafficShaper.getRequestors)
}
}
|
e-orz/CM-Well
|
server/cmwell-ws/app/trafficshaping/CongestionAnalyzer.scala
|
Scala
|
apache-2.0
| 2,617
|
/*
* Copyright 2016-2018 Michal Harish, michal.harish@gmail.com
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.amient.affinity.avro
import java.util.UUID
import io.amient.affinity.avro.record.AvroJsonConverter
import org.scalatest.{FlatSpec, Matchers}
class AvroJsonConverterSpec extends FlatSpec with Matchers {
behavior of "AvroJsonConverter"
it should "serialize to and from case class <-> avro <-> json with identical result to circe lib" in {
val msg = AvroNamedRecords(SimpleKey(99), Some(SimpleKey(99)), None, List(SimpleKey(99), SimpleKey(100)), List(None, Some(SimpleKey(99)), None))
val avroJson = AvroJsonConverter.toJson(msg, false)
avroJson should be ("{\\"e\\":{\\"id\\":99},\\"rn\\":{\\"id\\":99},\\"rs\\":null,\\"l\\":[{\\"id\\":99},{\\"id\\":100}],\\"lo\\":[null,{\\"id\\":99},null]}")
AvroJsonConverter.toAvro(avroJson, msg.getSchema()) should be (msg)
}
it should "handle primitives, strings and nulls" in {
val msg = AvroPrmitives()
val avroJson = AvroJsonConverter.toJson(msg)
avroJson should be("{\\"bn\\":null,\\"bs\\":true,\\"in\\":null,\\"is\\":-2147483648,\\"ln\\":null,\\"ls\\":-9223372036854775808,\\"fn\\":null,\\"fs\\":-3.4028235E38,\\"dn\\":null,\\"ds\\":-1.7976931348623157E308,\\"sn\\":null,\\"ss\\":\\"Hello\\"}")
AvroJsonConverter.toAvro(avroJson, msg.getSchema()) should be (msg)
}
it should "handle enums" in {
val msg = AvroEnums(SimpleEnum.B, Some(SimpleEnum.B), None, List(SimpleEnum.A, SimpleEnum.B), List(None, Some(SimpleEnum.B)))
val avroJson = AvroJsonConverter.toJson(msg)
avroJson should be("{\\"raw\\":\\"B\\",\\"on\\":\\"B\\",\\"sd\\":null,\\"l\\":[\\"A\\",\\"B\\"],\\"lo\\":[null,\\"B\\"]}")
AvroJsonConverter.toAvro(avroJson, msg.getSchema()) should be (msg)
}
it should "handle bytes" in {
val msg = AvroBytes(Array[Byte](1,2,3), Some(Array[Byte]()), List(Array[Byte](1,2,3), Array[Byte](4)))
val avroJson = AvroJsonConverter.toJson(msg)
val avroMsg = AvroJsonConverter.toAvro(avroJson, msg.getSchema()).asInstanceOf[AvroBytes]
avroMsg.raw should be (Array[Byte](1,2,3))
avroMsg.optional.get should be (Array[Byte]())
avroMsg.listed(0) should be(Array[Byte](1,2,3))
avroMsg.listed(1) should be(Array[Byte](4))
}
it should "handle fixed field variants" in {
val msg = LongCompoundKey(100L, "UK", "C001", 9.9)
val avroJson = AvroJsonConverter.toJson(msg)
avroJson should be("{\\"version\\":\\"AAAAAAAAAGQ=\\",\\"country\\":\\"VUs=\\",\\"city\\":\\"QzAwMQ==\\",\\"value\\":9.9}")
AvroJsonConverter.toAvro(avroJson, msg.getSchema()) should be (msg)
}
it should "handle maps as structs" in {
val a = SimpleMap(Map("key1" -> SimpleKey(1)))
val j = AvroJsonConverter.toJson(a)
AvroJsonConverter.toAvro[SimpleMap](j) should equal(a)
}
it should "handle optional maps as nested structs" in {
val a = OptionalMap(Some(Map("key1" -> SimpleKey(1))))
val j = AvroJsonConverter.toJson(a)
AvroJsonConverter.toAvro[OptionalMap](j) should equal(a)
}
it should "handle defaults when converting toAvro empty json structures" in {
AvroJsonConverter.toAvro[ListSet]("{}") should equal(ListSet())
AvroJsonConverter.toAvro[ListSet]("{\\"l\\": []}") should equal(ListSet())
AvroJsonConverter.toAvro[OptionalMapWithDefaultItem]("{}") should equal(OptionalMapWithDefaultItem(Some(Map())))
AvroJsonConverter.toAvro[OptionalMapWithDefaultItem]("{\\"map\\":{}}") should equal(OptionalMapWithDefaultItem(Some(Map())))
AvroJsonConverter.toAvro[OptionalMapWithDefaultItem]("{\\"map\\":{\\"empty\\": {}}}") should equal(OptionalMapWithDefaultItem(Some(Map("empty" -> ListSet()))))
AvroJsonConverter.toAvro[AvroPrmitives]("{}") should equal(AvroPrmitives())
AvroJsonConverter.toAvro[AvroEnums]("{}") should equal(AvroEnums())
}
it should "allow passing unknown json fields" in {
AvroJsonConverter.toAvro[ListSet]("{\\"hello\\": \\"there\\"}")
}
it should "format UUID in as json readable" in {
val u = UUID.fromString("01010101-0202-0202-0303-030304040404")
val a = UuidCompoundKey(Array(65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65), u, 1L)
val j = AvroJsonConverter.toJson(a)
j should include("QUFBQUFBQUFBQUFBQUFBQQ==")
j should include("01010101-0202-0202-0303-030304040404")
val a2 = AvroJsonConverter.toAvro[UuidCompoundKey](j)
a2.uuid2 should equal(a.uuid2)
a.uuid2 should equal(u)
val n = NestedLogicalType(a)
val jn = AvroJsonConverter.toJson(n)
jn should include("QUFBQUFBQUFBQUFBQUFBQQ==")
jn should include("01010101-0202-0202-0303-030304040404")
val n2 = AvroJsonConverter.toAvro[NestedLogicalType](jn)
n2.u.uuid2 should equal(a.uuid2)
n2.u.uuid2 should equal(u)
}
}
|
amient/affinity
|
avro/src/test/scala/io/amient/affinity/avro/AvroJsonConverterSpec.scala
|
Scala
|
apache-2.0
| 5,441
|
// negatives
package foo1 {
object Foo { // companion is trait
def main(args: Array[String]): Unit = ()
}
trait Foo
}
package foo2 {
object Foo { // companion has its own main
def main(args: Array[String]): Unit = ()
}
class Foo {
def main(args: Array[String]): Unit = ()
}
}
// these should all be made to work, but are negatives for now
// because forwarders need more work.
package foo3 {
object Foo { // Companion contains main, but not an interfering main.
def main(args: Array[String]): Unit = ()
}
class Foo {
def main(args: Int): Unit = ()
}
}
package foo4 {
object Foo extends Foo { // Inherits main from the class
}
class Foo {
def main(args: Array[String]): Unit = ()
}
}
package foo5 {
object Foo extends Foo { // Overrides main from the class
override def main(args: Array[String]): Unit = ()
}
class Foo {
def main(args: Array[String]): Unit = ()
}
}
|
yusuke2255/dotty
|
tests/untried/neg/main1.scala
|
Scala
|
bsd-3-clause
| 945
|
package org.openurp.edu.eams.teach.lesson.task.service.genstrategy.impl
import java.sql.Date
import java.text.MessageFormat
import org.beangle.commons.collection.Collections
import org.beangle.commons.lang.Strings
import org.openurp.base.Semester
import org.openurp.edu.base.Adminclass
import org.openurp.edu.base.Course
import org.openurp.edu.base.code.CourseType
import org.openurp.edu.teach.lesson.Lesson
import org.openurp.edu.teach.lesson.TeachClass
import org.openurp.edu.eams.teach.lesson.task.biz.LessonGenPreview
import org.openurp.edu.eams.teach.lesson.task.service.LessonPlanRelationService
import org.openurp.edu.eams.teach.lesson.task.service.TaskGenObserver
import org.openurp.edu.eams.teach.lesson.task.service.genstrategy.AbstractLessonGenStrategy
import org.openurp.edu.teach.plan.PlanCourse
import org.openurp.edu.teach.plan.MajorPlan
import org.openurp.edu.teach.plan.MajorPlanCourse
import org.openurp.edu.teach.lesson.model.LessonBean
import org.openurp.edu.teach.plan.model.MajorCourseGroupBean
import org.openurp.edu.teach.plan.MajorPlan
import org.openurp.base.code.RoomType
import scala.collection.mutable.HashSet
import java.util.ArrayList
import org.openurp.edu.teach.lesson.model.LessonBean
import org.openurp.edu.teach.plan.MajorPlan
import org.openurp.edu.eams.teach.lesson.dao.LessonDao
import org.openurp.edu.eams.teach.lesson.service.LessonLimitService
import org.openurp.edu.eams.core.service.SemesterService
import org.openurp.edu.eams.teach.time.util.TermCalculator
import org.openurp.edu.eams.teach.util.AdminclassQueryBuilder
import org.openurp.edu.eams.teach.program.util.PlanUtils
import org.openurp.edu.eams.teach.lesson.service.LessonLogHelper
import org.openurp.edu.eams.weekstate.WeekStates
import org.openurp.edu.eams.teach.lesson.service.LessonLogBuilder
class ClassicLessonGenStrategy extends AbstractLessonGenStrategy {
private var lessonDao: LessonDao = _
private var lessonLogHelper: LessonLogHelper = _
private var semesterService: SemesterService = _
private var lessonPlanRelationService: LessonPlanRelationService = _
private var lessonLimitService: LessonLimitService = _
protected override def iDo(source: String): Boolean = "MAJOR_PROGRAM" == source.toUpperCase()
protected override def gen(context: Map[String, Any], observer: TaskGenObserver) {
val planIds = context.get("planIds").asInstanceOf[Integer]
val plans = entityDao.find(classOf[MajorPlan], planIds)
val planCount = plans.size
if (null != observer) {
observer.notifyStart(observer.messageOf("info.taskGenInit.start") + "(" + planCount +
")", planCount, null)
}
for (plan <- plans) {
genLessons(plan, observer, context)
}
if (null != observer) {
observer.notifyGenResult(planCount)
observer.notifyFinish()
}
}
protected override def preview(context: Map[String, Any]): AnyRef = {
val planIds = context.get("planIds").asInstanceOf[Integer]
val res = new ArrayList[LessonGenPreview]()
val plans = entityDao.find(classOf[MajorPlan], planIds)
for (plan <- plans) {
res.add(previewLessonGen(plan, context))
}
res
}
private def genLessons(plan: MajorPlan, observer: TaskGenObserver, params: Map[String, Any]) {
val preview = previewLessonGen(plan, params)
val removeGenerated = true == params.get("removeGenerated")
val semester = params.get("semester").asInstanceOf[Semester]
if (removeGenerated) {
observer.outputNotifyRemove(preview.term, plan, "info.plan.removeGenTask", false)
}
try {
lessonDao.saveGenResult(plan, semester, preview.lessons, removeGenerated)
for (lesson <- preview.lessons) {
        lessonLogHelper.log(LessonLogBuilder.create(lesson, "Generate task"))
}
} catch {
case e: Exception => {
e.printStackTrace()
observer.outputNotifyRemove(0, plan, "info.plan.failure.removeGenTask", false)
return
}
}
if (null != observer) {
observer.outputNotify(preview.term, preview.lessons.size, plan)
}
}
private def previewLessonGen(plan: MajorPlan, params: Map[String, Any]): LessonGenPreview = {
val semester = params.get("semester").asInstanceOf[Semester]
val omitSmallTerm = true == params.get("omitSmallTerm")
val termCalc = new TermCalculator(semesterService, semester)
var term = -1
term = termCalc.getTerm(plan.program.beginOn, if (plan.program.endOn != null) plan.program.endOn else Date.valueOf("2099-09-09"),
omitSmallTerm)
if (plan.startTerm != null) {
term = term + plan.startTerm - 1
}
val preview = new LessonGenPreview(plan, term)
if (term <= 0) {
preview.error ="还没到该计划生成任务的时候"
return preview
}
val planCourses = getPlanCourses(preview)
if (Strings.isNotEmpty(preview.error)) {
return preview
}
preview.error = filterPlanCourses(planCourses, plan, params)
    preview.lessons ++= makeLessons(plan, planCourses, params)
preview
}
private def getPlanCourses(preview: LessonGenPreview) : Seq[PlanCourse] = {
val planCourses = PlanUtils.getPlanCourses(preview.plan, preview.term)
if (Collections.isEmpty(planCourses)) {
      preview.error = MessageFormat.format("This plan has no courses in term {0}", preview.term.asInstanceOf[Object])
}
planCourses
}
private def filterPlanCourses(planCourses: Seq[PlanCourse], plan: MajorPlan, params: Map[String, Any]): String = {
val semester = params.get("semester").asInstanceOf[Semester]
val adminclasses = entityDao.search(AdminclassQueryBuilder.build(plan))
new MajorPlanCourseFilter(planCourses, params, adminclasses) {
override def shouldRemove(planCourse: MajorPlanCourse): Boolean = {
val allowNoAdminclass = true == params.get("allowNoAdminclass")
val adminclasses = other.asInstanceOf[List[Adminclass]]
if (Collections.isEmpty(adminclasses) && !allowNoAdminclass) {
return true
}
return false
}
}
.filter()
if (Collections.isEmpty(adminclasses) && Collections.isEmpty(planCourses)) {
return "没有行政班无法生成任务"
}
val existCourses = new HashSet[Course]()
for (relation <- lessonPlanRelationService.relations(plan, semester)) {
existCourses.add(relation.getLesson.getCourse)
}
new MajorPlanCourseFilter(planCourses, params, existCourses) {
override def shouldRemove(planCourse: MajorPlanCourse): Boolean = {
val removeGenerated = true == params.get("removeGenerated")
if (removeGenerated) {
return false
}
val courses = other.asInstanceOf[Set[Course]]
for (course <- courses if planCourse.course == course) {
return true
}
return false
}
}
.filter()
if (Collections.isNotEmpty(existCourses) && Collections.isEmpty(planCourses)) {
return "所有课程都已生成过任务"
}
new MajorPlanCourseFilter(planCourses, params) {
override def shouldRemove(planCourse: MajorPlanCourse): Boolean = {
val onlyGenCourseTypes = params.get("onlyGenCourseTypes").asInstanceOf[List[CourseType]]
if (Collections.isNotEmpty(onlyGenCourseTypes) &&
!onlyGenCourseTypes.contains(planCourse.group.courseType)) {
return true
}
return false
}
}
.filter()
new MajorPlanCourseFilter(planCourses, params) {
override def shouldRemove(planCourse: MajorPlanCourse): Boolean = {
val onlyGenCourses = params.get("onlyGenCourses").asInstanceOf[List[Course]]
if (Collections.isNotEmpty(onlyGenCourses) && !onlyGenCourses.contains(planCourse.course)) {
return true
}
return false
}
}
.filter()
new MajorPlanCourseFilter(planCourses, params) {
override def shouldRemove(planCourse: MajorPlanCourse): Boolean = {
val dontGenCourses = params.get("dontGenCourses").asInstanceOf[List[Course]]
if (Collections.isNotEmpty(dontGenCourses) && dontGenCourses.contains(planCourse.course)) {
return true
}
return false
}
}
.filter()
null
}
private def makeLessons(plan: MajorPlan, planCourses: Seq[PlanCourse], params: Map[String, Any]): collection.mutable.Buffer[Lesson] = {
val res = Collections.newBuffer[Lesson]
if (Collections.isEmpty(planCourses)) {
res
}
val adminclasses = entityDao.search(AdminclassQueryBuilder.build(plan))
if (Collections.isNotEmpty(adminclasses)) {
for (adminclass <- adminclasses) {
val lessons = Collections.newBuffer[Lesson]
for (planCourse <- planCourses) {
val lesson = makeNewLesson(planCourse, plan, adminclass, params)
          lessons += lesson
}
res ++= lessons
}
} else {
val lessons = Collections.newBuffer[Lesson]
for (planCourse <- planCourses) {
val lesson = makeNewLesson(planCourse, plan, null, params)
lessons += lesson
}
res ++= lessons
}
res
}
private def makeNewLesson(planCourse: PlanCourse,
plan: MajorPlan,
adminClass: Adminclass,
params: Map[String, Any]): Lesson = {
val semester = params.get("semester").asInstanceOf[Semester]
val startWeek = params.get("startWeek").asInstanceOf[java.lang.Integer]
val weeks = params.get("weeks").asInstanceOf[java.lang.Integer]
val roomType = params.get("roomType").asInstanceOf[RoomType]
val lesson = new LessonBean
lesson.project = plan.program.major.project
lesson.teachDepart = planCourse.department
lesson.course = planCourse.course
lesson.courseType = planCourse.group.courseType
lesson.semester = semester
// lesson.examMode = planCourse.course.examMode
val courseSchedule = lesson.schedule
val course = planCourse.course
val endWeek =
  if (course.weeks != null && course.weeks > 0) startWeek + course.weeks - 1
  else if (course.weekHour != 0) startWeek + (course.period / course.weekHour).toInt - 1
  else startWeek + weeks - 1
courseSchedule.weekState = WeekStates.build(startWeek + "-" + endWeek)
courseSchedule.roomType = roomType
val teachClass = lesson.teachClass
teachClass.grade = plan.program.grade
teachClass.depart = plan.program.department
val builder = lessonLimitService.builder(teachClass)
if (null != adminClass) {
if (adminClass.stdCount == 0) {
teachClass.limitCount = adminClass.planCount
} else {
teachClass.limitCount = adminClass.stdCount
}
builder.in(adminClass)
} else {
builder.inGrades(plan.program.grade)
builder.in(plan.program.education)
if (plan.program.stdType != null) {
builder.in(plan.program.stdType)
}
builder.in(plan.program.department)
builder.in(plan.program.major)
if (plan.program.direction != null) {
builder.in(plan.program.direction)
}
if (planCourse.group.isInstanceOf[MajorCourseGroupBean]) {
if (planCourse.group.asInstanceOf[MajorCourseGroupBean]
.direction !=
null) {
builder.in(planCourse.group.asInstanceOf[MajorCourseGroupBean]
.direction)
}
}
builder.in(plan.program)
}
teachClassNameStrategy.autoName(teachClass)
lesson.updatedAt = new Date(System.currentTimeMillis())
lesson
}
}
abstract class MajorPlanCourseFilter(private var planCourses: Seq[PlanCourse], protected var params: Map[String, Any]) {
  protected var other: AnyRef = _
  def this(planCourses: Seq[PlanCourse], params: Map[String, Any], other: AnyRef) {
    this(planCourses, params)
    this.other = other
  }
  /**
   * Removes, in place, every plan course for which shouldRemove returns true.
   * The sequence is assumed to be backed by a mutable Buffer so that callers observe the result.
   */
  def filter() {
    val buffer = planCourses.asInstanceOf[collection.mutable.Buffer[PlanCourse]]
    val removeIndices = (0 until buffer.size).filter(i => shouldRemove(buffer(i))).reverse
    for (i <- removeIndices) buffer.remove(i) // remove from the back so earlier indices stay valid
  }
  def shouldRemove(planCourse: PlanCourse): Boolean
}
|
openurp/edu-eams-webapp
|
schedule/src/main/scala/org/openurp/edu/eams/teach/lesson/task/service/genstrategy/impl/ClassicLessonGenStrategy.scala
|
Scala
|
gpl-3.0
| 12,177
|
package io.getquill.context.jasync.postgres
import scala.concurrent.ExecutionContext.Implicits.{ global => ec }
import io.getquill.context.sql.DepartmentsSpec
class DepartmentsPostgresAsyncSpec extends DepartmentsSpec {
val context = testContext
import testContext._
override def beforeAll =
await {
testContext.transaction { implicit ec =>
for {
_ <- testContext.run(query[Department].delete)
_ <- testContext.run(query[Employee].delete)
_ <- testContext.run(query[Task].delete)
_ <- testContext.run(liftQuery(departmentEntries).foreach(e => departmentInsert(e)))
_ <- testContext.run(liftQuery(employeeEntries).foreach(e => employeeInsert(e)))
_ <- testContext.run(liftQuery(taskEntries).foreach(e => taskInsert(e)))
} yield {}
}
}
"Example 8 - nested naive" in {
await(testContext.run(`Example 8 expertise naive`(lift(`Example 8 param`)))) mustEqual `Example 8 expected result`
}
"Example 9 - nested db" in {
await(testContext.run(`Example 9 expertise`(lift(`Example 9 param`)))) mustEqual `Example 9 expected result`
}
"performIO" in {
await(performIO(runIO(query[Task]).transactional))
}
}
|
getquill/quill
|
quill-jasync-postgres/src/test/scala/io/getquill/context/jasync/postgres/DepartmentsPostgresAsyncSpec.scala
|
Scala
|
apache-2.0
| 1,230
|
package com.sksamuel.avro4s.record.decoder
import com.sksamuel.avro4s.{AvroSchema, AvroTransient, Decoder, DefaultNamingStrategy}
import org.apache.avro.generic.GenericData
import org.apache.avro.util.Utf8
import org.scalatest.{FunSuite, Matchers}
class TransientDecoderTest extends FunSuite with Matchers {
case class TransientFoo(a: String, @AvroTransient b: Option[String])
test("decoder should populate transient fields with None") {
val schema = AvroSchema[TransientFoo]
val record = new GenericData.Record(schema)
record.put("a", new Utf8("hello"))
Decoder[TransientFoo].decode(record, schema, DefaultNamingStrategy) shouldBe TransientFoo("hello", None)
}
}
|
51zero/avro4s
|
avro4s-core/src/test/scala/com/sksamuel/avro4s/record/decoder/TransientDecoderTest.scala
|
Scala
|
mit
| 690
|
package com.databricks.spark.sql.perf.mllib.clustering
import org.apache.spark.ml
import org.apache.spark.ml.Estimator
import org.apache.spark.sql._
import com.databricks.spark.sql.perf.mllib.OptionImplicits._
import com.databricks.spark.sql.perf.mllib.data.DataGenerator
import com.databricks.spark.sql.perf.mllib.{BenchmarkAlgorithm, MLBenchContext, TestFromTraining}
object KMeans extends BenchmarkAlgorithm with TestFromTraining {
override def trainingDataSet(ctx: MLBenchContext): DataFrame = {
import ctx.params._
DataGenerator.generateGaussianMixtureData(ctx.sqlContext, k, numExamples, ctx.seed(),
numPartitions, numFeatures)
}
override def getEstimator(ctx: MLBenchContext): Estimator[_] = {
import ctx.params._
new ml.clustering.KMeans()
.setK(k)
.setSeed(randomSeed.toLong)
.setMaxIter(maxIter)
}
// TODO(?) add a scoring method here.
}
|
josiahsams/spark-sql-perf-spark2.0.0
|
src/main/scala/com/databricks/spark/sql/perf/mllib/clustering/KMeans.scala
|
Scala
|
apache-2.0
| 905
|
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller, Robert Davis
* @version 1.3
* @date Sun Sep 16 14:09:25 EDT 2012
* @see LICENSE (MIT style license file).
*
* This file contains classes for Hessenburg reductions, finding Eigenvalues
* and computing Eigenvectors.
* Need to add ability to work with `SparseMatrixD`
*/
package scalation.linalgebra
import scala.math.{abs, signum, sqrt}
import scala.util.control.Breaks.{breakable, break}
import scalation.linalgebra.Householder.house
import scalation.linalgebra.MatrixD.{eye, outer}
import scalation.math.double_exp
import scalation.math.ExtremeD.TOL
import scalation.util.Error
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Eigen` trait defines constants used by classes and objects in the group.
*/
trait Eigen
{
/** Debug flag
*/
protected val DEBUG = true
} // Eigen trait
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Hessenburg` class is used to reduce, via similarity transformations, an
* 'n' by 'n' matrix 'a' to Hessenburg form 'h', where all elements two below the
* main diagonal are zero (or close to zero). Note, similarity transformations
 * do not change the eigenvalues.
* @param a the matrix to reduce to Hessenburg form
*/
class Hessenburg (a: MatrixD)
extends Eigen with Error
{
private val (m, n) = (a.dim1, a.dim2) // size of matrix
private var h = new MatrixD (a) // Hessenburg h matrix
if (m != n) flaw ("constructor", "must have m == n")
for (j <- 0 until n) { // for each column j
val x = h.col(j, j) // jth column from jth position
val u = x + x.oneAt (0) * x.norm * (if (x(0) < 0.0) -1.0 else 1.0)
val pp = eye (n-j) - outer (u, u) * (2.0 / u.normSq)
val p = eye (j) diag pp
h = p.t * h * p
} // for
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get the Hessenburg h matrix.
*/
def getH: MatrixD = h
} // Hessenburg class
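//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `HessenburgSketch` object is a minimal usage sketch (an illustrative example,
 *  not part of the original scalation code) showing how the `Hessenburg` class above
 *  can be applied to a small, full 3-by-3 matrix.
 *  > run-main scalation.linalgebra.HessenburgSketch
 */
object HessenburgSketch extends App
{
    val a = new MatrixD ((3, 3), 1.0, 2.0, 3.0,            // 3-by-3 matrix
                                 4.0, 5.0, 6.0,
                                 7.0, 8.0, 9.0)
    val h = (new Hessenburg (a)).getH                      // reduce 'a' to Hessenburg form
    println ("a = " + a)
    println ("h = " + h)                                   // entries two below the diagonal should be ~0
} // HessenburgSketch object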
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Eigenvalue` class is used to find the eigenvalues of an 'n' by 'n' matrix
* 'a' using an iterative technique that applies similarity transformations to
* convert 'a' into an upper triangular matrix, so that the eigenvalues appear
* along the diagonal. To improve performance, the 'a' matrix is first reduced
* to Hessenburg form. During the iterative steps, a shifted 'QR' decomposition
* is performed.
* Caveats: (1) it will not handle eigenvalues that are complex numbers,
* (2) it uses a simple shifting strategy that may slow convergence.
* @param a the matrix whose eigenvalues are sought
*/
class Eigenvalue (a: MatrixD)
extends Eigen with Error
{
private val ITERATIONS = 12 // max iterations: increase --> more precision, but slower
private val (m, n) = (a.dim1, a.dim2) // size of matrix
private val e = new VectorD (m) // vector of eigenvalues
if (m != n) flaw ("constructor", "must have m == n")
var g = (new Hessenburg (a)).getH // convert g matrix to Hessenburg form
var converging = true // still converging, has not converged yet
var lastE = Double.PositiveInfinity // save an eigenvalue from last iteration
for (k <- 0 until ITERATIONS if converging) { // major iterations
converging = true
for (l <- 0 until ITERATIONS) { // minor iterations
val s = g(n - 1, n - 1) // the shift parameter
val eye_g = eye (g.dim1)
val (qq, rr) = (new Fac_QR_H (g - eye_g * s)).factor12 ()
g = rr.asInstanceOf [MatrixD] * qq.asInstanceOf [MatrixD] + eye_g * s // FIX
} // for
for (i <- 0 until n) e(i) = g(i, i) // extract eigenvalues from diagonal
val e0 = e(0) // consider one eigenvalue
if (abs ((lastE - e0) / e0) < TOL) { // relative error
converging = false // end major iterations
} else {
lastE = e0 // save this eigenvalue
} // if
if (DEBUG) {
println ("-------------------------------------------")
println ("Eigenvalue: on iteration " + k + ": g = " + g)
println ("Eigenvalue: on iteration " + k + ": e = " + e)
if (! converging) println ("Eigenvalue: converged!")
} // if
} // for
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Reorder the eigenvalue vector 'e' in non-increasing order.
*/
def reorder () { e.sort2 () }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get the eigenvalue 'e' vector.
* @param order whether to order the eigenvalues in non-increasing order
*/
def getE (order: Boolean = true): VectorD = { if (order) reorder() ; e }
} // Eigenvalue class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `HouseholderT` class performs a Householder Tridiagonalization on a
* symmetric matrix.
* @see Algorithm 8.3.1 in Matrix Computations.
* @param a the symmetric matrix to tridiagonalize
*/
class HouseholderT (a: MatrixD)
extends Eigen with Error
{
/** The Householder tridiagonal matrix
*/
private val t = new SymTriMatrixD (a.dim1)
if (a.dim1 != a.dim2) flaw ("constructor", "must have m == n")
if (! a.isSymmetric) flaw ("constructor", "matrix a must be symmetric")
val n = a.dim1 - 1 // the last index
for (k <- 0 to n - 2) {
val ts = a.col(k).slice (k+1, n+1)
val v_b = house (ts)
val v = v_b._1; val b = v_b._2
val p = a.slice (k+1, n+1, k+1, n+1) * v * b
val w = p - v * ((b / 2) * (p dot v))
t(k, k) = a(k, k)
t(k+1, k) = ts.norm
for (i <- k + 1 to n; j <- k + 1 to n) {
a(i, j) = a(i, j) - (v(i - (k+1)) * w(j - (k+1)) +
w(i - (k+1)) * v(j - (k+1)))
} // for
} // for
t(n-1, n) = a(n-1, n)
t(n-1, n-1) = a(n-1, n-1)
t(n, n) = a(n, n)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get the Householder Tridiagonal matrix 't'.
*/
def getT: SymTriMatrixD = t
} // HouseholderT class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `SymmetricQRstep` object performs a symmetric 'QR' step with a Wilkinson shift.
* @see Algorithm 8.3.2 in Matrix Computations.
* @see http://people.inf.ethz.ch/arbenz/ewp/Lnotes/chapter3.pdf (Algorithm 3.6)
*/
object SymmetricQRstep
extends Eigen with Error
{
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Apply a 'QR' reduction step to matrix 't'.
* @param t the unreduced symmetric tridiagonal matrix
* @param p the row index
* @param q the column index
*/
def qRStep (t: SymTriMatrixD, p: Int, q: Int) =
{
val n = t.dg.dim - q - 1 // the last index
val d = (t.dg(n-1) - t.dg(n)) / 2.0 // Wilkinson shift
val t2 = t.sd(n-1) * t.sd(n-1)
val d2 = t.dg(n) - t2 / (d + signum (d) * sqrt (d * d + t2))
var g = t.dg(0) - d2
var s = 1.0
var c = 1.0
var phi = 0.0
for (k <- p until n) {
var f = s * (t.sd(k))
var b = c * (t.sd(k))
var r = sqrt (g * g + f * f)
c = g / r
s = f / r
if (k != 0) t.sd(k-1) = r
g = t.dg(k) - phi
r = (t.dg(k+1) - g) * s + 2.0 * c * b
phi = s * r
t.dg(k) = g + phi
g = c * r - b
} // for
t.dg(n) = t.dg(n) - phi
t.sd(n-1) = g
} // qRStep
} // SymmetricQRstep object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `EigenvalueSym` class is used to find the eigenvalues of an 'n' by 'n'
* symmetric matrix 'a' using an iterative technique, the Symmetric 'QR' Algorithm.
* @see Algorithm 8.3.3 in Matrix Computations.
* Caveats: (1) it will not handle eigenvalues that are complex numbers,
* (2) it uses a simple shifting strategy that may slow convergence.
* @param a the symmetric matrix whose eigenvalues are sought
*/
class EigenvalueSym (a: MatrixD)
extends Eigen with Error
{
/** The matrix containing a vector of eigenvalues
*/
private var d: SymTriMatrixD = null
val m = a.dim1 // number of rows
if (m != a.dim2) flaw ("constructor", "must have m == n")
if (! a.isSymmetric) flaw ("constructor", "matrix a must be symmetric")
var p = 0 // the row index
var q = 0 // the column index
d = (new HouseholderT (a)).getT // make symmetric tridiagonal matrix
while (q < m) {
for (i <- 0 to m-2 if abs (d(i, i+1)) <= TOL) d(i, i+1) = 0.0 // clean d
q = 0; p = m-1
while (p > 0 && d(p, p-1) =~ 0.0 && q < m) { q += 1; p -= 1 }
while (p > 0 && ! (d(p, p-1) =~ 0.0)) p -= 1
if (q < m) SymmetricQRstep.qRStep (d, p, q)
} // while
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get the eigenvalue 'e' vector.
*/
def getE: VectorD = d.dg // the diagonal of the tridiagonal matrix
} // EigenvalueSym
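//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `EigenvalueSymSketch` object is a minimal usage sketch (an illustrative example,
 *  not part of the original scalation code) for the symmetric case, which the general
 *  `EigenTest` below does not exercise: the eigenvalues are read off the diagonal after
 *  tridiagonalization and symmetric 'QR' steps.
 *  > run-main scalation.linalgebra.EigenvalueSymSketch
 */
object EigenvalueSymSketch extends App
{
    val a = new MatrixD ((3, 3), 2.0, 1.0, 1.0,            // symmetric 3-by-3 matrix
                                 1.0, 2.0, 1.0,
                                 1.0, 1.0, 2.0)
    val e = (new EigenvalueSym (a)).getE                   // eigenvalues should be 4, 1, 1
    println ("a = " + a)
    println ("e = " + e)
} // EigenvalueSymSketch object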
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Eigenvector` class is used to find the eigenvectors of an 'n' by 'n' matrix
* 'a' by solving equations of the form
* <p>
* (a - eI)v = 0
* <p>
* where 'e' is the eigenvalue and 'v' is the eigenvector. Place the eigenvectors
* in a matrix column-wise.
* @param a the matrix whose eigenvectors are sought
* @param _e the vector of eigenvalues of matrix a
*/
class Eigenvector (a: MatrixD, _e: VectorD = null)
extends Eigen with Error
{
private val ITERATIONS = 12 // max iterations
private val m = a.dim1 // number of rows
if (a.dim2 != m) flaw ("constructor", "must have m == n")
private val v = new MatrixD (m, m) // eigenvectors matrix (each row)
private val ident = eye (m) // identity matrix
private val e = if (_e == null) (new Eigenvalue (a)).getE () else _e
// find eigenvectors using nullspace calculation
for (i <- 0 until m) { // compute eigenvector for i-th eigenvalue
val a_Ie = (a - ident * e(i)) // a - Ie
val c_a_Ie = a_Ie.clean (TOL)
if (DEBUG) println (s"a_Ie = $a_Ie \\nc_a_Ie = $c_a_Ie")
val qr = new Fac_QR_H (c_a_Ie)
qr.factor ()
val eVec = qr.nullspaceV (e.zero (m))
println ("+++ eigenvector for eigenvalue " + e(i) + " = " + eVec)
val mat = a_Ie.slice (1, m)
if (DEBUG) println ("mat = " + mat)
val eVec2 = mat.nullspace
println ("--- eigenvector for eigenvalue " + e(i) + " = " + eVec2)
// v.setCol (i, eVec)
v.setCol (i, eVec2)
} // for
// find eigenvectors using inverse iteration (also improves eigenvalues)
// @see http://home.iitk.ac.in/~dasgupta/MathBook/lmastertrans.pdf (p. 130)
// var y_k = new VectorD (m); y_k.set (1./m.toDouble) // old estimate of eigenvector
// var y_l: VectorD = null // new estimate of eigenvector
//
// for (i <- 0 until m) { // compute eigenvector for i-th eigenvalue
// breakable { for (k <- 0 until ITERATIONS) {
// val a_Ie = a - ident * e(i) // form matrix: [a - Ie]
//     if (DEBUG) println ("a_Ie = " + a_Ie)
// val qr = new Fac_QR_H (a_Ie)
// qr.factor ()
// val y = qr.solve (y_k) // solve [a - Ie]y = y_k
// y_l = y / y.norm // normalize
// e(i) += 1.0 / (y_k dot y) // improve the eigenvalue
// if ((y_l - y_k).norm < TOL) break
// y_k = y_l // update the eigenvector
// }} // for
// println ("eigenvector for eigenvalue " + e(i) + " = " + y_l)
// v.setCol (i, y_l)
// } // for
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get the eigenvector 'v' matrix.
*/
def getV: MatrixD = v
} // Eigenvector class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `EigenTest` object is used to test all the classes used in computing
* Eigenvalues and Eigenvectors for the non-symmetric/general case.
* > run-main scalation.linalgebra.EigenTest
*/
object EigenTest extends App
{
import scalation.util.Banner.banner
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** For matrix a, find Hessenburg matrix, eigenvalues and eigenvectors.
*/
def test (a: MatrixD, name: String)
{
banner (name)
val e = (new Eigenvalue (a)).getE ()
val v = (new Eigenvector (a, e)).getV
println ("----------------------------------------------------------")
println ("a = " + a)
println ("e = " + e)
println ("v = " + v)
for (i <- 0 until v.dim1) { // check that a * v_i = e_i * v_i
println ("a * v_i - v_i * e_i = " + (a * v.col(i) - v.col(i) * e(i)))
} // for
} // test
// @see http://www.mathworks.com/help/symbolic/eigenvalue-trajectories.html
// should give e = (3., 2., 1.)
val b = new MatrixD ((3, 3), -149.0, -50.0, -154.0, // 3-by-3 matrix
537.0, 180.0, 546.0,
-27.0, -9.0, -25.0)
test (b, "matrix b")
// @see http://www.math.hmc.edu/calculus/tutorials/eigenstuff/eigenstuff.pdf
// should give e = (1., -3., -3.)
val c = new MatrixD ((3, 3), 5.0, 8.0, 16.0, // 3-by-3 matrix
4.0, 1.0, 8.0,
-4.0, -4.0, -11.0)
test (c, "matrix c")
} // EigenTest object
|
NBKlepp/fda
|
scalation_1.3/scalation_mathstat/src/main/scala/scalation/linalgebra/Eigen.scala
|
Scala
|
mit
| 15,006
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project}
import org.apache.spark.sql.execution.{LeafExecNode, SparkPlan}
import org.apache.spark.sql.test.SharedSparkSession
case class FastOperator(output: Seq[Attribute]) extends LeafExecNode {
override protected def doExecute(): RDD[InternalRow] = {
val str = Literal("so fast").value
val row = new GenericInternalRow(Array[Any](str))
val unsafeProj = UnsafeProjection.create(schema)
val unsafeRow = unsafeProj(row).copy()
sparkContext.parallelize(Seq(unsafeRow))
}
override def producedAttributes: AttributeSet = outputSet
}
object TestStrategy extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case Project(Seq(attr), _) if attr.name == "a" =>
FastOperator(attr.toAttribute :: Nil) :: Nil
case _ => Nil
}
}
class ExtraStrategiesSuite extends QueryTest with SharedSparkSession {
import testImplicits._
test("insert an extraStrategy") {
try {
spark.experimental.extraStrategies = TestStrategy :: Nil
val df = sparkContext.parallelize(Seq(("so slow", 1))).toDF("a", "b")
checkAnswer(
df.select("a"),
Row("so fast"))
checkAnswer(
df.select("a", "b"),
Row("so slow", 1))
} finally {
spark.experimental.extraStrategies = Nil
}
}
}
|
maropu/spark
|
sql/core/src/test/scala/org/apache/spark/sql/ExtraStrategiesSuite.scala
|
Scala
|
apache-2.0
| 2,320
|
import sbt._
import Keys._
object JniKeys {
val javahName = settingKey[String](
"The name of the javah command for generating JNI headers")
val javahPath = settingKey[String](
"The path to the javah executable")
val jniClasses = settingKey[Seq[String]](
"Fully qualified names of classes with native methods for which " +
"JNI headers are to be generated")
val javah = taskKey[Seq[File]](
"Produce C headers from Java classes with native methods")
}
object JniBuild {
import JniKeys._
import NativeKeys._
private val jdkHome = file(System.getProperty("java.home")) / ".."
private val jdkInclude = jdkHome / "include"
private val jdkOsInclude = jdkInclude / System.getProperty("os.name").toLowerCase
lazy val jniSettings = Seq(
javahName := "javah",
javahPath <<= (javaHome, javahName) apply { (home, name) =>
home map ( h => (h / "bin" / name).absolutePath ) getOrElse name
},
jniClasses := Seq.empty,
cIncludes ++= Seq(jdkInclude.toString, jdkOsInclude.toString),
libraryDependencies += "org.scala-lang" % "scala-reflect" % scalaVersion.value,
javah in Compile := {
val log = streams.value.log
val classPath =
(internalDependencyClasspath in Compile).value.map(_.data) ++
(externalDependencyClasspath in Compile).value.map(_.data) ++
Seq((classDirectory in Compile).value.toString)
val javahCommandLine = Seq(
javahPath.value,
"-d", (resourceManaged in Compile).value.toString,
"-cp", classPath.mkString(":")
) ++ jniClasses.value
log.info(javahCommandLine mkString " ")
val exitCode = Process(javahCommandLine) ! log
if (exitCode != 0) {
sys.error("javah exited with " + exitCode)
}
jniClasses.value map { s =>
file(((resourceManaged in Compile).value / (s.replace(".", "_") + ".h")).toString)
}
}
)
}
|
edinburgh-rbm/expokit
|
project/Jni.scala
|
Scala
|
gpl-3.0
| 1,917
|
package org.retistruen.jmx
import java.util.Hashtable
import javax.management.ObjectName.{ getInstance ⇒ ObjectName }
import javax.management.{ Attribute, AttributeList, DynamicMBean, MBeanAttributeInfo, MBeanInfo, StandardMBean }
import org.retistruen.building.BuildingInfrastructure
import org.retistruen.{ Named, Pollable, Source }
import scala.collection.JavaConversions._
import scala.math.ScalaNumericConversions
import org.retistruen.OpenSource
trait SourceMBean[T] {
protected val source: OpenSource[T]
def getName = source.name
def emit(value: String) = source << value
}
class SourceObject[T](protected val source: OpenSource[T]) extends SourceMBean[T]
trait JMX extends Named {
this: BuildingInfrastructure ⇒
object MBean extends DynamicMBean {
def attributes: Array[MBeanAttributeInfo] =
select[Pollable[_]].toArray.map(pollable ⇒ new MBeanAttributeInfo(pollable.name, classOf[java.lang.Double].getName, null, true, false, false))
def getMBeanInfo: MBeanInfo =
new MBeanInfo(getClass.getName, name, attributes, null, null, null)
def invoke(method: String, arguments: Array[AnyRef], signature: Array[String]): AnyRef =
null
def getAttribute(name: String): Attribute =
attribute(select[Pollable[_]].find(pollable ⇒ pollable.name == name).get)
def setAttribute(attribute: Attribute): Unit =
()
def getAttributes(names: Array[String]): AttributeList =
new AttributeList(names.map(getAttribute(_)).toList)
def setAttributes(attributes: AttributeList): AttributeList =
null
private def attribute(pollable: Pollable[_]) =
new Attribute(pollable.name, pollable.poll.getOrElse(null))
}
private def table(pairs: List[(String, String)]) = {
val ht = new Hashtable[String, String]
for ((a, b) ← pairs.reverse) ht.put(a, b)
ht
}
val jmxRegistrationDomain = "org.retistruen"
private def on(key: (String, String)*) =
ObjectName(jmxRegistrationDomain, table(("model" → name) :: key.toList))
def registerMBeans = {
val server = bestMBeanServer("jboss")
server.registerMBean(MBean,
on("type" → "model", "name" → name))
for (source ← select[OpenSource[_]])
server.registerMBean(new StandardMBean(new SourceObject(source), classOf[SourceMBean[_]]),
on("type" → "source", "name" → source.name))
}
def unregisterMBeans = {
val server = bestMBeanServer("jboss")
server.unregisterMBean(on("type" → "model", "name" → name))
for (source ← select[OpenSource[_]])
server.unregisterMBean(on("type" → "source", "name" → source.name))
}
}
|
plalloni/retistruen
|
src/main/scala/org/retistruen/jmx/JMX.scala
|
Scala
|
mit
| 2,649
|
package chandu0101.scalajs.react.components.materialui
import chandu0101.macros.tojs.JSMacro
import japgolly.scalajs.react._
import materialui.Mui
import scala.scalajs.js
case class MuiAvatar(backgroundColor: js.UndefOr[String] = js.undefined,
style: js.UndefOr[js.Any] = js.undefined,
icon: js.UndefOr[ReactElement] = js.undefined,
ref: js.UndefOr[String] = js.undefined,
color: js.UndefOr[String] = js.undefined,
key: js.UndefOr[String] = js.undefined,
src: js.UndefOr[String] = js.undefined) {
def apply() = {
val props = JSMacro[MuiAvatar](this)
val f = React.asInstanceOf[js.Dynamic].createFactory(Mui.Avatar)
f(props).asInstanceOf[ReactComponentU_]
}
}
|
mproch/scalajs-react-components
|
core/src/main/scala/chandu0101/scalajs/react/components/materialui/MuiAvatar.scala
|
Scala
|
apache-2.0
| 752
|
package org.dsa.core.prepare
import java.io.{File, PrintWriter}
import org.apache.spark.{SparkConf, SparkContext}
import scala.io.Source
/**
* Created by xubo on 2016/11/29.
* find sequences from a reference database
*/
object FindLocal {
var intervalVale = 0
def main(args: Array[String]) {
var time = 0
if (args == null || args.length < 2) {
throw new Exception("input should include: file and output")
} else if (args.length < 3) {
time = 100
} else {
time = args(2).toInt
}
intervalVale = 50
val arr = (100 to 42965 by intervalVale).toArray
// val arr = Array(32771)
findSaveByLength(args(0), args(1), arr, time)
}
def printArr(arr: Array[String]): Unit = {
for (i <- 0 until arr.length) {
println(arr(i))
}
}
def findSaveByLength(file: String, output: String, arr: Array[Int], time: Int): Unit = {
for (i <- 0 until arr.length) {
val result = findSequnceByLength(file, arr(i))
if (result != null) {
val str = result.split(",")
val strLength = str(1).length + 1000000
val refOutPath = output + "/RL" + strLength + str(0) + ".file"
val queryOutPath = output + "/QL" + strLength + str(0) + ".file"
val file = new File(output)
if (!file.exists()) {
// file.mkdir()
file.mkdirs()
}
val outRef = new PrintWriter(refOutPath)
for (j <- 0 until time) {
outRef.println(result)
}
outRef.close()
val outQuery = new PrintWriter(queryOutPath)
outQuery.println(result)
outQuery.close()
}
}
}
/**
* find sequence by length in local mode
*
* @param file input file
* @param length length
* @return the matching record line ("name,sequence"), or null if none is close enough
*/
def findSequnceByLength(file: String, length: Int, interval: Int = intervalVale): String = {
val sources = Source.fromFile(file)
val iter = sources.getLines()
var flag = true
var min = Integer.MAX_VALUE
var minStr = ""
while (iter.hasNext && flag) {
val str = iter.next()
val arr = str.split(",")
if (arr.length == 2 && arr(1).length == length) {
sources.close()
return str
}
if (arr.length == 2 && arr(1).length > length && arr(1).length < min) { // guard against malformed lines
minStr = str.toString
min = arr(1).length
}
}
sources.close()
if (min - length < interval) {
return minStr
}
null
}
def findSequnceByName(file: String, name: String): String = {
val sources = Source.fromFile(file)
val iter = sources.getLines()
var flag = true
var min = Integer.MAX_VALUE
var minStr = ""
while (iter.hasNext && flag) {
val str = iter.next()
val arr = str.split(",")
if (arr.length == 2 && arr(0).equals(name)) {
sources.close()
return str
}
}
sources.close()
println("null")
null
}
}
|
xubo245/CloudSW
|
src/main/scala/org/dsa/core/prepare/FindLocal.scala
|
Scala
|
gpl-2.0
| 2,941
|
/*
* Copyright 2011-2013 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.vertx.scala.core
import org.vertx.java.core.{ MultiMap => JMultiMap }
import scala.collection.mutable
import org.vertx.java.core.http.CaseInsensitiveMultiMap
package object http {
type HttpVersion = org.vertx.java.core.http.HttpVersion
type WebSocketVersion = org.vertx.java.core.http.WebSocketVersion
import scala.language.implicitConversions
/**
* Implicit conversion for [[org.vertx.java.core.MultiMap]] to [[scala.collection.mutable.MultiMap]].
*/
implicit def multiMapToScalaMultiMap(n: JMultiMap): mutable.MultiMap[String, String] = {
new JMultiMapWrapper(n)
}
/**
* Implicit conversion for [[scala.collection.mutable.MultiMap]] to [[org.vertx.java.core.MultiMap]].
*/
implicit def scalaMultiMapToMultiMap(n: mutable.MultiMap[String, String]): JMultiMap = {
val jmultiMap = new CaseInsensitiveMultiMap
n.foreach { entry => jmultiMap.put(entry._1, entry._2)}
jmultiMap
}
private class JMultiMapWrapper(val underlying: JMultiMap) extends mutable.MultiMap[String, String] {
override def addBinding(key: String, value: String): this.type = {
underlying.add(key, value)
this
}
override def removeBinding(key: String, value: String): this.type = {
val it = underlying.iterator()
while (it.hasNext) {
val next = it.next()
if (next.getKey.equalsIgnoreCase(key) && next.getValue == value)
it.remove()
}
this
}
override def entryExists(key: String, p: (String) => Boolean): Boolean = {
val it = underlying.iterator()
while (it.hasNext) {
val next = it.next()
if (next.getKey.equalsIgnoreCase(key) && p(next.getValue))
return true
}
false
}
override def iterator: Iterator[(String, mutable.Set[String])] = {
val mm = new mutable.HashMap[String, mutable.Set[String]] with MultiMap
val it = underlying.iterator()
while (it.hasNext) {
val next = it.next()
mm.addBinding(next.getKey, next.getValue)
}
mm.iterator
}
override def get(key: String): Option[mutable.Set[String]] = {
val set = mutable.HashSet[String]()
val it = underlying.iterator()
while (it.hasNext) {
val next = it.next()
if (next.getKey.equalsIgnoreCase(key))
set.add(next.getValue)
}
if (set.isEmpty) None else Some(set)
}
override def -=(key: String): this.type = {
underlying.remove(key)
this
}
override def +=(kv: (String, mutable.Set[String])): this.type = {
kv._2.foreach { v =>
underlying.add(kv._1, v)
}
this
}
}
}
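/**
 * Minimal usage sketch (an illustrative example, not part of the original file): the implicit
 * conversions defined in the `http` package object above let a Java `MultiMap` be read through
 * the Scala `mutable.MultiMap` API.
 */
private[core] object MultiMapConversionSketch {
  import org.vertx.scala.core.http._

  def headerValues(headers: JMultiMap, name: String): Set[String] = {
    // multiMapToScalaMultiMap wraps the Java MultiMap in a Scala view
    val scalaView: mutable.MultiMap[String, String] = headers
    scalaView.get(name).map(_.toSet).getOrElse(Set.empty)
  }
}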
|
vert-x/mod-lang-scala
|
src/main/scala/org/vertx/scala/core/http/package.scala
|
Scala
|
apache-2.0
| 3,286
|
package com.softwaremill.codebrag.usecases.user
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.mock.MockitoSugar
import org.scalatest.{BeforeAndAfter, FlatSpec}
import com.softwaremill.codebrag.service.user.RegisterService
import com.softwaremill.codebrag.dao.user.UserDAO
import org.mockito.Mockito._
import com.softwaremill.codebrag.service.invitations.InvitationService
import com.softwaremill.scalaval.Validation
class RegisterNewUserUseCaseSpec extends FlatSpec with ShouldMatchers with MockitoSugar with BeforeAndAfter {
val validator = mock[UserRegistrationValidator]
val registerService = mock[RegisterService]
val useCase: RegisterNewUserUseCase = new RegisterNewUserUseCase(registerService, validator)
val noValidationErrors = Validation.Result(errors = Map.empty)
before {
reset(registerService, validator)
}
it should "allow first user to be registered" in {
// given
val form = RegistrationForm("john", "john@codebrag.com", "secret", "123456")
when(registerService.isFirstRegistration).thenReturn(true)
when(validator.validateRegistration(form, firstRegistration = true)).thenReturn(noValidationErrors)
// when
useCase.execute(form)
// then
verify(registerService).registerUser(form.toUser.makeAdmin)
}
it should "allow new user to be registered when validation passes" in {
// given
val form = RegistrationForm("john", "john@codebrag.com", "secret", "123456")
when(registerService.isFirstRegistration).thenReturn(false)
when(validator.validateRegistration(form, firstRegistration = false)).thenReturn(noValidationErrors)
// when
useCase.execute(form)
// then
verify(registerService).registerUser(form.toUser)
}
it should "not allow new user to be registered when validation fails" in {
// given
val form = RegistrationForm("john", "john@codebrag.com", "secret", "123456")
when(registerService.isFirstRegistration).thenReturn(false)
val errors = Map("userName" -> Seq("User already exists"))
when(validator.validateRegistration(form, firstRegistration = false)).thenReturn(Validation.Result(errors))
// when
val Left(result) = useCase.execute(form)
// then
verify(registerService, times(0)).registerUser(form.toUser)
result should be(errors)
}
}
|
softwaremill/codebrag
|
codebrag-service/src/test/scala/com/softwaremill/codebrag/usecases/user/RegisterNewUserUseCaseSpec.scala
|
Scala
|
agpl-3.0
| 2,327
|
package com.wavesplatform.state.diffs.ci.sync
import com.wavesplatform.account.Address
import com.wavesplatform.db.WithDomain
import com.wavesplatform.db.WithState.AddrWithBalance
import com.wavesplatform.features.BlockchainFeatures._
import com.wavesplatform.lang.directives.values.V5
import com.wavesplatform.lang.script.Script
import com.wavesplatform.lang.v1.compiler.TestCompiler
import com.wavesplatform.settings.TestFunctionalitySettings
import com.wavesplatform.test._
import com.wavesplatform.transaction.Asset.IssuedAsset
import com.wavesplatform.transaction.{Asset, TxHelpers}
class SyncDAppNegativeTransferTest extends PropSpec with WithDomain {
private def sigVerify(c: Boolean) =
s""" strict c = ${if (c) (1 to 5).map(_ => "sigVerify(base58'', base58'', base58'')").mkString(" || ") else "true"} """
private def dApp1Script(dApp2: Address, bigComplexity: Boolean): Script =
TestCompiler(V5).compileContract(
s"""
| @Callable(i)
| func default() = {
| ${sigVerify(bigComplexity)}
| strict r = Address(base58'$dApp2').invoke("default", [], [])
| []
| }
""".stripMargin
)
private def dApp2Script(asset: Asset, bigComplexity: Boolean): Script =
TestCompiler(V5).compileContract(
s"""
| @Callable(i)
| func default() = {
| ${sigVerify(bigComplexity)}
| [
| ScriptTransfer(i.caller, -1, base58'$asset')
| ]
| }
""".stripMargin
)
private val settings =
TestFunctionalitySettings
.withFeatures(BlockV5, SynchronousCalls)
.copy(syncDAppCheckTransfersHeight = 4)
property("negative transfer amount") {
for {
bigComplexityDApp1 <- Seq(false, true)
bigComplexityDApp2 <- Seq(false, true)
} {
val invoker = TxHelpers.signer(0)
val dApp1 = TxHelpers.signer(1)
val dApp2 = TxHelpers.signer(2)
val balances = AddrWithBalance.enoughBalances(invoker, dApp1, dApp2)
val issue = TxHelpers.issue(dApp2, 100)
val asset = IssuedAsset(issue.id.value())
val setScript1 = TxHelpers.setScript(dApp1, dApp1Script(dApp2.toAddress, bigComplexityDApp1))
val setScript2 = TxHelpers.setScript(dApp2, dApp2Script(asset, bigComplexityDApp2))
val preparingTxs = Seq(issue, setScript1, setScript2)
val invoke1 = TxHelpers.invoke(dApp1.toAddress, func = None, invoker = invoker)
val invoke2 = TxHelpers.invoke(dApp1.toAddress, func = None, invoker = invoker)
withDomain(domainSettingsWithFS(settings), balances) { d =>
d.appendBlock(preparingTxs: _*)
if (bigComplexityDApp1 || bigComplexityDApp2) {
d.appendBlock(invoke1)
d.liquidDiff.errorMessage(invoke1.txId).get.text should include("Negative amount")
} else {
d.appendBlockE(invoke1) should produce("Negative amount")
d.appendBlock()
}
d.appendBlock()
d.appendBlockE(invoke2) should produce("Negative transfer amount = -1")
}
}
}
}
|
wavesplatform/Waves
|
node/src/test/scala/com/wavesplatform/state/diffs/ci/sync/SyncDAppNegativeTransferTest.scala
|
Scala
|
mit
| 3,090
|
package scruffy.examples
import com.sksamuel.scruffy.{ScruffyConfiguration, Scruffy}
/** @author Stephen Samuel */
object Main extends App {
val port = 8080
val scruffy = new Scruffy(ScruffyConfiguration.port(port).compression(false).requestLogging(false))
scruffy.mount(new Test1Endpoint)
scruffy.mount(new Test2Endpoint)
scruffy.mount(new Test6Endpoint)
println("Starting Scruffy...")
val lifecycle = scruffy.start()
println(s"Started on port [$port]. Interrupt to exit.")
lifecycle.await()
}
|
kellabyte/FrameworkBenchmarks
|
frameworks/Scala/scruffy/src/main/scala/scruffy/examples/Main.scala
|
Scala
|
bsd-3-clause
| 516
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.ml.tree.{CategoricalSplit, InternalNode, LeafNode}
import org.apache.spark.ml.tree.impl.TreeTests
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTestingUtils}
import org.apache.spark.mllib.regression.{LabeledPoint => OldLabeledPoint}
import org.apache.spark.mllib.tree.{DecisionTree => OldDecisionTree, DecisionTreeSuite => OldDecisionTreeSuite}
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row}
class DecisionTreeClassifierSuite
extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {
import DecisionTreeClassifierSuite.compareAPIs
import testImplicits._
private var categoricalDataPointsRDD: RDD[LabeledPoint] = _
private var orderedLabeledPointsWithLabel0RDD: RDD[LabeledPoint] = _
private var orderedLabeledPointsWithLabel1RDD: RDD[LabeledPoint] = _
private var categoricalDataPointsForMulticlassRDD: RDD[LabeledPoint] = _
private var continuousDataPointsForMulticlassRDD: RDD[LabeledPoint] = _
private var categoricalDataPointsForMulticlassForOrderedFeaturesRDD: RDD[LabeledPoint] = _
override def beforeAll() {
super.beforeAll()
categoricalDataPointsRDD =
sc.parallelize(OldDecisionTreeSuite.generateCategoricalDataPoints()).map(_.asML)
orderedLabeledPointsWithLabel0RDD =
sc.parallelize(OldDecisionTreeSuite.generateOrderedLabeledPointsWithLabel0()).map(_.asML)
orderedLabeledPointsWithLabel1RDD =
sc.parallelize(OldDecisionTreeSuite.generateOrderedLabeledPointsWithLabel1()).map(_.asML)
categoricalDataPointsForMulticlassRDD =
sc.parallelize(OldDecisionTreeSuite.generateCategoricalDataPointsForMulticlass()).map(_.asML)
continuousDataPointsForMulticlassRDD =
sc.parallelize(OldDecisionTreeSuite.generateContinuousDataPointsForMulticlass()).map(_.asML)
categoricalDataPointsForMulticlassForOrderedFeaturesRDD = sc.parallelize(
OldDecisionTreeSuite.generateCategoricalDataPointsForMulticlassForOrderedFeatures())
.map(_.asML)
}
test("params") {
ParamsSuite.checkParams(new DecisionTreeClassifier)
val model = new DecisionTreeClassificationModel("dtc", new LeafNode(0.0, 0.0, null), 1, 2)
ParamsSuite.checkParams(model)
}
/////////////////////////////////////////////////////////////////////////////
// Tests calling train()
/////////////////////////////////////////////////////////////////////////////
test("Binary classification stump with ordered categorical features") {
val dt = new DecisionTreeClassifier()
.setImpurity("gini")
.setMaxDepth(2)
.setMaxBins(100)
.setSeed(1)
val categoricalFeatures = Map(0 -> 3, 1 -> 3)
val numClasses = 2
compareAPIs(categoricalDataPointsRDD, dt, categoricalFeatures, numClasses)
}
test("Binary classification stump with fixed labels 0,1 for Entropy,Gini") {
val dt = new DecisionTreeClassifier()
.setMaxDepth(3)
.setMaxBins(100)
val numClasses = 2
Array(orderedLabeledPointsWithLabel0RDD, orderedLabeledPointsWithLabel1RDD).foreach { rdd =>
DecisionTreeClassifier.supportedImpurities.foreach { impurity =>
dt.setImpurity(impurity)
compareAPIs(rdd, dt, categoricalFeatures = Map.empty[Int, Int], numClasses)
}
}
}
test("Multiclass classification stump with 3-ary (unordered) categorical features") {
val rdd = categoricalDataPointsForMulticlassRDD
val dt = new DecisionTreeClassifier()
.setImpurity("Gini")
.setMaxDepth(4)
val numClasses = 3
val categoricalFeatures = Map(0 -> 3, 1 -> 3)
compareAPIs(rdd, dt, categoricalFeatures, numClasses)
}
test("Binary classification stump with 1 continuous feature, to check off-by-1 error") {
val arr = Array(
LabeledPoint(0.0, Vectors.dense(0.0)),
LabeledPoint(1.0, Vectors.dense(1.0)),
LabeledPoint(1.0, Vectors.dense(2.0)),
LabeledPoint(1.0, Vectors.dense(3.0)))
val rdd = sc.parallelize(arr)
val dt = new DecisionTreeClassifier()
.setImpurity("Gini")
.setMaxDepth(4)
val numClasses = 2
compareAPIs(rdd, dt, categoricalFeatures = Map.empty[Int, Int], numClasses)
}
test("Binary classification stump with 2 continuous features") {
val arr = Array(
LabeledPoint(0.0, Vectors.sparse(2, Seq((0, 0.0)))),
LabeledPoint(1.0, Vectors.sparse(2, Seq((1, 1.0)))),
LabeledPoint(0.0, Vectors.sparse(2, Seq((0, 0.0)))),
LabeledPoint(1.0, Vectors.sparse(2, Seq((1, 2.0)))))
val rdd = sc.parallelize(arr)
val dt = new DecisionTreeClassifier()
.setImpurity("Gini")
.setMaxDepth(4)
val numClasses = 2
compareAPIs(rdd, dt, categoricalFeatures = Map.empty[Int, Int], numClasses)
}
test("Multiclass classification stump with unordered categorical features," +
" with just enough bins") {
val maxBins = 2 * (math.pow(2, 3 - 1).toInt - 1) // just enough bins to allow unordered features
val rdd = categoricalDataPointsForMulticlassRDD
val dt = new DecisionTreeClassifier()
.setImpurity("Gini")
.setMaxDepth(4)
.setMaxBins(maxBins)
val categoricalFeatures = Map(0 -> 3, 1 -> 3)
val numClasses = 3
compareAPIs(rdd, dt, categoricalFeatures, numClasses)
}
test("Multiclass classification stump with continuous features") {
val rdd = continuousDataPointsForMulticlassRDD
val dt = new DecisionTreeClassifier()
.setImpurity("Gini")
.setMaxDepth(4)
.setMaxBins(100)
val numClasses = 3
compareAPIs(rdd, dt, categoricalFeatures = Map.empty[Int, Int], numClasses)
}
test("Multiclass classification stump with continuous + unordered categorical features") {
val rdd = continuousDataPointsForMulticlassRDD
val dt = new DecisionTreeClassifier()
.setImpurity("Gini")
.setMaxDepth(4)
.setMaxBins(100)
val categoricalFeatures = Map(0 -> 3)
val numClasses = 3
compareAPIs(rdd, dt, categoricalFeatures, numClasses)
}
test("Multiclass classification stump with 10-ary (ordered) categorical features") {
val rdd = categoricalDataPointsForMulticlassForOrderedFeaturesRDD
val dt = new DecisionTreeClassifier()
.setImpurity("Gini")
.setMaxDepth(4)
.setMaxBins(100)
val categoricalFeatures = Map(0 -> 10, 1 -> 10)
val numClasses = 3
compareAPIs(rdd, dt, categoricalFeatures, numClasses)
}
test("Multiclass classification tree with 10-ary (ordered) categorical features," +
" with just enough bins") {
val rdd = categoricalDataPointsForMulticlassForOrderedFeaturesRDD
val dt = new DecisionTreeClassifier()
.setImpurity("Gini")
.setMaxDepth(4)
.setMaxBins(10)
val categoricalFeatures = Map(0 -> 10, 1 -> 10)
val numClasses = 3
compareAPIs(rdd, dt, categoricalFeatures, numClasses)
}
test("split must satisfy min instances per node requirements") {
val arr = Array(
LabeledPoint(0.0, Vectors.sparse(2, Seq((0, 0.0)))),
LabeledPoint(1.0, Vectors.sparse(2, Seq((1, 1.0)))),
LabeledPoint(0.0, Vectors.sparse(2, Seq((0, 1.0)))))
val rdd = sc.parallelize(arr)
val dt = new DecisionTreeClassifier()
.setImpurity("Gini")
.setMaxDepth(2)
.setMinInstancesPerNode(2)
val numClasses = 2
compareAPIs(rdd, dt, categoricalFeatures = Map.empty[Int, Int], numClasses)
}
test("do not choose split that does not satisfy min instance per node requirements") {
// if a split does not satisfy min instances per node requirements,
// this split is invalid, even though the information gain of split is large.
val arr = Array(
LabeledPoint(0.0, Vectors.dense(0.0, 1.0)),
LabeledPoint(1.0, Vectors.dense(1.0, 1.0)),
LabeledPoint(0.0, Vectors.dense(0.0, 0.0)),
LabeledPoint(0.0, Vectors.dense(0.0, 0.0)))
val rdd = sc.parallelize(arr)
val dt = new DecisionTreeClassifier()
.setImpurity("Gini")
.setMaxBins(2)
.setMaxDepth(2)
.setMinInstancesPerNode(2)
val categoricalFeatures = Map(0 -> 2, 1 -> 2)
val numClasses = 2
compareAPIs(rdd, dt, categoricalFeatures, numClasses)
}
test("split must satisfy min info gain requirements") {
val arr = Array(
LabeledPoint(0.0, Vectors.sparse(2, Seq((0, 0.0)))),
LabeledPoint(1.0, Vectors.sparse(2, Seq((1, 1.0)))),
LabeledPoint(0.0, Vectors.sparse(2, Seq((0, 1.0)))))
val rdd = sc.parallelize(arr)
val dt = new DecisionTreeClassifier()
.setImpurity("Gini")
.setMaxDepth(2)
.setMinInfoGain(1.0)
val numClasses = 2
compareAPIs(rdd, dt, categoricalFeatures = Map.empty[Int, Int], numClasses)
}
test("predictRaw and predictProbability") {
val rdd = continuousDataPointsForMulticlassRDD
val dt = new DecisionTreeClassifier()
.setImpurity("Gini")
.setMaxDepth(4)
.setMaxBins(100)
val categoricalFeatures = Map(0 -> 3)
val numClasses = 3
val newData: DataFrame = TreeTests.setMetadata(rdd, categoricalFeatures, numClasses)
val newTree = dt.fit(newData)
MLTestingUtils.checkCopyAndUids(dt, newTree)
val predictions = newTree.transform(newData)
.select(newTree.getPredictionCol, newTree.getRawPredictionCol, newTree.getProbabilityCol)
.collect()
predictions.foreach { case Row(pred: Double, rawPred: Vector, probPred: Vector) =>
assert(pred === rawPred.argmax,
s"Expected prediction $pred but calculated ${rawPred.argmax} from rawPrediction.")
val sum = rawPred.toArray.sum
assert(Vectors.dense(rawPred.toArray.map(_ / sum)) === probPred,
"probability prediction mismatch")
}
}
test("training with 1-category categorical feature") {
val data = sc.parallelize(Seq(
LabeledPoint(0, Vectors.dense(0, 2, 3)),
LabeledPoint(1, Vectors.dense(0, 3, 1)),
LabeledPoint(0, Vectors.dense(0, 2, 2)),
LabeledPoint(1, Vectors.dense(0, 3, 9)),
LabeledPoint(0, Vectors.dense(0, 2, 6))
))
val df = TreeTests.setMetadata(data, Map(0 -> 1), 2)
val dt = new DecisionTreeClassifier().setMaxDepth(3)
dt.fit(df)
}
test("Use soft prediction for binary classification with ordered categorical features") {
// The following dataset is set up such that the best split is {1} vs. {0, 2}.
// If the hard prediction is used to order the categories, then {0} vs. {1, 2} is chosen.
val arr = Array(
LabeledPoint(0.0, Vectors.dense(0.0)),
LabeledPoint(0.0, Vectors.dense(0.0)),
LabeledPoint(0.0, Vectors.dense(0.0)),
LabeledPoint(1.0, Vectors.dense(0.0)),
LabeledPoint(0.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(2.0)),
LabeledPoint(0.0, Vectors.dense(2.0)),
LabeledPoint(0.0, Vectors.dense(2.0)),
LabeledPoint(1.0, Vectors.dense(2.0)))
val data = sc.parallelize(arr)
val df = TreeTests.setMetadata(data, Map(0 -> 3), 2)
// Must set maxBins s.t. the feature will be treated as an ordered categorical feature.
val dt = new DecisionTreeClassifier()
.setImpurity("gini")
.setMaxDepth(1)
.setMaxBins(3)
val model = dt.fit(df)
model.rootNode match {
case n: InternalNode =>
n.split match {
case s: CategoricalSplit =>
assert(s.leftCategories === Array(1.0))
case other =>
fail(s"All splits should be categorical, but got ${other.getClass.getName}: $other.")
}
case other =>
fail(s"Root node should be an internal node, but got ${other.getClass.getName}: $other.")
}
}
test("Feature importance with toy data") {
val dt = new DecisionTreeClassifier()
.setImpurity("gini")
.setMaxDepth(3)
.setSeed(123)
// In this data, feature 1 is very important.
val data: RDD[LabeledPoint] = TreeTests.featureImportanceData(sc)
val numFeatures = data.first().features.size
val categoricalFeatures = (0 to numFeatures).map(i => (i, 2)).toMap
val df = TreeTests.setMetadata(data, categoricalFeatures, 2)
val model = dt.fit(df)
val importances = model.featureImportances
val mostImportantFeature = importances.argmax
assert(mostImportantFeature === 1)
assert(importances.toArray.sum === 1.0)
assert(importances.toArray.forall(_ >= 0.0))
}
test("should support all NumericType labels and not support other types") {
val dt = new DecisionTreeClassifier().setMaxDepth(1)
MLTestingUtils.checkNumericTypes[DecisionTreeClassificationModel, DecisionTreeClassifier](
dt, spark) { (expected, actual) =>
TreeTests.checkEqual(expected, actual)
}
}
test("Fitting without numClasses in metadata") {
val df: DataFrame = TreeTests.featureImportanceData(sc).toDF()
val dt = new DecisionTreeClassifier().setMaxDepth(1)
dt.fit(df)
}
/////////////////////////////////////////////////////////////////////////////
// Tests of model save/load
/////////////////////////////////////////////////////////////////////////////
test("read/write") {
def checkModelData(
model: DecisionTreeClassificationModel,
model2: DecisionTreeClassificationModel): Unit = {
TreeTests.checkEqual(model, model2)
assert(model.numFeatures === model2.numFeatures)
assert(model.numClasses === model2.numClasses)
}
val dt = new DecisionTreeClassifier()
val rdd = TreeTests.getTreeReadWriteData(sc)
val allParamSettings = TreeTests.allParamSettings ++ Map("impurity" -> "entropy")
// Categorical splits with tree depth 2
val categoricalData: DataFrame =
TreeTests.setMetadata(rdd, Map(0 -> 2, 1 -> 3), numClasses = 2)
testEstimatorAndModelReadWrite(dt, categoricalData, allParamSettings,
allParamSettings, checkModelData)
// Continuous splits with tree depth 2
val continuousData: DataFrame =
TreeTests.setMetadata(rdd, Map.empty[Int, Int], numClasses = 2)
testEstimatorAndModelReadWrite(dt, continuousData, allParamSettings,
allParamSettings, checkModelData)
// Continuous splits with tree depth 0
testEstimatorAndModelReadWrite(dt, continuousData, allParamSettings ++ Map("maxDepth" -> 0),
allParamSettings ++ Map("maxDepth" -> 0), checkModelData)
}
test("SPARK-20043: " +
"ImpurityCalculator builder fails for uppercase impurity type Gini in model read/write") {
val rdd = TreeTests.getTreeReadWriteData(sc)
val data: DataFrame =
TreeTests.setMetadata(rdd, Map.empty[Int, Int], numClasses = 2)
val dt = new DecisionTreeClassifier()
.setImpurity("Gini")
.setMaxDepth(2)
val model = dt.fit(data)
testDefaultReadWrite(model)
}
}
private[ml] object DecisionTreeClassifierSuite extends SparkFunSuite {
/**
* Train 2 decision trees on the given dataset, one using the old API and one using the new API.
* Convert the old tree to the new format, compare them, and fail if they are not exactly equal.
*/
def compareAPIs(
data: RDD[LabeledPoint],
dt: DecisionTreeClassifier,
categoricalFeatures: Map[Int, Int],
numClasses: Int): Unit = {
val numFeatures = data.first().features.size
val oldStrategy = dt.getOldStrategy(categoricalFeatures, numClasses)
val oldTree = OldDecisionTree.train(data.map(OldLabeledPoint.fromML), oldStrategy)
val newData: DataFrame = TreeTests.setMetadata(data, categoricalFeatures, numClasses)
val newTree = dt.fit(newData)
// Use parent from newTree since this is not checked anyways.
val oldTreeAsNew = DecisionTreeClassificationModel.fromOld(
oldTree, newTree.parent.asInstanceOf[DecisionTreeClassifier], categoricalFeatures)
TreeTests.checkEqual(oldTreeAsNew, newTree)
assert(newTree.numFeatures === numFeatures)
}
}
|
aokolnychyi/spark
|
mllib/src/test/scala/org/apache/spark/ml/classification/DecisionTreeClassifierSuite.scala
|
Scala
|
apache-2.0
| 16,964
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe
import wvlet.airframe.surface.reflect.ReflectSurfaceFactory
import scala.collection.mutable
/**
*/
package object surface {
def getCached(fullName: String): Surface = ReflectSurfaceFactory.get(fullName)
def newCacheMap[A, B]: mutable.Map[A, B] = new mutable.WeakHashMap[A, B]()
}
|
wvlet/airframe
|
airframe-surface/.jvm/src/main/scala-2/wvlet/airframe/surface/package.scala
|
Scala
|
apache-2.0
| 873
|
package deaktator.pops.msgs
import java.io.{IOException, InputStream}
import com.google.protobuf.Descriptors.Descriptor
import com.google.protobuf.{ByteString, CodedInputStream, ExtensionRegistryLite, GeneratedMessage, InvalidProtocolBufferException}
/**
 * A Runtime-based version of [[ProtoOps]]. This really shouldn't be used unless necessary. For instance,
 * if calling `Class.forName` isn't necessary, this really shouldn't be used either. In that case, the preferable
 * way to get a [[ProtoOps]] instance is implicitly via the implicit factory method in the [[ProtoOps]]
* companion object.
*/
private[pops] final case class RuntimeProtoOps[A <: GeneratedMessage](messageClass: Class[A]) extends ProtoOps[A] {
def getDefaultInstance(): A =
messageClass.getMethod("getDefaultInstance").invoke(null).asInstanceOf[A]
def getDescriptor(): Descriptor =
messageClass.getMethod("getDescriptor").invoke(null).asInstanceOf[Descriptor]
@throws(classOf[InvalidProtocolBufferException])
def parseFrom(data: ByteString): A =
messageClass.getMethod("parseFrom", classOf[ByteString]).invoke(null, data).asInstanceOf[A]
@throws(classOf[InvalidProtocolBufferException])
def parseFrom(data: ByteString, extensionRegistry: ExtensionRegistryLite): A =
messageClass.getMethod("parseFrom", classOf[ByteString], classOf[ExtensionRegistryLite]).
invoke(null, data, extensionRegistry).asInstanceOf[A]
@throws(classOf[InvalidProtocolBufferException])
def parseFrom(data: Array[Byte]): A =
messageClass.getMethod("parseFrom", classOf[Array[Byte]]).invoke(null, data).asInstanceOf[A]
@throws(classOf[InvalidProtocolBufferException])
def parseFrom(data: Array[Byte], extensionRegistry: ExtensionRegistryLite): A =
messageClass.getMethod("parseFrom", classOf[Array[Byte]], classOf[ExtensionRegistryLite]).
invoke(null, data, extensionRegistry).asInstanceOf[A]
@throws(classOf[IOException])
def parseFrom(input: InputStream): A =
messageClass.getMethod("parseFrom", classOf[InputStream]).invoke(null, input).asInstanceOf[A]
@throws(classOf[IOException])
def parseFrom(input: InputStream, extensionRegistry: ExtensionRegistryLite): A =
messageClass.getMethod("parseFrom", classOf[InputStream], classOf[ExtensionRegistryLite]).
invoke(null, input, extensionRegistry).asInstanceOf[A]
@throws(classOf[IOException])
def parseDelimitedFrom(input: InputStream): A =
messageClass.getMethod("parseDelimitedFrom", classOf[InputStream]).invoke(null, input).asInstanceOf[A]
@throws(classOf[IOException])
def parseDelimitedFrom(input: InputStream, extensionRegistry: ExtensionRegistryLite): A =
messageClass.getMethod("parseDelimitedFrom", classOf[InputStream], classOf[ExtensionRegistryLite]).
invoke(null, input, extensionRegistry).asInstanceOf[A]
@throws(classOf[IOException])
def parseFrom(input: CodedInputStream): A =
messageClass.getMethod("parseFrom", classOf[CodedInputStream]).invoke(null, input).asInstanceOf[A]
@throws(classOf[IOException])
def parseFrom(input: CodedInputStream, extensionRegistry: ExtensionRegistryLite): A =
messageClass.getMethod("parseFrom", classOf[CodedInputStream], classOf[ExtensionRegistryLite]).
invoke(null, input, extensionRegistry).asInstanceOf[A]
}
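/**
 * Minimal usage sketch (an illustrative example, not part of the original file): because every
 * accessor above goes through `Class.getMethod`, a [[RuntimeProtoOps]] can be built for a message
 * class resolved only at runtime. Assumes the generated parser returns null at end of stream,
 * which is the documented behaviour of protobuf's `parseDelimitedFrom`.
 */
private[pops] object RuntimeProtoOpsSketch {
  def parseAll[A <: GeneratedMessage](messageClass: Class[A], input: InputStream): Vector[A] = {
    val ops = RuntimeProtoOps(messageClass)
    // read delimited messages until the stream is exhausted
    Iterator.continually(ops.parseDelimitedFrom(input)).takeWhile(_ != null).toVector
  }
}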
|
deaktator/pops
|
pops-2.4.1/src/main/scala/deaktator/pops/msgs/RuntimeProtoOps.scala
|
Scala
|
mit
| 3,347
|
object Test {
def foo(): Unit = {
try {
for (i <- 1 until 5) return
} catch {
case _: NullPointerException | _: RuntimeException =>
// was: "catch block may intercept non-local return from method check"
}
}
}
|
yusuke2255/dotty
|
tests/untried/pos/t7433.scala
|
Scala
|
bsd-3-clause
| 245
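For context, the test above exercises Scala 2's non-local return: `return` inside the for-comprehension body is a return from a closure, which the compiler implements by throwing scala.runtime.NonLocalReturnControl. That throwable is a ControlThrowable, not a RuntimeException, so the catch clause above cannot actually intercept it, which is why the old warning quoted in the comment was spurious. A minimal sketch of the mechanism, as I understand it (not part of the original test):

object NonLocalReturnDemo {
  // `return` inside the foreach closure is a non-local return: it is compiled into throwing
  // NonLocalReturnControl, which is caught again at the boundary of firstEven.
  def firstEven(xs: List[Int]): Option[Int] = {
    xs.foreach { x => if (x % 2 == 0) return Some(x) }
    None
  }

  def main(args: Array[String]): Unit =
    println(firstEven(List(1, 3, 4, 5)))   // Some(4)
}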
|
import akka.actor.ActorRef
/**
* Created by ashu on 3/26/2017.
*/
object Events {
trait ChatEvent
case class User(name: String) extends ChatEvent
case class UserWithActor(name: String, actor: ActorRef) extends ChatEvent
case class UserJoined(user: UserWithActor) extends ChatEvent
case class UserLeft(user: User) extends ChatEvent
case class SystemMessage(msg: String) extends ChatEvent
case class IncomingMessage(user: User, msg: String) extends ChatEvent
}
|
ashsingh21/ChatServer
|
src/main/scala/Events.scala
|
Scala
|
mit
| 485
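A small, hedged sketch of how such a ChatEvent ADT is typically consumed (the handler below is illustrative and not part of the original project):

object EventHandlerSketch {
  import Events._

  // Pattern match over the sealed-looking event hierarchy to render a log line.
  def describe(event: ChatEvent): String = event match {
    case UserJoined(UserWithActor(name, _)) => s"$name joined"
    case UserLeft(User(name))               => s"$name left"
    case SystemMessage(msg)                 => s"[system] $msg"
    case IncomingMessage(User(name), msg)   => s"$name: $msg"
    case other                              => s"unhandled: $other"
  }
}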
|
// This file contains an interpreter for SCFWAE with recursive first-class functions, conditionals, mutable boxes, variables and sequencing.
/*
* Based on the lecture notes for the "Programming Languages and Types"
* course by Klaus Ostermann at the University of Marburg.
*/
package V5
/* To be able to experiment with different store and gc designs, we
* create an interface for stores. The stack parameter in malloc is
* needed during gc to determine the root nodes from which the
* algorithms can start.
*/
trait Store[Val] {
def malloc(stack: List[Map[Symbol, Int]], v: Val) : (Int, Store[Val])
def update(index: Int, v: Val) : Store[Val]
def lookup(index: Int) : Val
def free(index: Int) : Store[Val]
}
/* Here is one implementation of the Store interface that does not
* perform gc. It just runs out of memory once the store is full.
*/
class NoGCStore[Val](var maxSize: Int) extends Store[Val] {
val memory = new scala.collection.mutable.ArraySeq[Val](maxSize)
var freed = Set[Int]()
var nextFreeAddr : Int = 0
def malloc(stack: List[Map[Symbol, Int]], v: Val) = {
if (!freed.isEmpty) {
val next = freed.head
freed -= next
update(next, v)
(next, this)
}
else {
val x = nextFreeAddr
if (x >= maxSize) sys.error("out of memory") // valid slots are 0 until maxSize
nextFreeAddr += 1
update(x, v)
(x, this)
}
}
def update(index: Int, v: Val) = {
memory.update(index, v)
this
}
def free(index: Int) = {
freed += index
this
}
def lookup(index: Int) = memory(index)
override def toString() = memory.toString
}
object SRCFWAEInterp extends App {
sealed abstract class Expr
case class Num(n: Int) extends Expr
case class Add(lhs: Expr, rhs: Expr) extends Expr
case class Mult(lhs: Expr, rhs: Expr) extends Expr
case class With(name: Symbol, namedExpr: Expr, body: Expr) extends Expr
case class Id(name: Symbol) extends Expr
case class If0(test: Expr, posBody: Expr, negBody: Expr) extends Expr
case class Fun(param: Symbol, body: Expr) extends Expr
case class Rec(name: Symbol, namedExpr: Expr, body: Expr) extends Expr
case class App(funExpr: Expr, argExpr: Expr) extends Expr
case class Seqn(e1: Expr, e2: Expr) extends Expr
case class SetId(id: Symbol, valueExpr: Expr) extends Expr
case class NewBox(valExpr: Expr) extends Expr
case class SetBox(boxExpr: Expr, valueExpr: Expr) extends Expr
case class OpenBox(boxExpr: Expr) extends Expr
type Location = Int
type Env = Map[Symbol, Location]
sealed abstract class Val
case class NumV(n: Int) extends Val
case class Closure(param: Symbol, body: Expr, env: Env) extends Val
case class Box(location: Location) extends Val
/* In our interpreter, the stack of environments is only implicitly
* available on the stack of the meta-language. To reify the call-
* stack we need to make it explicit. We do so by constructing the
* stack explicitly and passing it as parameter. The first element
* of the stack is the current environment; the rest is only needed
* for gc.
*/
def interp(
expr: Expr,
stack: List[Env] = List(Map()),
store: Store[Val] = new NoGCStore[Val](100)): (Val, Store[Val]) = expr match {
case Num(n) => (NumV(n), store)
case Add(lhs, rhs) => {
val (lhsv, s1) = interp(lhs, stack, store)
(lhsv, s1) match {
case (NumV(n1), _) => {
val (rhsv, s2) = interp(rhs, stack, s1)
(rhsv, s2) match {
case (NumV(n2), _) => (NumV(n1 + n2), s2)
case _ => sys.error(
"can only add numbers, but got: %s and %s".format(lhsv, rhsv))
}
}
case _ => sys.error(
"can only add numbers, but got: '%s' as left hand side".format(lhsv))
}
}
case Mult(lhs, rhs) => {
val (lhsv, s1) = interp(lhs, stack, store)
(lhsv, s1) match {
case (NumV(n1), _) => {
val (rhsv, s2) = interp(rhs, stack, s1)
(rhsv, s2) match {
case (NumV(n2), _) => (NumV(n1 * n2), s2)
case _ => sys.error(
"can only multiply numbers, but got: %s and %s".format(lhsv, rhsv))
}
}
case _ => sys.error(
"can only multiply numbers, but got: '%s' as left hand side".format(lhsv))
}
}
case With(boundId, namedExpr, boundBody) => {
val (namedVal, s1) = interp(namedExpr, stack, store)
val (newLoc, s2) = s1.malloc(stack, namedVal)
interp(boundBody, stack.head + (boundId -> newLoc) :: stack.tail, s2)
}
case Id(name) => (store.lookup(stack.head(name)), store)
case Fun(arg, body) => (Closure(arg, body, stack.head), store)
case If0(testExpr, thenExpr, elseExpr) => {
val (testV, s1) = interp(testExpr, stack, store)
testV match {
case NumV(n) => {
if (n == 0) interp(thenExpr, stack, s1)
else interp(elseExpr, stack, s1)
}
case _ => sys.error("can only test numbers, but got: " + testV)
}
}
/**
* In our stateful language, we do not require mutation from the
* host language to implement cyclic environments.
*/
case Rec(boundId, namedExpr, boundBody) => {
val (newLoc, s2) = store.malloc(stack, NumV(0))
val extStack = stack.head + (boundId -> newLoc) :: stack
val (namedVal, bodyStore) = interp(namedExpr, extStack, s2)
interp(boundBody, extStack, bodyStore.update(newLoc, namedVal))
}
case App(funExpr, argExpr) => {
val (funV, funStore) = interp(funExpr, stack, store)
val (argV, argStore) = interp(argExpr, stack, funStore)
funV match {
case Closure(fParam, fBody, fEnv) => {
val (newLoc, resStore) = argStore.malloc(stack, argV)
interp(fBody, fEnv + (fParam -> newLoc) :: stack, resStore)
}
case _ => sys.error("can only apply functions, but got: " + funV)
}
}
case Seqn(e1, e2) => {
val (v1, s1) = interp(e1, stack, store)
interp(e2, stack, s1)
}
case NewBox(boxExpr) => {
val (boxV, boxStore) = interp(boxExpr, stack, store)
val (newLoc, resStore) = boxStore.malloc(stack, boxV)
(Box(newLoc), resStore)
}
case SetBox(boxExpr, valueExpr) => {
val (boxV, s1) = interp(boxExpr, stack, store)
val (value, s2) = interp(valueExpr, stack, s1)
boxV match {
case Box(loc) => (value, s2.update(loc, value))
case _ => sys.error("can only set to boxes, but got: " + boxV)
}
}
case OpenBox(boxExpr) => {
val (boxV, s1) = interp(boxExpr, stack, store)
boxV match {
case Box(loc) => (s1.lookup(loc), s1)
case _ => sys.error("can only open boxes, but got: " + boxV)
}
}
case SetId(id, valExpr) => {
val (value, s1) = interp(valExpr, stack, store)
(value, s1.update(stack.head(id), value))
}
}
// Some assertions on the interpreter
import scala.language.implicitConversions
implicit def idToSCFWAE(id: Symbol) = Id(id)
implicit def numToSCFWAE(n: Int) = Num(n)
val (tv1, _) = interp(With('a, NewBox(1), OpenBox('a)))
assert(tv1 == NumV(1))
val (tv2, _) = interp(
With('a, NewBox(1),
With('f, Fun('x, Add('x, OpenBox('a))),
Seqn(SetBox('a, 2), App('f, 5)))))
assert(tv2 == NumV(7))
val (tv3, _) = interp(
With('switch, NewBox(0),
With('toggle,
Fun('dummy,
If0(OpenBox('switch),
Seqn(SetBox('switch, 1), 1),
Seqn(SetBox('switch, 0), 0))),
Add(App('toggle, 42), App('toggle, 42)))))
assert(tv3 == NumV(1))
val (tv4, _) = interp(
With('switch, 0,
With('toggle,
Fun('dummy,
If0('switch,
Seqn(SetId('switch, 1), 1),
Seqn(SetId('switch, 0), 0))),
Add(App('toggle, 42), App('toggle, 42)))))
assert(tv4 == NumV(1))
val (tv5, ts5) = interp(
App(Fun('b1, App(Fun('b2, Seqn(SetBox('b1, 6), OpenBox('b2))), NewBox(7))),
NewBox(5)))
assert(tv5 == NumV(7))
assert(ts5.lookup(0) == NumV(6))
val (tv6, _) = interp(
With('b, 0,
If0(Seqn(SetId('b, 5), 'b),
1,
'b)))
assert(tv6 == NumV(5))
val (tv7, _) = interp(With('b, 4, Add('b, Seqn(SetId('b, 5), 'b))))
assert(tv7 == NumV(9))
assert(interp(
Rec('fact, Fun('n, If0('n, 1, Mult('n, App('fact, Add('n, -1))))),
App('fact, 5)))._1 == NumV(120))
println(interp(With('x, 3, Fun('y, Add('x, 'y)))))
assert(interp(With('x, 3, Fun('y, Add('x, 'y))))._1 == Closure('y, Add('x, 'y), Map('x -> 0)))
assert(interp(
With('inc, Fun('x, Add('x, 1)),
Add(App('inc, 4), App('inc, 5))))._1 == NumV(11))
assert(interp(
With('inc, Fun('x, Add('x, 1)), 'inc))._1 == Closure('x, Add('x, 1), Map()))
assert(interp(With('x, 3, App(Fun('y, Add('x, 'y)), 4)))._1 == NumV(7))
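// whatDoesThisDo builds Seqn(NewBox(n), ... Seqn(NewBox(1), Num(17))): every allocated box is
// unreachable immediately after its Seqn step, so the program is useful for stressing a
// gc-capable Store (see the commented-out main below and the gc-store sketch after this snippet).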
def whatDoesThisDo(n: Int) : Expr = {
var v: Expr = Num(17)
for (i <- 1 to n)
v = Seqn(NewBox(i), v)
v
}
/*override def main(args: Array[String]) {
val iterations = args(0).toInt
val storeSize = args(1).toInt
val store = new NoGCStore[Val](storeSize)
interp(whatDoesThisDo(iterations), List(Map()), store)
println(s"all ok, final store size is ${store.nextFreeAddr - store.freed.size}")
}*/
}
|
Tooa/interpreters
|
src/V5/SRCFWAEInterp.scala
|
Scala
|
apache-2.0
| 9,411
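The Store trait above is explicitly meant to support alternative store/gc designs, with the environment stack passed to malloc serving as the gc root set. As a hedged illustration (not part of the original file), here is a minimal mark-and-sweep store sketch against that trait; it assumes the Val/Box/Closure types from SRCFWAEInterp, mirrors NoGCStore's 2.12-era ArraySeq usage, and trades efficiency for brevity.

package V5

object MarkAndSweepStoreSketch {
  import SRCFWAEInterp._

  class MarkAndSweepStore(maxSize: Int) extends Store[Val] {
    val memory = new scala.collection.mutable.ArraySeq[Val](maxSize)
    private var freeSlots: Set[Int] = (0 until maxSize).toSet

    // Locations directly referenced by a value: a box holds one, a closure holds its environment.
    private def refs(v: Val): Set[Int] = v match {
      case Box(loc)           => Set(loc)
      case Closure(_, _, env) => env.values.toSet
      case _                  => Set.empty
    }

    // Mark phase: everything reachable from the environment stack.
    private def reachable(roots: Set[Int]): Set[Int] = {
      var marked   = Set.empty[Int]
      var frontier = roots
      while (frontier.nonEmpty) {
        marked ++= frontier
        frontier = frontier.flatMap(loc => refs(memory(loc))) -- marked
      }
      marked
    }

    def malloc(stack: List[Map[Symbol, Int]], v: Val): (Int, Store[Val]) = {
      if (freeSlots.isEmpty) {
        // Sweep phase: every slot not reachable from the stack becomes free again.
        val live = reachable(stack.flatMap(_.values).toSet)
        freeSlots = (0 until maxSize).toSet -- live
        if (freeSlots.isEmpty) sys.error("out of memory")
      }
      val loc = freeSlots.head
      freeSlots -= loc
      memory.update(loc, v)
      (loc, this)
    }

    def update(index: Int, v: Val): Store[Val] = { memory.update(index, v); this }
    def lookup(index: Int): Val = memory(index)
    def free(index: Int): Store[Val] = { freeSlots += index; this }
  }
}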
|
/*
* Copyright (c) 2014 Contributor. All rights reserved.
*/
package annotations.varCaseConstructor
@scala.annotation.meta.field
case class foo() extends scala.annotation.StaticAnnotation
case class Entity(@foo var bar: Int)
object Annotations {
val entity = new Entity(1)
println(entity.bar)
}
|
Kwestor/scala-ide
|
org.scala-ide.sdt.core.tests/test-workspace/custom-highlighting/src/custom/AnnotationsVarCaseConstructor.scala
|
Scala
|
bsd-3-clause
| 304
|
import reactivemongo.api.commands.{ CommandError, DefaultWriteResult }
class WriteResultSpec extends org.specs2.mutable.Specification {
"Write result" title
section("unit")
"WriteResult" should {
val error = DefaultWriteResult(
ok = false,
n = 1,
writeErrors = Nil,
writeConcernError = None,
code = Some(23),
errmsg = Some("Foo"))
"be matched as a CommandError when failed" in {
error must beLike {
case CommandError.Code(code) => code must_== 23
} and (error must beLike {
case CommandError.Message(msg) => msg must_== "Foo"
})
}
"not be matched as a CommandError when successful" in {
(error.copy(ok = true) match {
case CommandError.Code(_) | CommandError.Message(_) => true
case _ => false
}) must beFalse
}
}
section("unit")
}
|
ornicar/ReactiveMongo
|
driver/src/test/scala/WriteResultSpec.scala
|
Scala
|
apache-2.0
| 864
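The extractors exercised in the spec above are what application code typically uses to branch on a failed write. A hedged sketch follows, staying with the DefaultWriteResult type the spec itself matches against; the 11000 duplicate-key code is illustrative only.

import reactivemongo.api.commands.{ CommandError, DefaultWriteResult }

object WriteResultHandlingSketch {
  // Branch on a failed write using the same extractors the spec exercises.
  def describe(result: DefaultWriteResult): String = result match {
    case CommandError.Code(11000)  => "duplicate key"
    case CommandError.Message(msg) => s"write failed: $msg"
    case _                         => "write ok"
  }
}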
|
package map
import collection._
import scala.collection.mutable.{Builder, MapBuilder}
import scala.collection.generic.CanBuildFrom
class PrefixMap[T]
extends mutable.Map[String, T] with mutable.MapLike[String, T, PrefixMap[T]]
{
var suffixes: immutable.Map[Char, PrefixMap[T]] = Map.empty
var value: Option[T] = None
def get(s: String): Option[T] =
if (s.isEmpty) value
else suffixes get s(0) flatMap (_.get(s substring 1))
def withPrefix(s: String): PrefixMap[T] =
if (s.isEmpty) this
else {
val leading = s(0)
suffixes get leading match {
case None => suffixes = suffixes + (leading -> empty)
case _ =>
}
suffixes(leading) withPrefix (s substring 1)
}
override def update(s: String, elem: T) = withPrefix(s).value = Some(elem)
override def remove(s: String): Option[T] =
if (s.isEmpty) {
val prev = value
value = None
prev
}
else suffixes get s(0) flatMap (_.remove(s substring 1))
def iterator: Iterator[(String, T)] =
(for (v <- value.iterator) yield ("", v)) ++
(for ((chr, m) <- suffixes.iterator;
(s, v) <- m.iterator) yield (chr +: s, v))
def += (kv: (String, T)): this.type = {update(kv._1, kv._2); this}
def -= (s: String): this.type = {remove(s); this}
override def empty = new PrefixMap[T]
}
object PrefixMap {
def empty[T] = new PrefixMap[T]
def apply[T](kvs: (String, T)*): PrefixMap[T] = {
val m: PrefixMap[T] = empty
for (kv <- kvs) m += kv
m
}
def newBuilder[T]: Builder[(String, T), PrefixMap[T]] =
new mutable.MapBuilder[String, T, PrefixMap[T]](empty)
implicit def canBuildFrom[T]:
CanBuildFrom[PrefixMap[_], (String, T), PrefixMap[T]] =
new CanBuildFrom[PrefixMap[_], (String, T), PrefixMap[T]] {
def apply(from: PrefixMap[_]) = newBuilder[T]
def apply() = newBuilder[T]
}
}
|
mhotchen/programming-in-scala
|
src/map/PrefixMap.scala
|
Scala
|
apache-2.0
| 1,900
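A quick usage sketch for the PrefixMap trie above, with illustrative values; it assumes Scala 2.12-era collections, since the class relies on CanBuildFrom and MapLike.

package map

object PrefixMapDemo extends App {
  val m = PrefixMap("hello" -> 5, "hi" -> 2)
  m += ("hell" -> 4)

  println(m.get("hello"))               // Some(5)
  println(m.withPrefix("h").get("i"))   // Some(2): sub-trie rooted after "h"
  println(m.map { case (k, v) => (k, v * 10) })   // still a PrefixMap, via canBuildFrom
}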
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.samza.util
import java.util
import org.apache.samza.SamzaException
import org.apache.samza.config._
import org.apache.samza.coordinator.metadatastore.NamespaceAwareCoordinatorStreamStore
import org.apache.samza.coordinator.stream.{CoordinatorStreamSystemConsumer, CoordinatorStreamSystemProducer, CoordinatorStreamValueSerde}
import org.apache.samza.coordinator.stream.messages.{Delete, SetConfig}
import org.apache.samza.job.JobRunner
import org.apache.samza.metadatastore.MetadataStore
import org.apache.samza.metrics.MetricsRegistryMap
import org.apache.samza.system.{StreamSpec, SystemAdmin, SystemAdmins, SystemFactory, SystemStream}
import org.apache.samza.util.ScalaJavaUtil.JavaOptionals
import scala.collection.JavaConverters._
object CoordinatorStreamUtil extends Logging {
/**
* Given a job's full config object, build a subset config which includes
* only the job name, job id, and system config for the coordinator stream.
*/
def buildCoordinatorStreamConfig(config: Config): MapConfig = {
val jobConfig = new JobConfig(config)
val buildConfigFactory = jobConfig.getCoordinatorStreamFactory
val coordinatorSystemConfig = Class.forName(buildConfigFactory).newInstance().asInstanceOf[CoordinatorStreamConfigFactory].buildCoordinatorStreamConfig(config)
new MapConfig(coordinatorSystemConfig)
}
/**
* Creates a coordinator stream.
* @param coordinatorSystemStream the {@see SystemStream} that describes the stream to create.
* @param coordinatorSystemAdmin the {@see SystemAdmin} used to create the stream.
*/
def createCoordinatorStream(coordinatorSystemStream: SystemStream, coordinatorSystemAdmin: SystemAdmin): Unit = {
// TODO: This logic should be part of the final coordinator stream metadata store abstraction. See SAMZA-2182
val streamName = coordinatorSystemStream.getStream
val coordinatorSpec = StreamSpec.createCoordinatorStreamSpec(streamName, coordinatorSystemStream.getSystem)
if (coordinatorSystemAdmin.createStream(coordinatorSpec)) {
info("Created coordinator stream: %s." format streamName)
} else {
info("Coordinator stream: %s already exists." format streamName)
}
}
/**
* Get the coordinator system stream from the configuration
* @param config Configuration to get coordinator system stream from.
* @return the coordinator SystemStream derived from the config.
*/
def getCoordinatorSystemStream(config: Config): SystemStream = {
val jobConfig = new JobConfig(config)
val systemName = jobConfig.getCoordinatorSystemName
val (jobName, jobId) = getJobNameAndId(jobConfig)
val streamName = getCoordinatorStreamName(jobName, jobId)
new SystemStream(systemName, streamName)
}
/**
* Get the coordinator system factory from the configuration
* @param config Configuration to get coordinator system factory from.
* @return the SystemFactory used by the coordinator system.
*/
def getCoordinatorSystemFactory(config: Config): SystemFactory = {
val systemName = new JobConfig(config).getCoordinatorSystemName
val systemConfig = new SystemConfig(config)
val systemFactoryClassName = JavaOptionals.toRichOptional(systemConfig.getSystemFactory(systemName)).toOption
.getOrElse(throw new SamzaException("Missing configuration: " + SystemConfig.SYSTEM_FACTORY_FORMAT format systemName))
ReflectionUtil.getObj(systemFactoryClassName, classOf[SystemFactory])
}
/**
* Generates a coordinator stream name based on the job name and job id
* for the job. The format of the stream name will be:
* __samza_coordinator_<JOBNAME>_<JOBID>.
*/
def getCoordinatorStreamName(jobName: String, jobId: String): String = {
"__samza_coordinator_%s_%s" format (jobName.replaceAll("_", "-"), jobId.replaceAll("_", "-"))
}
/**
* Get a job's name and ID given a config. Job ID is defaulted to 1 if not
* defined in the config, and job name must be defined in config.
*
* @return A tuple of (jobName, jobId)
*/
private def getJobNameAndId(jobConfig: JobConfig) = {
(JavaOptionals.toRichOptional(jobConfig.getName).toOption
.getOrElse(throw new ConfigException("Missing required config: job.name")),
jobConfig.getJobId)
}
/**
* Reads and returns the launch config persisted in the coordinator stream. Only job auto-sizing configs are currently supported.
* @param config full job config
* @param metadataStore an instance of the instantiated MetadataStore
* @return empty config if auto sizing is disabled, otherwise auto sizing related configs.
*/
def readLaunchConfigFromCoordinatorStream(config: Config, metadataStore: MetadataStore): Config = {
if (!config.getBoolean(JobConfig.JOB_AUTOSIZING_ENABLED, false)) {
new MapConfig()
} else {
val config = readConfigFromCoordinatorStream(metadataStore)
val launchConfig = config.asScala.filterKeys(key => JobConfig.isAutosizingConfig(key)).asJava
new MapConfig(launchConfig)
}
}
/**
* Reads and returns the complete configuration stored in the coordinator stream.
* @param metadataStore an instance of the instantiated {@link CoordinatorStreamStore}.
* @return the configuration read from the coordinator stream.
*/
def readConfigFromCoordinatorStream(metadataStore: MetadataStore): Config = {
val namespaceAwareCoordinatorStreamStore: NamespaceAwareCoordinatorStreamStore = new NamespaceAwareCoordinatorStreamStore(metadataStore, SetConfig.TYPE)
val configFromCoordinatorStream: util.Map[String, Array[Byte]] = namespaceAwareCoordinatorStreamStore.all
val configMap: util.Map[String, String] = new util.HashMap[String, String]
for ((key: String, valueAsBytes: Array[Byte]) <- configFromCoordinatorStream.asScala) {
if (valueAsBytes == null) {
warn("Value for key: %s in config is null. Ignoring it." format key)
} else {
val valueSerde: CoordinatorStreamValueSerde = new CoordinatorStreamValueSerde(SetConfig.TYPE)
val valueAsString: String = valueSerde.fromBytes(valueAsBytes)
if (valueAsString == null) {
warn("Value for key: %s in config is decoded to be null. Ignoring it." format key)
} else {
configMap.put(key, valueAsString)
}
}
}
new MapConfig(configMap)
}
def writeConfigToCoordinatorStream(config: Config, resetJobConfig: Boolean = true) {
debug("config: %s" format config)
val coordinatorSystemConsumer = new CoordinatorStreamSystemConsumer(config, new MetricsRegistryMap)
val coordinatorSystemProducer = new CoordinatorStreamSystemProducer(config, new MetricsRegistryMap)
val systemAdmins = new SystemAdmins(config)
// Create the coordinator stream if it doesn't exist
info("Creating coordinator stream")
val coordinatorSystemStream = CoordinatorStreamUtil.getCoordinatorSystemStream(config)
val coordinatorSystemAdmin = systemAdmins.getSystemAdmin(coordinatorSystemStream.getSystem)
coordinatorSystemAdmin.start()
CoordinatorStreamUtil.createCoordinatorStream(coordinatorSystemStream, coordinatorSystemAdmin)
coordinatorSystemAdmin.stop()
if (resetJobConfig) {
info("Storing config in coordinator stream.")
coordinatorSystemProducer.register(JobRunner.SOURCE)
coordinatorSystemProducer.start()
coordinatorSystemProducer.writeConfig(JobRunner.SOURCE, config)
}
info("Loading old config from coordinator stream.")
coordinatorSystemConsumer.register()
coordinatorSystemConsumer.start()
coordinatorSystemConsumer.bootstrap()
coordinatorSystemConsumer.stop()
val oldConfig = coordinatorSystemConsumer.getConfig
if (resetJobConfig) {
var keysToRemove = oldConfig.keySet.asScala.toSet.diff(config.keySet.asScala)
val jobConfig = new JobConfig(config)
if (jobConfig.getAutosizingEnabled) {
// If autosizing is enabled, we retain auto-sizing related configs
keysToRemove = keysToRemove.filter(configKey => !JobConfig.isAutosizingConfig(configKey))
}
info("Deleting old configs that are no longer defined: %s".format(keysToRemove))
keysToRemove.foreach(key => { coordinatorSystemProducer.send(new Delete(JobRunner.SOURCE, key, SetConfig.TYPE)) })
}
coordinatorSystemProducer.stop()
}
}
|
abhishekshivanna/samza
|
samza-core/src/main/scala/org/apache/samza/util/CoordinatorStreamUtil.scala
|
Scala
|
apache-2.0
| 9,093
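For reference, getCoordinatorStreamName above replaces underscores in the job name and job id with dashes before embedding them in the stream name. A small sketch, using a hypothetical job name and id:

object CoordinatorStreamNameExample extends App {
  import org.apache.samza.util.CoordinatorStreamUtil
  // Prints: __samza_coordinator_page-view-count_1
  println(CoordinatorStreamUtil.getCoordinatorStreamName("page_view_count", "1"))
}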
|
/*
*
* * Copyright (c) 2014-2016. National Institute of Advanced Industrial Science and Technology (AIST)
* * All rights reserved.
*
*/
package jp.go.aist.cspe
import jp.go.aist.cspe.CSPE._
object STOP extends Process{
// used for verification
override def acceptPrim(e : AbsEvent): ProcessSet = processSet(List.empty)
override def canTerminate = false
override def toString = "STOP"
override def equals(other: Any) = other match {
case that: AnyRef => this eq that
case _ => false
}
override def hashCode() = "STOP".hashCode()
}
|
yoriyuki/cspe
|
src/main/scala/jp/go/aist/cspe/STOP.scala
|
Scala
|
bsd-3-clause
| 565
|
package elevators
import org.scalatest._
class ElevatorSpec extends WordSpec with Matchers {
implicit val config = ElevatorConfig.defaultConfig
"An Elevator" when {
"standing at floor 1" should {
val elevator = Elevator(Set(), Floor(1).toPosition)
"have score 11 to floor 1" in {
elevator.floorRequestScore(Floor(1), Up) shouldEqual 11
}
"have score 10 to floor 0" in {
elevator.floorRequestScore(Floor(0), Up) shouldEqual 10
}
"have score 10 to floor 2" in {
elevator.floorRequestScore(Floor(2), Up) shouldEqual 10
}
"don't move when stepped" in {
elevator.step shouldEqual elevator
}
}
"traveling to floor 5 from floor 1" should {
val elevator = Elevator(Set(Floor(5)), Floor(1).toPosition)
"have score 1 to floor 0" in {
elevator.floorRequestScore(Floor(0), Up) shouldEqual 1
}
"have score 10 to floor 2 when passenger travels Up" in {
elevator.floorRequestScore(Floor(2), Up) shouldEqual 10
}
"have score 9 to floor 2 when passenger travels Down" in {
elevator.floorRequestScore(Floor(2), Down) shouldEqual 9
}
"progress up when stepped" in {
elevator.step.position shouldEqual Floor(1).toPosition.step(Up)
}
"remove goal when reached" in {
elevator.step(40) shouldEqual Elevator(Set(), Floor(5).toPosition)
}
"still be traveling until goal is reached" in {
elevator.step(39) shouldEqual Elevator(Set(Floor(5)), Position(49))
}
}
"traveling to floor 0 from floor 1" should {
val elevator = Elevator(Set(Floor(0)), Floor(1).toPosition)
"have score 1 to floor 2" in {
elevator.floorRequestScore(Floor(2), Up) shouldEqual 1
}
"have score 9 to floor 0 when passenger travels Up" in {
elevator.floorRequestScore(Floor(0), Up) shouldEqual 9
}
}
}
}
|
petterarvidsson/elevators
|
src/test/scala/elevators/ElevatorSpec.scala
|
Scala
|
mit
| 1,939
|