code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.codesimples.simple.restapi.recommendation
import com.codesimples.simple.restapi.BasicMessage
object RecommendationMessage extends BasicMessage {
  // REST endpoint path served by this message's API.
  override def endpoint() = "/api/recommendRegularSize"
def happyDay(userId: String, userProfileId:String, productIds:String) = {
"{"+
"\\"request_header\\": {"+
"\\"timestamp\\": "+timestamp+","+
"\\"key\\": \\""+publicKeyBase64()+"\\""+
"},"+
"\\"request_body\\": {"+
"\\"user_id\\": "+userId+","+
"\\"user_profile_id\\": "+userProfileId+","+
"\\"product_ids\\": ["+productIds+"]"+
"}"+
"}"
}
} | agnaldo4j/estudos_arquitetura_limpa_scala | planner-restapi-harness/src/main/scala/com/codesimples/simple/restapi/recommendation/RecommendationMessage.scala | Scala | mit | 617 |
package chat.tox.antox.utils
import android.content.ContentValues
object DatabaseConstants {
  // Boolean values as stored in SQLite INTEGER columns.
  val FALSE = 0
  val TRUE = 1
  // Schema versions; bump when the corresponding database schema changes.
  val DATABASE_VERSION = 14
  val USER_DATABASE_VERSION = 4
  // Table names.
  val TABLE_CONTACTS = "contacts"
  val TABLE_MESSAGES = "messages"
  val TABLE_FRIEND_REQUESTS = "friend_requests"
  val TABLE_GROUP_INVITES = "group_invites"
  val TABLE_USERS = "users"
  // Column names shared across the tables above.
  val COLUMN_NAME_KEY = "tox_key"
  val COLUMN_NAME_GROUP_INVITER = "group_inviter"
  val COLUMN_NAME_GROUP_DATA = "group_data"
  val COLUMN_NAME_SENDER_NAME = "sender_name"
  val COLUMN_NAME_MESSAGE = "message"
  val COLUMN_NAME_NAME = "name"
  val COLUMN_NAME_USERNAME = "username"
  val COLUMN_NAME_PASSWORD = "password"
  val COLUMN_NAME_NICKNAME = "nickname"
  val COLUMN_NAME_TIMESTAMP = "timestamp"
  val COLUMN_NAME_NOTE = "note"
  val COLUMN_NAME_STATUS_MESSAGE = "status_message"
  val COLUMN_NAME_LOGGING_ENABLED = "logging_enabled"
  val COLUMN_NAME_STATUS = "status"
  val COLUMN_NAME_TYPE = "type"
  val COLUMN_NAME_MESSAGE_ID = "message_id"
  val COLUMN_NAME_HAS_BEEN_RECEIVED = "has_been_received"
  val COLUMN_NAME_HAS_BEEN_READ = "has_been_read"
  val COLUMN_NAME_SUCCESSFULLY_SENT = "successfully_sent"
  val COLUMN_NAME_SIZE = "size"
  val COLUMN_NAME_FILE_KIND = "file_kind"
  val COLUMN_NAME_ISONLINE = "isonline"
  val COLUMN_NAME_ALIAS = "alias"
  val COLUMN_NAME_IGNORED = "ignored"
  val COLUMN_NAME_ISBLOCKED = "isblocked"
  val COLUMN_NAME_FAVORITE = "favorite"
  val COLUMN_NAME_AVATAR = "avatar"
  val COLUMN_NAME_CONTACT_TYPE = "contact_type"
  val COLUMN_NAME_RECEIVED_AVATAR = "received_avatar"
  val COLUMN_NAME_UNSENT_MESSAGE = "unsent_message"
def createSqlEqualsCondition(columnName: String, list: Iterable[_], tableName: String = ""): String = {
val table = if (!tableName.isEmpty) tableName + "." else ""
"(" + list.slice(0, list.size - 1).map(i => s"$table$columnName == " + i.toString + " OR ").mkString + list.last + ")"
}
def contentValue(key: String, value: String) = {
val values = new ContentValues()
values.put(key, value)
values
}
def contentValue(key: String, value: Int) = {
val values = new ContentValues()
values.put(key, value.asInstanceOf[java.lang.Integer])
values
}
} | gale320/Antox | app/src/main/scala/chat/tox/antox/utils/DatabaseConstants.scala | Scala | gpl-3.0 | 2,284 |
package uk.ac.ncl.openlab.intake24.systemsql.user
import java.time.ZonedDateTime
import anorm.{Macro, SQL}
import com.google.inject.Inject
import javax.inject.Named
import javax.sql.DataSource
import uk.ac.ncl.openlab.intake24.errors.{CreateError, DeleteError, LookupError, UnexpectedDatabaseError}
import uk.ac.ncl.openlab.intake24.services.systemdb.user.{SurveyService, UserSession, UserSessionDataService}
import uk.ac.ncl.openlab.intake24.sql.{SqlDataService, SqlResourceLoader}
/**
* Created by Tim Osadchiy on 21/03/2018.
*/
/**
 * SQL-backed implementation of [[UserSessionDataService]] storing per-user,
 * per-survey session blobs in the `user_sessions` table.
 */
class UserSessionDataServiceImpl @Inject()(@Named("intake24_system") val dataSource: DataSource, surveyService: SurveyService) extends UserSessionDataService with SqlDataService with SqlResourceLoader {
  private val TABLE_NAME = "user_sessions"
  private val FIELD_LIST = "user_id, survey_id, session_data, created"
  // Lookup by (survey_id, user_id) composite key.
  private val GET_Q = s"SELECT $FIELD_LIST FROM $TABLE_NAME WHERE survey_id={survey_id} AND user_id={user_id}"
  // Upsert: a (user_id, survey_id) conflict replaces the stored session data.
  private val INSERT_Q =
    s"""
       |INSERT INTO user_sessions ($FIELD_LIST)
       |VALUES ({user_id}, {survey_id}, {session_data}, {created})
       |ON CONFLICT (user_id, survey_id)
       |  DO UPDATE SET session_data={session_data}, created={created}
       |  RETURNING $FIELD_LIST
    """.stripMargin
  private val DELETE_Q = s"DELETE FROM $TABLE_NAME WHERE survey_id = {survey_id} AND user_id = {user_id}"
  // Row mapper used with anorm's Macro.namedParser; field names match FIELD_LIST.
  case class UserSessionRow(user_id: Long, survey_id: String, session_data: String, created: ZonedDateTime) {
    def toUserSession = UserSession(user_id, survey_id, session_data, created)
  }
  /**
   * Upserts the session, but only when the survey is configured to store user
   * sessions on the server; otherwise returns an error.
   */
  override def save(userSession: UserSession): Either[CreateError, UserSession] = tryWithConnection {
    implicit conn =>
      surveyService.getSurveyParameters(userSession.surveyId) match {
        case Right(surveyParameters) if surveyParameters.storeUserSessionOnServer =>
          val r = SQL(INSERT_Q).on('user_id -> userSession.userId, 'survey_id -> userSession.surveyId,
            'session_data -> userSession.sessionData, 'created -> userSession.created).executeQuery()
            .as(Macro.namedParser[UserSessionRow].single)
          Right(r.toUserSession)
        case Right(surveyParameters) if !surveyParameters.storeUserSessionOnServer =>
          Left(UnexpectedDatabaseError(new Exception(s"Survey ${userSession.surveyId} does not store user sessions")))
        case Left(error) => Left(UnexpectedDatabaseError(error.exception))
      }
  }
  /**
   * Fetches the stored session for (surveyId, userId).
   * NOTE(review): `.single` throws when no row exists; this presumably surfaces
   * through tryWithConnection's error handling rather than a typed
   * RecordNotFound — confirm desired behavior for missing sessions.
   */
  override def get(surveyId: String, userId: Long): Either[LookupError, UserSession] = tryWithConnection {
    implicit conn =>
      val r = SQL(GET_Q).on('survey_id -> surveyId, 'user_id -> userId).executeQuery().as(Macro.namedParser[UserSessionRow].single)
      Right(r.toUserSession)
  }
  /** Deletes the stored session for (surveyId, userId); succeeds even if absent. */
  override def clean(surveyId: String, userId: Long): Either[DeleteError, Unit] = tryWithConnection {
    implicit conn =>
      SQL(DELETE_Q).on('survey_id -> surveyId, 'user_id -> userId).execute()
      Right(());
  }
}
| digitalinteraction/intake24 | SystemDataSQL/src/main/scala/uk/ac/ncl/openlab/intake24/systemsql/user/UserSessionDataServiceImpl.scala | Scala | apache-2.0 | 2,967 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation
import org.apache.spark.sql.catalyst.expressions.{AttributeMap, AttributeReference}
import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan, Statistics}
import org.apache.spark.sql.sources.BaseRelation
/**
* Used to link a [[BaseRelation]] in to a logical query plan.
*/
private[sql] case class LogicalRelation(relation: BaseRelation)
  extends LeafNode
  with MultiInstanceRelation {
  // Fresh AttributeReferences derived from the relation's schema.
  override val output: Seq[AttributeReference] = relation.schema.toAttributes
  // Logical Relations are distinct if they have different output for the sake of transformations.
  // (Two LogicalRelations over the same BaseRelation compare unequal unless their
  // attribute expression ids also match.)
  override def equals(other: Any): Boolean = other match {
    case l @ LogicalRelation(otherRelation) => relation == otherRelation && output == l.output
    case _ => false
  }
  override def hashCode: Int = {
    com.google.common.base.Objects.hashCode(relation, output)
  }
  // sameResult intentionally ignores output (expression ids) and compares only
  // the underlying relation, unlike equals.
  override def sameResult(otherPlan: LogicalPlan): Boolean = otherPlan match {
    case LogicalRelation(otherRelation) => relation == otherRelation
    case _ => false
  }
  // Size estimate delegated to the data source; used by the optimizer
  // (e.g. broadcast-join decisions).
  @transient override lazy val statistics: Statistics = Statistics(
    sizeInBytes = BigInt(relation.sizeInBytes)
  )
  /** Used to lookup original attribute capitalization */
  val attributeMap: AttributeMap[AttributeReference] = AttributeMap(output.map(o => (o, o)))
  // MultiInstanceRelation: a new copy, which regenerates output attributes via
  // schema.toAttributes — presumably yielding fresh expression ids; TODO confirm.
  def newInstance(): this.type = LogicalRelation(relation).asInstanceOf[this.type]
  override def simpleString: String = s"Relation[${output.mkString(",")}] $relation"
}
| ArvinDevel/onlineAggregationOnSparkV2 | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/LogicalRelation.scala | Scala | apache-2.0 | 2,402 |
/*
* Artificial Intelligence for Humans
* Volume 2: Nature Inspired Algorithms
* Java Version
* http://www.aifh.org
* http://www.jeffheaton.com
*
* Code repository:
* https://github.com/jeffheaton/aifh
*
* Copyright 2014 by Jeff Heaton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For more information on Heaton Research copyrights, licenses
* and trademarks visit:
* http://www.heatonresearch.com/copyright
*/
package com.heatonresearch.aifh.examples.capstone.model.milestone1
/**
* Stats that were collected about the titanic passengers to help with normalization, interpolation
* and modeling.
*/
class TitanicStats {
  /**
   * @return Passengers with the title "master", mean age.
   */
  def getMeanMaster: CalcMean = meanMaster
  /**
   * @return Passengers with the title "mr", mean age.
   */
  def getMeanMr: CalcMean = meanMr
  /**
   * @return Passengers with the title "miss", mean age.
   */
  def getMeanMiss: CalcMean = meanMiss
  /**
   * @return Passengers with the title "mrs", mean age.
   */
  def getMeanMrs: CalcMean = meanMrs
  /**
   * @return Passengers with a military title, mean age.
   */
  def getMeanMilitary: CalcMean = meanMilitary
  /**
   * @return Passengers with a noble title, mean age.
   */
  def getMeanNobility: CalcMean = meanNobility
  /**
   * @return Passengers with the title "dr", mean age.
   */
  def getMeanDr: CalcMean = meanDr
  /**
   * @return Passengers with the title "rev", mean age.
   */
  def getMeanClergy: CalcMean = meanClergy
  /**
   * @return Mean age for all passengers.
   */
  def getMeanTotal: CalcMean = meanTotal
  /**
   * @return Survival stats for passengers with a title of "master".
   */
  def getSurvivalMaster: CalcSurvival = survivalMaster
  /**
   * @return Survival stats for passengers with a title of "mr".
   */
  def getSurvivalMr: CalcSurvival = survivalMr
  /**
   * @return Survival stats for passengers with a title of "miss".
   */
  def getSurvivalMiss: CalcSurvival = survivalMiss
  /**
   * @return Survival stats for passengers with a title of "mrs".
   */
  def getSurvivalMrs: CalcSurvival = survivalMrs
  /**
   * @return Survival stats for passengers with a military title.
   */
  def getSurvivalMilitary: CalcSurvival = survivalMilitary
  /**
   * @return Survival stats for passengers with a noble title.
   */
  def getSurvivalNobility: CalcSurvival = survivalNobility
  /**
   * @return Survival stats for passengers with a title of "dr".
   */
  def getSurvivalDr: CalcSurvival = survivalDr
  /**
   * @return Survival stats for passengers with a title of "rev" (clergy).
   */
  def getSurvivalClergy: CalcSurvival = survivalClergy
  /**
   * @return Survival stats on the total number of passengers.
   */
  def getSurvivalTotal: CalcSurvival = survivalTotal
  /**
   * @return Survival stats for passengers that embarked from Southampton, England.
   */
  def getEmbarkedS: CalcSurvival = embarkedS
  /**
   * @return Survival stats for passengers that embarked from Cherbourg, France.
   */
  def getEmbarkedC: CalcSurvival = embarkedC
  /**
   * @return Survival stats for passengers that embarked from Queenstown (Cobh), Ireland.
   */
  def getEmbarkedQ: CalcSurvival = embarkedQ
  /**
   * @return Histogram of embark locations.
   */
  def getEmbarkedHisto: CalcHistogram = embarkedHisto
  /**
   * @return Mean age for male passengers.
   */
  def getMeanMale: CalcMean = meanMale
  /**
   * @return Mean age for female passengers.
   */
  def getMeanFemale: CalcMean = meanFemale
  /**
   * @return Mean fare for first class.
   */
  def getMeanFare1: CalcMean = meanFare1
  /**
   * @return Mean fare for second class.
   */
  def getMeanFare2: CalcMean = meanFare2
  /**
   * @return Mean fare for third class.
   */
  def getMeanFare3: CalcMean = meanFare3
/**
* Dump all stats to stdout.
*/
def dump() {
println("Mean Master: Mean Age: " + meanMaster.calculate + " " + survivalMaster.toString)
println("Mr.: Mean Age: " + meanMr.calculate + " " + survivalMr.toString)
println("Miss.: Mean Age: " + meanMiss.calculate + " " + survivalMiss.toString)
println("Mrs.: Mean Age: " + meanMrs.calculate + " " + survivalMrs.toString)
println("Military: Mean Age: " + meanMrs.calculate + " " + survivalMilitary.toString)
println("Clergy: Mean Age: " + meanClergy.calculate + " " + survivalClergy.toString)
println("Nobility: Mean Age: " + meanNobility.calculate + " " + survivalNobility.toString)
println("Dr: Mean Age: " + meanDr.calculate + " " + survivalDr.toString)
println("Total known survival: Mean Age: " + meanTotal.calculate + " " + survivalTotal.toString)
println()
println("Embarked Queenstown: Mean Age: " + embarkedQ.toString)
println("Embarked Southampton: Mean Age: " + embarkedS.toString)
println("Embarked Cherbourg: Mean Age: " + embarkedC.toString)
println("Most common embarked: Mean Age: " + this.embarkedHisto.max)
println()
println("Mean Age Male: " + this.meanMale.calculate)
println("Mean Age Female: " + this.meanFemale.calculate)
println()
println("Mean Fair 1st Class: " + this.meanFare1.calculate)
println("Mean Fair 2st Class: " + this.meanFare2.calculate)
println("Mean Fair 3st Class: " + this.meanFare3.calculate)
}
  /**
   * Passengers with the title "master", mean age.
   */
  private val meanMaster: CalcMean = new CalcMean
  /**
   * Passengers with the title "mr", mean age.
   */
  private val meanMr: CalcMean = new CalcMean
  /**
   * Passengers with the title "miss", mean age.
   */
  private val meanMiss: CalcMean = new CalcMean
  /**
   * Passengers with the title "mrs", mean age.
   */
  private val meanMrs: CalcMean = new CalcMean
  /**
   * Passengers with a military title, mean age.
   */
  private val meanMilitary: CalcMean = new CalcMean
  /**
   * Passengers with a nobility title, mean age.
   */
  private val meanNobility: CalcMean = new CalcMean
  /**
   * Passengers with the title "dr".
   */
  private val meanDr: CalcMean = new CalcMean
  /**
   * Passengers with the title "rev".
   */
  private val meanClergy: CalcMean = new CalcMean
  /**
   * Total passengers.
   */
  private val meanTotal: CalcMean = new CalcMean
  /**
   * Total male passengers.
   */
  private val meanMale: CalcMean = new CalcMean
  /**
   * Total female passengers.
   */
  private val meanFemale: CalcMean = new CalcMean
  /**
   * Passengers in 1st class, average fare.
   */
  private val meanFare1: CalcMean = new CalcMean
  /**
   * Passengers in 2nd class, average fare.
   */
  private val meanFare2: CalcMean = new CalcMean
  /**
   * Passengers in 3rd class, average fare.
   */
  private val meanFare3: CalcMean = new CalcMean
  /**
   * Survival stats for passengers with a title of "master".
   */
  private val survivalMaster: CalcSurvival = new CalcSurvival
  /**
   * Survival stats for passengers with a title of "mr".
   */
  private val survivalMr: CalcSurvival = new CalcSurvival
  /**
   * Survival stats for passengers with a title of "miss".
   */
  private val survivalMiss: CalcSurvival = new CalcSurvival
  /**
   * Survival stats for passengers with a title of "mrs".
   */
  private val survivalMrs: CalcSurvival = new CalcSurvival
  /**
   * Survival stats for passengers with a military title.
   */
  private val survivalMilitary: CalcSurvival = new CalcSurvival
  /**
   * Survival stats for passengers with a nobility title.
   */
  private val survivalNobility: CalcSurvival = new CalcSurvival
  /**
   * Survival stats for passengers with a title of "dr".
   */
  private val survivalDr: CalcSurvival = new CalcSurvival
  /**
   * Survival stats for passengers with a title of "rev".
   */
  private val survivalClergy: CalcSurvival = new CalcSurvival
  /**
   * Survival stats for all passengers.
   */
  private val survivalTotal: CalcSurvival = new CalcSurvival
  /**
   * Survival stats for passengers that embarked from Southampton, England.
   */
  private val embarkedS: CalcSurvival = new CalcSurvival
  /**
   * Survival stats for passengers that embarked from Cherbourg, France.
   */
  private val embarkedC: CalcSurvival = new CalcSurvival
  /**
   * Survival stats for passengers that embarked from Queenstown (Cobh), Ireland.
   */
  private val embarkedQ: CalcSurvival = new CalcSurvival
  /**
   * Histogram of embark locations.
   */
  private val embarkedHisto: CalcHistogram = new CalcHistogram
} | PeterLauris/aifh | vol2/vol2-scala-examples/src/main/scala/com/heatonresearch/aifh/examples/capstone/model/milestone1/TitanicStats.scala | Scala | apache-2.0 | 8,950 |
package uk.co.turingatemyhamster
package owl2
/**
* An abstraction of: http://www.w3.org/TR/2012/REC-owl2-syntax-20121211/#Object_Property_Expressions
*
* @author Matthew Pocock
*/
trait ObjectPropertyExpressionsModule {
importedModules : owl2.IriModule with owl2.EntitiesLiteralsAnonymousIndividualsModule =>
type ObjectPropertyExpression <: AnyRef
type ObjectProperty <: Entity with ObjectPropertyExpression
type InverseObjectProperty <: ObjectPropertyExpression
}
| drdozer/owl2 | core/src/main/scala/uk/co/turingatemyhamster/owl2/ObjectPropertyExpressionsModule.scala | Scala | apache-2.0 | 485 |
import scala.io.StdIn
object SadCycles extends App {
def sad(n: Int)(x: Int): Int = (x.toString map (c => math pow(c.asDigit, n))).sum.toInt
def findCycle(f: Int => Int, x0: Int): Stream[Int] = {
@annotation.tailrec
def floyd(t: Int, h: Int): Int = {
val tortoise = f(t)
val hare = f(f(h))
if (tortoise == hare) tortoise
else floyd(tortoise, hare)
}
val end = floyd(x0, x0)
lazy val cycle: Stream[Int] = end #:: cycle map f
(cycle takeWhile (_ != end)) :+ end
}
val n = StdIn.readInt()
val x = StdIn.readInt()
print(findCycle(sad(n), x) mkString ", ")
}
| ccampo133/daily-programmer | 215-easy/src/SadCycles.scala | Scala | mit | 619 |
package io.flow.reference.data
import io.flow.reference.v0.models.Language
/**
 * Static reference data for supported languages.
 *
 * NOTE(review): the `iso6392` field is populated with two-letter codes, which
 * are ISO 639-1 codes (ISO 639-2 codes have three letters) — confirm the
 * field name against the generated `Language` model.
 */
object Languages {
  val Aa: Language = Language(iso6392 = "aa", name = "Afar")
  val Af: Language = Language(iso6392 = "af", name = "Afrikaans")
  val Ak: Language = Language(iso6392 = "ak", name = "Akan")
  val Sq: Language = Language(iso6392 = "sq", name = "Albanian")
  val Am: Language = Language(iso6392 = "am", name = "Amharic")
  val Ar: Language = Language(iso6392 = "ar", name = "Arabic")
  val Hy: Language = Language(iso6392 = "hy", name = "Armenian")
  val Ay: Language = Language(iso6392 = "ay", name = "Aymara")
  val Az: Language = Language(iso6392 = "az", name = "Azerbaijani")
  val Bm: Language = Language(iso6392 = "bm", name = "Bambara")
  val Be: Language = Language(iso6392 = "be", name = "Belarusian")
  val Bn: Language = Language(iso6392 = "bn", name = "Bengali")
  val Bi: Language = Language(iso6392 = "bi", name = "Bislama")
  val Bs: Language = Language(iso6392 = "bs", name = "Bosnian")
  val Bg: Language = Language(iso6392 = "bg", name = "Bulgarian")
  val My: Language = Language(iso6392 = "my", name = "Burmese")
  val Ca: Language = Language(iso6392 = "ca", name = "Catalan")
  val Ny: Language = Language(iso6392 = "ny", name = "Chichewa")
  val Zh: Language = Language(iso6392 = "zh", name = "Chinese")
  val Hr: Language = Language(iso6392 = "hr", name = "Croatian")
  val Cs: Language = Language(iso6392 = "cs", name = "Czech")
  val Da: Language = Language(iso6392 = "da", name = "Danish")
  val Dv: Language = Language(iso6392 = "dv", name = "Divehi")
  val Nl: Language = Language(iso6392 = "nl", name = "Dutch")
  val Dz: Language = Language(iso6392 = "dz", name = "Dzongkha")
  val En: Language = Language(iso6392 = "en", name = "English")
  val Et: Language = Language(iso6392 = "et", name = "Estonian")
  val Ee: Language = Language(iso6392 = "ee", name = "Ewe")
  val Fj: Language = Language(iso6392 = "fj", name = "Fijian")
  val Fi: Language = Language(iso6392 = "fi", name = "Finnish")
  val Fr: Language = Language(iso6392 = "fr", name = "French")
  val Ff: Language = Language(iso6392 = "ff", name = "Fula")
  val Ka: Language = Language(iso6392 = "ka", name = "Georgian")
  val De: Language = Language(iso6392 = "de", name = "German")
  val El: Language = Language(iso6392 = "el", name = "Greek")
  val Gn: Language = Language(iso6392 = "gn", name = "Guaraní")
  val Gu: Language = Language(iso6392 = "gu", name = "Gujarati")
  val Ht: Language = Language(iso6392 = "ht", name = "Haitian")
  val Ha: Language = Language(iso6392 = "ha", name = "Hausa")
  val He: Language = Language(iso6392 = "he", name = "Hebrew")
  val Hi: Language = Language(iso6392 = "hi", name = "Hindi")
  val Ho: Language = Language(iso6392 = "ho", name = "Hiri Motu")
  val Hu: Language = Language(iso6392 = "hu", name = "Hungarian")
  val Is: Language = Language(iso6392 = "is", name = "Icelandic")
  val Ig: Language = Language(iso6392 = "ig", name = "Igbo")
  val Id: Language = Language(iso6392 = "id", name = "Indonesian")
  val Ga: Language = Language(iso6392 = "ga", name = "Irish")
  val It: Language = Language(iso6392 = "it", name = "Italian")
  val Ja: Language = Language(iso6392 = "ja", name = "Japanese")
  val Kr: Language = Language(iso6392 = "kr", name = "Kanuri")
  val Kk: Language = Language(iso6392 = "kk", name = "Kazakh")
  val Km: Language = Language(iso6392 = "km", name = "Khmer")
  val Rw: Language = Language(iso6392 = "rw", name = "Kinyarwanda")
  val Rn: Language = Language(iso6392 = "rn", name = "Kirundi")
  val Kg: Language = Language(iso6392 = "kg", name = "Kongo")
  val Ko: Language = Language(iso6392 = "ko", name = "Korean")
  val Ku: Language = Language(iso6392 = "ku", name = "Kurdish")
  val Kj: Language = Language(iso6392 = "kj", name = "Kwanyama")
  val Ky: Language = Language(iso6392 = "ky", name = "Kyrgyz")
  val Lo: Language = Language(iso6392 = "lo", name = "Lao")
  val La: Language = Language(iso6392 = "la", name = "Latin")
  val Lv: Language = Language(iso6392 = "lv", name = "Latvian")
  val Ln: Language = Language(iso6392 = "ln", name = "Lingala")
  val Lt: Language = Language(iso6392 = "lt", name = "Lithuanian")
  val Lu: Language = Language(iso6392 = "lu", name = "Luba-Katanga")
  val Lb: Language = Language(iso6392 = "lb", name = "Luxembourgish")
  val Mk: Language = Language(iso6392 = "mk", name = "Macedonian")
  val Mg: Language = Language(iso6392 = "mg", name = "Malagasy")
  val Ms: Language = Language(iso6392 = "ms", name = "Malay")
  val Mt: Language = Language(iso6392 = "mt", name = "Maltese")
  val Mh: Language = Language(iso6392 = "mh", name = "Marshallese")
  val Mn: Language = Language(iso6392 = "mn", name = "Mongolian")
  val Mi: Language = Language(iso6392 = "mi", name = "Māori")
  val Ne: Language = Language(iso6392 = "ne", name = "Nepali")
  val Nd: Language = Language(iso6392 = "nd", name = "Northern Ndebele")
  val No: Language = Language(iso6392 = "no", name = "Norwegian")
  val Nb: Language = Language(iso6392 = "nb", name = "Norwegian Bokmål")
  val Nn: Language = Language(iso6392 = "nn", name = "Norwegian Nynorsk")
  val Pa: Language = Language(iso6392 = "pa", name = "Panjabi")
  val Ps: Language = Language(iso6392 = "ps", name = "Pashto")
  val Fa: Language = Language(iso6392 = "fa", name = "Persian")
  val Pl: Language = Language(iso6392 = "pl", name = "Polish")
  val Pt: Language = Language(iso6392 = "pt", name = "Portuguese")
  val Qu: Language = Language(iso6392 = "qu", name = "Quechua")
  val Ro: Language = Language(iso6392 = "ro", name = "Romanian")
  val Rm: Language = Language(iso6392 = "rm", name = "Romansh")
  val Ru: Language = Language(iso6392 = "ru", name = "Russian")
  val Sg: Language = Language(iso6392 = "sg", name = "Sango")
  val Sr: Language = Language(iso6392 = "sr", name = "Serbian")
  val Sn: Language = Language(iso6392 = "sn", name = "Shona")
  val Si: Language = Language(iso6392 = "si", name = "Sinhala")
  val Sk: Language = Language(iso6392 = "sk", name = "Slovak")
  val Sl: Language = Language(iso6392 = "sl", name = "Slovene")
  val So: Language = Language(iso6392 = "so", name = "Somali")
  val Nr: Language = Language(iso6392 = "nr", name = "Southern Ndebele")
  val St: Language = Language(iso6392 = "st", name = "Southern Sotho")
  val Es: Language = Language(iso6392 = "es", name = "Spanish")
  val Sw: Language = Language(iso6392 = "sw", name = "Swahili")
  val Ss: Language = Language(iso6392 = "ss", name = "Swati")
  val Sv: Language = Language(iso6392 = "sv", name = "Swedish")
  val Tl: Language = Language(iso6392 = "tl", name = "Tagalog")
  val Tg: Language = Language(iso6392 = "tg", name = "Tajik")
  val Ta: Language = Language(iso6392 = "ta", name = "Tamil")
  val Te: Language = Language(iso6392 = "te", name = "Telugu")
  val Th: Language = Language(iso6392 = "th", name = "Thai")
  val Ti: Language = Language(iso6392 = "ti", name = "Tigrinya")
  val Ts: Language = Language(iso6392 = "ts", name = "Tsonga")
  val Tn: Language = Language(iso6392 = "tn", name = "Tswana")
  val Tr: Language = Language(iso6392 = "tr", name = "Turkish")
  val Tk: Language = Language(iso6392 = "tk", name = "Turkmen")
  val Uk: Language = Language(iso6392 = "uk", name = "Ukrainian")
  val Ur: Language = Language(iso6392 = "ur", name = "Urdu")
  val Uz: Language = Language(iso6392 = "uz", name = "Uzbek")
  val Ve: Language = Language(iso6392 = "ve", name = "Venda")
  val Vi: Language = Language(iso6392 = "vi", name = "Vietnamese")
  val Cy: Language = Language(iso6392 = "cy", name = "Welsh")
  val Wo: Language = Language(iso6392 = "wo", name = "Wolof")
  val Xh: Language = Language(iso6392 = "xh", name = "Xhosa")
  val Yo: Language = Language(iso6392 = "yo", name = "Yoruba")
  val Zu: Language = Language(iso6392 = "zu", name = "Zulu")
  // Every language declared above, in declaration order. Keep this list in
  // sync when adding a new language val.
  val all: Seq[Language] = Seq(
    Aa,
    Af,
    Ak,
    Sq,
    Am,
    Ar,
    Hy,
    Ay,
    Az,
    Bm,
    Be,
    Bn,
    Bi,
    Bs,
    Bg,
    My,
    Ca,
    Ny,
    Zh,
    Hr,
    Cs,
    Da,
    Dv,
    Nl,
    Dz,
    En,
    Et,
    Ee,
    Fj,
    Fi,
    Fr,
    Ff,
    Ka,
    De,
    El,
    Gn,
    Gu,
    Ht,
    Ha,
    He,
    Hi,
    Ho,
    Hu,
    Is,
    Ig,
    Id,
    Ga,
    It,
    Ja,
    Kr,
    Kk,
    Km,
    Rw,
    Rn,
    Kg,
    Ko,
    Ku,
    Kj,
    Ky,
    Lo,
    La,
    Lv,
    Ln,
    Lt,
    Lu,
    Lb,
    Mk,
    Mg,
    Ms,
    Mt,
    Mh,
    Mn,
    Mi,
    Ne,
    Nd,
    No,
    Nb,
    Nn,
    Pa,
    Ps,
    Fa,
    Pl,
    Pt,
    Qu,
    Ro,
    Rm,
    Ru,
    Sg,
    Sr,
    Sn,
    Si,
    Sk,
    Sl,
    So,
    Nr,
    St,
    Es,
    Sw,
    Ss,
    Sv,
    Tl,
    Tg,
    Ta,
    Te,
    Th,
    Ti,
    Ts,
    Tn,
    Tr,
    Tk,
    Uk,
    Ur,
    Uz,
    Ve,
    Vi,
    Cy,
    Wo,
    Xh,
    Yo,
    Zu
  )
}
| flowcommerce/lib-reference-scala | src/main/scala/io/flow/reference/data/Languages.scala | Scala | mit | 8,801 |
import scala.io.Source
object ModifiedKaprekarNumbers extends App {
val lines = Source.stdin.getLines().map(_.toInt).toList
val lower = lines(0)
val upper = lines(1)
val kaprekarNums = (lower to upper).filter(isKaprekar)
val output = if(!kaprekarNums.isEmpty) kaprekarNums.mkString(" ") else "INVALID RANGE"
def isKaprekar(num: Int): Boolean = {
val square = Math.pow(num, 2).toInt.toString
if(square.length > 1) {
val parts = square.splitAt(square.length / 2)
val l = parts._1.toInt
val r = parts._2.toInt
return (l + r) == num
} else {
return square.toInt == num
}
}
} | PaulNoth/hackerrank | practice/algorithms/implementation/modified_kaprekar_numbers/ModifiedKaprekarNumbers.scala | Scala | mit | 632 |
package de.zalando.play.controllers
import org.specs2.mutable._
/**
* @author slasch
* @since 07.01.2016.
*/
/**
 * Specs for PlayPathBindables' array (de)serialization using the
 * pipe-separated collection format.
 */
class PlayPathBindablesTest extends Specification {
  "createMapper" should {
    "should read different formats" in {
      // Wrapper describing a pipe-delimited ("a|b|c|d") array encoding.
      val wrapper = PipesArrayWrapper.apply(Seq.empty[String])
      val mapper = PlayPathBindables.createMapper
      val reader = PlayPathBindables.createReader(mapper, wrapper)
      val result = PlayPathBindables.readArray(reader)("a|b|c|d")
      // Four pipe-separated tokens must yield four elements.
      result must have size 4
    }
    "should write different formats" in {
      val wrapper = PipesArrayWrapper.apply(Seq.empty[String])
      val mapper = PlayPathBindables.createMapper
      val writer = PlayPathBindables.createWriter(mapper, wrapper)
      val result = PlayPathBindables.writeArray(writer)(Seq("a", "b", "c", "d"))
      // Elements are JSON-quoted, pipe-joined, with a trailing newline.
      result must_== """"a"|"b"|"c"|"d"""" + "\n"
    }
  }
}
| zalando/play-swagger | api/src/test/scala/de/zalando/play/controllers/PlayPathBindablesTest.scala | Scala | mit | 883 |
/*
* Copyright 2009-2016 DigitalGlobe, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and limitations under the License.
*
*/
package org.mrgeo.hdfs.output.image
import java.io.IOException
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.io.{SequenceFile, Writable, WritableComparable}
import org.apache.hadoop.mapreduce.{Job, OutputFormat}
import org.apache.hadoop.mapreduce.lib.output.{FileOutputFormat, SequenceFileOutputFormat}
import org.apache.spark.rdd.PairRDDFunctions
import org.mrgeo.data.DataProviderException
import org.mrgeo.data.image.{ImageOutputFormatContext, MrsImageOutputFormatProvider}
import org.mrgeo.data.rdd.RasterRDD
import org.mrgeo.data.tile.TileIdWritable
import org.mrgeo.hdfs.image.HdfsMrsImageDataProvider
import org.mrgeo.hdfs.partitioners.{RowPartitioner, BlockSizePartitioner, FileSplitPartitioner}
import org.mrgeo.hdfs.tile.FileSplit
import org.mrgeo.hdfs.utils.HadoopFileUtils
class HdfsMrsPyramidOutputFormatProvider(context: ImageOutputFormatContext) extends MrsImageOutputFormatProvider(context) {
private[image] object PartitionType extends Enumeration {
val ROW, BLOCKSIZE = Value
}
private[image] var provider: HdfsMrsImageDataProvider = null
private[image] var partitioner: PartitionType.Value = null
def this(provider: HdfsMrsImageDataProvider, context: ImageOutputFormatContext) {
this(context)
this.provider = provider
partitioner = PartitionType.BLOCKSIZE
}
def setInfo(conf: Configuration, job: Job) {
conf.set("io.map.index.interval", "1")
if (job != null) {
SequenceFileOutputFormat.setOutputCompressionType(job, SequenceFile.CompressionType.RECORD)
}
}
def setOutputInfo(conf: Configuration, job: Job, output: String) {
setInfo(conf, job)
if (job != null) {
FileOutputFormat.setOutputPath(job, new Path(output))
}
else {
conf.set("mapred.output.dir", output)
conf.set("mapreduce.output.fileoutputformat.outputdir", output)
}
}
/** Sorts the raster by tile id and writes it as a map-file based pyramid
  * under `<resource>/<zoom>`, deleting any previous output first. Partition
  * split points are persisted afterwards so readers can locate tiles. */
override def save(raster: RasterRDD, conf:Configuration): Unit = {
  // Tiles must be written in ascending tile-id order for the map-file index.
  implicit val tileIdOrdering = new Ordering[TileIdWritable] {
    override def compare(x: TileIdWritable, y: TileIdWritable): Int = x.compareTo(y)
  }
  val outputWithZoom: String = provider.getResolvedResourceName(false) + "/" + context.getZoomlevel
  val outputPath: Path = new Path(outputWithZoom)
  val jobconf = try {
    val fs: FileSystem = HadoopFileUtils.getFileSystem(conf, outputPath)
    // Overwrite semantics: remove any prior output for this zoom level.
    if (fs.exists(outputPath)) {
      fs.delete(outputPath, true)
    }
    // location of the output
    conf.set("mapred.output.dir", outputPath.toString)
    conf.set("mapreduce.output.fileoutputformat.outputdir", outputPath.toString)
    // compress
    // The constant seems to be missing from at least CDH 5.6.0 (non-yarn), so we'll use the
    // hard-coded string...
    //conf.setBoolean(FileOutputFormat.COMPRESS, true)
    conf.setBoolean("mapreduce.output.fileoutputformat.compress", true)
    // add every tile to the index
    conf.set("io.map.index.interval", "1")
    Job.getInstance(super.setupOutput(conf)).getConfiguration
  }
  catch {
    case e: IOException =>
      throw new DataProviderException("Error running spark job setup", e)
  }
  val sparkPartitioner = getSparkPartitioner
  // Repartition the output if the output data provider requires it
  val sorted = RasterRDD(
    if (sparkPartitioner == null) {
      raster.sortByKey()
    }
    else if (sparkPartitioner.hasFixedPartitions) {
      raster.sortByKey(numPartitions = sparkPartitioner.calculateNumPartitions(raster, outputWithZoom))
    }
    else {
      raster.repartitionAndSortWithinPartitions(sparkPartitioner)
    })
  val wrappedForSave = new PairRDDFunctions(sorted)
  wrappedForSave.saveAsNewAPIHadoopDataset(jobconf)
  // Persist the partition split points alongside the image — only when a
  // partitioner was actually used.
  if (sparkPartitioner != null)
  {
    sparkPartitioner.writeSplits(sorted, context.getOutput, context.getZoomlevel, jobconf)
  }
}
/** Regenerates and writes the file splits for an externally-produced output
  * at this provider's zoom level. IO problems surface as DataProviderException. */
override def finalizeExternalSave(conf: Configuration): Unit = {
  try {
    val imagePath = provider.getResolvedResourceName(true)
    val zoomPath = new Path(imagePath + "/" + context.getZoomlevel)
    val fileSplit = new FileSplit
    fileSplit.generateSplits(zoomPath, conf)
    fileSplit.writeSplits(zoomPath)
  }
  catch {
    case e: IOException =>
      throw new DataProviderException("Error in finalizeExternalSave", e)
  }
}
override def validateProtectionLevel(protectionLevel: String): Boolean = true
/** Maps the configured partition type to a concrete partitioner. ROW splits
  * on tile rows; anything else (including the BLOCKSIZE default and an unset
  * value) uses block-size based partitioning — the original BLOCKSIZE and
  * wildcard branches produced the same partitioner, so they are merged. */
private def getSparkPartitioner:FileSplitPartitioner = {
  partitioner match {
    case PartitionType.ROW =>
      new RowPartitioner(context.getBounds, context.getZoomlevel, context.getTilesize)
    case _ =>
      new BlockSizePartitioner()
  }
}
override protected def getOutputFormat: OutputFormat[WritableComparable[_], Writable] = new HdfsMrsPyramidOutputFormat
}
| ttislerdg/mrgeo | mrgeo-core/src/main/scala/org/mrgeo/hdfs/output/image/HdfsMrsPyramidOutputFormatProvider.scala | Scala | apache-2.0 | 5,585 |
/**
* Created by Romain Reuillon on 03/06/16.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package org.openmole.plugin.domain.modifier
import org.openmole.core.dsl._
import org.openmole.core.dsl.extension._
object ZipWithIndexDomain {
  /** Exposes a ZipWithIndexDomain as a discrete domain of (element, index)
    * pairs, delegating inputs and validation to the wrapped domain. */
  implicit def isDiscrete[D, I]: DiscreteFromContextDomain[ZipWithIndexDomain[D, I], (I, Int)] = domain ⇒
    Domain(
      domain.iterator,
      domain.inputs,
      domain.validate
    )
}
/** Decorates a discrete domain so that each element is paired with its
  * 0-based position in the iteration order. */
case class ZipWithIndexDomain[D, I](domain: D)(implicit discrete: DiscreteFromContextDomain[D, I]) { d ⇒
  // Iterator over (element, index) pairs, evaluated in the workflow context.
  def iterator = FromContext { p ⇒
    import p._
    discrete(domain).domain.from(context).zipWithIndex
  }
  def inputs = discrete(domain).inputs
  def validate = discrete(domain).validate
} | openmole/openmole | openmole/plugins/org.openmole.plugin.domain.modifier/src/main/scala/org/openmole/plugin/domain/modifier/ZipWithIndexDomain.scala | Scala | agpl-3.0 | 1,355 |
package com.ing.baker.runtime.akka.actor.recipe_manager
import akka.actor.{ActorLogging, Props}
import akka.persistence.PersistentActor
import com.ing.baker.il.CompiledRecipe
import com.ing.baker.runtime.akka.actor.recipe_manager.RecipeManagerActor._
import com.ing.baker.runtime.akka.actor.recipe_manager.RecipeManagerProtocol._
import com.ing.baker.runtime.akka.actor.serialization.BakerSerializable
import scala.collection.mutable
object RecipeManagerActor {
  /** Props factory for the persistent recipe-manager actor. */
  def props() = Props(new RecipeManagerActor)
  // Persisted event: journaled when a new recipe is registered, replayed on recovery.
  case class RecipeAdded(compiledRecipe: CompiledRecipe, timeStamp: Long) extends BakerSerializable
}
/** Persistent actor that stores compiled recipes keyed by recipe id.
  * State is rebuilt on recovery by replaying RecipeAdded events. */
class RecipeManagerActor extends PersistentActor with ActorLogging {
  // In-memory state: recipeId -> (recipe, timestamp when it was added).
  val compiledRecipes: mutable.Map[String, (CompiledRecipe, Long)] = mutable.Map[String, (CompiledRecipe, Long)]()
  // Id of an already-registered recipe that is equal to the given one, if any
  // (used to make AddRecipe idempotent).
  private def hasCompiledRecipe(compiledRecipe: CompiledRecipe): Option[String] =
    compiledRecipes.collectFirst { case (recipeId, (`compiledRecipe`, _)) => recipeId}
  private def addRecipe(compiledRecipe: CompiledRecipe, timestamp: Long) =
    compiledRecipes += (compiledRecipe.recipeId -> (compiledRecipe, timestamp))
  override def receiveCommand: Receive = {
    case AddRecipe(compiledRecipe) =>
      val foundRecipe = hasCompiledRecipe(compiledRecipe)
      if (foundRecipe.isEmpty) {
        val timestamp = System.currentTimeMillis()
        // Persist first; only after the event is journaled do we mutate state,
        // publish to the event stream, and acknowledge the sender.
        persist(RecipeAdded(compiledRecipe, timestamp)) { _ =>
          addRecipe(compiledRecipe, timestamp)
          context.system.eventStream.publish(
            com.ing.baker.runtime.scaladsl.RecipeAdded(compiledRecipe.name, compiledRecipe.recipeId, timestamp, compiledRecipe))
          sender() ! AddRecipeResponse(compiledRecipe.recipeId)
        }
      }
      else {
        // Duplicate recipe: reply with the existing id without persisting again.
        sender() ! AddRecipeResponse(foundRecipe.get)
      }
    case GetRecipe(recipeId: String) =>
      compiledRecipes.get(recipeId) match {
        case Some((compiledRecipe, timestamp)) => sender() ! RecipeFound(compiledRecipe, timestamp)
        case None => sender() ! NoRecipeFound(recipeId)
      }
    case GetAllRecipes =>
      sender() ! AllRecipes(compiledRecipes.map {
        case (recipeId, (compiledRecipe, timestamp)) => RecipeInformation(compiledRecipe, timestamp)
      }.toSeq)
  }
  override def receiveRecover: Receive = {
    case RecipeAdded(compiledRecipe, timeStamp) => addRecipe(compiledRecipe, timeStamp)
  }
  // One journal per actor instance, keyed by the actor's name.
  override def persistenceId: String = self.path.name
}
| ing-bank/baker | core/akka-runtime/src/main/scala/com/ing/baker/runtime/akka/actor/recipe_manager/RecipeManagerActor.scala | Scala | mit | 2,472 |
package net.ceedubs.ficus.readers
/** Aggregates every built-in ValueReader instance so a single mixin brings
  * all readers into implicit scope.
  * NOTE(review): trait linearization order affects implicit resolution, so
  * the `with` ordering below is deliberately preserved. */
trait AllValueReaderInstances
  extends AnyValReaders
  with StringReader
  with SymbolReader
  with OptionReader
  with CollectionReaders
  with ConfigReader
  with DurationReaders
  with ArbitraryTypeReader
  with TryReader
  with ConfigValueReader
  with PeriodReader
  with ChronoUnitReader
  with LocalDateReader
object AllValueReaderInstances extends AllValueReaderInstances
| mdedetrich/ficus | src/main/scala/net/ceedubs/ficus/readers/AllValueReaderInstances.scala | Scala | mit | 443 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.tools
import java.util
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
import java.util.concurrent.{CountDownLatch, TimeUnit}
import java.util.regex.Pattern
import java.util.{Collections, Properties}
import com.yammer.metrics.core.Gauge
import joptsimple.OptionParser
import kafka.client.ClientUtils
import kafka.consumer.{BaseConsumerRecord, ConsumerIterator, BaseConsumer, Blacklist, ConsumerConfig, ConsumerThreadId, ConsumerTimeoutException, TopicFilter, Whitelist, ZookeeperConsumerConnector}
import kafka.javaapi.consumer.ConsumerRebalanceListener
import kafka.message.MessageAndMetadata
import kafka.metrics.KafkaMetricsGroup
import kafka.serializer.DefaultDecoder
import kafka.utils.{CommandLineUtils, CoreUtils, Logging}
import org.apache.kafka.clients.consumer.{OffsetAndMetadata, Consumer, ConsumerRecord, KafkaConsumer}
import org.apache.kafka.clients.producer.internals.ErrorLoggingCallback
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord, RecordMetadata}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.ByteArrayDeserializer
import org.apache.kafka.common.utils.Utils
import org.apache.kafka.common.errors.WakeupException
import scala.collection.JavaConversions._
import scala.collection.mutable.HashMap
import scala.util.control.ControlThrowable
/**
* The mirror maker has the following architecture:
 * - There are N mirror maker threads; they share one ZookeeperConsumerConnector and each owns a Kafka stream.
* - All the mirror maker threads share one producer.
* - Each mirror maker thread periodically flushes the producer and then commits all offsets.
*
* @note For mirror maker, the following settings are set by default to make sure there is no data loss:
* 1. use new producer with following settings
* acks=all
* retries=max integer
* block.on.buffer.full=true
* max.in.flight.requests.per.connection=1
* 2. Consumer Settings
* auto.commit.enable=false
* 3. Mirror Maker Setting:
* abort.on.send.failure=true
*/
object MirrorMaker extends Logging with KafkaMetricsGroup {
// Producer shared by all mirror maker threads; created in main().
private var producer: MirrorMakerProducer = null
// Worker threads, one per consumption stream; created in main().
private var mirrorMakerThreads: Seq[MirrorMakerThread] = null
// Guards cleanShutdown() so the shutdown sequence runs at most once.
private val isShuttingdown: AtomicBoolean = new AtomicBoolean(false)
// Track the messages not successfully sent by mirror maker.
private val numDroppedMessages: AtomicInteger = new AtomicInteger(0)
// Transforms consumed records into producer records; set from CLI in main().
private var messageHandler: MirrorMakerMessageHandler = null
private var offsetCommitIntervalMs = 0
private var abortOnSendFailure: Boolean = true
// Set by the producer callback on terminal send failure; read by all threads.
@volatile private var exitingOnSendFailure: Boolean = false
// If a message send failed after retries are exhausted. The offset of the messages will also be removed from
// the unacked offset list to avoid offset commit being stuck on that offset. In this case, the offset of that
// message was not really acked, but was skipped. This metric records the number of skipped offsets.
newGauge("MirrorMaker-numDroppedMessages",
  new Gauge[Int] {
    def value = numDroppedMessages.get()
  })
/**
 * Entry point: parses command line options, creates the shared producer and
 * the per-stream consumers (old Zookeeper-based or new KafkaConsumer),
 * starts the mirror maker threads and blocks until they shut down.
 */
def main(args: Array[String]) {
  info("Starting mirror maker")
  try {
    val parser = new OptionParser
    val consumerConfigOpt = parser.accepts("consumer.config",
      "Embedded consumer config for consuming from the source cluster.")
      .withRequiredArg()
      .describedAs("config file")
      .ofType(classOf[String])
    val useNewConsumerOpt = parser.accepts("new.consumer",
      "Use new consumer in mirror maker.")
    val producerConfigOpt = parser.accepts("producer.config",
      "Embedded producer config.")
      .withRequiredArg()
      .describedAs("config file")
      .ofType(classOf[String])
    val numStreamsOpt = parser.accepts("num.streams",
      "Number of consumption streams.")
      .withRequiredArg()
      .describedAs("Number of threads")
      .ofType(classOf[java.lang.Integer])
      .defaultsTo(1)
    val whitelistOpt = parser.accepts("whitelist",
      "Whitelist of topics to mirror.")
      .withRequiredArg()
      .describedAs("Java regex (String)")
      .ofType(classOf[String])
    val blacklistOpt = parser.accepts("blacklist",
      "Blacklist of topics to mirror. Only old consumer supports blacklist.")
      .withRequiredArg()
      .describedAs("Java regex (String)")
      .ofType(classOf[String])
    val offsetCommitIntervalMsOpt = parser.accepts("offset.commit.interval.ms",
      "Offset commit interval in ms")
      .withRequiredArg()
      .describedAs("offset commit interval in millisecond")
      .ofType(classOf[java.lang.Integer])
      .defaultsTo(60000)
    val consumerRebalanceListenerOpt = parser.accepts("consumer.rebalance.listener",
      "The consumer rebalance listener to use for mirror maker consumer.")
      .withRequiredArg()
      .describedAs("A custom rebalance listener of type ConsumerRebalanceListener")
      .ofType(classOf[String])
    val rebalanceListenerArgsOpt = parser.accepts("rebalance.listener.args",
      "Arguments used by custom rebalance listener for mirror maker consumer")
      .withRequiredArg()
      .describedAs("Arguments passed to custom rebalance listener constructor as a string.")
      .ofType(classOf[String])
    val messageHandlerOpt = parser.accepts("message.handler",
      "Message handler which will process every record in-between consumer and producer.")
      .withRequiredArg()
      .describedAs("A custom message handler of type MirrorMakerMessageHandler")
      .ofType(classOf[String])
    val messageHandlerArgsOpt = parser.accepts("message.handler.args",
      "Arguments used by custom message handler for mirror maker.")
      .withRequiredArg()
      .describedAs("Arguments passed to message handler constructor.")
      .ofType(classOf[String])
    val abortOnSendFailureOpt = parser.accepts("abort.on.send.failure",
      "Configure the mirror maker to exit on a failed send.")
      .withRequiredArg()
      .describedAs("Stop the entire mirror maker when a send failure occurs")
      .ofType(classOf[String])
      .defaultsTo("true")
    val helpOpt = parser.accepts("help", "Print this message.")
    if (args.length == 0)
      CommandLineUtils.printUsageAndDie(parser, "Continuously copy data between two Kafka clusters.")
    val options = parser.parse(args: _*)
    if (options.has(helpOpt)) {
      parser.printHelpOn(System.out)
      System.exit(0)
    }
    CommandLineUtils.checkRequiredArgs(parser, options, consumerConfigOpt, producerConfigOpt)
    // Exactly one topic filter must be given; blacklist only works with the old consumer.
    if (List(whitelistOpt, blacklistOpt).count(options.has) != 1) {
      println("Exactly one of whitelist or blacklist is required.")
      System.exit(1)
    }
    abortOnSendFailure = options.valueOf(abortOnSendFailureOpt).toBoolean
    offsetCommitIntervalMs = options.valueOf(offsetCommitIntervalMsOpt).intValue()
    val numStreams = options.valueOf(numStreamsOpt).intValue()
    Runtime.getRuntime.addShutdownHook(new Thread("MirrorMakerShutdownHook") {
      override def run() {
        cleanShutdown()
      }
    })
    // create producer
    val producerProps = Utils.loadProps(options.valueOf(producerConfigOpt))
    // Defaults to no data loss settings.
    maybeSetDefaultProperty(producerProps, ProducerConfig.RETRIES_CONFIG, Int.MaxValue.toString)
    maybeSetDefaultProperty(producerProps, ProducerConfig.BLOCK_ON_BUFFER_FULL_CONFIG, "true")
    maybeSetDefaultProperty(producerProps, ProducerConfig.ACKS_CONFIG, "all")
    maybeSetDefaultProperty(producerProps, ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1")
    // Always set producer key and value serializer to ByteArraySerializer.
    producerProps.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer")
    producerProps.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer")
    producer = new MirrorMakerProducer(producerProps)
    val useNewConsumer = options.has(useNewConsumerOpt)
    // Create consumers
    val mirrorMakerConsumers = if (!useNewConsumer) {
      val customRebalanceListener = {
        val customRebalanceListenerClass = options.valueOf(consumerRebalanceListenerOpt)
        if (customRebalanceListenerClass != null) {
          val rebalanceListenerArgs = options.valueOf(rebalanceListenerArgsOpt)
          if (rebalanceListenerArgs != null) {
            Some(CoreUtils.createObject[ConsumerRebalanceListener](customRebalanceListenerClass, rebalanceListenerArgs))
          } else {
            Some(CoreUtils.createObject[ConsumerRebalanceListener](customRebalanceListenerClass))
          }
        } else {
          None
        }
      }
      if (customRebalanceListener.exists(!_.isInstanceOf[ConsumerRebalanceListener]))
        throw new IllegalArgumentException("The rebalance listener should be an instance of kafka.consumer.ConsumerRebalanceListener")
      createOldConsumers(
        numStreams,
        options.valueOf(consumerConfigOpt),
        customRebalanceListener,
        Option(options.valueOf(whitelistOpt)),
        Option(options.valueOf(blacklistOpt)))
    } else {
      val customRebalanceListener = {
        val customRebalanceListenerClass = options.valueOf(consumerRebalanceListenerOpt)
        if (customRebalanceListenerClass != null) {
          val rebalanceListenerArgs = options.valueOf(rebalanceListenerArgsOpt)
          if (rebalanceListenerArgs != null) {
            Some(CoreUtils.createObject[org.apache.kafka.clients.consumer.ConsumerRebalanceListener](customRebalanceListenerClass, rebalanceListenerArgs))
          } else {
            Some(CoreUtils.createObject[org.apache.kafka.clients.consumer.ConsumerRebalanceListener](customRebalanceListenerClass))
          }
        } else {
          None
        }
      }
      if (customRebalanceListener.exists(!_.isInstanceOf[org.apache.kafka.clients.consumer.ConsumerRebalanceListener]))
        // Error message fixed: the concatenation previously lacked a space
        // ("instance oforg.apache...") and misspelled "ConsumerRebalanceListner".
        throw new IllegalArgumentException("The rebalance listener should be an instance of " +
          "org.apache.kafka.clients.consumer.ConsumerRebalanceListener")
      createNewConsumers(
        numStreams,
        options.valueOf(consumerConfigOpt),
        customRebalanceListener,
        Option(options.valueOf(whitelistOpt)))
    }
    // Create mirror maker threads.
    mirrorMakerThreads = (0 until numStreams) map (i =>
      new MirrorMakerThread(mirrorMakerConsumers(i), i))
    // Create and initialize message handler
    val customMessageHandlerClass = options.valueOf(messageHandlerOpt)
    val messageHandlerArgs = options.valueOf(messageHandlerArgsOpt)
    messageHandler = {
      if (customMessageHandlerClass != null) {
        if (messageHandlerArgs != null)
          CoreUtils.createObject[MirrorMakerMessageHandler](customMessageHandlerClass, messageHandlerArgs)
        else
          CoreUtils.createObject[MirrorMakerMessageHandler](customMessageHandlerClass)
      } else {
        defaultMirrorMakerMessageHandler
      }
    }
  } catch {
    case ct : ControlThrowable => throw ct
    case t : Throwable =>
      error("Exception when starting mirror maker.", t)
  }
  mirrorMakerThreads.foreach(_.start())
  mirrorMakerThreads.foreach(_.awaitShutdown())
}
/**
 * Builds `numStreams` old (Zookeeper-based) consumers, each with its own
 * connector, a topic filter (whitelist XOR blacklist) and an internal
 * rebalance listener that flushes/commits before partition reassignment.
 */
private def createOldConsumers(numStreams: Int,
                               consumerConfigPath: String,
                               customRebalanceListener: Option[ConsumerRebalanceListener],
                               whitelist: Option[String],
                               blacklist: Option[String]) : Seq[MirrorMakerBaseConsumer] = {
  // Create consumer connector
  val consumerConfigProps = Utils.loadProps(consumerConfigPath)
  // Disable consumer auto offsets commit to prevent data loss.
  maybeSetDefaultProperty(consumerConfigProps, "auto.commit.enable", "false")
  // Set the consumer timeout so we will not block for low volume pipeline. The timeout is necessary to make sure
  // Offsets are still committed for those low volume pipelines.
  maybeSetDefaultProperty(consumerConfigProps, "consumer.timeout.ms", "10000")
  // The default client id is group id, we manually set client id to groupId-index to avoid metric collision
  val groupIdString = consumerConfigProps.getProperty("group.id")
  val connectors = (0 until numStreams) map { i =>
    consumerConfigProps.setProperty("client.id", groupIdString + "-" + i.toString)
    // ConsumerConfig copies the properties, so mutating client.id per iteration is safe.
    val consumerConfig = new ConsumerConfig(consumerConfigProps)
    new ZookeeperConsumerConnector(consumerConfig)
  }
  // create filters
  val filterSpec = if (whitelist.isDefined)
    new Whitelist(whitelist.get)
  else if (blacklist.isDefined)
    new Blacklist(blacklist.get)
  else
    throw new IllegalArgumentException("Either whitelist or blacklist should be defined!")
  (0 until numStreams) map { i =>
    val consumer = new MirrorMakerOldConsumer(connectors(i), filterSpec)
    val consumerRebalanceListener = new InternalRebalanceListenerForOldConsumer(consumer, customRebalanceListener)
    connectors(i).setConsumerRebalanceListener(consumerRebalanceListener)
    consumer
  }
}
/**
 * Builds `numStreams` new KafkaConsumer-based consumers. The new consumer only
 * supports whitelists; a missing whitelist raises IllegalArgumentException.
 */
def createNewConsumers(numStreams: Int,
                       consumerConfigPath: String,
                       customRebalanceListener: Option[org.apache.kafka.clients.consumer.ConsumerRebalanceListener],
                       whitelist: Option[String]) : Seq[MirrorMakerBaseConsumer] = {
  // Fail fast BEFORE allocating any KafkaConsumer: previously this check ran
  // after the consumers were constructed, leaking them (never closed) when the
  // whitelist argument was missing. Same exception, same message as before.
  whitelist.getOrElse(throw new IllegalArgumentException("White list cannot be empty for new consumer"))
  // Create consumer connector
  val consumerConfigProps = Utils.loadProps(consumerConfigPath)
  // Disable consumer auto offsets commit to prevent data loss.
  maybeSetDefaultProperty(consumerConfigProps, "enable.auto.commit", "false")
  // Hardcode the deserializer to ByteArrayDeserializer
  consumerConfigProps.setProperty("key.deserializer", classOf[ByteArrayDeserializer].getName)
  consumerConfigProps.setProperty("value.deserializer", classOf[ByteArrayDeserializer].getName)
  // The default client id is group id, we manually set client id to groupId-index to avoid metric collision
  val groupIdString = consumerConfigProps.getProperty("group.id")
  val consumers = (0 until numStreams) map { i =>
    consumerConfigProps.setProperty("client.id", groupIdString + "-" + i.toString)
    new KafkaConsumer[Array[Byte], Array[Byte]](consumerConfigProps)
  }
  consumers.map(consumer => new MirrorMakerNewConsumer(consumer, customRebalanceListener, whitelist))
}
/**
 * Commits the consumer's offsets, unless the mirror maker is exiting due to a
 * send failure — in that case offsets must not advance past unsent messages.
 */
def commitOffsets(mirrorMakerConsumer: MirrorMakerBaseConsumer) {
  if (!exitingOnSendFailure) {
    trace("Committing offsets.")
    try {
      mirrorMakerConsumer.commit()
    } catch {
      case e: WakeupException =>
        // we only call wakeup() once to close the consumer,
        // so if we catch it in commit we can safely retry
        // and re-throw to break the loop
        mirrorMakerConsumer.commit()
        throw e
    }
  } else {
    info("Exiting on send failure, skip committing offsets.")
  }
}
/** Shuts the mirror maker down exactly once: stop and join all worker
  * threads, then close the shared producer. Safe to call concurrently. */
def cleanShutdown() {
  // CAS ensures only the first caller (thread or JVM shutdown hook) proceeds.
  if (isShuttingdown.compareAndSet(false, true)) {
    info("Start clean shutdown.")
    // Shutdown consumer threads.
    info("Shutting down consumer threads.")
    Option(mirrorMakerThreads).foreach { threads =>
      threads.foreach(_.shutdown())
      threads.foreach(_.awaitShutdown())
    }
    info("Closing producer.")
    producer.close()
    info("Kafka mirror maker shutdown successfully")
  }
}
/** Sets `propertyName` to `defaultValue` unless the user already supplied a
  * value; warns when the user's value overrides the no-data-loss default. */
private def maybeSetDefaultProperty(properties: Properties, propertyName: String, defaultValue: String) {
  val userValue = Option(properties.getProperty(propertyName))
  val effectiveValue = userValue.getOrElse(defaultValue)
  properties.setProperty(propertyName, effectiveValue)
  if (effectiveValue != defaultValue)
    info("Property %s is overridden to %s - data loss or message reordering is possible.".format(propertyName, userValue.orNull))
}
/**
 * Worker thread: pulls records from its consumer, runs them through the
 * message handler, sends them via the shared producer, and periodically
 * flushes the producer and commits offsets.
 */
class MirrorMakerThread(mirrorMakerConsumer: MirrorMakerBaseConsumer,
                        val threadId: Int) extends Thread with Logging with KafkaMetricsGroup {
  private val threadName = "mirrormaker-thread-" + threadId
  // Released in run()'s finally block; awaitShutdown() blocks on it.
  private val shutdownLatch: CountDownLatch = new CountDownLatch(1)
  private var lastOffsetCommitMs = System.currentTimeMillis()
  @volatile private var shuttingDown: Boolean = false
  this.logIdent = "[%s] ".format(threadName)
  setName(threadName)
  override def run() {
    info("Starting mirror maker thread " + threadName)
    mirrorMakerConsumer.init()
    try {
      // We need the two while loop to make sure when old consumer is used, even there is no message we
      // still commit offset. When new consumer is used, this is handled by poll(timeout).
      while (!exitingOnSendFailure && !shuttingDown) {
        try {
          while (!exitingOnSendFailure && !shuttingDown && mirrorMakerConsumer.hasData) {
            val data = mirrorMakerConsumer.receive()
            trace("Sending message with value size %d and offset %d".format(data.value.length, data.offset))
            // One consumed record may map to zero or more producer records.
            val records = messageHandler.handle(data)
            records.foreach(producer.send)
            maybeFlushAndCommitOffsets()
          }
        } catch {
          case cte: ConsumerTimeoutException =>
            trace("Caught ConsumerTimeoutException, continue iteration.")
          case we: WakeupException =>
            trace("Caught ConsumerWakeupException, continue iteration.")
        }
        maybeFlushAndCommitOffsets()
      }
    } catch {
      case t: Throwable =>
        fatal("Mirror maker thread failure due to ", t)
    } finally {
      info("Flushing producer.")
      producer.flush()
      info("Committing consumer offsets.")
      try {
        commitOffsets(mirrorMakerConsumer)
      } catch {
        case e: WakeupException => // just ignore
      }
      info("Shutting down consumer connectors.")
      // we do not need to call consumer.close() since the consumer has already been interrupted
      mirrorMakerConsumer.cleanup()
      shutdownLatch.countDown()
      info("Mirror maker thread stopped")
      // if it exits accidentally, stop the entire mirror maker
      if (!isShuttingdown.get()) {
        fatal("Mirror maker thread exited abnormally, stopping the whole mirror maker.")
        System.exit(-1)
      }
    }
  }
  // Flushes the producer and commits offsets once per offsetCommitIntervalMs.
  // Flush-before-commit guarantees committed offsets never pass unsent data.
  def maybeFlushAndCommitOffsets() {
    if (System.currentTimeMillis() - lastOffsetCommitMs > offsetCommitIntervalMs) {
      debug("Committing MirrorMaker state automatically.")
      producer.flush()
      commitOffsets(mirrorMakerConsumer)
      lastOffsetCommitMs = System.currentTimeMillis()
    }
  }
  // Signals the consume loop to stop and wakes the consumer out of any blocking call.
  def shutdown() {
    try {
      info(threadName + " shutting down")
      shuttingDown = true
      mirrorMakerConsumer.stop()
    }
    catch {
      case ie: InterruptedException =>
        warn("Interrupt during shutdown of the mirror maker thread")
    }
  }
  // Blocks until run() has fully finished its cleanup.
  def awaitShutdown() {
    try {
      shutdownLatch.await()
      info("Mirror maker thread shutdown complete")
    } catch {
      case ie: InterruptedException =>
        warn("Shutdown of the mirror maker thread interrupted")
    }
  }
}
// Unifies the old and new consumer APIs behind the interface the mirror
// maker thread drives.
private[kafka] trait MirrorMakerBaseConsumer extends BaseConsumer {
  // Called once from the owning mirror maker thread before consumption starts.
  def init()
  // Whether receive() may yield a record (the new consumer always returns true).
  def hasData : Boolean
}
/**
 * Adapter over the old (Zookeeper-based) consumer: exposes a single filtered
 * message stream through the MirrorMakerBaseConsumer interface.
 */
private class MirrorMakerOldConsumer(connector: ZookeeperConsumerConnector,
                                     filterSpec: TopicFilter) extends MirrorMakerBaseConsumer {
  // Stream iterator; assigned in init() before the consume loop runs.
  private var iter: ConsumerIterator[Array[Byte], Array[Byte]] = null
  override def init() {
    // Creating one stream per each connector instance
    val streams = connector.createMessageStreamsByFilter(filterSpec, 1, new DefaultDecoder(), new DefaultDecoder())
    require(streams.size == 1)
    val stream = streams(0)
    iter = stream.iterator()
  }
  // hasNext blocks until a record arrives or consumer.timeout.ms elapses,
  // in which case the iterator throws ConsumerTimeoutException.
  override def hasData = iter.hasNext()
  override def receive() : BaseConsumerRecord = {
    val messageAndMetadata = iter.next()
    BaseConsumerRecord(messageAndMetadata.topic, messageAndMetadata.partition, messageAndMetadata.offset, messageAndMetadata.key, messageAndMetadata.message)
  }
  override def stop() {
    // Do nothing
  }
  override def cleanup() {
    connector.shutdown()
  }
  override def commit() {
    connector.commitOffsets
  }
}
/**
 * Adapter over the new KafkaConsumer. Delivered offsets are tracked manually
 * (record offset + 1 per partition) so commits reflect exactly what this
 * thread has handed to the producer.
 */
private class MirrorMakerNewConsumer(consumer: Consumer[Array[Byte], Array[Byte]],
                                     customRebalanceListener: Option[org.apache.kafka.clients.consumer.ConsumerRebalanceListener],
                                     whitelistOpt: Option[String])
  extends MirrorMakerBaseConsumer {
  // Fails construction early when no whitelist is given.
  val regex = whitelistOpt.getOrElse(throw new IllegalArgumentException("New consumer only supports whitelist."))
  // Iterator over the records of the most recent poll(); refreshed lazily in receive().
  var recordIter: java.util.Iterator[ConsumerRecord[Array[Byte], Array[Byte]]] = null
  // TODO: we need to manually maintain the consumed offsets for new consumer
  // since its internal consumed position is updated in batch rather than one
  // record at a time, this can be resolved when we break the unification of both consumers
  private val offsets = new HashMap[TopicPartition, Long]()
  override def init() {
    debug("Initiating new consumer")
    val consumerRebalanceListener = new InternalRebalanceListenerForNewConsumer(this, customRebalanceListener)
    if (whitelistOpt.isDefined)
      consumer.subscribe(Pattern.compile(whitelistOpt.get), consumerRebalanceListener)
  }
  // poll() blocks internally, so from the caller's view there is always data pending.
  override def hasData = true
  override def receive() : BaseConsumerRecord = {
    if (recordIter == null || !recordIter.hasNext) {
      recordIter = consumer.poll(1000).iterator
      // Mimic the old consumer's timeout so the caller's retry loop still works.
      if (!recordIter.hasNext)
        throw new ConsumerTimeoutException
    }
    val record = recordIter.next()
    val tp = new TopicPartition(record.topic, record.partition)
    // Committed offset is the NEXT offset to consume, hence +1.
    offsets.put(tp, record.offset + 1)
    BaseConsumerRecord(record.topic, record.partition, record.offset, record.key, record.value)
  }
  override def stop() {
    consumer.wakeup()
  }
  override def cleanup() {
    ClientUtils.swallow(consumer.close())
  }
  override def commit() {
    consumer.commitSync(offsets.map { case (tp, offset) => (tp, new OffsetAndMetadata(offset, ""))})
    offsets.clear()
  }
}
/**
 * Internal rebalance hook for the new consumer: flushes the producer and
 * commits offsets BEFORE partitions are revoked so no in-flight data is lost,
 * then delegates to the user-supplied listener (if any).
 */
private class InternalRebalanceListenerForNewConsumer(mirrorMakerConsumer: MirrorMakerBaseConsumer,
                                                      customRebalanceListenerForNewConsumer: Option[org.apache.kafka.clients.consumer.ConsumerRebalanceListener])
  extends org.apache.kafka.clients.consumer.ConsumerRebalanceListener {
  override def onPartitionsRevoked(partitions: util.Collection[TopicPartition]) {
    producer.flush()
    commitOffsets(mirrorMakerConsumer)
    customRebalanceListenerForNewConsumer.foreach(_.onPartitionsRevoked(partitions))
  }
  override def onPartitionsAssigned(partitions: util.Collection[TopicPartition]) {
    customRebalanceListenerForNewConsumer.foreach(_.onPartitionsAssigned(partitions))
  }
}
/**
 * Internal rebalance hook for the old consumer: flushes the producer and
 * commits offsets before partitions are released, then delegates to the
 * user-supplied listener (if any).
 */
private class InternalRebalanceListenerForOldConsumer(mirrorMakerConsumer: MirrorMakerBaseConsumer,
                                                      customRebalanceListenerForOldConsumer: Option[ConsumerRebalanceListener])
  extends ConsumerRebalanceListener {
  override def beforeReleasingPartitions(partitionOwnership: java.util.Map[String, java.util.Set[java.lang.Integer]]) {
    producer.flush()
    commitOffsets(mirrorMakerConsumer)
    // invoke custom consumer rebalance listener
    customRebalanceListenerForOldConsumer.foreach(_.beforeReleasingPartitions(partitionOwnership))
  }
  override def beforeStartingFetchers(consumerId: String,
                                      partitionAssignment: java.util.Map[String, java.util.Map[java.lang.Integer, ConsumerThreadId]]) {
    customRebalanceListenerForOldConsumer.foreach(_.beforeStartingFetchers(consumerId, partitionAssignment))
  }
}
/**
 * Thin wrapper around KafkaProducer. With producer.type=sync every send
 * blocks until acknowledged; otherwise sends are asynchronous with a callback
 * that handles terminal failures.
 */
private class MirrorMakerProducer(val producerProps: Properties) {
  val sync = "sync" == producerProps.getProperty("producer.type", "async")
  val producer = new KafkaProducer[Array[Byte], Array[Byte]](producerProps)
  def send(record: ProducerRecord[Array[Byte], Array[Byte]]) {
    if (sync) producer.send(record).get()
    else {
      val callback = new MirrorMakerProducerCallback(record.topic(), record.key(), record.value())
      producer.send(record, callback)
    }
  }
  def flush() { producer.flush() }
  def close() { producer.close() }
  def close(timeout: Long) { producer.close(timeout, TimeUnit.MILLISECONDS) }
}
/**
 * Producer callback: on terminal send failure (producer retries exhausted)
 * it logs the error, counts the record as dropped, and — when
 * abort.on.send.failure is set — triggers a full mirror maker shutdown.
 */
private class MirrorMakerProducerCallback (topic: String, key: Array[Byte], value: Array[Byte])
  extends ErrorLoggingCallback(topic, key, value, false) {
  override def onCompletion(metadata: RecordMetadata, exception: Exception) {
    if (exception != null) {
      // Use default call back to log error. This means the max retries of producer has reached and message
      // still could not be sent.
      super.onCompletion(metadata, exception)
      // If abort.on.send.failure is set, stop the mirror maker. Otherwise log skipped message and move on.
      if (abortOnSendFailure) {
        info("Closing producer due to send failure.")
        exitingOnSendFailure = true
        producer.close(0)
      }
      numDroppedMessages.incrementAndGet()
    }
  }
}
/**
 * Pluggable hook that transforms each consumed record into zero or more
 * producer records before they are sent to the target cluster.
 *
 * If message.handler.args is specified, the handler class must have a
 * constructor that takes a single String argument.
 */
trait MirrorMakerMessageHandler {
  // Old-consumer record type.
  def handle(record: MessageAndMetadata[Array[Byte], Array[Byte]]): util.List[ProducerRecord[Array[Byte], Array[Byte]]]
  // Unified consumer record type.
  def handle(record: BaseConsumerRecord): util.List[ProducerRecord[Array[Byte], Array[Byte]]]
}
/** Identity handler: mirrors each record to the same topic with the same
  * key and value, as a single-element list. */
private object defaultMirrorMakerMessageHandler extends MirrorMakerMessageHandler {
  override def handle(record: MessageAndMetadata[Array[Byte], Array[Byte]]): util.List[ProducerRecord[Array[Byte], Array[Byte]]] = {
    val producerRecord = new ProducerRecord[Array[Byte], Array[Byte]](record.topic, record.key(), record.message())
    Collections.singletonList(producerRecord)
  }
  override def handle(record: BaseConsumerRecord): util.List[ProducerRecord[Array[Byte], Array[Byte]]] = {
    val producerRecord = new ProducerRecord[Array[Byte], Array[Byte]](record.topic, record.key, record.value)
    Collections.singletonList(producerRecord)
  }
}
}
| OpenPOWER-BigData/HDP-kafka | core/src/main/scala/kafka/tools/MirrorMaker.scala | Scala | apache-2.0 | 28,010 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.expressions
import org.apache.flink.api.common.typeinfo.BasicTypeInfo._
import org.apache.flink.api.common.typeinfo.{BasicTypeInfo, TypeInformation}
import org.apache.flink.table.planner.typeutils.TypeInfoCheckUtils
import org.apache.flink.table.planner.validate._
/** ABS(child): absolute value. Result keeps the operand's own type; operand must be numeric. */
case class Abs(child: PlannerExpression) extends UnaryExpression {
override private[flink] def resultType: TypeInformation[_] = child.resultType
override private[flink] def validateInput(): ValidationResult =
TypeInfoCheckUtils.assertNumericExpr(child.resultType, "Abs")
override def toString: String = s"abs($child)"
}
/** CEIL(child): round towards positive infinity. Result type is delegated to ReturnTypeInference. */
case class Ceil(child: PlannerExpression) extends UnaryExpression {
override private[flink] def resultType: TypeInformation[_] = {
ReturnTypeInference.inferCeil(this)
}
override private[flink] def validateInput(): ValidationResult =
TypeInfoCheckUtils.assertNumericExpr(child.resultType, "Ceil")
override def toString: String = s"ceil($child)"
}
/** EXP(child): e raised to the operand; DOUBLE in, DOUBLE out. */
case class Exp(child: PlannerExpression) extends UnaryExpression with InputTypeSpec {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override private[flink] def expectedTypes: Seq[TypeInformation[_]] = DOUBLE_TYPE_INFO :: Nil
override def toString: String = s"exp($child)"
}
/** FLOOR(child): round towards negative infinity. Result type is delegated to ReturnTypeInference. */
case class Floor(child: PlannerExpression) extends UnaryExpression {
override private[flink] def resultType: TypeInformation[_] = {
ReturnTypeInference.inferFloor(this)
}
override private[flink] def validateInput(): ValidationResult =
TypeInfoCheckUtils.assertNumericExpr(child.resultType, "Floor")
override def toString: String = s"floor($child)"
}
/** LOG10(child): base-10 logarithm; DOUBLE in, DOUBLE out. */
case class Log10(child: PlannerExpression) extends UnaryExpression with InputTypeSpec {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override private[flink] def expectedTypes: Seq[TypeInformation[_]] = DOUBLE_TYPE_INFO :: Nil
override def toString: String = s"log10($child)"
}
/** LOG2(child): base-2 logarithm; DOUBLE in, DOUBLE out. */
case class Log2(child: PlannerExpression) extends UnaryExpression with InputTypeSpec {
override private[flink] def expectedTypes: Seq[TypeInformation[_]] = DOUBLE_TYPE_INFO :: Nil
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override def toString: String = s"log2($child)"
}
/** COSH(child): hyperbolic cosine of a numeric operand; result is DOUBLE. */
case class Cosh(child: PlannerExpression) extends UnaryExpression {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override private[flink] def validateInput(): ValidationResult =
TypeInfoCheckUtils.assertNumericExpr(child.resultType, "Cosh")
override def toString = s"cosh($child)"
}
/** LOG(base, antilogarithm): logarithm with explicit base; the auxiliary constructor
  * defaults the base to e. `children` tolerates a null base because the companion's
  * one-argument apply passes null. */
case class Log(base: PlannerExpression, antilogarithm: PlannerExpression)
extends PlannerExpression with InputTypeSpec {
def this(antilogarithm: PlannerExpression) = this(E(), antilogarithm)
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override private[flink] def children: Seq[PlannerExpression] =
if (base == null) Seq(antilogarithm) else Seq(base, antilogarithm)
override private[flink] def expectedTypes: Seq[TypeInformation[_]] =
Seq.fill(children.length)(DOUBLE_TYPE_INFO)
override def toString: String = s"log(${children.mkString(",")})"
}
object Log {
def apply(antilogarithm: PlannerExpression): Log = Log(null, antilogarithm)
}
/** LN(child): natural logarithm; DOUBLE in, DOUBLE out. */
case class Ln(child: PlannerExpression) extends UnaryExpression with InputTypeSpec {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override private[flink] def expectedTypes: Seq[TypeInformation[_]] = DOUBLE_TYPE_INFO :: Nil
override def toString: String = s"ln($child)"
}
/** POWER(left, right): left raised to right; both operands coerced to DOUBLE. */
case class Power(left: PlannerExpression, right: PlannerExpression)
extends BinaryExpression with InputTypeSpec {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override private[flink] def expectedTypes: Seq[TypeInformation[_]] =
DOUBLE_TYPE_INFO :: DOUBLE_TYPE_INFO :: Nil
override def toString: String = s"pow($left, $right)"
}
/** SINH(child): hyperbolic sine of a numeric operand; result is DOUBLE. */
case class Sinh(child: PlannerExpression) extends UnaryExpression {

  override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO

  override private[flink] def validateInput(): ValidationResult =
    TypeInfoCheckUtils.assertNumericExpr(child.resultType, "Sinh")

  override def toString: String = s"sinh($child)"
}
/** SQRT(child): square root; DOUBLE in, DOUBLE out. */
case class Sqrt(child: PlannerExpression) extends UnaryExpression with InputTypeSpec {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override private[flink] def expectedTypes: Seq[TypeInformation[_]] =
Seq(DOUBLE_TYPE_INFO)
override def toString: String = s"sqrt($child)"
}
/** SIN(child): sine of a numeric operand (radians); result is DOUBLE. */
case class Sin(child: PlannerExpression) extends UnaryExpression {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override private[flink] def validateInput(): ValidationResult =
TypeInfoCheckUtils.assertNumericExpr(child.resultType, "Sin")
override def toString: String = s"sin($child)"
}
/** COS(child): cosine of a numeric operand; result is DOUBLE. */
case class Cos(child: PlannerExpression) extends UnaryExpression {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override private[flink] def validateInput(): ValidationResult =
TypeInfoCheckUtils.assertNumericExpr(child.resultType, "Cos")
override def toString: String = s"cos($child)"
}
/** TAN(child): tangent of a numeric operand; result is DOUBLE. */
case class Tan(child: PlannerExpression) extends UnaryExpression {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override private[flink] def validateInput(): ValidationResult =
TypeInfoCheckUtils.assertNumericExpr(child.resultType, "Tan")
override def toString: String = s"tan($child)"
}
/** TANH(child): hyperbolic tangent of a numeric operand; result is DOUBLE. */
case class Tanh(child: PlannerExpression) extends UnaryExpression {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override private[flink] def validateInput(): ValidationResult =
TypeInfoCheckUtils.assertNumericExpr(child.resultType, "Tanh")
override def toString = s"tanh($child)"
}
/** COT(child): cotangent of a numeric operand; result is DOUBLE. */
case class Cot(child: PlannerExpression) extends UnaryExpression {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override private[flink] def validateInput(): ValidationResult =
TypeInfoCheckUtils.assertNumericExpr(child.resultType, "Cot")
override def toString: String = s"cot($child)"
}
/** ASIN(child): arc sine of a numeric operand; result is DOUBLE. */
case class Asin(child: PlannerExpression) extends UnaryExpression {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override private[flink] def validateInput(): ValidationResult =
TypeInfoCheckUtils.assertNumericExpr(child.resultType, "Asin")
override def toString: String = s"asin($child)"
}
/** ACOS(child): arc cosine of a numeric operand; result is DOUBLE. */
case class Acos(child: PlannerExpression) extends UnaryExpression {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override private[flink] def validateInput(): ValidationResult =
TypeInfoCheckUtils.assertNumericExpr(child.resultType, "Acos")
override def toString: String = s"acos($child)"
}
/** ATAN(child): arc tangent of a numeric operand; result is DOUBLE. */
case class Atan(child: PlannerExpression) extends UnaryExpression {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override private[flink] def validateInput(): ValidationResult =
TypeInfoCheckUtils.assertNumericExpr(child.resultType, "Atan")
override def toString: String = s"atan($child)"
}
/** ATAN2(y, x): two-argument arc tangent; result is DOUBLE. Both operands must be numeric. */
case class Atan2(y: PlannerExpression, x: PlannerExpression) extends BinaryExpression {

  override private[flink] def left = y

  override private[flink] def right = x

  override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO

  // BUG FIX: the validation result for `y` used to be computed and then discarded
  // (only the `x` check was the expression's value), so a non-numeric `y` passed
  // validation whenever `x` was numeric. Short-circuit on the first failure instead.
  override private[flink] def validateInput() = {
    TypeInfoCheckUtils.assertNumericExpr(y.resultType, "atan2") match {
      case failure: ValidationFailure => failure
      case _ => TypeInfoCheckUtils.assertNumericExpr(x.resultType, "atan2")
    }
  }

  override def toString: String = s"atan2($left, $right)"
}
/** DEGREES(child): converts radians to degrees; numeric operand, DOUBLE result. */
case class Degrees(child: PlannerExpression) extends UnaryExpression {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override private[flink] def validateInput(): ValidationResult =
TypeInfoCheckUtils.assertNumericExpr(child.resultType, "Degrees")
override def toString: String = s"degrees($child)"
}
/** RADIANS(child): converts degrees to radians; numeric operand, DOUBLE result. */
case class Radians(child: PlannerExpression) extends UnaryExpression {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override private[flink] def validateInput(): ValidationResult =
TypeInfoCheckUtils.assertNumericExpr(child.resultType, "Radians")
override def toString: String = s"radians($child)"
}
/** SIGN(child): signum; keeps the operand's own numeric type. */
case class Sign(child: PlannerExpression) extends UnaryExpression {
override private[flink] def resultType: TypeInformation[_] = child.resultType
override private[flink] def validateInput(): ValidationResult =
TypeInfoCheckUtils.assertNumericExpr(child.resultType, "sign")
override def toString: String = s"sign($child)"
}
/** ROUND(left, right): rounds `left` to `right` decimal places; `right` must be an integer. */
case class Round(left: PlannerExpression, right: PlannerExpression)
  extends BinaryExpression {

  // Result type (e.g. decimal precision/scale) is derived by the planner.
  override private[flink] def resultType: TypeInformation[_] = {
    ReturnTypeInference.inferRound(this)
  }

  // BUG FIX: the ValidationFailure for a non-integer `right` used to be computed
  // and then discarded (the if-expression's value was unused), so validation always
  // fell through to the numeric check on `left`. Return the failure instead.
  override private[flink] def validateInput(): ValidationResult = {
    if (!TypeInfoCheckUtils.isInteger(right.resultType)) {
      ValidationFailure(s"round right requires int, get " +
        s"$right : ${right.resultType}")
    } else {
      TypeInfoCheckUtils.assertNumericExpr(left.resultType, s"round left :$left")
    }
  }

  override def toString: String = s"round($left, $right)"
}
/** PI(): the constant pi as DOUBLE. */
case class Pi() extends LeafExpression {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override def toString: String = s"pi()"
}
/** E(): the constant e as DOUBLE. */
case class E() extends LeafExpression {
override private[flink] def resultType: TypeInformation[_] = DOUBLE_TYPE_INFO
override def toString: String = s"e()"
}
/** RAND([seed]): pseudo-random DOUBLE in [0, 1). The optional INT seed is encoded
  * as a null field (see the no-arg auxiliary constructor). */
case class Rand(seed: PlannerExpression) extends PlannerExpression with InputTypeSpec {
def this() = this(null)
override private[flink] def children: Seq[PlannerExpression] = if (seed != null) {
seed :: Nil
} else {
Nil
}
override private[flink] def resultType: TypeInformation[_] = BasicTypeInfo.DOUBLE_TYPE_INFO
override private[flink] def expectedTypes: Seq[TypeInformation[_]] = if (seed != null) {
INT_TYPE_INFO :: Nil
} else {
Nil
}
override def toString: String = if (seed != null) {
s"rand($seed)"
} else {
s"rand()"
}
}
/** RAND_INTEGER([seed,] bound): pseudo-random INT below `bound`; optional seed encoded as null. */
case class RandInteger(seed: PlannerExpression, bound: PlannerExpression)
extends PlannerExpression with InputTypeSpec {
def this(bound: PlannerExpression) = this(null, bound)
override private[flink] def children: Seq[PlannerExpression] = if (seed != null) {
seed :: bound :: Nil
} else {
bound :: Nil
}
override private[flink] def resultType: TypeInformation[_] = BasicTypeInfo.INT_TYPE_INFO
override private[flink] def expectedTypes: Seq[TypeInformation[_]] = if (seed != null) {
INT_TYPE_INFO :: INT_TYPE_INFO :: Nil
} else {
INT_TYPE_INFO :: Nil
}
override def toString: String = if (seed != null) {
s"randInteger($seed, $bound)"
} else {
s"randInteger($bound)"
}
}
/** BIN(child): binary-string representation of an integer-family operand. */
case class Bin(child: PlannerExpression) extends UnaryExpression {
override private[flink] def resultType: TypeInformation[_] = BasicTypeInfo.STRING_TYPE_INFO
override private[flink] def validateInput(): ValidationResult =
TypeInfoCheckUtils.assertIntegerFamilyExpr(child.resultType, "Bin")
override def toString: String = s"bin($child)"
}
/** HEX(child): hexadecimal string of an integer-family or string operand. */
case class Hex(child: PlannerExpression) extends UnaryExpression {
override private[flink] def resultType: TypeInformation[_] = BasicTypeInfo.STRING_TYPE_INFO
override private[flink] def validateInput(): ValidationResult = {
if (TypeInfoCheckUtils.isIntegerFamily(child.resultType) ||
TypeInfoCheckUtils.isString(child.resultType)) {
ValidationSuccess
} else {
ValidationFailure(s"hex() requires an integer or string input but was '${child.resultType}'.")
}
}
override def toString: String = s"hex($child)"
}
/** UUID(): a freshly generated UUID string. */
case class UUID() extends LeafExpression {
override private[flink] def resultType = BasicTypeInfo.STRING_TYPE_INFO
override def toString: String = s"uuid()"
}
/** TRUNCATE(base[, num]): truncates `base` to `num` decimal places. The optional
  * `num` is encoded as a null field (see the auxiliary constructor / companion apply). */
case class Truncate(base: PlannerExpression, num: PlannerExpression)
  extends PlannerExpression with InputTypeSpec {

  def this(base: PlannerExpression) = this(base, null)

  // Truncation never changes the operand's numeric type.
  override private[flink] def resultType: TypeInformation[_] = base.resultType

  override private[flink] def children: Seq[PlannerExpression] =
    if (num == null) Seq(base) else Seq(base, num)

  override private[flink] def expectedTypes: Seq[TypeInformation[_]] =
    if (num == null) Seq(base.resultType) else Seq(base.resultType, INT_TYPE_INFO)

  override def toString: String = s"truncate(${children.mkString(",")})"

  // BUG FIX: the ValidationFailure for a non-integer `num` used to be computed and
  // then discarded, so validation always fell through to the numeric check on `base`.
  // Short-circuit on the `num` failure instead.
  override private[flink] def validateInput(): ValidationResult = {
    if (num != null && !TypeInfoCheckUtils.isInteger(num.resultType)) {
      ValidationFailure(s"truncate num requires int, get " +
        s"$num : ${num.resultType}")
    } else {
      TypeInfoCheckUtils.assertNumericExpr(base.resultType, s"truncate base :$base")
    }
  }
}
/** Companion: one-argument form, with the optional decimal-places operand encoded as null. */
object Truncate {
def apply(base: PlannerExpression): Truncate = Truncate(base, null)
}
| hequn8128/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/expressions/mathExpressions.scala | Scala | apache-2.0 | 14,031 |
import edu.uta.diql._
import org.apache.spark._
import org.apache.spark.rdd._
/** Spark driver that prints the mean of the doubles in the input file (args(0)). */
object Test {
def main ( args: Array[String] ) {
val conf = new SparkConf().setAppName("Test")
val sc = new SparkContext(conf)
explain(true)
// One Double per input line.
val A = sc.textFile(args(0)).map( line => line.toDouble )
// The triple-quoted string is DIQL "diablo" DSL source, presumably compiled by
// the imported `v` macro over RDD A — not Scala; do not edit as Scala code.
v(sc,"""
var sum: Double = 0.0;
var count: Int = 0;
for a in A do {
sum += a;
count += 1;
};
println(sum/count);
""")
}
}
| fegaras/DIQL | tests/diablo/spark/Average2.scala | Scala | apache-2.0 | 488 |
/**
* Copyright (C) 2014 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fr.relational.crud
import java.sql.Connection
import org.orbeon.oxf.fr.FormRunner
import org.orbeon.saxon.om.{NodeInfo, DocumentInfo}
import org.orbeon.scaxon.XML._
import scala.annotation.tailrec
import scala.collection.mutable
import RequestReader._
/** Helpers for creating a flat SQL view over Form Runner form data: one row per
  * document (latest version), one column per top-level leaf control. */
private object FlatView {
// Databases for which flat-view creation is implemented.
val SupportedProviders = Set("oracle", "db2", "postgresql")
// A view column: the SQL expression extracting the value, and the column name.
case class Col(extractExpression: String, colName: String)
// Fixed per-document metadata columns, prefixed so they can't clash with control columns.
val MetadataPairs =
List("document_id", "created", "last_modified_time", "last_modified_by")
.map(_.toUpperCase)
.map(col ⇒ Col(s"d.$col", s"METADATA_$col"))
val PrefixedMetadataColumns = MetadataPairs.map{case Col(_, colName) ⇒ colName}
// Identifier length cap — presumably the classic Oracle 30-character limit (see
// the Oracle naming link on xmlToSQLId); applies to the view and column names.
val MaxNameLength = 30
val TablePrefix = "ORBEON_F_"
// Create a flat relational view. See related issues:
//
// - https://github.com/orbeon/orbeon-forms/issues/1069
// - https://github.com/orbeon/orbeon-forms/issues/1571
def createFlatView(req: Request, connection: Connection): Unit = {
// View name: prefix + app/form identifiers squeezed into the identifier length limit.
val viewName = {
val appXML = xmlToSQLId(req.app)
val formXML = xmlToSQLId(req.form)
TablePrefix + joinParts(appXML, formXML, MaxNameLength - TablePrefix.length)
}
// Delete view if it exists
// - Only for DB2 and postgresql; on Oracle we can use "OR REPLACE" when creating the view.
if (Set("db2", "postgresql")(req.provider)) {
// Existence check against each database's own catalog tables.
val viewExists = {
val query = req.provider match{
case "db2" ⇒ s"""|SELECT *
                 | FROM SYSIBM.SYSVIEWS
                 | WHERE creator = (SELECT current_schema
                 | FROM SYSIBM.SYSDUMMY1)
                 | AND name = ?
                 |""".stripMargin
case "postgresql" ⇒ s"""|SELECT *
                        | FROM pg_catalog.pg_class c
                        | JOIN pg_catalog.pg_namespace n
                        | ON n.oid = c.relnamespace
                        | WHERE n.nspname = current_schema
                        | AND c.relkind = 'v'
                        | AND upper(c.relname) = ?
                        |""".stripMargin
case _ ⇒ ???
}
val ps = connection.prepareStatement(query)
ps.setString(1, viewName)
val rs = ps.executeQuery()
rs.next()
}
if (viewExists)
connection.prepareStatement(s"DROP VIEW $viewName").executeUpdate()
}
// Computer columns in the view
val cols = {
// One column per leaf control; the extraction expression is provider-specific XPath.
val userCols = extractPathsCols(xmlDocument()) map { case (path, col) ⇒
val extractFunction = req.provider match {
case "oracle" ⇒ s"extractValue(d.xml, '/*/$path')"
case "db2" ⇒ s"XMLCAST(XMLQUERY('$$XML/*/$path/text()') AS VARCHAR(4000))"
case "postgresql" ⇒ s"(xpath('/*/$path/text()', d.xml))[1]::text"
case _ ⇒ ???
}
Col(extractFunction, col)
}
MetadataPairs.iterator ++ userCols
}
// Create view
// - Generate app/form name in SQL, as Oracle doesn't allow bind variables for data definition operations.
locally {
// Inner query selects the latest non-draft revision per document; deleted rows excluded.
val query =
s"""|CREATE ${if (Set("oracle", "postgresql")(req.provider)) "OR REPLACE" else ""} VIEW $viewName AS
|SELECT ${cols map { case Col(col, name) ⇒ col + " " + name} mkString ", "}
| FROM orbeon_form_data d,
| (
| SELECT max(last_modified_time) last_modified_time,
| app, form, document_id
| FROM orbeon_form_data d
| WHERE app = '${escapeSQL(req.app)}'
| AND form = '${escapeSQL(req.form)}'
| AND draft = 'N'
| GROUP BY app, form, document_id
| ) m
| WHERE d.last_modified_time = m.last_modified_time
| AND d.app = m.app
| AND d.form = m.form
| AND d.document_id = m.document_id
| AND d.deleted = 'N'
|""".stripMargin
val ps = connection.prepareStatement(query)
ps.executeUpdate()
}
}
/** Pairs each top-level section of the form definition with its direct leaf controls
  * (controls not inside a repeat and in at most one section level). */
def collectControls(document: DocumentInfo): Iterator[(NodeInfo, NodeInfo)] = {
import FormRunner._
// Sections with no container ancestor, i.e. the top level of the form.
def topLevelSections =
document descendant (FR → "section") filter (findAncestorContainers(_).isEmpty)
// Any descendant element whose id marks it as a control.
def descendantControls(container: NodeInfo) =
container descendant * filter
(e ⇒ isIdForControl(e.id))
// Leaf = not itself a container, not repeated, nested at most one section deep.
def isDirectLeafControl(control: NodeInfo) =
! IsContainer(control) && findAncestorRepeats(control).isEmpty && findAncestorSections(control).size <= 1
for {
topLevelSection ← topLevelSections.to[Iterator]
control ← descendantControls(topLevelSection)
if isDirectLeafControl(control)
} yield
topLevelSection → control
}
/** Maps each leaf control to its (section/control XPath, unique SQL column name).
  * Metadata column names are pre-seeded into `seen` so user columns can't clash with them. */
def extractPathsCols(document: DocumentInfo): Iterator[(String, String)] = {
import FormRunner._
val seen = mutable.HashSet[String](PrefixedMetadataColumns: _*)
for {
(section, control) ← collectControls(document)
sectionName = controlNameFromId(section.id)
controlName = controlNameFromId(control.id)
path = sectionName + "/" + controlName
// Column name from the two SQL-ified parts, then de-duplicated within the view.
col = joinParts(xmlToSQLId(sectionName), xmlToSQLId(controlName), MaxNameLength)
uniqueCol = resolveDuplicate(col, MaxNameLength)(seen)
} yield
path → uniqueCol
}
/** Returns `value` itself when unused, otherwise the first numbered variant
  * (base truncated + "1", "2", …) not yet in `seen`, keeping the result within
  * `maxLength` characters. The chosen name is recorded into `seen`. */
def resolveDuplicate(value: String, maxLength: Int)(seen: mutable.HashSet[String]): String = {

  @tailrec def candidate(counter: Int): String = {
    val suffix  = counter.toString
    val attempt = value.take(maxLength - suffix.length) + suffix
    if (seen(attempt)) candidate(counter + 1) else attempt
  }

  val unique = if (seen(value)) candidate(1) else value
  seen += unique
  unique
}
// Derive an acceptable SQL identifier from an XML NCName (any input accepted):
// dashes become underscores, everything is upper-cased, characters outside
// [A-Z0-9_] are dropped, and leading/trailing underscores are trimmed.
// On Oracle names: http://docs.oracle.com/cd/E11882_01/server.112/e10592/sql_elements008.htm
def xmlToSQLId(id: String) = {
  val upperCased   = id.replace('-', '_').toUpperCase
  val alphanumeric = upperCased.replaceAll("[^A-Z0-9_]", "")
  alphanumeric.replaceAll("^_+", "").replaceAll("_+$", "")
}
// Shorten the two name parts so that, once joined by "_", they fit within `max`
// characters, keeping as many characters of each part as possible.
def fitParts(left: String, right: String, max: Int) = {
  val budget = max - 1                  // one character reserved for the separator
  val half   = budget / 2
  if (left.length + right.length <= budget)
    (left, right)
  else if (left.length > half && right.length > half)
    (left.substring(0, half), right.substring(0, half))
  else if (left.length > half)
    (left.substring(0, budget - right.length), right)
  else
    (left, right.substring(0, budget - left.length))
}
// Join the fitted halves with "_"; the result is at most `max` characters long.
def joinParts(left: String, right: String, max: Int) = {
  val (l, r) = fitParts(left, right, max)
  s"${l}_$r"
}
// Double single quotes so the value can be embedded in a SQL string literal.
def escapeSQL(s: String) = s.replace("'", "''")
}
| wesley1001/orbeon-forms | src/main/scala/org/orbeon/oxf/fr/relational/crud/FlatView.scala | Scala | lgpl-2.1 | 8,272 |
package org.jetbrains.plugins.scala.lang.resolve
import org.jetbrains.plugins.scala.failed.resolve.FailableResolveTest
/** Resolve tests for implicit conversions; one test per SCL tracker issue. */
class ImplicitConversionTest extends FailableResolveTest("implicitConversion") {
// The base class supports known-failing tests; these are expected to pass.
override protected def shouldPass = true
// NOTE(review): doTest() presumably resolves a fixture derived from the test
// method name via the "implicitConversion" suite path — confirm in base class.
def testSCL10447(): Unit = doTest()
def testSCL12098(): Unit = doTest()
def testSCL13306(): Unit = doTest()
def testSCL13859(): Unit = doTest()
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/resolve/ImplicitConversionTest.scala | Scala | apache-2.0 | 403 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.h2o
import org.apache.spark.SparkContext
import water.fvec.Vec
import water.parser.Categorical
import scala.collection.mutable
import scala.language.implicitConversions
/**
* Utilities to work with primitive types such as String, Integer, Double..
*
*/
object H2OPrimitiveTypesUtils {
/** Method used for obtaining column domains */
private[spark]
def collectColumnDomains[T](sc: SparkContext,
rdd: RDD[T],
fnames: Array[String],
ftypes: Array[Class[_]]): Array[Array[String]] = {
val res = Array.ofDim[Array[String]](fnames.length)
// Only String columns get a domain; entries for all other types stay null.
for (idx <- 0 until ftypes.length if ftypes(idx).equals(classOf[String])) {
// Collect the column's distinct values across the cluster via an accumulator.
// NOTE(review): each row is cast to String as a whole, which assumes the RDD
// holds single String values rather than multi-column rows — confirm callers.
val acc = sc.accumulableCollection(new mutable.HashSet[String]())
rdd.foreach(r => {
acc += r.asInstanceOf[String]
})
// A null domain marks "too many distinct values to treat as categorical".
res(idx) = if (acc.value.size > Categorical.MAX_ENUM_SIZE) null else acc.value.toArray.sorted
}
res
}
/** Method translating primitive types into Sparkling Water types
* This method is already prepared to handle all mentioned primitive types */
private[spark]
def dataTypeToVecType(t: Class[_], d: Array[String]): Byte = {
t match {
case q if q == classOf[java.lang.Byte] => Vec.T_NUM
case q if q == classOf[java.lang.Short] => Vec.T_NUM
case q if q == classOf[java.lang.Integer] => Vec.T_NUM
case q if q == classOf[java.lang.Long] => Vec.T_NUM
case q if q == classOf[java.lang.Float] => Vec.T_NUM
case q if q == classOf[java.lang.Double] => Vec.T_NUM
case q if q == classOf[java.lang.Boolean] => Vec.T_NUM
// Strings with a small domain become categorical (enum) columns, otherwise plain strings.
// NOTE(review): strict `<` here vs `>` (i.e. keep when `<=`) in collectColumnDomains —
// a domain of exactly MAX_ENUM_SIZE is collected but then stored as T_STR; confirm intent.
case q if q == classOf[java.lang.String] => if (d != null && d.length < water.parser.Categorical.MAX_ENUM_SIZE) {
Vec.T_ENUM
} else {
Vec.T_STR
}
case q => throw new IllegalArgumentException(s"Do not understand type $q")
}
}
}
| printedheart/sparkling-water | core/src/main/scala/org/apache/spark/h2o/H2OPrimitiveTypesUtils.scala | Scala | apache-2.0 | 2,729 |
package scalaprops
import scalaz._
import scalaz.std.anyVal._
import ScalapropsScalaz._
/** Property-based law checks for scalaz IndSeq: MonadPlus (strong), Traverse, IsEmpty. */
object IndSeqTest extends Scalaprops {
val testLaw =
Properties.list(
scalazlaws.monadPlusStrong.all[IndSeq],
scalazlaws.traverse.all[IndSeq],
scalazlaws.isEmpty.all[IndSeq]
)
}
| scalaprops/scalaprops | scalaz/src/test/scala/scalaprops/IndSeqTest.scala | Scala | mit | 296 |
package stackoverflow
import StackOverflow._
import org.scalatest.{FunSuite, BeforeAndAfterAll}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import java.net.URL
import java.nio.channels.Channels
import java.io.File
import java.io.FileOutputStream
@RunWith(classOf[JUnitRunner])
class StackOverflowSuite extends FunSuite with BeforeAndAfterAll {
// Fixture: a StackOverflow instance with the production language list and the
// k-means parameters pinned for deterministic tests.
lazy val testObject = new StackOverflow {
override val langs =
List(
"JavaScript", "Java", "PHP", "Python", "C#", "C++", "Ruby", "CSS",
"Objective-C", "Perl", "Scala", "Haskell", "MATLAB", "Clojure", "Groovy")
override def langSpread = 50000
override def kmeansKernels = 45
override def kmeansEta: Double = 20.0D
override def kmeansMaxIterations = 120
}
// Stop the shared SparkContext (in scope via StackOverflow._) after the suite.
override def afterAll(): Unit = {
sc.stop()
}
test("testObject can be instantiated") {
// Forcing the lazy val surfaces any construction-time failure.
val instantiatable = try {
testObject
true
} catch {
case _: Throwable => false
}
assert(instantiatable, "Can't instantiate a StackOverflow object")
}
// Twelve raw CSV posting lines: questions (type 1) and answers (type 2, carrying
// their parent question id), used as fixture input by the tests below.
def samplePostings(): List[String] =
  List(
    "1,27233496,,,0,C#",
    "1,5484340,,,0,C#",
    "2,5494879,,5484340,1,",
    "1,9419744,,,2,Objective-C",
    "1,9002524,,,2,,",
    "2,9003401,,9002524,4,",
    "1,9002525,,,2,C++",
    "2,9003401,,9002525,4,",
    "2,9005311,,9002525,0,",
    "1,5257894,,,1,Java",
    "1,21984912,,,0,Java",
    "2,21985273,,21984912,0,")
// Parsing: each CSV line becomes a Posting case class.
test("'rawPostings' should convert strings into 'Postings'") {
val lines = sc.parallelize(samplePostings)
val raw = rawPostings(lines)
val expectedPosting = Posting(1,27233496,None,None,0,Some("C#"))
val res = (raw.take(1)(0) == expectedPosting)
assert(res, "rawPosting given samplePostings first value does not equal expected Posting")
}
// Grouping: answers are joined to their parent question, keyed by question id.
test("'groupPostings' should create a grouped RDD of (K: posting ID, V: (question, Iterable[answers]))") {
val lines = sc.parallelize(samplePostings)
val raw = rawPostings(lines)
val grouped = groupedPostings(raw)
val sampleId = 9002525
val expectedGroupRecord = List(
(Posting(1,9002525,None,None,2,Some("C++")),
Posting(2,9003401,None,Some(9002525),4,None)),
(Posting(1,9002525,None,None,2,Some("C++")),
Posting(2,9005311,None,Some(9002525),0,None))
)
val resultGroupedRecord = grouped.filter(_._1 == sampleId).collect()(0)
val res1 = resultGroupedRecord._1 == sampleId
val res2 = resultGroupedRecord._2.toList == expectedGroupRecord
assert(res1, "result from groupedPostings did not have the correct ID")
assert(res2, "result grouping from groupedPostings was not the correct (question, answer) pairs")
}
// Scoring: each question is paired with its highest answer score.
test("'scoredPostings' should return the top score for each question") {
val lines = sc.parallelize(samplePostings)
val raw = rawPostings(lines)
val grouped = groupedPostings(raw)
val scored = scoredPostings(grouped)
val expected = Array(
(Posting(1,5484340,None,None,0,Some("C#")), 1),
(Posting(1,9002525,None,None,2,Some("C++")), 4),
(Posting(1,21984912,None,None,0,Some("Java")), 0),
(Posting(1,9002524,None,None,2,None),4)
)
val scored_results = scored.collect
scored_results.foreach {
row => {
val res = expected.contains(row)
assert(res, "scoredPostings result did not contain expected results")
}
}
val count_res = scored_results.size == expected.size
assert(count_res, "result from scoredPostings did not have the correct number of records")
}
// Vectorization: (language index * langSpread, top score) pairs for k-means input.
test("'vectorPostings' should return the vectored langauge and top score in prep for kmeans") {
val lines = sc.parallelize(samplePostings)
val raw = rawPostings(lines)
val grouped = groupedPostings(raw)
val scored = scoredPostings(grouped)
val vectors = vectorPostings(scored)
val expected = Array(
(250000,4),
(200000,1),
(50000,0)
)
val vector_results = vectors.collect
vector_results.foreach {
row => {
val res = expected.contains(row)
assert(res, "vectorPostings result did not contain expected results")
}
}
val count_res = vector_results.size == expected.size
assert(count_res, "result from vectorPostings did not have the correct number of records")
}
// Median helper: odd-length picks the middle, even-length the lower-middle element.
test("'getMedianScore' should return the median") {
val cases = List((Array(1,2,3), 2),
(Array(1,2,3,4),2))
cases.foreach(c => assert(getMedianScore(c._1) == c._2, "'getMedianScore' did not return the correct median value"))
}
}
| fdm1/coursera_spark_scala | coursera/week_2/stackoverflow/src/test/scala/stackoverflow/StackOverflowSuite.scala | Scala | apache-2.0 | 4,915 |
/*******************************************************************************
Copyright (c) 2012-2013, KAIST, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.exceptions
/**
* These are exceptions when the user has asked
* us to perform an illegal operation in the shell.
*/
class UserError(msg: String) extends Exception(msg) {
override def toString = msg
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/exceptions/UserError.scala | Scala | bsd-3-clause | 574 |
package pl.writeonly.son2.json.creators
import pl.writeonly.son2.apis.config.{ProviderType, RWTConfig}
import pl.writeonly.son2.apis.notation.PartialCreatorConfigSymbol
import pl.writeonly.son2.json.core.ConfigJson
/** Partial config creator keyed on the provider's symbol: apply(s) builds a JSON RWTConfig. */
class PCreatorConfigJson(f: ProviderType)
extends PartialCreatorConfigSymbol(f.s) {
override def apply(s: String): RWTConfig = ConfigJson(s)
}
| writeonly/son2 | scallions-impl/scallions-json/src/main/scala/pl/writeonly/son2/json/creators/PCreatorConfigJson.scala | Scala | apache-2.0 | 366 |
/*
* Copyright (c) 2012-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package common
// Java
import java.io.{PrintWriter, StringWriter}
// Joda
import org.joda.time.DateTime
// Iglu
import iglu.client.Resolver
// Scala
import scala.util.control.NonFatal
// Scalaz
import scalaz._
import Scalaz._
// This project
import adapters.AdapterRegistry
import enrichments.{EnrichmentManager, EnrichmentRegistry}
import outputs.EnrichedEvent
/**
* Expresses the end-to-end event pipeline
* supported by the Scala Common Enrich
* project.
*/
object EtlPipeline {
/**
* A helper method to take a ValidatedMaybeCanonicalInput
* and transform it into a List (possibly empty) of
* ValidatedCanonicalOutputs.
*
* We have to do some unboxing because enrichEvent
* expects a raw CanonicalInput as its argument, not
* a MaybeCanonicalInput.
*
* @param adapterRegistry Contains all of the events adapters
* @param enrichmentRegistry Contains configuration for all
* enrichments to apply
* @param etlVersion The ETL version
* @param etlTstamp The ETL timestamp
* @param input The ValidatedMaybeCanonicalInput
* @param resolver (implicit) The Iglu resolver used for
* schema lookup and validation
* @return the ValidatedMaybeCanonicalOutput. Thanks to
* flatMap, will include any validation errors
* contained within the ValidatedMaybeCanonicalInput
*/
def processEvents(
adapterRegistry: AdapterRegistry,
enrichmentRegistry: EnrichmentRegistry,
etlVersion: String,
etlTstamp: DateTime,
input: ValidatedMaybeCollectorPayload)(implicit resolver: Resolver): List[ValidatedEnrichedEvent] = {
// Collapses the nested Validated structure built below into a flat list:
// any Failure at any level becomes a single-element failure list, and a
// missing payload (Success(None)) becomes the empty list.
def flattenToList[A](v: Validated[Option[Validated[NonEmptyList[Validated[A]]]]]): List[Validated[A]] = v match {
case Success(Some(Success(nel))) => nel.toList
case Success(Some(Failure(f))) => List(f.fail)
case Failure(f) => List(f.fail)
case Success(None) => Nil
}
try {
// Each `for` layer unwraps one level: input validation, optional payload,
// adapter conversion to raw events, then per-event enrichment.
val e: Validated[Option[Validated[NonEmptyList[ValidatedEnrichedEvent]]]] =
for {
maybePayload <- input
} yield
for {
payload <- maybePayload
} yield
for {
events <- adapterRegistry.toRawEvents(payload)
} yield
for {
event <- events
enriched = EnrichmentManager.enrichEvent(enrichmentRegistry, etlVersion, etlTstamp, event)
} yield enriched
flattenToList[EnrichedEvent](e)
} catch {
// Defensive: any unexpected non-fatal exception becomes a single failed
// event carrying the full stack trace (fatal errors still propagate).
case NonFatal(nf) => {
val errorWriter = new StringWriter
nf.printStackTrace(new PrintWriter(errorWriter))
List(s"Unexpected error processing events: $errorWriter".failNel)
}
}
}
}
| RetentionGrid/snowplow | 3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/EtlPipeline.scala | Scala | apache-2.0 | 3,514 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.submit
import java.io.File
import io.fabric8.kubernetes.client.KubernetesClient
import org.apache.spark.deploy.k8s._
import org.apache.spark.deploy.k8s.features._
private[spark] class KubernetesDriverBuilder {

  /** Builds the complete driver spec by threading an initial pod through every
   * configured feature step, accumulating extra Kubernetes resources and system
   * properties along the way.
   *
   * @param conf the driver-side Kubernetes configuration
   * @param client client used to resolve a user-supplied pod template, if any
   * @return the fully configured [[KubernetesDriverSpec]]
   */
  def buildFromFeatures(
      conf: KubernetesDriverConf,
      client: KubernetesClient): KubernetesDriverSpec = {
    // Start from the user-provided pod template when one is configured,
    // otherwise from an empty default pod.
    val templatePod = conf.get(Config.KUBERNETES_DRIVER_PODTEMPLATE_FILE) match {
      case Some(templateFile) =>
        KubernetesUtils.loadPodFromTemplate(
          client,
          new File(templateFile),
          conf.get(Config.KUBERNETES_DRIVER_PODTEMPLATE_CONTAINER_NAME))
      case None =>
        SparkPod.initialPod()
    }

    val featureSteps = Seq(
      new BasicDriverFeatureStep(conf),
      new DriverKubernetesCredentialsFeatureStep(conf),
      new DriverServiceFeatureStep(conf),
      new MountSecretsFeatureStep(conf),
      new EnvSecretsFeatureStep(conf),
      new LocalDirsFeatureStep(conf),
      new MountVolumesFeatureStep(conf),
      new DriverCommandFeatureStep(conf),
      new HadoopConfDriverFeatureStep(conf),
      new KerberosConfDriverFeatureStep(conf),
      new PodTemplateConfigMapStep(conf))

    val initialSpec = KubernetesDriverSpec(
      templatePod,
      driverKubernetesResources = Seq.empty,
      conf.sparkConf.getAll.toMap)

    // Each step may mutate the pod definition and contribute additional
    // resources/properties; fold them in order into the accumulated spec.
    featureSteps.foldLeft(initialSpec) { (accSpec, step) =>
      val podWithFeature = step.configurePod(accSpec.pod)
      val extraProps = step.getAdditionalPodSystemProperties()
      val extraResources = step.getAdditionalKubernetesResources()
      KubernetesDriverSpec(
        podWithFeature,
        accSpec.driverKubernetesResources ++ extraResources,
        accSpec.systemProperties ++ extraProps)
    }
  }
}
| aosagie/spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesDriverBuilder.scala | Scala | apache-2.0 | 2,544 |
/* Copyright 2009-2016 EPFL, Lausanne */
import leon.lang._
import leon.lang.xlang._
object Epsilon5 {
  // Verification fixture: `epsilon` nondeterministically picks some z with x < z < y.
  // The postcondition `_ >= x` follows from z > x whenever such a z exists, so the
  // verifier should prove this valid.
  def foo(x: Int, y: Int): Int = {
    epsilon((z: Int) => z > x && z < y)
  } ensuring(_ >= x)
}
| epfl-lara/leon | src/test/resources/regression/verification/xlang/valid/Epsilon5.scala | Scala | gpl-3.0 | 205 |
package slick.dbio
import org.reactivestreams.Subscription
import scala.collection.mutable.ArrayBuffer
import scala.language.higherKinds
import scala.collection.generic.{CanBuild, CanBuildFrom}
import scala.collection.mutable
import scala.concurrent.{ExecutionContext, Future}
import slick.SlickException
import slick.basic.BasicBackend
import slick.util.{DumpInfo, Dumpable, ignoreFollowOnError}
import scala.util.{Try, Failure, Success}
import scala.util.control.NonFatal
/** A Database I/O Action that can be executed on a database. The DBIOAction type allows a
* separation of execution logic and resource usage management logic from composition logic.
* DBIOActions can be composed with methods such as `andThen`, `andFinally` and `flatMap`.
* Individual parts of a composite DBIOAction are always executed serially on a single database,
* but possibly in different database sessions, unless the session is pinned either explicitly
* (using `withPinnedSession`) or implicitly (e.g. through a transaction).
*
* The actual implementation base type for all Actions is `DBIOAction`. `StreamingDBIO` and
* `DBIO` are type aliases which discard the effect type (and the streaming result type in the
* latter case) to make DBIOAction types easier to write when these features are not needed. All
* primitive DBIOActions and all DBIOActions produced by the standard combinators in Slick have
* correct Effect types and are streaming (if possible).
*
* @tparam R The result type when executing the DBIOAction and fully materializing the result.
* @tparam S An encoding of the result type for streaming results. If this action is capable of
* streaming, it is `Streaming[T]` for an element type `T`. For non-streaming
* DBIOActions it is `NoStream`.
* @tparam E The DBIOAction's effect type, e.g. `Effect.Read with Effect.Write`. When composing
* actions, the correct combined effect type will be inferred. Effects can be used in
* user code, e.g. to automatically direct all read-only Actions to a slave database
* and write Actions to the master copy.
*/
sealed trait DBIOAction[+R, +S <: NoStream, -E <: Effect] extends Dumpable {
  /** Transform the result of a successful execution of this action. If this action fails, the
    * resulting action also fails. */
  def map[R2](f: R => R2)(implicit executor: ExecutionContext): DBIOAction[R2, NoStream, E] =
    flatMap[R2, NoStream, E](r => SuccessAction[R2](f(r)))

  /** Use the result produced by the successful execution of this action to compute and then
    * run the next action in sequence. The resulting action fails if either this action, the
    * computation, or the computed action fails. */
  def flatMap[R2, S2 <: NoStream, E2 <: Effect](f: R => DBIOAction[R2, S2, E2])(implicit executor: ExecutionContext): DBIOAction[R2, S2, E with E2] =
    FlatMapAction[R2, S2, R, E with E2](this, f, executor)

  /** Creates a new DBIOAction with one level of nesting flattened, this method is equivalent
    * to `flatMap(identity)`. */
  def flatten[R2, S2 <: NoStream, E2 <: Effect](implicit ev : R <:< DBIOAction[R2,S2,E2]) = flatMap(ev)(DBIO.sameThreadExecutionContext)

  /** Run another action after this action, if it completed successfully, and return the result
    * of the second action. If either of the two actions fails, the resulting action also fails. */
  // Construction-time fusion: appending to an existing AndThenAction flattens the chain
  // instead of building a nested tree.
  def andThen[R2, S2 <: NoStream, E2 <: Effect](a: DBIOAction[R2, S2, E2]): DBIOAction[R2, S2, E with E2] = a match {
    case AndThenAction(as2) => AndThenAction[R2, S2, E with E2](this +: as2)
    case a => AndThenAction[R2, S2, E with E2](Vector(this, a))
  }

  /** Run another action after this action, if it completed successfully, and return the result
    * of both actions. If either of the two actions fails, the resulting action also fails. */
  // Implemented via SequenceAction over the erased element type, then unpacked into a pair.
  def zip[R2, E2 <: Effect](a: DBIOAction[R2, NoStream, E2]): DBIOAction[(R, R2), NoStream, E with E2] =
    SequenceAction[Any, ArrayBuffer[Any], E with E2](Vector(this, a)).map { r =>
      (r(0).asInstanceOf[R], r(1).asInstanceOf[R2])
    } (DBIO.sameThreadExecutionContext)

  /** Run another action after this action, if it completed successfully, and zip the result
    * of both actions with a function `f`, then create a new DBIOAction holding this result,
    * If either of the two actions fails, the resulting action also fails. */
  def zipWith[R2, E2 <: Effect,R3](a: DBIOAction[R2, NoStream, E2])(f:(R,R2) =>R3)(implicit executor: ExecutionContext): DBIOAction[R3, NoStream, E with E2] =
    SequenceAction[Any, ArrayBuffer[Any], E with E2](Vector(this, a)).map { r =>
      f(r(0).asInstanceOf[R], r(1).asInstanceOf[R2])
    } (executor)

  /** Run another action after this action, whether it succeeds or fails, and then return the
    * result of the first action. If the first action fails, its failure is propagated, whether
    * the second action fails or succeeds. If the first action succeeds, a failure of the second
    * action is propagated. */
  def andFinally[E2 <: Effect](a: DBIOAction[_, NoStream, E2]): DBIOAction[R, S, E with E2] =
    cleanUp[E2](_ => a)(DBIO.sameThreadExecutionContext)

  /** Run another action after this action, whether it succeeds or fails, in order to clean up or
    * transform an error produced by this action. The clean-up action is computed from the failure
    * of this action, wrapped in `Some`, or `None` if this action succeeded.
    *
    * @param keepFailure If this action returns successfully, the resulting action also returns
    *                    successfully unless the clean-up action fails. If this action fails and
    *                    `keepFailure` is set to `true` (the default), the resulting action fails
    *                    with the same error, no matter whether the clean-up action succeeds or
    *                    fails. If `keepFailure` is set to `false`, an error from the clean-up
    *                    action will override the error from this action. */
  def cleanUp[E2 <: Effect](f: Option[Throwable] => DBIOAction[_, NoStream, E2], keepFailure: Boolean = true)(implicit executor: ExecutionContext): DBIOAction[R, S, E with E2] =
    CleanUpAction[R, S, E with E2](this, f, keepFailure, executor)

  /** A shortcut for `andThen`. */
  final def >> [R2, S2 <: NoStream, E2 <: Effect](a: DBIOAction[R2, S2, E2]): DBIOAction[R2, S2, E with E2] =
    andThen[R2, S2, E2](a)

  /** Filter the result of this action with the given predicate. If the predicate matches, the
    * original result is returned, otherwise the resulting action fails with a
    * NoSuchElementException. */
  final def filter(p: R => Boolean)(implicit executor: ExecutionContext): DBIOAction[R, NoStream, E] =
    withFilter(p)

  // Enables guards in for-comprehensions over DBIOAction.
  def withFilter(p: R => Boolean)(implicit executor: ExecutionContext): DBIOAction[R, NoStream, E] =
    flatMap(v => if(p(v)) SuccessAction(v) else throw new NoSuchElementException("Action.withFilter failed"))

  /** Transform the result of a successful execution of this action, if the given partial function is defined at that value,
    * otherwise, the result DBIOAction will fail with a `NoSuchElementException`.
    *
    * If this action fails, the resulting action also fails. */
  def collect[R2](pf: PartialFunction[R,R2])(implicit executor: ExecutionContext): DBIOAction[R2, NoStream, E] =
    map(r1 => pf.applyOrElse(r1,(r:R) => throw new NoSuchElementException(s"DBIOAction.collect partial function is not defined at: $r")))

  /** Return an action which contains the Throwable with which this action failed as its result.
    * If this action succeeded, the resulting action fails with a NoSuchElementException. */
  def failed: DBIOAction[Throwable, NoStream, E] = FailedAction[E](this)

  /** Convert a successful result `v` of this action into a successful result `Success(v)` and a
    * failure `t` into a successful result `Failure(t)`. This is the most generic combinator that
    * can be used for error recovery. If possible, use [[andFinally]] or [[cleanUp]] instead,
    * because those combinators, unlike `asTry`, support streaming. */
  def asTry: DBIOAction[Try[R], NoStream, E] = AsTryAction[R, E](this)

  /** Use a pinned database session when running this action. If it is composed of multiple
    * database actions, they will all use the same session, even when sequenced with non-database
    * actions. For non-composite or non-database actions, this has no effect. */
  // Pin is guaranteed to be balanced by Unpin via andFinally, even on failure.
  def withPinnedSession: DBIOAction[R, S, E] = DBIO.Pin andThen this andFinally DBIO.Unpin

  /** Get a wrapping action which has a name that will be included in log output. */
  def named(name: String): DBIOAction[R, S, E] =
    NamedAction[R, S, E](this, name)

  /** Get the equivalent non-fused action if this action has been fused, otherwise this
    * action is returned. */
  def nonFusedEquivalentAction: DBIOAction[R, S, E] = this

  /** Whether or not this action should be included in log output by default. */
  def isLogged: Boolean = false
}
object DBIOAction {
  /** Convert a `Future` to a [[DBIOAction]]. */
  def from[R](f: Future[R]): DBIOAction[R, NoStream, Effect] = FutureAction[R](f)

  /** Lift a constant value to a [[DBIOAction]]. */
  def successful[R](v: R): DBIOAction[R, NoStream, Effect] = SuccessAction[R](v)

  /** Create a [[DBIOAction]] that always fails. */
  def failed(t: Throwable): DBIOAction[Nothing, NoStream, Effect] = FailureAction(t)

  /** Partition a sequence of actions into order-preserving runs of consecutive synchronous
    * and consecutive asynchronous actions. Runs of synchronous actions can then be fused
    * into a single action that executes on one database context. */
  private[this] def groupBySynchronicity[R, E <: Effect](in: TraversableOnce[DBIOAction[R, NoStream, E]]): Vector[Vector[DBIOAction[R, NoStream, E]]] = {
    var state = 0 // no current = 0, sync = 1, async = 2
    var current: mutable.Builder[DBIOAction[R, NoStream, E], Vector[DBIOAction[R, NoStream, E]]] = null
    val total = Vector.newBuilder[Vector[DBIOAction[R, NoStream, E]]]
    (in: TraversableOnce[Any]).foreach { a =>
      val msgState = if(a.isInstanceOf[SynchronousDatabaseAction[_, _, _, _]]) 1 else 2
      if(msgState != state) {
        // Synchronicity changed: flush the open group and start a new one.
        if(state != 0) total += current.result()
        current = Vector.newBuilder
        state = msgState
      }
      current += a.asInstanceOf[DBIOAction[R, NoStream, E]]
    }
    if(state != 0) total += current.result() // flush the last open group
    total.result()
  }

  /** Transform a `Option[ DBIO[R] ]` into a `DBIO[ Option[R] ]`. */
  def sequenceOption[R, E <: Effect](in: Option[DBIOAction[R, NoStream, E]]): DBIOAction[Option[R], NoStream, E] = {
    implicit val ec = DBIO.sameThreadExecutionContext
    sequence(in.toList).map(_.headOption)
  }

  /** Transform a `TraversableOnce[ DBIO[R] ]` into a `DBIO[ TraversableOnce[R] ]`. */
  def sequence[R, M[+_] <: TraversableOnce[_], E <: Effect](in: M[DBIOAction[R, NoStream, E]])(implicit cbf: CanBuildFrom[M[DBIOAction[R, NoStream, E]], R, M[R]]): DBIOAction[M[R], NoStream, E] = {
    implicit val ec = DBIO.sameThreadExecutionContext
    // Runs one group and builds the final collection type M directly (single-group case).
    def sequenceGroupAsM(g: Vector[DBIOAction[R, NoStream, E]]): DBIOAction[M[R], NoStream, E] = {
      if(g.head.isInstanceOf[SynchronousDatabaseAction[_, _, _, _]]) { // fuse synchronous group
        new SynchronousDatabaseAction.Fused[M[R], NoStream, BasicBackend, E] {
          def run(context: BasicBackend#Context) = {
            val b = cbf()
            g.foreach(a => b += a.asInstanceOf[SynchronousDatabaseAction[R, NoStream, BasicBackend, E]].run(context))
            b.result()
          }
          override def nonFusedEquivalentAction = SequenceAction[R, M[R], E](g)
        }
      } else SequenceAction[R, M[R], E](g)
    }
    // Runs one group and builds an intermediate Seq; groups are concatenated afterwards
    // (multi-group case).
    def sequenceGroupAsSeq(g: Vector[DBIOAction[R, NoStream, E]]): DBIOAction[Seq[R], NoStream, E] = {
      if(g.length == 1) {
        if(g.head.isInstanceOf[SynchronousDatabaseAction[_, _, _, _]]) { // fuse synchronous group
          new SynchronousDatabaseAction.Fused[Seq[R], NoStream, BasicBackend, E] {
            def run(context: BasicBackend#Context) =
              g.head.asInstanceOf[SynchronousDatabaseAction[R, NoStream, BasicBackend, E]].run(context) :: Nil
            override def nonFusedEquivalentAction = g.head.map(_ :: Nil)
          }
        } else g.head.map(_ :: Nil)
      } else {
        if(g.head.isInstanceOf[SynchronousDatabaseAction[_, _, _, _]]) { // fuse synchronous group
          new SynchronousDatabaseAction.Fused[Seq[R], NoStream, BasicBackend, E] {
            def run(context: BasicBackend#Context) = {
              val b = new ArrayBuffer[R](g.length)
              g.foreach(a => b += a.asInstanceOf[SynchronousDatabaseAction[R, NoStream, BasicBackend, E]].run(context))
              b
            }
            override def nonFusedEquivalentAction = SequenceAction[R, Seq[R], E](g)
          }
        } else SequenceAction[R, Seq[R], E](g)
      }
    }
    val grouped = groupBySynchronicity[R, E](in.asInstanceOf[TraversableOnce[DBIOAction[R, NoStream, E]]])
    grouped.length match {
      case 0 => DBIO.successful(cbf().result())
      case 1 => sequenceGroupAsM(grouped.head)
      case n =>
        // Multiple groups: accumulate into a builder, materializing the final M at the end.
        grouped.foldLeft(DBIO.successful(cbf(in)): DBIOAction[mutable.Builder[R, M[R]], NoStream, E]) { (ar, g) =>
          for (r <- ar; ge <- sequenceGroupAsSeq(g)) yield r ++= ge
        } map (_.result)
    }
  }

  /** A simpler version of `sequence` that takes a number of DBIOActions with any return type as
    * varargs and returns a DBIOAction that performs the individual actions in sequence, returning
    * `()` in the end. */
  def seq[E <: Effect](actions: DBIOAction[_, NoStream, E]*): DBIOAction[Unit, NoStream, E] = {
    // Turns one group into a single action; forceUnit makes the last group end in `()`.
    def sequenceGroup(g: Vector[DBIOAction[Any, NoStream, E]], forceUnit: Boolean): DBIOAction[Any, NoStream, E] = {
      if(g.length == 1 && !forceUnit) g.head
      else if(g.head.isInstanceOf[SynchronousDatabaseAction[_, _, _, _]]) sequenceSync(g)
      else if(forceUnit) AndThenAction[Any, NoStream, E](g :+ DBIO.successful(()))
      else AndThenAction[Any, NoStream, E](g)
    }
    // Fuses a synchronous group into one action that discards all results.
    def sequenceSync(g: Vector[DBIOAction[Any, NoStream, E]]): DBIOAction[Unit, NoStream, E] = {
      new SynchronousDatabaseAction.Fused[Unit, NoStream, BasicBackend, E] {
        def run(context: BasicBackend#Context) = {
          g.foreach(_.asInstanceOf[SynchronousDatabaseAction[Any, NoStream, BasicBackend, E]].run(context))
        }
        override def nonFusedEquivalentAction = AndThenAction[Unit, NoStream, E](g)
      }
    }
    if(actions.isEmpty) DBIO.successful(()) else {
      // The appended `successful(())` guarantees the overall result is Unit.
      val grouped = groupBySynchronicity[Any, E](actions :+ DBIO.successful(()))
      grouped.length match {
        case 1 => sequenceGroup(grouped.head, true).asInstanceOf[DBIOAction[Unit, NoStream, E]]
        case n =>
          val last = grouped.length - 1
          val as = grouped.iterator.zipWithIndex.map { case (g, i) => sequenceGroup(g, i == last) }.toVector
          AndThenAction[Unit, NoStream, E](as)
      }
    }
  }

  /** Create a DBIOAction that runs some other actions in sequence and combines their results
    * with the given function. */
  def fold[T, E <: Effect](actions: Seq[DBIOAction[T, NoStream, E]], zero: T)(f: (T, T) => T)(implicit ec: ExecutionContext): DBIOAction[T, NoStream, E] =
    actions.foldLeft[DBIOAction[T, NoStream, E]](DBIO.successful(zero)) { (za, va) => za.flatMap(z => va.map(v => f(z, v))) }

  /** A DBIOAction that pins the current session */
  private[slick] object Pin extends SynchronousDatabaseAction[Unit, NoStream, BasicBackend, Effect] {
    def run(context: BasicBackend#Context): Unit = context.pin
    def getDumpInfo = DumpInfo(name = "SynchronousDatabaseAction.Pin")
  }

  /** A DBIOAction that unpins the current session */
  private[slick] object Unpin extends SynchronousDatabaseAction[Unit, NoStream, BasicBackend, Effect] {
    def run(context: BasicBackend#Context): Unit = context.unpin
    def getDumpInfo = DumpInfo(name = "SynchronousDatabaseAction.Unpin")
  }

  /** An ExecutionContext used internally for executing plumbing operations during DBIOAction
    * composition. */
  // Trampolining EC: runs tasks on the calling thread without growing the stack, by
  // queueing re-entrant submissions in a per-thread list.
  private[slick] object sameThreadExecutionContext extends ExecutionContext {
    // Pending Runnables scheduled while a trampoline is active on this thread;
    // null when no trampoline is running.
    private[this] val trampoline = new ThreadLocal[List[Runnable]]
    private[this] def runTrampoline(first: Runnable): Unit = {
      trampoline.set(Nil)
      try {
        var err: Throwable = null
        var r = first
        while(r ne null) {
          try r.run() catch { case t: Throwable => err = t }
          trampoline.get() match {
            case r2 :: rest =>
              trampoline.set(rest)
              r = r2
            case _ => r = null
          }
        }
        if(err ne null) throw err // rethrow the last failure after draining the queue
      } finally trampoline.set(null)
    }
    override def execute(runnable: Runnable): Unit = trampoline.get() match {
      case null => runTrampoline(runnable) // not running yet: start a new trampoline
      case r => trampoline.set(runnable :: r) // already running: enqueue on the active one
    }
    override def reportFailure(t: Throwable): Unit = throw t
  }
}
/** A DBIOAction that represents a database operation. Concrete implementations are backend-specific. */
trait DatabaseAction[+R, +S <: NoStream, -E <: Effect] extends DBIOAction[R, S, E] {
  override def isLogged = true // primitive database operations always show up in log output
}
/** A DBIOAction that returns a constant value. */
case class SuccessAction[+R](value: R) extends SynchronousDatabaseAction[R, NoStream, BasicBackend, Effect] {
  def getDumpInfo = DumpInfo("success", String.valueOf(value))
  def run(ctx: BasicBackend#Context): R = value // ignores the context: the value is already computed
}
/** A DBIOAction that fails. */
case class FailureAction(t: Throwable) extends SynchronousDatabaseAction[Nothing, NoStream, BasicBackend, Effect] {
  def getDumpInfo = DumpInfo("failure", String.valueOf(t))
  def run(ctx: BasicBackend#Context): Nothing = throw t // always rethrows the stored Throwable
}
/** An asynchronous DBIOAction that returns the result of a Future. */
case class FutureAction[+R](f: Future[R]) extends DBIOAction[R, NoStream, Effect] {
  def getDumpInfo = DumpInfo("future", String.valueOf(f))
  override def isLogged = true // a primitive (asynchronous) action, so it is logged by default
}
/** A DBIOAction that represents a `flatMap` operation for sequencing in the DBIOAction monad. */
// `executor` is carried along so the continuation `f` runs on the user-chosen ExecutionContext.
case class FlatMapAction[+R, +S <: NoStream, P, -E <: Effect](base: DBIOAction[P, NoStream, E], f: P => DBIOAction[R, S, E], executor: ExecutionContext) extends DBIOAction[R, S, E] {
  def getDumpInfo = DumpInfo("flatMap", String.valueOf(f), children = Vector(("base", base)))
}
/** A DBIOAction that represents a `seq` or `andThen` operation for sequencing in the DBIOAction
* monad. Unlike `SequenceAction` it only keeps the last result. */
case class AndThenAction[R, +S <: NoStream, -E <: Effect](as: IndexedSeq[DBIOAction[Any, NoStream, E]]) extends DBIOAction[R, S, E] {
  def getDumpInfo = DumpInfo("andThen", children = as.zipWithIndex.map { case (a, i) => (String.valueOf(i+1), a) })

  // Construction-time fusion: appending either a single action or another AndThenAction
  // keeps the chain flat instead of nesting.
  override def andThen[R2, S2 <: NoStream, E2 <: Effect](a: DBIOAction[R2, S2, E2]): DBIOAction[R2, S2, E with E2] = a match {
    case AndThenAction(as2) => AndThenAction[R2, S2, E with E2](as ++ as2)
    case a => AndThenAction[R2, S2, E with E2](as :+ a)
  }
}
/** A DBIOAction that represents a `sequence` or operation for sequencing in the DBIOAction monad. */
// Keeps all results, built into collection type R2 via the captured CanBuild.
case class SequenceAction[R, +R2, -E <: Effect](as: IndexedSeq[DBIOAction[R, NoStream, E]])(implicit val cbf: CanBuild[R, R2]) extends DBIOAction[R2, NoStream, E] {
  def getDumpInfo = DumpInfo("sequence", children = as.zipWithIndex.map { case (a, i) => (String.valueOf(i+1), a) })
}
/** A DBIOAction that represents a `cleanUp` operation for sequencing in the DBIOAction monad. */
// `f` receives Some(failure) or None (success); see DBIOAction.cleanUp for keepFailure semantics.
case class CleanUpAction[+R, +S <: NoStream, -E <: Effect](base: DBIOAction[R, S, E], f: Option[Throwable] => DBIOAction[_, NoStream, E], keepFailure: Boolean, executor: ExecutionContext) extends DBIOAction[R, S, E] {
  def getDumpInfo = DumpInfo("cleanUp", children = Vector(("try", base)))
}
/** A DBIOAction that represents a `failed` operation. */
// Inverts success/failure: succeeds with the Throwable from a failed base action.
case class FailedAction[-E <: Effect](a: DBIOAction[_, NoStream, E]) extends DBIOAction[Throwable, NoStream, E] {
  def getDumpInfo = DumpInfo("failed", children = Vector(("base", a)))
}
/** A DBIOAction that represents an `asTry` operation. */
// Materializes the base action's outcome as a Try, so the resulting action always succeeds.
case class AsTryAction[+R, -E <: Effect](a: DBIOAction[R, NoStream, E]) extends DBIOAction[Try[R], NoStream, E] {
  def getDumpInfo = DumpInfo("asTry")
}
/** A DBIOAction that attaches a name for logging purposes to another action. */
case class NamedAction[+R, +S <: NoStream, -E <: Effect](a: DBIOAction[R, S, E], name: String) extends DBIOAction[R, S, E] {
  def getDumpInfo = DumpInfo("named", mainInfo = DumpInfo.highlight(name))
  override def isLogged = true // named actions are always logged, that is their purpose
}
/** The base trait for the context object passed to synchronous database actions by the execution
* engine. */
trait ActionContext {
  // Number of outstanding `pin` calls not yet balanced by `unpin`.
  private[this] var pinCount = 0

  /** Check if the session is pinned. May only be called from a synchronous action context. */
  final def isPinned: Boolean = pinCount > 0

  /** Pin the current session. Multiple calls to `pin` may be nested. The same number of calls
    * to `unpin` is required in order to mark the session as not pinned anymore. A pinned
    * session will not be released at the end of a primitive database action. Instead, the same
    * pinned session is passed to all subsequent actions until it is unpinned. Note that pinning
    * does not force an actual database connection to be opened. This still happens on demand.
    * May only be called from a synchronous action context. */
  final def pin: Unit = pinCount += 1

  /** Unpin this session once. May only be called from a synchronous action context. */
  final def unpin: Unit = pinCount -= 1
}
/** An ActionContext with extra functionality required for streaming DBIOActions. */
trait StreamingActionContext extends ActionContext {
  /** Emit a single result of the stream. Any Exception thrown by this method should be passed on
    * to the caller. */
  // Explicit `: Unit` result type: the original relied on deprecated procedure syntax,
  // which is removed in Scala 3 and warns under -deprecation in Scala 2.13.
  def emit(v: Any): Unit

  /** Get the Subscription for this stream. */
  def subscription: Subscription
}
/** A synchronous database action provides a function from an `ActionContext` to the result
* type. `BasicBackend.DatabaseDef.run` supports this kind of action out of the box
* through `BasicBackend.DatabaseDef.runSynchronousDatabaseAction` so that `run` does not
* need to be extended if all primitive database actions can be expressed in this way. These
* actions also implement construction-time fusion for the `andFinally`, `andThen`, `asTry`,
* `failed`, `withPinnedSession` and `zip` operations.
*
* The execution engine ensures that an [[ActionContext]] is never used concurrently and that
* all state changes performed by one invocation of a SynchronousDatabaseAction are visible
* to the next invocation of the same or a different SynchronousDatabaseAction. */
trait SynchronousDatabaseAction[+R, +S <: NoStream, -B <: BasicBackend, -E <: Effect] extends DatabaseAction[R, S, E] { self =>
  /** The type used by this action for the state of a suspended stream. A call to `emitStream`
    * produces such a state which is then fed back into the next call. */
  type StreamState >: Null <: AnyRef

  /** Run this action synchronously and produce a result, or throw an Exception to indicate a
    * failure. */
  def run(context: B#Context): R

  /** Run this action synchronously and emit results to the context. This methods may throw an
    * Exception to indicate a failure.
    *
    * @param limit The maximum number of results to emit, or Long.MaxValue for no limit.
    * @param state The state returned by a previous invocation of this method, or `null` if
    *              a new stream should be produced.
    * @return A stream state if there are potentially more results available, or null if the
    *         stream is finished. */
  def emitStream(context: B#StreamingContext, limit: Long, state: StreamState): StreamState =
    throw new SlickException("Internal error: Streaming is not supported by this Action")

  /** Dispose of a `StreamState` when a streaming action is cancelled. Whenever `emitStream`
    * returns `null` or throws an Exception, it needs to dispose of the state itself. This
    * method will not be called in these cases. */
  def cancelStream(context: B#StreamingContext, state: StreamState): Unit = ()

  /** Whether or not this action supports streaming results. An action with a `Streaming` result
    * type must either support streaming directly or have a [[nonFusedEquivalentAction]] which
    * supports streaming. This flag is not used if the Action has a `NoStream` result type. */
  def supportsStreaming: Boolean = true

  // Construction-time fusion: chaining two synchronous actions yields a single fused action
  // that runs both on the same database context, avoiding scheduling overhead.
  override def andThen[R2, S2 <: NoStream, E2 <: Effect](a: DBIOAction[R2, S2, E2]): DBIOAction[R2, S2, E with E2] = a match {
    case a: SynchronousDatabaseAction.FusedAndThenAction[_, _, _, _] =>
      new SynchronousDatabaseAction.FusedAndThenAction[R2, S2, B, E with E2](
        self.asInstanceOf[SynchronousDatabaseAction[Any, S2, B, E with E2]] +:
        a.as.asInstanceOf[IndexedSeq[SynchronousDatabaseAction[Any, S2, B, E with E2]]])
    case a: SynchronousDatabaseAction[_, _, _, _] =>
      new SynchronousDatabaseAction.FusedAndThenAction[R2, S2, B, E with E2](
        Vector(self.asInstanceOf[SynchronousDatabaseAction[Any, S2, B, E with E2]],
          a.asInstanceOf[SynchronousDatabaseAction[Any, S2, B, E with E2]]))
    case a => super.andThen[R2, S2, E2](a)
  }

  // `super.zip` is captured in a private method because `super` cannot be referenced
  // from inside the anonymous Fused subclass below. Same pattern for the other combinators.
  private[this] def superZip[R2, E2 <: Effect](a: DBIOAction[R2, NoStream, E2]) = super.zip[R2, E2](a)
  override def zip[R2, E2 <: Effect](a: DBIOAction[R2, NoStream, E2]): DBIOAction[(R, R2), NoStream, E with E2] = a match {
    case a: SynchronousDatabaseAction[_, _, _, _] => new SynchronousDatabaseAction.Fused[(R, R2), NoStream, B, E with E2] {
      def run(context: B#Context): (R, R2) = {
        val r1 = self.run(context)
        val r2 = a.asInstanceOf[SynchronousDatabaseAction[R2, NoStream, B, E2]].run(context)
        (r1, r2)
      }
      override def nonFusedEquivalentAction: DBIOAction[(R, R2), NoStream, E with E2] = superZip(a)
    }
    case a => superZip(a)
  }

  private[this] def superAndFinally[E2 <: Effect](a: DBIOAction[_, NoStream, E2]) = super.andFinally[E2](a)
  override def andFinally[E2 <: Effect](a: DBIOAction[_, NoStream, E2]): DBIOAction[R, S, E with E2] = a match {
    case a: SynchronousDatabaseAction[_, _, _, _] => new SynchronousDatabaseAction.Fused[R, S, B, E with E2] {
      def run(context: B#Context): R = {
        val res = try self.run(context) catch {
          case NonFatal(ex) =>
            // Run the cleanup action but keep the original failure.
            try a.asInstanceOf[SynchronousDatabaseAction[Any, NoStream, B, E2]].run(context) catch ignoreFollowOnError
            throw ex
        }
        a.asInstanceOf[SynchronousDatabaseAction[Any, S, B, E2]].run(context)
        res
      }
      override def nonFusedEquivalentAction: DBIOAction[R, S, E with E2] = superAndFinally(a)
    }
    case a => superAndFinally(a)
  }

  private[this] def superWithPinnedSession = super.withPinnedSession
  override def withPinnedSession: DBIOAction[R, S, E] = new SynchronousDatabaseAction.Fused[R, S, B, E] {
    def run(context: B#Context): R = {
      context.pin
      val res = try self.run(context) catch {
        case NonFatal(ex) =>
          context.unpin // balance the pin even on failure
          throw ex
      }
      context.unpin
      res
    }
    override def nonFusedEquivalentAction = superWithPinnedSession
  }

  private[this] def superFailed: DBIOAction[Throwable, NoStream, E] = super.failed
  override def failed: DBIOAction[Throwable, NoStream, E] = new SynchronousDatabaseAction.Fused[Throwable, NoStream, B, E] {
    def run(context: B#Context): Throwable = {
      var ok = false
      try {
        self.run(context)
        ok = true
        // Base action succeeded: `failed` must fail. The guard below lets this through.
        throw new NoSuchElementException("Action.failed (fused) not completed with a Throwable")
      } catch {
        case NonFatal(ex) if !ok => ex
      }
    }
    override def nonFusedEquivalentAction = superFailed
  }

  private[this] def superAsTry: DBIOAction[Try[R], NoStream, E] = super.asTry
  override def asTry: DBIOAction[Try[R], NoStream, E] = new SynchronousDatabaseAction.Fused[Try[R], NoStream, B, E] {
    def run(context: B#Context): Try[R] = {
      try Success(self.run(context)) catch {
        case NonFatal(ex) => Failure(ex)
      }
    }
    override def nonFusedEquivalentAction = superAsTry
  }
}
object SynchronousDatabaseAction {
  /** A fused SynchronousDatabaseAction */
  trait Fused[+R, +S <: NoStream, B <: BasicBackend, -E <: Effect] extends SynchronousDatabaseAction[R, S, B, E] {
    def getDumpInfo = DumpInfo(name = "SynchronousDatabaseAction.Fused", children = Vector(("non-fused", nonFusedEquivalentAction)))
    override def supportsStreaming: Boolean = false // fused actions only produce fully materialized results
  }

  // A flat chain of synchronous actions run back-to-back on the same context,
  // returning only the last result.
  class FusedAndThenAction[+R, +S <: NoStream, B <: BasicBackend, -E <: Effect](val as: IndexedSeq[SynchronousDatabaseAction[Any, S, B, E]]) extends Fused[R, S, B, E] {
    def run(context: B#Context): R = {
      var res: Any = null
      as.foreach(a => res = a.run(context)) // run all, keep only the final result
      res.asInstanceOf[R]
    }
    override def nonFusedEquivalentAction: DBIOAction[R, S, E] = AndThenAction[R, S, E](as)
    // Keeps the chain flat when further synchronous actions are appended.
    override def andThen[R2, S2 <: NoStream, E2 <: Effect](a: DBIOAction[R2, S2, E2]): DBIOAction[R2, S2, E with E2] = a match {
      case a: SynchronousDatabaseAction.FusedAndThenAction[_, _, _, _] =>
        new SynchronousDatabaseAction.FusedAndThenAction[R2, S2, B, E with E2](
          as.asInstanceOf[IndexedSeq[SynchronousDatabaseAction[Any, S2, B, E with E2]]] ++
          a.as.asInstanceOf[IndexedSeq[SynchronousDatabaseAction[Any, S2, B, E with E2]]])
      case a: SynchronousDatabaseAction[_, _, _, _] =>
        new SynchronousDatabaseAction.FusedAndThenAction[R2, S2, B, E with E2](
          as.asInstanceOf[IndexedSeq[SynchronousDatabaseAction[Any, S2, B, E with E2]]] :+
          a.asInstanceOf[SynchronousDatabaseAction[Any, S2, B, E with E2]])
      case a => super.andThen(a)
    }
  }

  /** Fuse `flatMap` / `map`, `cleanUp` and `filter` / `withFilter` combinators if they use
    * `DBIO.sameThreadExecutionContext` and produce a `SynchronousDatabaseAction` in their
    * evaluation function (where applicable). This cannot be verified at fusion time, so a wrongly
    * fused action can fail with a `ClassCastException` during evaluation. */
  private[slick] def fuseUnsafe[R, S <: NoStream, E <: Effect](a: DBIOAction[R, S, E]): DBIOAction[R, S, E] = {
    a match {
      case FlatMapAction(base: SynchronousDatabaseAction[_, _, _, _], f, ec) if ec eq DBIO.sameThreadExecutionContext =>
        new SynchronousDatabaseAction.Fused[R, S, BasicBackend, E] {
          def run(context: BasicBackend#Context): R = {
            val b = base.asInstanceOf[SynchronousDatabaseAction[Any, NoStream, BasicBackend, Effect]].run(context)
            val a2 = f(b)
            // May throw ClassCastException if the continuation produced an async action.
            a2.asInstanceOf[SynchronousDatabaseAction[R, S, BasicBackend, E]].run(context)
          }
          override def nonFusedEquivalentAction = a
        }
      case CleanUpAction(base: SynchronousDatabaseAction[_, _, _, _], f, keepFailure, ec) if ec eq DBIO.sameThreadExecutionContext =>
        new SynchronousDatabaseAction.Fused[R, S, BasicBackend, E] {
          def run(context: BasicBackend#Context): R = {
            val res = try {
              base.asInstanceOf[SynchronousDatabaseAction[R, S, BasicBackend, Effect]].run(context)
            } catch { case NonFatal(ex) =>
              try {
                val a2 = f(Some(ex))
                a2.asInstanceOf[SynchronousDatabaseAction[Any, NoStream, BasicBackend, Effect]].run(context)
              } catch { case NonFatal(_) if keepFailure => () } // suppress cleanup failure, keep original
              throw ex
            }
            val a2 = f(None)
            a2.asInstanceOf[SynchronousDatabaseAction[Any, NoStream, BasicBackend, Effect]].run(context)
            res
          }
          override def nonFusedEquivalentAction = a
        }
      case a => a // not fusable: return unchanged
    }
  }
}
| Radsaggi/slick | slick/src/main/scala/slick/dbio/DBIOAction.scala | Scala | bsd-2-clause | 31,800 |
package scala.quoted
import scala.annotation.{compileTimeOnly, experimental, since}
/** Type (or type constructor) `T` needed contextually when using `T` in a quoted expression `'{... T ...}` */
// Constructor is private[scala]: instances are only created by the compiler / Quotes API.
abstract class Type[T <: AnyKind] private[scala]:
  /** The type represented by `Type` */
  type Underlying = T
end Type
/** Methods to interact with the current `Type[T]` in scope */
object Type:
/** Show a source code like representation of this type without syntax highlight */
  def show[T <: AnyKind](using Type[T])(using Quotes): String =
    import quotes.reflect.*
    TypeTree.of[T].show // delegates to the reflection printer for the type tree
/** Return a quoted.Type with the given type */
@compileTimeOnly("Reference to `scala.quoted.Type.of` was not handled by PickleQuotes")
given of[T <: AnyKind](using Quotes): Type[T] = ???
/** Extracts the value of a singleton constant type.
* Returns Some of the value of the type if it is a singleton constant type.
* Returns None if the type is not a singleton constant type.
*
* Example usage:
* ```scala
* //{
* import scala.deriving.*
* def f(using Quotes) = {
* import quotes.reflect.*
* val expr: Expr[Any] = ???
* //}
* expr match {
* case '{ $mirrorExpr : Mirror.Sum { type MirroredLabel = label } } =>
* Type.valueOfConstant[label] // Option[String]
* }
* //{
* }
* //}
* ```
* @syntax markdown
*/
def valueOfConstant[T](using Type[T])(using Quotes): Option[T] =
ValueOf.unapply(quotes.reflect.TypeRepr.of[T]).asInstanceOf[Option[T]]
/** Extracts the value of a tuple of singleton constant types.
* Returns Some of the tuple type if it is a tuple singleton constant types.
* Returns None if the type is not a tuple singleton constant types.
*
* Example usage:
* ```scala
* //{
* import scala.deriving.*
* def f(using Quotes) = {
* import quotes.reflect.*
* val expr: Expr[Any] = ???
* //}
* expr match {
* case '{ type label <: Tuple; $mirrorExpr : Mirror.Sum { type MirroredElemLabels = `label` } } =>
* Type.valueOfTuple[label] // Option[Tuple]
* }
* //{
* }
* //}
* ```
* @syntax markdown
*/
@since("3.1")
def valueOfTuple[T <: Tuple](using Type[T])(using Quotes): Option[T] =
valueOfTuple(quotes.reflect.TypeRepr.of[T]).asInstanceOf[Option[T]]
private def valueOfTuple(using Quotes)(tpe: quotes.reflect.TypeRepr): Option[Tuple] =
import quotes.reflect.*
val cons = Symbol.classSymbol("scala.*:")
def rec(tpe: TypeRepr): Option[Tuple] =
tpe.widenTermRefByName.dealias match
case AppliedType(fn, tpes) if defn.isTupleClass(fn.typeSymbol) =>
tpes.foldRight(Option[Tuple](EmptyTuple)) {
case (_, None) => None
case (ValueOf(v), Some(acc)) => Some(v *: acc)
case _ => None
}
case AppliedType(tp, List(ValueOf(headValue), tail)) if tp.derivesFrom(cons) =>
rec(tail) match
case Some(tailValue) => Some(headValue *: tailValue)
case None => None
case tpe =>
if tpe.derivesFrom(Symbol.classSymbol("scala.EmptyTuple")) then Some(EmptyTuple)
else None
rec(tpe)
private object ValueOf:
def unapply(using Quotes)(tpe: quotes.reflect.TypeRepr): Option[Any] =
import quotes.reflect.*
tpe.widenTermRefByName.dealias match
case ConstantType(const) => Some(const.value)
case _ => None
end Type
| dotty-staging/dotty | library/src/scala/quoted/Type.scala | Scala | apache-2.0 | 3,486 |
/*
* Copyright (C) 2015 Holmes Team at HUAWEI Noah's Ark Lab.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.spark.streamdm.core
/**
* An Instance represents the input or output of any learning algorithm. It is
* normally composed of a feature vector (having various implementations).
*/
trait Instance extends Serializable {

  // Implementations bind T to their own concrete type so that transforming
  // operations (add, hadamard, set, map) return that concrete type.
  type T <: Instance

  /**
   * Get the value present at position index
   *
   * @param index the index of the features
   * @return a Double representing the feature value, or 0 if the index is not
   * present in the underlying data structure
   */
  def apply(index: Int): Double

  /**
   * Return an array of features and indexes
   *
   * @return an array of turple2(value,index)
   */
  def getFeatureIndexArray(): Array[(Double, Int)]

  /**
   * Perform a dot product between two instances
   *
   * @param input an Instance with which the dot product is performed
   * @return a Double representing the dot product
   */
  def dot(input: Instance): Double

  /**
   * Compute the Euclidean distance to another Instance
   *
   * @param input the Instance to which the distance is computed
   * @return a Double representing the distance value
   */
  def distanceTo(input: Instance): Double

  /**
   * Perform an element by element addition between two instances
   *
   * @param input an Instance which is added up
   * @return an Instance representing the added Instances
   */
  def add(input: Instance): T

  /**
   * Perform an element by element multiplication between two instances
   *
   * @param input an Instance which is multiplied
   * @return an Instance representing the Hadamard product
   */
  def hadamard(input: Instance): T

  /**
   * Add a feature to the instance
   *
   * @param index the index at which the value is added
   * @param input the feature value which is added up
   * @return an Instance representing the new feature vector
   */
  def set(index: Int, input: Double): T

  /**
   * Apply an operation to every feature of the Instance
   *
   * @param func the function for the transformation
   * @return a new Instance with the transformed features
   */
  def map(func: Double => Double): T

  /**
   * Aggregate the values of an instance
   *
   * @param func the function for the transformation
   * @return the reduced value
   */
  def reduce(func: (Double, Double) => Double): Double
}
| gosubpl/akka-online | src/main/scala/org/apache/spark/streamdm/core/Instance.scala | Scala | apache-2.0 | 2,911 |
package aoc.day14
import io.IO
object Part1 extends App {

  /*
    --- Day 14: Reindeer Olympics ---
    Each reindeer alternates between flying at its top speed for `flyingPeriod`
    seconds and resting (not moving at all) for `restingPeriod` seconds, always
    in whole seconds. Given one description per input line, find the distance
    covered by the winning reindeer after exactly 2503 seconds.
  */

  // "Reinder" (sic) keeps the original spelling used throughout this object.
  case class Reinder(name: String, speed: Int, flyingPeriod: Int, restingPeriod: Int)

  /** Distance (in km) a reindeer has covered after `time` seconds. */
  def distance(time: Int, r: Reinder): Int = {
    val cycle = r.flyingPeriod + r.restingPeriod
    // Completed fly+rest cycles, plus the flying seconds of the started cycle.
    val completedCycles = time / cycle
    val flyingInStartedCycle = Math.min(time % cycle, r.flyingPeriod)
    (completedCycles * r.flyingPeriod + flyingInStartedCycle) * r.speed
  }

  /** Parses one reindeer description per input line. */
  def linesToReinders(lines: List[String]): Set[Reinder] = {
    val regex = """([a-zA-Z]+) can fly ([0-9]+) km/s for ([0-9]+) seconds, but then must rest for ([0-9]+) seconds.""".r
    lines.map {
      case regex(name, speed, flyingPeriod, restingPeriod) =>
        Reinder(name, speed.toInt, flyingPeriod.toInt, restingPeriod.toInt)
    }.toSet
  }

  val reinders = linesToReinders(IO.getLines())
  val winner = reinders.maxBy(r => distance(2503, r))

  println(s"After exactly 2503 seconds, the winning reindeer has traveled:\\n${winner}, ${distance(2503, winner)}")
} | GuillaumeDD/AdventOfCode2015 | src/main/scala/aoc/day14/Part1.scala | Scala | gpl-3.0 | 2651 |
import com.twitter.scalding.typed.TypedPipe
import com.twitter.scalding._
/**
 * Word-count style example job that also bumps a custom counter
 * ("alice-counter") once for every line flowing through the flatMap.
 *
 * @author tomerb
 * on 20/01/15.
 */
class ScaldingCounterExampleJob(args : Args) extends Job(args) {

  val stat = Stat("alice-counter")

  TypedPipe.from("is alice really alice ?".split(" "))
    .flatMap { line =>
      stat.inc
      line.split("""\\s+""")
    }
    .groupBy(identity)
    .size
    .write(TypedTsv(args("output")))
} | tomer-ben-david-examples/scalding-counters-example | src/main/scala/ScaldingCounterExampleJob.scala | Scala | apache-2.0 | 438 |
/*
* Copyright 2021 ABSA Group Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package za.co.absa.spline.common.validation
import jakarta.validation.ConstraintViolationException
object ValidationUtils {

  /** Validates the given constraint, throwing if it does not hold. */
  @throws[ConstraintViolationException]
  def validate(c: Constraint): Unit = validate(c.isValid, c.message)

  /** Throws a `ConstraintViolationException` carrying the (lazily evaluated)
    * message unless the condition holds. */
  @throws[ConstraintViolationException]
  def validate(b: Boolean, msg: => String): Unit =
    if (!b) throw new ConstraintViolationException(msg, null) // NOSONAR
}
| AbsaOSS/spline | commons/src/main/scala/za/co/absa/spline/common/validation/ValidationUtils.scala | Scala | apache-2.0 | 1,022 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark.network
import java.net.InetSocketAddress
import spark.Utils
// Identifies a ConnectionManager endpoint by host and port.
private[spark] case class ConnectionManagerId(host: String, port: Int) {
  // DEBUG code: sanity-check the host/port at construction time.
  Utils.checkHost(host)
  assert (port > 0)

  // NOTE(review): may trigger DNS resolution via InetSocketAddress — confirm acceptable here.
  def toSocketAddress() = new InetSocketAddress(host, port)
}
private[spark] object ConnectionManagerId {
  /** Builds an id from a socket address using its host name and port. */
  def fromSocketAddress(socketAddress: InetSocketAddress): ConnectionManagerId =
    ConnectionManagerId(socketAddress.getHostName(), socketAddress.getPort())
}
| wgpshashank/spark | core/src/main/scala/spark/network/ConnectionManagerId.scala | Scala | apache-2.0 | 1,294 |
/*
* Copyright 2016 rdbc contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.rdbc.implbase
import java.time._
import java.util.UUID
import io.rdbc.sapi.exceptions.ConversionException
import io.rdbc.sapi.{DecimalNumber, Row}
import io.rdbc.util.Preconditions.checkNotNull
import scala.reflect.ClassTag
/** Implements the typed accessors of [[Row]] in terms of the abstract
  * `colOpt` methods: every non-`Opt` accessor delegates to its `Opt`
  * counterpart and raises a [[ConversionException]] on SQL NULL.
  */
trait RowPartialImpl extends Row {

  override def col[A: ClassTag](name: String): A = {
    checkNotNull(name)
    colOpt(name).getOrElse {
      throw nullConversionException(implicitly[ClassTag[A]].runtimeClass)
    }
  }

  override def col[A: ClassTag](idx: Int): A = {
    checkNotNull(idx)
    colOpt(idx).getOrElse {
      throw nullConversionException(implicitly[ClassTag[A]].runtimeClass)
    }
  }

  // Error raised when a non-Opt accessor encounters SQL NULL.
  private def nullConversionException(target: Class[_]): ConversionException = {
    new ConversionException(
      msg = s"SQL NULL cannot be represented by $target, use *Opt method instead",
      value = None,
      targetType = target,
      maybeCause = None
    )
  }

  // --- Typed convenience accessors: each delegates to col/colOpt. ---

  override def str(name: String): String = col[String](name)
  override def strOpt(name: String): Option[String] = colOpt[String](name)
  override def str(idx: Int): String = col[String](idx)
  override def strOpt(idx: Int): Option[String] = colOpt[String](idx)

  override def bool(name: String): Boolean = col[Boolean](name)
  override def boolOpt(name: String): Option[Boolean] = colOpt[Boolean](name)
  override def bool(idx: Int): Boolean = col[Boolean](idx)
  override def boolOpt(idx: Int): Option[Boolean] = colOpt[Boolean](idx)

  override def char(name: String): Char = col[Char](name)
  override def charOpt(name: String): Option[Char] = colOpt[Char](name)
  override def char(idx: Int): Char = col[Char](idx)
  override def charOpt(idx: Int): Option[Char] = colOpt[Char](idx)

  override def short(name: String): Short = col[Short](name)
  override def shortOpt(name: String): Option[Short] = colOpt[Short](name)
  override def short(idx: Int): Short = col[Short](idx)
  override def shortOpt(idx: Int): Option[Short] = colOpt[Short](idx)

  override def int(name: String): Int = col[Int](name)
  override def intOpt(name: String): Option[Int] = colOpt[Int](name)
  override def int(idx: Int): Int = col[Int](idx)
  override def intOpt(idx: Int): Option[Int] = colOpt[Int](idx)

  override def long(name: String): Long = col[Long](name)
  override def longOpt(name: String): Option[Long] = colOpt[Long](name)
  override def long(idx: Int): Long = col[Long](idx)
  override def longOpt(idx: Int): Option[Long] = colOpt[Long](idx)

  override def bigDecimal(name: String): BigDecimal = col[BigDecimal](name)
  override def bigDecimalOpt(name: String): Option[BigDecimal] = colOpt[BigDecimal](name)
  override def bigDecimal(idx: Int): BigDecimal = col[BigDecimal](idx)
  override def bigDecimalOpt(idx: Int): Option[BigDecimal] = colOpt[BigDecimal](idx)

  override def decimal(name: String): DecimalNumber = col[DecimalNumber](name)
  override def decimalOpt(name: String): Option[DecimalNumber] = colOpt[DecimalNumber](name)
  override def decimal(idx: Int): DecimalNumber = col[DecimalNumber](idx)
  override def decimalOpt(idx: Int): Option[DecimalNumber] = colOpt[DecimalNumber](idx)

  override def double(name: String): Double = col[Double](name)
  override def doubleOpt(name: String): Option[Double] = colOpt[Double](name)
  override def double(idx: Int): Double = col[Double](idx)
  override def doubleOpt(idx: Int): Option[Double] = colOpt[Double](idx)

  override def float(name: String): Float = col[Float](name)
  override def floatOpt(name: String): Option[Float] = colOpt[Float](name)
  override def float(idx: Int): Float = col[Float](idx)
  override def floatOpt(idx: Int): Option[Float] = colOpt[Float](idx)

  override def instant(name: String): Instant = col[Instant](name)
  override def instantOpt(name: String): Option[Instant] = colOpt[Instant](name)
  override def instant(idx: Int): Instant = col[Instant](idx)
  override def instantOpt(idx: Int): Option[Instant] = colOpt[Instant](idx)

  // Zone-aware variants: interpret the column's LocalDateTime in the given zone.
  override def instant(name: String, zoneId: ZoneId): Instant = {
    localDateTimeToInstant(localDateTime(name), zoneId)
  }
  override def instantOpt(name: String, zoneId: ZoneId): Option[Instant] = {
    localDateTimeOpt(name).map(localDateTimeToInstant(_, zoneId))
  }
  override def instant(idx: Int, zoneId: ZoneId): Instant = {
    localDateTimeToInstant(localDateTime(idx), zoneId)
  }
  override def instantOpt(idx: Int, zoneId: ZoneId): Option[Instant] = {
    localDateTimeOpt(idx).map(localDateTimeToInstant(_, zoneId))
  }

  override def localDateTime(name: String): LocalDateTime = col[LocalDateTime](name)
  override def localDateTimeOpt(name: String): Option[LocalDateTime] = colOpt[LocalDateTime](name)
  override def localDateTime(idx: Int): LocalDateTime = col[LocalDateTime](idx)
  override def localDateTimeOpt(idx: Int): Option[LocalDateTime] = colOpt[LocalDateTime](idx)

  override def localDate(name: String): LocalDate = col[LocalDate](name)
  override def localDateOpt(name: String): Option[LocalDate] = colOpt[LocalDate](name)
  override def localDate(idx: Int): LocalDate = col[LocalDate](idx)
  override def localDateOpt(idx: Int): Option[LocalDate] = colOpt[LocalDate](idx)

  override def localTime(name: String): LocalTime = col[LocalTime](name)
  override def localTimeOpt(name: String): Option[LocalTime] = colOpt[LocalTime](name)
  override def localTime(idx: Int): LocalTime = col[LocalTime](idx)
  override def localTimeOpt(idx: Int): Option[LocalTime] = colOpt[LocalTime](idx)

  override def bytes(name: String): Array[Byte] = col[Array[Byte]](name)
  override def bytesOpt(name: String): Option[Array[Byte]] = colOpt[Array[Byte]](name)
  override def bytes(idx: Int): Array[Byte] = col[Array[Byte]](idx)
  override def bytesOpt(idx: Int): Option[Array[Byte]] = colOpt[Array[Byte]](idx)

  override def uuid(name: String): UUID = col[UUID](name)
  override def uuidOpt(name: String): Option[UUID] = colOpt[UUID](name)
  override def uuid(idx: Int): UUID = col[UUID](idx)
  override def uuidOpt(idx: Int): Option[UUID] = colOpt[UUID](idx)

  override def zonedDateTime(name: String): ZonedDateTime = col[ZonedDateTime](name)
  override def zonedDateTimeOpt(name: String): Option[ZonedDateTime] = colOpt[ZonedDateTime](name)
  override def zonedDateTime(idx: Int): ZonedDateTime = col[ZonedDateTime](idx)
  override def zonedDateTimeOpt(idx: Int): Option[ZonedDateTime] = colOpt[ZonedDateTime](idx)

  // Converts using the zone's offset valid at that local date-time.
  private def localDateTimeToInstant(ldt: LocalDateTime, zoneId: ZoneId): Instant = {
    ldt.toInstant(zoneId.getRules.getOffset(ldt))
  }
}
| rdbc-io/rdbc | rdbc-implbase/src/main/scala/io/rdbc/implbase/RowPartialImpl.scala | Scala | apache-2.0 | 7,187 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.Properties
import org.apache.kafka.common.Uuid
import org.apache.kafka.common.metrics.MetricsContext
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.Test
import scala.jdk.CollectionConverters._
class ServerTest {

  @Test
  def testCreateSelfManagedKafkaMetricsContext(): Unit = {
    val nodeId = 0
    val clusterId = Uuid.randomUuid().toString

    // Minimal KRaft ("self-managed") broker configuration.
    val props = new Properties()
    props.put(KafkaConfig.ProcessRolesProp, "broker")
    props.put(KafkaConfig.NodeIdProp, nodeId.toString)
    props.put(KafkaConfig.QuorumVotersProp, s"${(nodeId + 1)}@localhost:9092")
    val config = KafkaConfig.fromProps(props)

    val context = Server.createKafkaMetricsContext(config, clusterId)

    // In KRaft mode the context is labeled with the node id.
    val expectedLabels = Map(
      MetricsContext.NAMESPACE -> Server.MetricsPrefix,
      Server.ClusterIdLabel -> clusterId,
      Server.NodeIdLabel -> nodeId.toString
    )
    assertEquals(expectedLabels, context.contextLabels.asScala)
  }

  @Test
  def testCreateZkKafkaMetricsContext(): Unit = {
    val brokerId = 0
    val clusterId = Uuid.randomUuid().toString

    // Minimal ZooKeeper-based broker configuration.
    val props = new Properties()
    props.put(KafkaConfig.BrokerIdProp, brokerId.toString)
    props.put(KafkaConfig.ZkConnectProp, "127.0.0.1:0")
    val config = KafkaConfig.fromProps(props)

    val context = Server.createKafkaMetricsContext(config, clusterId)

    // In ZK mode the context is labeled with the broker id.
    val expectedLabels = Map(
      MetricsContext.NAMESPACE -> Server.MetricsPrefix,
      Server.ClusterIdLabel -> clusterId,
      Server.BrokerIdLabel -> brokerId.toString
    )
    assertEquals(expectedLabels, context.contextLabels.asScala)
  }
}
| guozhangwang/kafka | core/src/test/scala/unit/kafka/server/ServerTest.scala | Scala | apache-2.0 | 2,362 |
/** n! computed as the product of 1..n; returns 1 for any n <= 0
 *  (the empty product), matching the original accumulator behavior.
 */
def factorial(n: Int): Int = (1 to n).product
// Print 1! through 10!; a single loop replaces the ten repetitive println calls
// while producing exactly the same output lines.
(1 to 10).foreach(n => println(factorial(n)))
| scottleedavis/scala-musings | factorial.scala | Scala | mit | 368 |
package io.neons.collector.application.akka.http.directive
import java.util.{Base64, UUID}
import akka.http.scaladsl.coding.Gzip
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.MediaTypes._
import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.server._
import akka.http.scaladsl.server.Directives._
import io.neons.collector.infrastructure.log.builder.AkkaHttpLogBuilder
import io.neons.collector.model.log.{Log, LogBuilder}
object TransparentPixel {
  // 1x1 transparent GIF, decoded once at class-load time from its Base64 form.
  val pixel: Array[Byte] = Base64.getDecoder.decode(
    "R0lGODlhAQABAPAAAP///wAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw=="
  )
}
/** Akka HTTP directives used by the collector endpoints: tracking-pixel
  * responses, tracker-script serving, request logging and visitor cookies.
  */
trait CollectorDirectives {

  /** Completes the request with a 1x1 transparent GIF (classic tracking pixel). */
  def responseWithTransparentPixel: Route = {
    complete(HttpResponse(entity = HttpEntity(`image/gif`, TransparentPixel.pixel)))
  }

  /** Serves the JavaScript tracker from the classpath, gzip-compressed. */
  def responseWithJavascriptTrackerSource(trackerFile: String): Route = {
    // Vary: Accept-Encoding so caches keep compressed/uncompressed variants apart.
    val headers = List(
      RawHeader("Vary", "Accept-Encoding")
    )

    respondWithHeaders(headers) {
      encodeResponseWith(Gzip) {
        getFromResource(trackerFile)
      }
    }
  }

  /** Captures the request and client IP into the given builder and provides
    * the built [[Log]]. Note: mutates `logBuilder` as a side effect of
    * directive evaluation (relies on it being an AkkaHttpLogBuilder).
    */
  def extractRawRequest(logBuilder: LogBuilder): Directive1[Log] = {
    extractRequest
      .flatMap(request => {
        logBuilder.asInstanceOf[AkkaHttpLogBuilder].addHttpRequest(request)
        extractClientIP
      })
      .flatMap(ip => {
        // "unknown" when the remote address could not be resolved.
        logBuilder.asInstanceOf[AkkaHttpLogBuilder].addClientIp(
          ip.toOption.map(_.getHostAddress).getOrElse("unknown")
        )
        provide(logBuilder.build)
      })
  }

  /** Responds with the pixel, (re-)setting the visitor-id cookie: an existing
    * cookie value is refreshed, otherwise a fresh random UUID is issued.
    */
  def responseWithCookieVisitorId(name: String, domain: String): Route = {
    optionalCookie(name) {
      case Some(nameCookie) => setCookie(getHttpCookie(name, nameCookie.value, domain)) {
        responseWithTransparentPixel
      }
      case None => setCookie(getHttpCookie(name, UUID.randomUUID().toString, domain)) {
        responseWithTransparentPixel
      }
    }
  }

  /** Builds the visitor cookie for the given domain, valid on the whole site. */
  def getHttpCookie(name: String, value: String, domain: String): HttpCookie = {
    HttpCookie(name,
      value = value,
      domain = Some(domain),
      // maxAge is in seconds; 2*60*24*30*365 = 31,536,000 s (~1 year).
      // NOTE(review): the factoring suggests a different intended lifetime — confirm.
      maxAge = Some(2 * 60 * 24 * 30 * 365),
      path = Some("/")
    )
  }
}

object CollectorDirectives extends CollectorDirectives
| NeonsIo/collector | src/main/scala/io/neons/collector/application/akka/http/directive/CollectorDirectives.scala | Scala | mit | 2,152 |
/*
* Copyright (c) 2014, Brook 'redattack34' Heisler
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the ModularRayguns team nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.castlebravostudios.rayguns.items.chambers
import com.castlebravostudios.rayguns.api.items.ItemModule
import com.castlebravostudios.rayguns.entities.effects.FortifiedSunlightEffect
import com.castlebravostudios.rayguns.items.emitters.Emitters
import com.castlebravostudios.rayguns.items.misc.Tier2EmptyChamber
import com.castlebravostudios.rayguns.mod.ModularRayguns
/** Chamber module definition for the "Fortified Sunlight" raygun chamber. */
object FortifiedSunlightChamber extends BaseChamber {

  val moduleKey = "FortifiedSunlightChamber"
  // Multiplier applied to the gun's base power for this chamber.
  val powerModifier = 4.0
  val shotEffect = FortifiedSunlightEffect
  val nameSegmentKey = "rayguns.FortifiedSunlightChamber.segment"

  // Builds the (non-stackable) chamber item; crafted from the fortified
  // sunlight emitter and a tier-2 empty chamber.
  def createItem() : ItemModule = new ItemChamber( this,
    Emitters.fortifiedSunlightEmitter, Tier2EmptyChamber )
    .setUnlocalizedName("rayguns.FortifiedSunlightChamber")
    .setTextureName("rayguns:chamber_fortified_sunlight")
    .setCreativeTab( ModularRayguns.raygunsTab )
    .setMaxStackSize(1)

  // Registers every fire mode this chamber supports.
  def registerShotHandlers() : Unit = {
    registerSingleShotHandlers()
    registerScatterShotHandler()
    registerChargedShotHandler()
    registerPreciseShotHandler()
  }
} | Redattack34/ModularRayguns | src/main/scala/com/castlebravostudios/rayguns/items/chambers/FortifiedSunlightChamber.scala | Scala | bsd-3-clause | 2701 |
package mesosphere.marathon.core.election.impl
import java.util
import java.util.Collections
import akka.actor.ActorSystem
import akka.event.EventStream
import com.codahale.metrics.MetricRegistry
import mesosphere.marathon.MarathonConf
import mesosphere.marathon.core.base.{ CurrentRuntime, ShutdownHooks }
import mesosphere.marathon.metrics.Metrics
import org.apache.curator.framework.api.ACLProvider
import org.apache.curator.framework.recipes.leader.{ LeaderLatch, LeaderLatchListener }
import org.apache.curator.framework.{ AuthInfo, CuratorFramework, CuratorFrameworkFactory }
import org.apache.curator.{ RetryPolicy, RetrySleeper }
import org.apache.zookeeper.data.ACL
import org.apache.zookeeper.{ CreateMode, KeeperException, ZooDefs }
import org.slf4j.LoggerFactory
import scala.util.control.NonFatal
/**
 * Leader election backed by an Apache Curator [[LeaderLatch]] on
 * `zooKeeperLeaderPath + "-curator"`. On election, a tombstone node is written
 * into the legacy Twitter-Commons election path so that old Marathon instances
 * never consider themselves leader during a failover.
 */
class CuratorElectionService(
  config: MarathonConf,
  system: ActorSystem,
  eventStream: EventStream,
  metrics: Metrics = new Metrics(new MetricRegistry),
  hostPort: String,
  backoff: ExponentialBackoff,
  shutdownHooks: ShutdownHooks) extends ElectionServiceBase(
  system, eventStream, metrics, backoff, shutdownHooks
) {
  private lazy val log = LoggerFactory.getLogger(getClass.getName)

  private lazy val client = provideCuratorClient()
  // Present while this instance participates in the election; all access is synchronized.
  private var maybeLatch: Option[LeaderLatch] = None

  /** Host:port of the current leader according to the latch, if any. */
  override def leaderHostPortImpl: Option[String] = synchronized {
    try {
      maybeLatch.flatMap { l =>
        val participant = l.getLeader
        if (participant.isLeader) Some(participant.getId) else None
      }
    } catch {
      case NonFatal(e) =>
        log.error("error while getting current leader", e)
        None
    }
  }

  /** (Re-)enters the election by closing any existing latch and starting a new one. */
  override def offerLeadershipImpl(): Unit = synchronized {
    log.info("Using HA and therefore offering leadership")
    maybeLatch match {
      case Some(l) =>
        log.info("Offering leadership while being candidate")
        l.close()
      case _ =>
    }

    maybeLatch = Some(new LeaderLatch(
      client, config.zooKeeperLeaderPath + "-curator", hostPort, LeaderLatch.CloseMode.NOTIFY_LEADER
    ))
    maybeLatch.foreach { latch =>
      latch.addListener(Listener)
      latch.start()
    }
  }

  // Bridges Curator latch callbacks to ElectionServiceBase's start/stop hooks.
  private object Listener extends LeaderLatchListener {
    override def notLeader(): Unit = CuratorElectionService.this.synchronized {
      log.info(s"Defeated (LeaderLatchListener Interface). New leader: ${leaderHostPort.getOrElse("-")}")

      // remove tombstone for twitter commons
      twitterCommonsTombstone.delete(onlyMyself = true)
      stopLeadership()
    }

    override def isLeader(): Unit = CuratorElectionService.this.synchronized {
      log.info("Elected (LeaderLatchListener Interface)")
      startLeadership(error => CuratorElectionService.this.synchronized {
        maybeLatch match {
          case None => log.error("Abdicating leadership while not being leader")
          case Some(l) =>
            maybeLatch = None
            l.close()
        }
        // stopLeadership() is called in notLeader
      })

      // write a tombstone into the old twitter commons leadership election path which always
      // wins the selection. Check that startLeadership was successful and didn't abdicate.
      if (CuratorElectionService.this.isLeader) {
        twitterCommonsTombstone.create()
      }
    }
  }

  /** Builds, starts and connects the Curator client used for the election. */
  private def provideCuratorClient(): CuratorFramework = {
    log.info(s"Will do leader election through ${config.zkHosts}")

    // let the world read the leadership information as some setups depend on that to find Marathon
    val acl = new util.ArrayList[ACL]()
    acl.addAll(config.zkDefaultCreationACL)
    acl.addAll(ZooDefs.Ids.READ_ACL_UNSAFE)

    val builder = CuratorFrameworkFactory.builder().
      connectString(config.zkHosts).
      sessionTimeoutMs(config.zooKeeperSessionTimeout().toInt).
      aclProvider(new ACLProvider {
        val rootAcl = {
          val acls = new util.ArrayList[ACL]()
          // Fixed: the original called `acls.addAll(acls)` — adding the still-empty
          // list to itself, a no-op. The intent is to include the default ACL in
          // addition to the open ACL for the root path.
          acls.addAll(acl)
          acls.addAll(ZooDefs.Ids.OPEN_ACL_UNSAFE)
          acls
        }

        override def getDefaultAcl: util.List[ACL] = acl

        override def getAclForPath(path: String): util.List[ACL] = if (path != config.zkPath) {
          acl
        } else {
          rootAcl
        }
      }).
      retryPolicy(new RetryPolicy {
        override def allowRetry(retryCount: Int, elapsedTimeMs: Long, sleeper: RetrySleeper): Boolean = {
          log.error("ZooKeeper access failed - Committing suicide to avoid invalidating ZooKeeper state")
          CurrentRuntime.asyncExit()(scala.concurrent.ExecutionContext.global)
          false
        }
      })

    // optionally authenticate
    val client = (config.zkUsername, config.zkPassword) match {
      case (Some(user), Some(pass)) =>
        builder.authorization(Collections.singletonList(
          new AuthInfo("digest", (user + ":" + pass).getBytes("UTF-8"))
        )).build()
      case _ =>
        builder.build()
    }

    client.start()
    client.getZookeeperClient.blockUntilConnectedOrTimedOut()
    client
  }

  // Manages the tombstone node in the legacy Twitter-Commons election path.
  private object twitterCommonsTombstone {
    def memberPath(member: String): String = {
      config.zooKeeperLeaderPath.stripSuffix("/") + "/" + member
    }

    // - precedes 0-9 in ASCII and hence this instance overrules other candidates
    lazy val memberName = "member_-00000000"

    lazy val path = memberPath(memberName)

    var fallbackCreated = false

    def create(): Unit = {
      try {
        delete(onlyMyself = false)

        client.createContainers(config.zooKeeperLeaderPath)

        // Create a ephemeral node which is not removed when losing leadership. This is necessary to avoid a
        // race of old Marathon instances which think that they can become leader in the moment
        // the new instances failover and no tombstone is existing (yet).
        if (!fallbackCreated) {
          client.create().
            creatingParentsIfNeeded().
            withMode(CreateMode.EPHEMERAL_SEQUENTIAL).
            forPath(memberPath("member_-1"), hostPort.getBytes("UTF-8"))
          fallbackCreated = true
        }

        log.info("Creating tombstone for old twitter commons leader election")
        client.create().
          creatingParentsIfNeeded().
          withMode(CreateMode.EPHEMERAL).
          forPath(path, hostPort.getBytes("UTF-8"))
      } catch {
        case NonFatal(e) =>
          log.error(s"Exception while creating tombstone for twitter commons leader election: ${e.getMessage}")
          abdicateLeadership(error = true)
      }
    }

    @SuppressWarnings(Array("SwallowedException"))
    def delete(onlyMyself: Boolean = false): Unit = {
      Option(client.checkExists().forPath(path)).foreach { tombstone =>
        try {
          if (!onlyMyself ||
            new String(client.getData.forPath(memberPath(memberName))) == hostPort) {
            log.info("Deleting existing tombstone for old twitter commons leader election")
            client.delete().guaranteed().withVersion(tombstone.getVersion).forPath(path)
          }
        } catch {
          // Concurrent deletion/update by another instance is benign; ignore.
          case _: KeeperException.NoNodeException =>
          case _: KeeperException.BadVersionException =>
        }
      }
    }
  }
}
| timcharper/marathon | src/main/scala/mesosphere/marathon/core/election/impl/CuratorElectionService.scala | Scala | apache-2.0 | 7,205 |
package org.jetbrains.plugins.scala.codeInspection.methodSignature
import com.intellij.codeInspection._
import org.intellij.lang.annotations.Language
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import quickfix.AddEmptyParentheses
/**
 * Pavel Fatin
 */
/**
 * Inspection that flags parameterless methods whose result type is Unit and
 * offers to add empty parentheses (Scala convention: side-effecting methods
 * should be declared with `()`).
 */
class UnitMethodIsParameterlessInspection extends AbstractMethodSignatureInspection(
  "ScalaUnitMethodIsParameterless", "Method with Unit result type is parameterless") {

  // Returns the partial function used by the inspection framework.  The guard
  // must stay inside the case pattern: moving it into the body would make the
  // function defined for every ScFunction.  Methods with super methods are
  // skipped, presumably so the quick fix cannot break an override signature.
  def actionFor(holder: ProblemsHolder) = {
    case f: ScFunction if f.isParameterless && f.hasUnitResultType && f.superMethods.isEmpty =>
      holder.registerProblem(f.nameId, getDisplayName, new AddEmptyParentheses(f))
  }
}
/**
 * Compile-time test bed for a type-level moon-phase cycle.
 *
 * Each `val` below is a compile-time assertion: the type ascription compiles
 * only if `…#Current` resolves to the annotated phase class.  The `throw`
 * expressions merely supply a value of the required type — evaluating any of
 * these members at runtime throws.
 *
 * NOTE(review): `YQImpl` and `YueyuanZero` are defined elsewhere; the exact
 * stepping semantics (a ping-pong over the four phases) are inferred from the
 * assertions, not visible here.
 */
object Typer {
  // The four moon phases, used purely as type-level markers:
  class 新月
  class 上弦
  class 渐盈
  class 满月
  // Phase sequence built by stacking Add; `Next` advances one step.
  type Yue = YQImpl[YueyuanZero#Add[满月]#Add[渐盈]#Add[上弦]#Add[新月], YueyuanZero]
  val a1: 新月 = (throw new Exception("ii")): Yue#Current
  val a2: 新月 = (throw new Exception("ii")): Yue#Next#Current
  val a3: 上弦 = (throw new Exception("ii")): Yue#Next#Next#Current
  val a4: 渐盈 = (throw new Exception("ii")): Yue#Next#Next#Next#Current
  val a5: 满月 = (throw new Exception("ii")): Yue#Next#Next#Next#Next#Current
  val a6: 满月 = (throw new Exception("ii")): Yue#Next#Next#Next#Next#Next#Current
  val a7: 渐盈 = (throw new Exception("ii")): Yue#Next#Next#Next#Next#Next#Next#Current
  val a8: 上弦 = (throw new Exception("ii")): Yue#Next#Next#Next#Next#Next#Next#Next#Current
  val a9: 新月 = (throw new Exception("ii")): Yue#Next#Next#Next#Next#Next#Next#Next#Next#Current
  // Nine steps in: the cycle reverses direction again from here.
  type Yue10 = Yue#Next#Next#Next#Next#Next#Next#Next#Next#Next
  val a10: 新月 = (throw new Exception("ii")): Yue10#Current
  val a11: 上弦 = (throw new Exception("ii")): Yue10#Next#Current
  val a12: 渐盈 = (throw new Exception("ii")): Yue10#Next#Next#Current
  val a13: 满月 = (throw new Exception("ii")): Yue10#Next#Next#Next#Current
  val a14: 满月 = (throw new Exception("ii")): Yue10#Next#Next#Next#Next#Current
  val a15: 渐盈 = (throw new Exception("ii")): Yue10#Next#Next#Next#Next#Next#Current
  val a16: 上弦 = (throw new Exception("ii")): Yue10#Next#Next#Next#Next#Next#Next#Current
}
| djx314/ubw | a20-月盈月亏/src/main/scala/a20/step1_20190801/Typer.scala | Scala | bsd-3-clause | 1,507 |
package controllers
import javax.inject._
import model.PlaceService
import shared.{Location, Place, SharedMessages}
import play.api.mvc._
import play.api.libs.json._
import play.api.libs.functional.syntax._
@Singleton
class Application @Inject()(cc: ControllerComponents) extends AbstractController(cc) {

  /** Landing page carrying the shared "it works" message. */
  def index = Action {
    Ok(views.html.index(SharedMessages.itWorks))
  }

  /** Serializes a [[Location]] as `{"lat": ..., "long": ...}`. */
  implicit val locationWrites: Writes[Location] = Writes { location =>
    // Deconstruct via unapply so we do not depend on field accessor names.
    val Some((lat, long)) = Location.unapply(location)
    Json.obj("lat" -> lat, "long" -> long)
  }

  /** Serializes a [[Place]] as `{"name": ..., "location": {...}}`. */
  implicit val placeWrites: Writes[Place] = Writes { place =>
    val Some((name, location)) = Place.unapply(place)
    // The nested location is rendered by the implicit locationWrites above.
    Json.obj("name" -> name, "location" -> location)
  }

  /** Returns every known place as a JSON array. */
  def listPlaces = Action {
    Ok(Json.toJson(PlaceService.list))
  }
}
package sangria.renderer
import sangria.execution.ValueCoercionHelper
import sangria.introspection._
import sangria.marshalling.{InputUnmarshaller, ToInput}
import sangria.schema._
import sangria.ast
import sangria.ast.{AstNode, AstVisitor}
import sangria.introspection.__DirectiveLocation
import sangria.parser.QueryParser
import sangria.visitor.VisitorCommand
/**
 * Renders a GraphQL schema — either a Sangria [[Schema]] or an introspection
 * result — into SDL (Schema Definition Language), by translating each schema
 * element into its `sangria.ast` node and pretty-printing the document.
 *
 * Most methods come in pairs: one operating on `Introspection*` types (parsed
 * from an introspection query result) and one on Sangria's in-memory schema
 * types.
 */
object SchemaRenderer {
  // Renders a type reference as a string, e.g. `[Int!]` or `String!`.
  // Non-null `!` is implied unless the type is wrapped in Option.
  def renderTypeName(tpe: Type, topLevel: Boolean = false) = {
    def loop(t: Type, suffix: String): String = t match {
      case OptionType(ofType) => loop(ofType, "")
      case OptionInputType(ofType) => loop(ofType, "")
      case ListType(ofType) => s"[${loop(ofType, "!")}]" + suffix
      case ListInputType(ofType) => s"[${loop(ofType, "!")}]" + suffix
      case named: Named => named.name + suffix
    }
    loop(tpe, if (topLevel) "" else "!")
  }

  // Same as renderTypeName, but producing an AST node instead of a string.
  def renderTypeNameAst(tpe: Type, topLevel: Boolean = false): ast.Type = {
    def nn(tpe: ast.Type, notNull: Boolean) =
      if (notNull) ast.NotNullType(tpe)
      else tpe
    def loop(t: Type, notNull: Boolean): ast.Type = t match {
      case OptionType(ofType) => loop(ofType, false)
      case OptionInputType(ofType) => loop(ofType, false)
      case ListType(ofType) => nn(ast.ListType(loop(ofType, true)), notNull)
      case ListInputType(ofType) => nn(ast.ListType(loop(ofType, true)), notNull)
      case named: Named => nn(ast.NamedType(named.name), notNull)
    }
    loop(tpe, !topLevel)
  }

  // Blank/whitespace-only descriptions are dropped; multi-line ones become
  // block (triple-quoted) strings.
  def renderDescription(description: Option[String]): Option[ast.StringValue] =
    description.flatMap { d =>
      if (d.trim.nonEmpty) Some(ast.StringValue(d, block = d.indexOf('\\n') > 0))
      else None
    }

  def renderImplementedInterfaces(tpe: IntrospectionObjectType) =
    tpe.interfaces.map(t => ast.NamedType(t.name)).toVector

  def renderImplementedInterfaces(tpe: ObjectLikeType[_, _]) =
    tpe.allInterfaces.map(t => ast.NamedType(t.name))

  def renderTypeName(tpe: IntrospectionTypeRef): ast.Type =
    tpe match {
      case IntrospectionListTypeRef(ofType) => ast.ListType(renderTypeName(ofType))
      case IntrospectionNonNullTypeRef(ofType) => ast.NotNullType(renderTypeName(ofType))
      case IntrospectionNamedTypeRef(_, name) => ast.NamedType(name)
    }

  // Introspection gives defaults back as GraphQL source; re-parse them.
  // Unparseable defaults are silently dropped.
  def renderDefault(defaultValue: Option[String]) =
    defaultValue.flatMap(d => QueryParser.parseInput(d).toOption)

  def renderDefault(value: (Any, ToInput[_, _]), tpe: InputType[_]) = {
    val coercionHelper = new ValueCoercionHelper[Any]
    DefaultValueRenderer.renderInputValue(value, tpe, coercionHelper)
  }

  def renderArg(arg: IntrospectionInputValue) =
    ast.InputValueDefinition(
      arg.name,
      renderTypeName(arg.tpe),
      renderDefault(arg.defaultValue),
      description = renderDescription(arg.description))

  def renderArg(arg: Argument[_]) =
    ast.InputValueDefinition(
      arg.name,
      renderTypeNameAst(arg.argumentType),
      arg.defaultValue.flatMap(renderDefault(_, arg.argumentType)),
      arg.astDirectives,
      renderDescription(arg.description)
    )

  // Strips any existing @deprecated directive so renderDeprecation can
  // re-emit a canonical one without duplication.
  def withoutDeprecated(dirs: Vector[ast.Directive]) = dirs.filterNot(_.name == "deprecated")

  // Emits `@deprecated` (with a `reason` argument only when it is non-default
  // and non-empty).
  def renderDeprecation(isDeprecated: Boolean, reason: Option[String]) =
    (isDeprecated, reason) match {
      case (true, Some(r)) if r.trim == DefaultDeprecationReason =>
        Vector(ast.Directive("deprecated", Vector.empty))
      case (true, Some(r)) if r.trim.nonEmpty =>
        Vector(ast.Directive("deprecated", Vector(ast.Argument("reason", ast.StringValue(r.trim)))))
      case (true, _) => Vector(ast.Directive("deprecated", Vector.empty))
      case _ => Vector.empty
    }

  def renderArgsI(args: Seq[IntrospectionInputValue]) =
    args.map(renderArg).toVector

  def renderArgs(args: Seq[Argument[_]]) =
    args.map(renderArg).toVector

  def renderFieldsI(fields: Seq[IntrospectionField]) =
    fields.map(renderField).toVector

  def renderFields(fields: Seq[Field[_, _]]) =
    fields.map(renderField).toVector

  def renderInputFieldsI(fields: Seq[IntrospectionInputValue]) =
    fields.map(renderInputField).toVector

  def renderInputFields(fields: Seq[InputField[_]]) =
    fields.map(renderInputField).toVector

  def renderField(field: IntrospectionField) =
    ast.FieldDefinition(
      field.name,
      renderTypeName(field.tpe),
      renderArgsI(field.args),
      renderDeprecation(field.isDeprecated, field.deprecationReason),
      renderDescription(field.description)
    )

  def renderField(field: Field[_, _]) =
    ast.FieldDefinition(
      field.name,
      renderTypeNameAst(field.fieldType),
      renderArgs(field.arguments),
      withoutDeprecated(field.astDirectives) ++ renderDeprecation(
        field.deprecationReason.isDefined,
        field.deprecationReason),
      renderDescription(field.description)
    )

  def renderInputField(field: IntrospectionInputValue) =
    ast.InputValueDefinition(
      field.name,
      renderTypeName(field.tpe),
      renderDefault(field.defaultValue),
      description = renderDescription(field.description))

  def renderInputField(field: InputField[_]) =
    ast.InputValueDefinition(
      field.name,
      renderTypeNameAst(field.fieldType),
      field.defaultValue.flatMap(renderDefault(_, field.fieldType)),
      field.astDirectives,
      renderDescription(field.description)
    )

  def renderObject(tpe: IntrospectionObjectType) =
    ast.ObjectTypeDefinition(
      tpe.name,
      renderImplementedInterfaces(tpe),
      renderFieldsI(tpe.fields),
      description = renderDescription(tpe.description))

  def renderObject(tpe: ObjectType[_, _]) =
    ast.ObjectTypeDefinition(
      tpe.name,
      renderImplementedInterfaces(tpe),
      renderFields(tpe.uniqueFields),
      tpe.astDirectives,
      renderDescription(tpe.description))

  def renderEnum(tpe: IntrospectionEnumType) =
    ast.EnumTypeDefinition(
      tpe.name,
      renderEnumValuesI(tpe.enumValues),
      description = renderDescription(tpe.description))

  def renderEnum(tpe: EnumType[_]) =
    ast.EnumTypeDefinition(
      tpe.name,
      renderEnumValues(tpe.values),
      tpe.astDirectives,
      renderDescription(tpe.description))

  def renderEnumValuesI(values: Seq[IntrospectionEnumValue]) =
    values
      .map(v =>
        ast.EnumValueDefinition(
          v.name,
          renderDeprecation(v.isDeprecated, v.deprecationReason),
          renderDescription(v.description)))
      .toVector

  def renderEnumValues(values: Seq[EnumValue[_]]) =
    values.map(renderEnumValue).toVector

  def renderEnumValue(v: EnumValue[_]) =
    ast.EnumValueDefinition(
      v.name,
      withoutDeprecated(v.astDirectives) ++ renderDeprecation(
        v.deprecationReason.isDefined,
        v.deprecationReason),
      renderDescription(v.description))

  def renderScalar(tpe: IntrospectionScalarType) =
    ast.ScalarTypeDefinition(tpe.name, description = renderDescription(tpe.description))

  def renderScalar(tpe: ScalarType[_]) =
    ast.ScalarTypeDefinition(tpe.name, tpe.astDirectives, renderDescription(tpe.description))

  def renderInputObject(tpe: IntrospectionInputObjectType) =
    ast.InputObjectTypeDefinition(
      tpe.name,
      renderInputFieldsI(tpe.inputFields),
      description = renderDescription(tpe.description))

  def renderInputObject(tpe: InputObjectType[_]) =
    ast.InputObjectTypeDefinition(
      tpe.name,
      renderInputFields(tpe.fields),
      tpe.astDirectives,
      renderDescription(tpe.description))

  def renderInterface(tpe: IntrospectionInterfaceType) =
    ast.InterfaceTypeDefinition(
      tpe.name,
      renderFieldsI(tpe.fields),
      description = renderDescription(tpe.description))

  def renderInterface(tpe: InterfaceType[_, _]) =
    ast.InterfaceTypeDefinition(
      tpe.name,
      renderFields(tpe.uniqueFields),
      tpe.astDirectives,
      renderDescription(tpe.description))

  def renderUnion(tpe: IntrospectionUnionType) =
    ast.UnionTypeDefinition(
      tpe.name,
      tpe.possibleTypes.map(t => ast.NamedType(t.name)).toVector,
      description = renderDescription(tpe.description))

  def renderUnion(tpe: UnionType[_]) =
    ast.UnionTypeDefinition(
      tpe.name,
      tpe.types.map(t => ast.NamedType(t.name)).toVector,
      tpe.astDirectives,
      renderDescription(tpe.description))

  // The `schema { query: ... }` block is omitted when the operation types use
  // the conventional names (Query/Mutation/Subscription), as SDL allows.
  private def renderSchemaDefinition(schema: IntrospectionSchema): Option[ast.SchemaDefinition] =
    if (isSchemaOfCommonNames(
      schema.queryType.name,
      schema.mutationType.map(_.name),
      schema.subscriptionType.map(_.name)))
      None
    else {
      val withQuery = Vector(
        ast.OperationTypeDefinition(ast.OperationType.Query, ast.NamedType(schema.queryType.name)))
      val withMutation = schema.mutationType.fold(withQuery)(t =>
        withQuery :+ ast.OperationTypeDefinition(ast.OperationType.Mutation, ast.NamedType(t.name)))
      val withSubs = schema.subscriptionType.fold(withMutation)(t =>
        withMutation :+ ast.OperationTypeDefinition(
          ast.OperationType.Subscription,
          ast.NamedType(t.name)))
      Some(ast.SchemaDefinition(withSubs, description = renderDescription(schema.description)))
    }

  // As above, but the block is also forced when the schema carries a
  // description or directives (these can only appear on an explicit block).
  private def renderSchemaDefinition(schema: Schema[_, _]): Option[ast.SchemaDefinition] =
    if (isSchemaOfCommonNames(
      schema.query.name,
      schema.mutation.map(_.name),
      schema.subscription.map(
        _.name)) && schema.description.isEmpty && schema.astDirectives.isEmpty)
      None
    else {
      val withQuery = Vector(
        ast.OperationTypeDefinition(ast.OperationType.Query, ast.NamedType(schema.query.name)))
      val withMutation = schema.mutation.fold(withQuery)(t =>
        withQuery :+ ast.OperationTypeDefinition(ast.OperationType.Mutation, ast.NamedType(t.name)))
      val withSubs = schema.subscription.fold(withMutation)(t =>
        withMutation :+ ast.OperationTypeDefinition(
          ast.OperationType.Subscription,
          ast.NamedType(t.name)))
      Some(
        ast.SchemaDefinition(withSubs, schema.astDirectives, renderDescription(schema.description)))
    }

  private def isSchemaOfCommonNames(
      query: String,
      mutation: Option[String],
      subscription: Option[String]) =
    query == "Query" && mutation.fold(true)(_ == "Mutation") && subscription.fold(true)(
      _ == "Subscription")

  def renderType(tpe: IntrospectionType): ast.TypeDefinition =
    tpe match {
      case o: IntrospectionObjectType => renderObject(o)
      case u: IntrospectionUnionType => renderUnion(u)
      case i: IntrospectionInterfaceType => renderInterface(i)
      case io: IntrospectionInputObjectType => renderInputObject(io)
      case s: IntrospectionScalarType => renderScalar(s)
      case e: IntrospectionEnumType => renderEnum(e)
      case kind => throw new IllegalArgumentException(s"Unsupported kind: $kind")
    }

  def renderType(tpe: Type with Named): ast.TypeDefinition =
    tpe match {
      case o: ObjectType[_, _] => renderObject(o)
      case u: UnionType[_] => renderUnion(u)
      case i: InterfaceType[_, _] => renderInterface(i)
      case io: InputObjectType[_] => renderInputObject(io)
      case s: ScalarType[_] => renderScalar(s)
      case s: ScalarAlias[_, _] => renderScalar(s.aliasFor)
      case e: EnumType[_] => renderEnum(e)
      case _ => throw new IllegalArgumentException(s"Unsupported type: $tpe")
    }

  def renderDirectiveLocation(loc: DirectiveLocation.Value) =
    ast.DirectiveLocation(__DirectiveLocation.byValue(loc).name)

  def renderDirective(dir: Directive) =
    ast.DirectiveDefinition(
      dir.name,
      renderArgs(dir.arguments),
      dir.locations.toVector.map(renderDirectiveLocation).sortBy(_.name),
      renderDescription(dir.description))

  def renderDirective(dir: IntrospectionDirective) =
    ast.DirectiveDefinition(
      dir.name,
      renderArgsI(dir.args),
      dir.locations.toVector.map(renderDirectiveLocation).sortBy(_.name),
      renderDescription(dir.description))

  // Builds the full SDL document from an introspection result; types and
  // directives are filtered and alphabetized for stable output.
  def schemaAstFromIntrospection(
      introspectionSchema: IntrospectionSchema,
      filter: SchemaFilter = SchemaFilter.default): ast.Document = {
    val schemaDef = if (filter.renderSchema) renderSchemaDefinition(introspectionSchema) else None
    val types = introspectionSchema.types
      .filter(t => filter.filterTypes(t.name))
      .sortBy(_.name)
      .map(renderType)
    val directives = introspectionSchema.directives
      .filter(d => filter.filterDirectives(d.name))
      .sortBy(_.name)
      .map(renderDirective)
    ast.Document(schemaDef.toVector ++ types ++ directives)
  }

  def renderSchema(introspectionSchema: IntrospectionSchema): String =
    schemaAstFromIntrospection(introspectionSchema, SchemaFilter.default).renderPretty

  def renderSchema[T: InputUnmarshaller](introspectionResult: T): String = {
    import sangria.parser.DeliveryScheme.Throw
    schemaAstFromIntrospection(
      IntrospectionParser.parse(introspectionResult),
      SchemaFilter.default).renderPretty
  }

  def renderSchema(introspectionSchema: IntrospectionSchema, filter: SchemaFilter): String =
    schemaAstFromIntrospection(introspectionSchema, filter).renderPretty

  def renderSchema[T: InputUnmarshaller](introspectionResult: T, filter: SchemaFilter): String = {
    import sangria.parser.DeliveryScheme.Throw
    schemaAstFromIntrospection(IntrospectionParser.parse(introspectionResult), filter).renderPretty
  }

  // Builds the full SDL document from an in-memory Sangria schema.
  def schemaAst(schema: Schema[_, _], filter: SchemaFilter = SchemaFilter.default): ast.Document = {
    val schemaDef = if (filter.renderSchema) renderSchemaDefinition(schema) else None
    val types =
      schema.typeList.filter(t => filter.filterTypes(t.name)).sortBy(_.name).map(renderType)
    val directives = schema.directives
      .filter(d => filter.filterDirectives(d.name))
      .sortBy(_.name)
      .map(renderDirective)
    val document = ast.Document(schemaDef.toVector ++ types ++ directives)
    if (filter.legacyCommentDescriptions) transformLegacyCommentDescriptions(document)
    else document
  }

  // Rewrites every string description in the AST into the pre-June-2018 form:
  // `#` comments preceding the definition.
  def transformLegacyCommentDescriptions[T <: AstNode](node: T): T =
    AstVisitor.visit(
      node,
      AstVisitor {
        case n: ast.DirectiveDefinition if n.description.isDefined =>
          VisitorCommand.Transform(
            n.copy(description = None, comments = n.comments ++ commentDescription(n)))
        case n: ast.InterfaceTypeDefinition if n.description.isDefined =>
          VisitorCommand.Transform(
            n.copy(description = None, comments = n.comments ++ commentDescription(n)))
        case n: ast.EnumTypeDefinition if n.description.isDefined =>
          VisitorCommand.Transform(
            n.copy(description = None, comments = n.comments ++ commentDescription(n)))
        case n: ast.EnumValueDefinition if n.description.isDefined =>
          VisitorCommand.Transform(
            n.copy(description = None, comments = n.comments ++ commentDescription(n)))
        case n: ast.FieldDefinition if n.description.isDefined =>
          VisitorCommand.Transform(
            n.copy(description = None, comments = n.comments ++ commentDescription(n)))
        case n: ast.InputObjectTypeDefinition if n.description.isDefined =>
          VisitorCommand.Transform(
            n.copy(description = None, comments = n.comments ++ commentDescription(n)))
        case n: ast.InputValueDefinition if n.description.isDefined =>
          VisitorCommand.Transform(
            n.copy(description = None, comments = n.comments ++ commentDescription(n)))
        case n: ast.ObjectTypeDefinition if n.description.isDefined =>
          VisitorCommand.Transform(
            n.copy(description = None, comments = n.comments ++ commentDescription(n)))
        case n: ast.ScalarTypeDefinition if n.description.isDefined =>
          VisitorCommand.Transform(
            n.copy(description = None, comments = n.comments ++ commentDescription(n)))
        case n: ast.UnionTypeDefinition if n.description.isDefined =>
          VisitorCommand.Transform(
            n.copy(description = None, comments = n.comments ++ commentDescription(n)))
      }
    )

  // One ast.Comment per line of the description.
  private def commentDescription(node: ast.WithDescription) =
    node.description.toVector.flatMap(sv => sv.value.split("\\\\r?\\\\n").toVector.map(ast.Comment(_)))

  def renderSchema(schema: Schema[_, _]): String =
    schemaAst(schema, SchemaFilter.default).renderPretty

  def renderSchema(schema: Schema[_, _], filter: SchemaFilter): String =
    schemaAst(schema, filter).renderPretty
}
/**
 * Predicate bundle controlling which parts of a schema are rendered.
 *
 * @param filterTypes               a type is rendered iff this returns true for its name
 * @param filterDirectives          a directive is rendered iff this returns true for its name
 * @param renderSchema              whether to emit the top-level `schema { ... }` definition
 * @param legacyCommentDescriptions render descriptions as legacy `#` comments
 *                                  instead of string values
 */
case class SchemaFilter(
    filterTypes: String => Boolean,
    filterDirectives: String => Boolean,
    renderSchema: Boolean = true,
    legacyCommentDescriptions: Boolean = false) {

  @deprecated("Please migrate to new string-based description format", "1.4.0")
  def withLegacyCommentDescriptions = copy(legacyCommentDescriptions = true)
}
/** Commonly useful [[SchemaFilter]] presets. */
object SchemaFilter {

  /** Hides everything Sangria itself defines (built-in types and directives). */
  val withoutSangriaBuiltIn: SchemaFilter = SchemaFilter(
    typeName => !Schema.isBuiltInType(typeName),
    dirName => !Schema.isBuiltInDirective(dirName))

  /** The default filter: [[withoutSangriaBuiltIn]]. */
  val default: SchemaFilter = withoutSangriaBuiltIn

  /** Hides only the GraphQL-spec built-ins; Sangria-specific additions are kept. */
  // Explicit result type added for consistency with the other public presets.
  val withoutGraphQLBuiltIn: SchemaFilter = SchemaFilter(
    typeName => !Schema.isBuiltInGraphQLType(typeName),
    dirName => !Schema.isBuiltInDirective(dirName))

  /** Hides the introspection types; keeps all directives. */
  val withoutIntrospection: SchemaFilter =
    SchemaFilter(typeName => !Schema.isIntrospectionType(typeName), Function.const(true))

  /** Renders only the built-in types and directives. */
  val builtIn: SchemaFilter = SchemaFilter(
    typeName => Schema.isBuiltInType(typeName),
    dirName => Schema.isBuiltInDirective(dirName))

  /** Renders only the introspection types — no directives, no schema definition. */
  val introspection: SchemaFilter = SchemaFilter(
    typeName => Schema.isIntrospectionType(typeName),
    Function.const(false),
    renderSchema = false)

  /** Renders everything, including all built-ins. */
  val all: SchemaFilter = SchemaFilter(Function.const(true), Function.const(true))
}
| OlegIlyenko/sangria | modules/core/src/main/scala/sangria/renderer/SchemaRenderer.scala | Scala | apache-2.0 | 17,812 |
package mesosphere.util
import akka.testkit.{ TestActorRef, TestKit }
import akka.actor.{ Status, Props, ActorSystem }
import mesosphere.marathon.MarathonSpec
import org.scalatest.{ Matchers, BeforeAndAfterAll }
import scala.concurrent.{ Await, Promise }
import scala.concurrent.duration._
/**
 * Verifies that PromiseActor completes its promise from the first message it
 * receives: a plain message fulfils it with that value, a [[Status.Success]]
 * unwraps the payload, and a [[Status.Failure]] fails it with the exception.
 */
class PromiseActorTest
  extends TestKit(ActorSystem("System"))
  with MarathonSpec
  with BeforeAndAfterAll
  with Matchers {

  // Shut down the TestKit actor system once all tests in this suite ran.
  override def afterAll(): Unit = {
    super.afterAll()
    system.shutdown()
  }

  test("Success") {
    val promise = Promise[Any]()
    val ref = TestActorRef(Props(classOf[PromiseActor], promise))
    ref ! 'Test
    Await.result(promise.future, 2.seconds) should equal('Test)
  }

  test("Status.Success") {
    val promise = Promise[Any]()
    val ref = TestActorRef(Props(classOf[PromiseActor], promise))
    ref ! Status.Success('Test)
    // The Status.Success wrapper is stripped; the promise holds the payload.
    Await.result(promise.future, 2.seconds) should equal('Test)
  }

  test("Status.Failure") {
    val promise = Promise[Any]()
    val ref = TestActorRef(Props(classOf[PromiseActor], promise))
    val ex = new Exception("test")
    ref ! Status.Failure(ex)
    // The promise is failed, so awaiting it re-throws the original exception.
    intercept[Exception] {
      Await.result(promise.future, 2.seconds)
    }.getMessage should be("test")
  }
}
| tnachen/marathon | src/test/scala/mesosphere/util/PromiseActorTest.scala | Scala | apache-2.0 | 1,253 |
package org.globalnames
package resolver
package model
import scalaz._
import Scalaz._
import db.{NameString, DataSource, NameStringIndex, VernacularString, VernacularStringIndex}
import parser.ScientificNameParser.{Result => SNResult}
/**
 * A single name-resolution hit: a name string together with the data source
 * and index record it was found in, plus any vernacular names and the match
 * quality ([[MatchType]], defaulting to Unknown).
 */
case class Match(nameString: NameString, dataSource: DataSource, nameStringIndex: NameStringIndex,
                 vernacularStrings: Seq[(VernacularString, VernacularStringIndex)],
                 nameType: Option[Int], matchType: MatchType = MatchType.Unknown) {

  // Whether the matched record is a synonym rather than an accepted name.
  // Decision order:
  //  1. if a classification path exists, synonym unless taxonId equals the
  //     last path element;
  //  2. else if an acceptedTaxonId exists, synonym unless it equals taxonId;
  //  3. else assume synonym (true).
  // NOTE(review): branch 1 compares taxonId with a String obtained by
  // splitting classificationPathIds on '|' — confirm taxonId is a String,
  // otherwise the comparison is always true.
  val synonym: Boolean = {
    val classificationPathIdsSeq =
      nameStringIndex.classificationPathIds.map { cpids => cpids.split('|').toList }
        .getOrElse(List())
    if (classificationPathIdsSeq.nonEmpty) {
      nameStringIndex.taxonId != classificationPathIdsSeq.last
    } else if (nameStringIndex.acceptedTaxonId.isDefined) {
      nameStringIndex.taxonId != nameStringIndex.acceptedTaxonId.get
    } else true
  }
}
/**
 * A resolution result set: the total hit count, the returned [[Match]]es, the
 * raw input (if recorded), an optional caller-supplied id, and the parsed
 * scientific name (if parsing was performed).
 */
case class Matches(total: Long, matches: Seq[Match],
                   suppliedInput: Option[String] = None,
                   private val suppliedIdProvided: Option[SuppliedId] = None,
                   scientificName: Option[SNResult] = None) {
  // The supplied id, trimmed of surrounding whitespace; the raw value stays private.
  val suppliedId: Option[SuppliedId] = suppliedIdProvided.map { _.trim }
}
/** Factories for empty result sets. */
object Matches {

  /** An empty result set with no recorded input. */
  def empty: Matches =
    Matches(total = 0L, matches = Seq.empty, suppliedInput = None)

  /** An empty result set for the given input and optional supplied id. */
  def empty(suppliedInput: String, suppliedId: Option[SuppliedId] = None): Matches =
    Matches(
      total = 0L,
      matches = Seq.empty,
      suppliedInput = Some(suppliedInput),
      suppliedIdProvided = suppliedId)
}
| GlobalNamesArchitecture/gnresolver | resolver/src/main/scala/org/globalnames/resolver/model/Match.scala | Scala | mit | 1,493 |
package com.catinthedark.yoba.units
import com.badlogic.gdx.{Gdx, Input, InputAdapter}
import com.catinthedark.lib.{Deferred, Pipe, SimpleUnit}
import com.catinthedark.yoba.common.Const
import com.catinthedark.yoba.common.Const.Pedals
import com.catinthedark.yoba.{Assets, Shared}
/**
 * Created by over on 22.01.15.
 */
/**
 * Input-handling unit: installs a libGDX InputProcessor while active, emits
 * pedal events through [[onPedaled]], and applies left/right steering to the
 * shared game state every frame.
 */
abstract class Control(shared: Shared) extends SimpleUnit with Deferred {
  // Fired with the pedal's key code whenever a pedal key is pressed.
  val onPedaled = new Pipe[(Int)]

  override def onActivate(): Unit = {
    Gdx.input.setInputProcessor(new InputAdapter {
      // Pedal presses are forwarded as events; steering keys are polled in run().
      override def keyDown(keycode: Int): Boolean = {
        keycode match {
          case Const.Pedals.leftPedalKey =>
            onPedaled(Pedals.leftPedalKey)
          case Const.Pedals.rightPedalKey =>
            onPedaled(Pedals.rightPedalKey)
          case _ =>
        }
        true
      }
      // Releasing an arrow key re-enables steering in that direction after a
      // border collision disabled it (see run()).
      override def keyUp(keycode: Int): Boolean = {
        keycode match {
          case Input.Keys.LEFT =>
            leftAllowed = true
          case Input.Keys.RIGHT =>
            rightAllowed = true
          case _ =>
        }
        true
      }
      // Debug logging only.
      // NOTE(review): the log string is missing its closing parenthesis.
      override def touchDown(screenX: Int, screenY: Int, pointer: Int, button: Int): Boolean = {
        println(s"mouse click-> (btn: $button, x: $screenX, y: $screenY")
        true
      }
    })
  }

  override def onExit(): Unit = {
    Gdx.input.setInputProcessor(null)
  }

  // Set to false when the player hits the corresponding road border; restored
  // by keyUp so the player must release the key before steering again.
  var leftAllowed = true
  var rightAllowed = true

  // Per-frame update: apply steering, border bounce-back, and volume tweak.
  // NOTE(review): RIGHT decreases playerX and checks roadRightBorderX with `<`
  // — the x axis appears inverted relative to screen direction; confirm.
  override def run(delta: Float) = {
    if (!shared.isFalling) {
      if (rightAllowed && Gdx.input.isKeyPressed(Input.Keys.RIGHT)) {
        // Steering is 5x slower while the car is standing still.
        if (shared.speed != 0) {
          shared.playerX -= Const.Physics.playerXSpeed
        } else {
          shared.playerX -= Const.Physics.playerXSpeed / 5
        }
        if (shared.playerX < Const.Physics.roadRightBorderX) {
          // Border hit: block this direction, play a sound, halve speed and
          // teleport the player back onto the road.
          rightAllowed = false
          Assets.Audios.border.play(Const.soundVolume)
          shared.speed /= 2
          shared.playerX += Const.Physics.playerBorderTeleportationX
        }
      }
      if (leftAllowed && Gdx.input.isKeyPressed(Input.Keys.LEFT)) {
        if (shared.speed != 0) {
          shared.playerX += Const.Physics.playerXSpeed
        } else {
          shared.playerX += Const.Physics.playerXSpeed / 5
        }
        if (shared.playerX > Const.Physics.roadLeftBorderX) {
          leftAllowed = false
          Assets.Audios.border.play(Const.soundVolume)
          shared.speed /= 2
          shared.playerX -= Const.Physics.playerBorderTeleportationX
        }
      }
    }
    // Debug/utility: holding L raises the background-music volume.
    if (Gdx.input.isKeyPressed(Input.Keys.L)) {
      Assets.Audios.bgm.setVolume(Assets.Audios.bgm.getVolume + 0.01f)
    }
  }
}
| cat-in-the-dark/old48_33_game | src/main/scala/com/catinthedark/yoba/units/Control.scala | Scala | mit | 2,642 |
package model
import spray.json.{DefaultJsonProtocol, DeserializationException, JsObject, JsValue, RootJsonFormat, _}
case class PokemonStats(stats: List[Stat])
object PokemonStats extends DefaultJsonProtocol {
implicit object PokemonDetailsJsonFormat extends RootJsonFormat[PokemonStats] {
override def write(p: PokemonStats) = JsObject(
"stats" -> p.stats.toJson
)
override def read(value: JsValue) = {
value.asJsObject.getFields("stats") match {
case Seq(stats) =>
PokemonStats(
stats.convertTo[List[Stat]]
)
case _ =>
throw DeserializationException("Pokemon stats expected")
}
}
}
}
| mdulac/pokemon | src/main/scala/model/PokemonStats.scala | Scala | mit | 689 |
/**
* Copyright (C) 2016 DANS - Data Archiving and Networked Services (info@dans.knaw.nl)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.multideposit
import java.nio.file.{ Path, Paths }
import java.util.UUID
import better.files.File
import better.files.File.currentWorkingDirectory
import cats.data.NonEmptyList
import cats.scalatest.{ EitherMatchers, EitherValues, ValidatedValues }
import nl.knaw.dans.common.lang.dataset.AccessCategory
import nl.knaw.dans.easy.multideposit.PathExplorer.{ InputPathExplorer, OutputPathExplorer, StagingPathExplorer }
import nl.knaw.dans.easy.multideposit.model._
import org.apache.commons.configuration.PropertiesConfiguration
import org.joda.time.DateTime
import org.scalatest.enablers.Existence
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.scalatest.{ Inside, OptionValues }
import scala.collection.JavaConverters._
/**
 * Shared base for the test suites: wires ScalaTest matchers, creates a
 * per-suite scratch directory layout (input/staging/output), loads the user
 * license list, and provides two ready-made [[Instructions]] fixtures.
 */
trait TestSupportFixture extends AnyFlatSpec with Matchers with OptionValues with EitherMatchers with EitherValues with ValidatedValues with Inside with InputPathExplorer with StagingPathExplorer with OutputPathExplorer {

  // Lets ScalaTest's `shouldExist` work on better.files.File values.
  implicit def existenceOfFile[FILE <: better.files.File]: Existence[FILE] = _.exists

  // Per-suite scratch directory under target/test/<SuiteName>; wiped on first access.
  lazy val testDir: File = {
    val path = currentWorkingDirectory / s"target/test/${ getClass.getSimpleName }"
    if (path.exists) path.delete()
    path.createDirectories()
    path
  }

  // Path-explorer overrides pointing all I/O into the scratch directory.
  override val multiDepositDir: File = testDir / "md"
  override val stagingDir: File = testDir / "sd"
  override val outputDepositDir: File = testDir / "od"
  override val reportFile: File = testDir / "report.csv"

  implicit val inputPathExplorer: InputPathExplorer = this
  implicit val stagingPathExplorer: StagingPathExplorer = this
  implicit val outputPathExplorer: OutputPathExplorer = this

  // License keys read from the easy-licenses properties file checked out under target/.
  val licensesDir: Path = Paths.get("target/easy-licenses/licenses")
  val userLicenses: Set[MimeType] = new PropertiesConfiguration(licensesDir.resolve("licenses.properties").toFile)
    .getKeys.asScala.filterNot(_.isEmpty).toSet

  // Fixture: a fully populated deposit ("ruimtereis01") including A/V files with subtitles.
  def testInstructions1: Instructions = {
    Instructions(
      depositId = "ruimtereis01",
      row = 2,
      depositorUserId = "ruimtereiziger1",
      profile = Profile(
        titles = NonEmptyList.of("Reis naar Centaur-planetoïde", "Trip to Centaur asteroid"),
        descriptions = NonEmptyList.of("Een tweedaagse reis per ruimteschip naar een bijzondere planetoïde in de omgeving van Jupiter.", "A two day mission to boldly go where no man has gone before"),
        creators = NonEmptyList.of(
          CreatorPerson(
            titles = Some("Captain"),
            initials = "J.T.",
            surname = "Kirk",
            organization = Some("United Federation of Planets")
          )
        ),
        created = DateTime.parse("2015-05-19"),
        audiences = NonEmptyList.of("D30000"),
        accessright = AccessCategory.OPEN_ACCESS
      ),
      baseUUID = Option(UUID.fromString("1de3f841-0f0d-048b-b3db-4b03ad4834d7")),
      metadata = Metadata(
        formats = List("video/mpeg", "text/plain"),
        languages = List("NL", "encoding=UTF-8"),
        subjects = List(Subject("astronomie"), Subject("ruimtevaart"), Subject("planetoïden")),
        rightsholder = NonEmptyList.one("Mr. Anderson"),
      ),
      files = Map(
        testDir / "md/ruimtereis01/reisverslag/centaur.mpg" -> FileDescriptor(2, Option("flyby of centaur")),
        testDir / "md/ruimtereis01/path/to/a/random/video/hubble.mpg" -> FileDescriptor(3, Option("video about the hubble space telescope")),
      ),
      audioVideo = AudioVideo(
        springfield = Option(Springfield("dans", "janvanmansum", "Jans-test-files", PlayMode.Menu)),
        avFiles = Map(
          testDir / "md/ruimtereis01/reisverslag/centaur.mpg" -> Set(
            SubtitlesFile(testDir / "md/ruimtereis01/reisverslag/centaur.srt", Option("en")),
            SubtitlesFile(testDir / "md/ruimtereis01/reisverslag/centaur-nederlands.srt", Option("nl"))
          )
        )
      )
    )
  }

  // Fixture: a second deposit ("deposit-2") with restricted access and no A/V data.
  def testInstructions2: Instructions = {
    Instructions(
      depositId = "deposit-2",
      row = 5,
      depositorUserId = "ruimtereiziger2",
      profile = Profile(
        titles = NonEmptyList.of("Title 1 of deposit 2", "Title 2 of deposit 2"),
        descriptions = NonEmptyList.of("A sample deposit with a not very long description"),
        creators = NonEmptyList.of(CreatorOrganization("Creator A")),
        created = DateTime.now(),
        available = DateTime.parse("2016-07-30"),
        audiences = NonEmptyList.of("D37000"),
        accessright = AccessCategory.REQUEST_PERMISSION
      ),
      baseUUID = Option(UUID.fromString("1de3f841-0f0d-048b-b3db-4b03ad4834d7")),
      metadata = Metadata(
        contributors = List(ContributorOrganization("Contributor 1"), ContributorOrganization("Contributor 2")),
        subjects = List(Subject("subject 1", Option("abr:ABRcomplex")), Subject("subject 2"), Subject("subject 3")),
        publishers = List("publisher 1"),
        types = NonEmptyList.of(DcType.STILLIMAGE),
        identifiers = List(Identifier("10.17026/abcdef12345")),
        rightsholder = NonEmptyList.of("Neo"),
      ),
      files = Map(
        testDir / "md/ruimtereis02/path/to/images/Hubble_01.jpg" -> FileDescriptor(5, Some("Hubble"), Some(FileAccessRights.RESTRICTED_REQUEST))
      )
    )
  }
}
| DANS-KNAW/easy-split-multi-deposit | src/test/scala/nl.knaw.dans.easy.multideposit/TestSupportFixture.scala | Scala | apache-2.0 | 5,928 |
package com.twitter.zipkin.receiver.kafka
import com.twitter.util.{Await, Future}
import com.twitter.scrooge.BinaryThriftStructSerializer
import com.twitter.zipkin.common.{Annotation, BinaryAnnotation, Endpoint, Span}
import com.twitter.zipkin.conversions.thrift.{thriftSpanToSpan, spanToThriftSpan}
import com.twitter.zipkin.gen
import com.twitter.zipkin.receiver.test.kafka.{TestUtils, EmbeddedZookeeper}
import kafka.server.KafkaServer
import kafka.message.Message
import kafka.producer.{Producer, ProducerConfig, ProducerData}
import kafka.consumer.{Consumer, ConsumerConnector, ConsumerConfig}
import kafka.serializer.Decoder
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.BeforeAndAfter
import org.scalatest.junit.JUnitRunner
/**
 * Integration test: spins up an embedded ZooKeeper + Kafka broker, publishes a
 * single thrift-encoded Span and verifies it can be consumed and decoded back
 * into the same Span.
 */
@RunWith(classOf[JUnitRunner])
class KafkaProcessorSpec extends FunSuite with BeforeAndAfter {

  // Round-trips Spans through thrift binary serialization for Kafka messages.
  // NOTE(review): a parameterless `case class` is deprecated Scala — should be
  // `case class TestDecoder()` or a plain class.
  case class TestDecoder extends KafkaProcessor.KafkaDecoder {
    val deserializer = new BinaryThriftStructSerializer[gen.Span] {
      def codec = gen.Span
    }
    // Decodes a Kafka Message payload into a single-element span list.
    def toEvent(message: Message): Option[List[Span]] = {
      val buffer = message.payload
      val payload = new Array[Byte](buffer.remaining)
      buffer.get(payload)
      val gSpan = deserializer.fromBytes(payload)
      val span = thriftSpanToSpan(gSpan).toSpan
      Some(List(span))
    }
    // Encodes a Span into thrift bytes for publishing.
    def encode(span: Span) = {
      val gspan = spanToThriftSpan(span)
      deserializer.toBytes(gspan.toThrift)
    }
  }

  var zkServer: EmbeddedZookeeper = _
  var kafkaServer: KafkaServer = _

  // Asserts the consumed spans match what createMessage() produced.
  // NOTE(review): several assert messages are misleading — the first fires for
  // any count != 1 (also zero), and "annotation name mismatch" checks a.value.
  def processorFun(spans: Seq[Span]): Future[Unit] = {
    assert(1 == spans.length, "received more spans than sent")
    val message = spans.head
    assert(message.traceId == 1234, "traceId mismatch")
    assert(message.name == "methodName", "method name mismatch")
    assert(message.id == 4567, "spanId mismatch")
    message.annotations map {
      a => {
        assert(a.value == "value", "annotation name mismatch")
        assert(a.timestamp == 1, "annotation timestamp mismatch")
      }
    }
    Future.Done
  }

  // Builds the known Span (traceId 1234, span 4567, one annotation) as a Kafka Message.
  def createMessage(): Message = {
    val annotation = Annotation(1, "value", Some(Endpoint(1, 2, "service")))
    val message = Span(1234, "methodName", 4567, None, List(annotation), Nil)
    val codec = new TestDecoder()
    new Message(codec.encode(message))
  }

  before {
    zkServer = TestUtils.startZkServer()
    kafkaServer = TestUtils.startKafkaServer()
    // Give the embedded broker a moment to finish starting up.
    Thread.sleep(500)
  }

  after {
    kafkaServer.shutdown
    zkServer.shutdown
  }

  test("kafka processor test simple") {
    val producerConfig = TestUtils.kafkaProducerProps
    val processorConfig = TestUtils.kafkaProcessorProps
    val producer = new Producer[String, Message](new ProducerConfig(producerConfig))
    val message = createMessage()
    val data = new ProducerData[String, Message]("integration-test-topic", "key", Seq(message) )
    val decoder = new TestDecoder()
    producer.send(data)
    producer.close()
    // Consume from the same topic with one stream and run the assertions.
    val topic = Map("integration-test-topic" -> 1)
    val consumerConnector: ConsumerConnector = Consumer.create(new ConsumerConfig(processorConfig))
    val topicMessageStreams = consumerConnector.createMessageStreams(topic, decoder)
    for ((topic, streams) <- topicMessageStreams) {
      val messageList = streams.head.head.message getOrElse List()
      processorFun(messageList)
    }
  }
}
| gardleopard/zipkin | zipkin-receiver-kafka/src/test/scala/com/twitter/zipkin/receiver/kafka/KafkaProcessorSpec.scala | Scala | apache-2.0 | 3,401 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v3
import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtInteger}
import uk.gov.hmrc.ct.computations.HmrcAccountingPeriod
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
import uk.gov.hmrc.ct.ct600.calculations.CorporationTaxCalculatorParameters
import uk.gov.hmrc.ct.ct600.v3.calculations.CorporationTaxCalculator
// Was box B54 in the v2 CT600 form.
// CT600 v3 box B385: the apportioned amount of profit falling into financial year 2.
case class B385(value: Int) extends CtBoxIdentifier("Amount of Profit FY2") with CtInteger
object B385 extends CorporationTaxCalculator with Calculated[B385, ComputationsBoxRetriever] {

  /*
   * Mirrors the B54 "Amount of Profit" calculation, except that here I2 (total basic
   * profit pro-rata for FY2) is derived from B315v3 fed into the MRR calculator rather
   * than from B37 (profits chargeable to corporation tax).
   */
  override def calculate(fieldValueRetriever: ComputationsBoxRetriever): B385 = {
    val accountingPeriod =
      HmrcAccountingPeriod(fieldValueRetriever.cp1(), fieldValueRetriever.cp2())
    val params =
      CorporationTaxCalculatorParameters(fieldValueRetriever.cp295(), accountingPeriod)
    calculateApportionedProfitsChargeableFy2(params)
  }
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v3/B385.scala | Scala | apache-2.0 | 1,714 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import java.util
import java.util.Locale
import java.util.concurrent.atomic.AtomicBoolean
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.util.Random
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst._
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.encoders.OuterScopes
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.SubExprUtils._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.expressions.objects._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.catalyst.trees.TreeNodeRef
import org.apache.spark.sql.catalyst.util.toPrettySQL
import org.apache.spark.sql.connector.catalog._
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
import org.apache.spark.sql.connector.catalog.TableChange.{AddColumn, After, ColumnChange, ColumnPosition, DeleteColumn, RenameColumn, UpdateColumnComment, UpdateColumnNullability, UpdateColumnPosition, UpdateColumnType}
import org.apache.spark.sql.connector.expressions.{FieldReference, IdentityTransform, Transform}
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.{PartitionOverwriteMode, StoreAssignmentPolicy}
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.CaseInsensitiveStringMap
/**
 * A trivial [[Analyzer]] with a dummy [[SessionCatalog]] and [[EmptyFunctionRegistry]].
 * Used for testing when all relations are already filled in and the analyzer needs only
 * to resolve attribute references.
 */
object SimpleAnalyzer extends Analyzer(
  new CatalogManager(
    new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true),
    FakeV2SessionCatalog,
    new SessionCatalog(
      new InMemoryCatalog,
      EmptyFunctionRegistry,
      new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true)) {
      // Tests never need real databases, so database creation is a no-op here.
      override def createDatabase(dbDefinition: CatalogDatabase, ignoreIfExists: Boolean): Unit = {}
    }),
  new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true))
/**
 * A stand-in [[TableCatalog]] used when no real v2 session catalog is available.
 * Every operation is unsupported, except [[loadTable]], which reports the table as
 * missing, and [[name]], which identifies this as the session catalog.
 */
object FakeV2SessionCatalog extends TableCatalog {

  private def unsupported() = throw new UnsupportedOperationException

  override def name(): String = CatalogManager.SESSION_CATALOG_NAME

  override def loadTable(ident: Identifier): Table = {
    throw new NoSuchTableException(ident.toString)
  }

  override def listTables(namespace: Array[String]): Array[Identifier] = unsupported()

  override def createTable(
      ident: Identifier,
      schema: StructType,
      partitions: Array[Transform],
      properties: util.Map[String, String]): Table = unsupported()

  override def alterTable(ident: Identifier, changes: TableChange*): Table = unsupported()

  override def dropTable(ident: Identifier): Boolean = unsupported()

  override def renameTable(oldIdent: Identifier, newIdent: Identifier): Unit = unsupported()

  override def initialize(name: String, options: CaseInsensitiveStringMap): Unit = unsupported()
}
/**
 * Provides a way to keep state during the analysis, this enables us to decouple the concerns
 * of analysis environment from the catalog.
 * The state that is kept here is per-query.
 *
 * Note this is thread local.
 *
 * @param catalogAndNamespace The catalog and namespace used in the view resolution. This overrides
 *                            the current catalog and namespace when resolving relations inside
 *                            views.
 * @param nestedViewDepth The nested depth in the view resolution, this enables us to limit the
 *                        depth of nested views.
 * @param relationCache A mapping from qualified table names to resolved relations. This can ensure
 *                      that the table is resolved only once if a table is used multiple times
 *                      in a query.
 */
// Note: `relationCache` is a mutable map that is intentionally shared across nested
// contexts (see AnalysisContext.withAnalysisContext), so resolutions done inside a view
// are reused by the outer query.
case class AnalysisContext(
    catalogAndNamespace: Seq[String] = Nil,
    nestedViewDepth: Int = 0,
    relationCache: mutable.Map[Seq[String], LogicalPlan] = mutable.Map.empty)
/** Thread-local holder for the per-query [[AnalysisContext]]. */
object AnalysisContext {
  // Each thread gets its own context, lazily initialized to the defaults.
  private val value = new ThreadLocal[AnalysisContext]() {
    override def initialValue: AnalysisContext = AnalysisContext()
  }

  def get: AnalysisContext = value.get()

  def reset(): Unit = value.remove()

  private def set(context: AnalysisContext): Unit = value.set(context)

  /**
   * Runs `f` with a nested context for resolving a view created under
   * `catalogAndNamespace`, restoring the previous context afterwards.
   * The relation cache is shared with the enclosing context.
   */
  def withAnalysisContext[A](catalogAndNamespace: Seq[String])(f: => A): A = {
    val saved = value.get()
    val nested = AnalysisContext(
      catalogAndNamespace,
      saved.nestedViewDepth + 1,
      saved.relationCache)
    set(nested)
    try {
      f
    } finally {
      set(saved)
    }
  }
}
/**
* Provides a logical query plan analyzer, which translates [[UnresolvedAttribute]]s and
* [[UnresolvedRelation]]s into fully typed objects using information in a [[SessionCatalog]].
*/
class Analyzer(
override val catalogManager: CatalogManager,
conf: SQLConf,
maxIterations: Int)
extends RuleExecutor[LogicalPlan] with CheckAnalysis with LookupCatalog {
  // The v1 (Hive-style) session catalog, used for temp views and function lookups.
  private val v1SessionCatalog: SessionCatalog = catalogManager.v1SessionCatalog

  override def isView(nameParts: Seq[String]): Boolean = v1SessionCatalog.isView(nameParts)

  // Only for tests.
  def this(catalog: SessionCatalog, conf: SQLConf) = {
    this(
      new CatalogManager(conf, FakeV2SessionCatalog, catalog),
      conf,
      conf.analyzerMaxIterations)
  }

  // Convenience constructor that reads the iteration limit from the given conf.
  def this(catalogManager: CatalogManager, conf: SQLConf) = {
    this(catalogManager, conf, conf.analyzerMaxIterations)
  }
  /**
   * Resolves `plan`, records timing in `tracker`, and validates the result with
   * [[checkAnalysis]]. An [[AnalysisException]] raised during checking is re-thrown
   * with the (partially) analyzed plan attached for better error reporting.
   */
  def executeAndCheck(plan: LogicalPlan, tracker: QueryPlanningTracker): LogicalPlan = {
    AnalysisHelper.markInAnalyzer {
      val analyzed = executeAndTrack(plan, tracker)
      try {
        checkAnalysis(analyzed)
        analyzed
      } catch {
        case e: AnalysisException =>
          // Preserve the original message/position but attach the analyzed plan.
          val ae = new AnalysisException(e.message, e.line, e.startPosition, Option(analyzed))
          ae.setStackTrace(e.getStackTrace)
          throw ae
      }
    }
  }

  // Resets the thread-local AnalysisContext before and after each top-level run so
  // state from a previous query cannot leak into this one.
  override def execute(plan: LogicalPlan): LogicalPlan = {
    AnalysisContext.reset()
    try {
      executeSameContext(plan)
    } finally {
      AnalysisContext.reset()
    }
  }

  // Runs the rule batches WITHOUT resetting the context; used for nested resolution
  // (e.g. inside views or subqueries) that must keep the current AnalysisContext.
  private def executeSameContext(plan: LogicalPlan): LogicalPlan = super.execute(plan)

  def resolver: Resolver = conf.resolver
  /**
   * If the plan cannot be resolved within maxIterations, analyzer will throw exception to inform
   * user to increase the value of SQLConf.ANALYZER_MAX_ITERATIONS.
   */
  protected val fixedPoint =
    FixedPoint(
      maxIterations,
      errorOnExceed = true,
      maxIterationsSetting = SQLConf.ANALYZER_MAX_ITERATIONS.key)

  /**
   * Override to provide additional rules for the "Resolution" batch.
   */
  // Defaults to no extra rules; subclasses append these to the Resolution batch below.
  val extendedResolutionRules: Seq[Rule[LogicalPlan]] = Nil

  /**
   * Override to provide rules to do post-hoc resolution. Note that these rules will be executed
   * in an individual batch. This batch is to run right after the normal resolution batch and
   * execute its rules in one pass.
   */
  val postHocResolutionRules: Seq[Rule[LogicalPlan]] = Nil
  // The ordered rule batches run by the RuleExecutor. Batch order is significant:
  // e.g. substitution must happen before resolution, and cleanup runs last.
  lazy val batches: Seq[Batch] = Seq(
    Batch("Hints", fixedPoint,
      new ResolveHints.ResolveJoinStrategyHints(conf),
      new ResolveHints.ResolveCoalesceHints(conf)),
    Batch("Simple Sanity Check", Once,
      LookupFunctions),
    Batch("Substitution", fixedPoint,
      CTESubstitution,
      WindowsSubstitution,
      EliminateUnions,
      new SubstituteUnresolvedOrdinals(conf)),
    // The main fixed-point batch: rule order within the list matters, and
    // `extendedResolutionRules` are appended so subclasses can extend it.
    Batch("Resolution", fixedPoint,
      ResolveTableValuedFunctions ::
      ResolveNamespace(catalogManager) ::
      new ResolveCatalogs(catalogManager) ::
      ResolveInsertInto ::
      ResolveRelations ::
      ResolveTables ::
      ResolveReferences ::
      ResolveCreateNamedStruct ::
      ResolveDeserializer ::
      ResolveNewInstance ::
      ResolveUpCast ::
      ResolveGroupingAnalytics ::
      ResolvePivot ::
      ResolveOrdinalInOrderByAndGroupBy ::
      ResolveAggAliasInGroupBy ::
      ResolveMissingReferences ::
      ExtractGenerator ::
      ResolveGenerate ::
      ResolveFunctions ::
      ResolveAliases ::
      ResolveSubquery ::
      ResolveSubqueryColumnAliases ::
      ResolveWindowOrder ::
      ResolveWindowFrame ::
      ResolveNaturalAndUsingJoin ::
      ResolveOutputRelation ::
      ExtractWindowExpressions ::
      GlobalAggregates ::
      ResolveAggregateFunctions ::
      TimeWindowing ::
      ResolveInlineTables(conf) ::
      ResolveHigherOrderFunctions(v1SessionCatalog) ::
      ResolveLambdaVariables(conf) ::
      ResolveTimeZone(conf) ::
      ResolveRandomSeed ::
      ResolveBinaryArithmetic(conf) ::
      TypeCoercion.typeCoercionRules(conf) ++
      extendedResolutionRules : _*),
    Batch("Post-Hoc Resolution", Once, postHocResolutionRules: _*),
    Batch("Normalize Alter Table", Once, ResolveAlterTableChanges),
    Batch("Remove Unresolved Hints", Once,
      new ResolveHints.RemoveAllHints(conf)),
    Batch("Nondeterministic", Once,
      PullOutNondeterministic),
    Batch("UDF", Once,
      HandleNullInputsForUDF),
    Batch("UpdateNullability", Once,
      UpdateAttributeNullability),
    Batch("Subquery", Once,
      UpdateOuterReferences),
    Batch("Cleanup", fixedPoint,
      CleanupAliases)
  )
  /**
   * For [[Add]]:
   * 1. if both side are interval, stays the same;
   * 2. else if one side is interval, turns it to [[TimeAdd]];
   * 3. else if one side is date, turns it to [[DateAdd]] ;
   * 4. else stays the same.
   *
   * For [[Subtract]]:
   * 1. if both side are interval, stays the same;
   * 2. else if the right side is an interval, turns it to [[TimeSub]];
   * 3. else if one side is timestamp, turns it to [[SubtractTimestamps]];
   * 4. else if the right side is date, turns it to [[DateDiff]]/[[SubtractDates]];
   * 5. else if the left side is date, turns it to [[DateSub]];
   * 6. else turns it to stays the same.
   *
   * For [[Multiply]]:
   * 1. If one side is interval, turns it to [[MultiplyInterval]];
   * 2. otherwise, stays the same.
   *
   * For [[Divide]]:
   * 1. If the left side is interval, turns it to [[DivideInterval]];
   * 2. otherwise, stays the same.
   */
  // NOTE: the match cases below are order-sensitive; they are only applied once
  // children are resolved so that `dataType` is available.
  case class ResolveBinaryArithmetic(conf: SQLConf) extends Rule[LogicalPlan] {
    override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
      case p: LogicalPlan => p.transformExpressionsUp {
        case a @ Add(l, r) if a.childrenResolved => (l.dataType, r.dataType) match {
          case (CalendarIntervalType, CalendarIntervalType) => a
          case (_, CalendarIntervalType) => Cast(TimeAdd(l, r), l.dataType)
          case (CalendarIntervalType, _) => Cast(TimeAdd(r, l), r.dataType)
          case (DateType, dt) if dt != StringType => DateAdd(l, r)
          case (dt, DateType) if dt != StringType => DateAdd(r, l)
          case _ => a
        }
        case s @ Subtract(l, r) if s.childrenResolved => (l.dataType, r.dataType) match {
          case (CalendarIntervalType, CalendarIntervalType) => s
          case (_, CalendarIntervalType) => Cast(TimeSub(l, r), l.dataType)
          case (TimestampType, _) => SubtractTimestamps(l, r)
          case (_, TimestampType) => SubtractTimestamps(l, r)
          case (_, DateType) => SubtractDates(l, r)
          case (DateType, dt) if dt != StringType => DateSub(l, r)
          case _ => s
        }
        case m @ Multiply(l, r) if m.childrenResolved => (l.dataType, r.dataType) match {
          case (CalendarIntervalType, _) => MultiplyInterval(l, r)
          case (_, CalendarIntervalType) => MultiplyInterval(r, l)
          case _ => m
        }
        case d @ Divide(l, r) if d.childrenResolved => (l.dataType, r.dataType) match {
          case (CalendarIntervalType, _) => DivideInterval(l, r)
          case _ => d
        }
      }
    }
  }
/**
* Substitute child plan with WindowSpecDefinitions.
*/
object WindowsSubstitution extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
// Lookup WindowSpecDefinitions. This rule works with unresolved children.
case WithWindowDefinition(windowDefinitions, child) => child.resolveExpressions {
case UnresolvedWindowExpression(c, WindowSpecReference(windowName)) =>
val errorMessage =
s"Window specification $windowName is not defined in the WINDOW clause."
val windowSpecDefinition =
windowDefinitions.getOrElse(windowName, failAnalysis(errorMessage))
WindowExpression(c, windowSpecDefinition)
}
}
}
  /**
   * Replaces [[UnresolvedAlias]]s with concrete aliases.
   */
  object ResolveAliases extends Rule[LogicalPlan] {
    // Chooses an alias for each expression; the case order below is significant
    // (e.g. a resolved NamedExpression must win over the generic pretty-SQL alias).
    private def assignAliases(exprs: Seq[NamedExpression]) = {
      exprs.map(_.transformUp { case u @ UnresolvedAlias(child, optGenAliasFunc) =>
          child match {
            case ne: NamedExpression => ne
            case go @ GeneratorOuter(g: Generator) if g.resolved => MultiAlias(go, Nil)
            // Leave unresolved children alone; this rule will fire again later.
            case e if !e.resolved => u
            case g: Generator => MultiAlias(g, Nil)
            case c @ Cast(ne: NamedExpression, _, _) => Alias(c, ne.name)()
            case e: ExtractValue => Alias(e, toPrettySQL(e))()
            case e if optGenAliasFunc.isDefined =>
              Alias(child, optGenAliasFunc.get.apply(e))()
            case e => Alias(e, toPrettySQL(e))()
          }
        }
      ).asInstanceOf[Seq[NamedExpression]]
    }

    private def hasUnresolvedAlias(exprs: Seq[NamedExpression]) =
      exprs.exists(_.find(_.isInstanceOf[UnresolvedAlias]).isDefined)

    // Only operators whose children are resolved and that still carry an
    // UnresolvedAlias are rewritten.
    def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
      case Aggregate(groups, aggs, child) if child.resolved && hasUnresolvedAlias(aggs) =>
        Aggregate(groups, assignAliases(aggs), child)

      case g: GroupingSets if g.child.resolved && hasUnresolvedAlias(g.aggregations) =>
        g.copy(aggregations = assignAliases(g.aggregations))

      case Pivot(groupByOpt, pivotColumn, pivotValues, aggregates, child)
        if child.resolved && groupByOpt.isDefined && hasUnresolvedAlias(groupByOpt.get) =>
        Pivot(Some(assignAliases(groupByOpt.get)), pivotColumn, pivotValues, aggregates, child)

      case Project(projectList, child) if child.resolved && hasUnresolvedAlias(projectList) =>
        Project(assignAliases(projectList), child)
    }
  }
  // Expands CUBE/ROLLUP/GROUPING SETS into an Expand + Aggregate pair and rewrites
  // grouping()/grouping_id() calls against the generated grouping-id column.
  object ResolveGroupingAnalytics extends Rule[LogicalPlan] {
    /*
     *  GROUP BY a, b, c WITH ROLLUP
     *  is equivalent to
     *  GROUP BY a, b, c GROUPING SETS ( (a, b, c), (a, b), (a), ( ) ).
     *  Group Count: N + 1 (N is the number of group expressions)
     *
     *  We need to get all of its subsets for the rule described above, the subset is
     *  represented as sequence of expressions.
     */
    def rollupExprs(exprs: Seq[Expression]): Seq[Seq[Expression]] = exprs.inits.toIndexedSeq

    /*
     *  GROUP BY a, b, c WITH CUBE
     *  is equivalent to
     *  GROUP BY a, b, c GROUPING SETS ( (a, b, c), (a, b), (b, c), (a, c), (a), (b), (c), ( ) ).
     *  Group Count: 2 ^ N (N is the number of group expressions)
     *
     *  We need to get all of its subsets for a given GROUPBY expression, the subsets are
     *  represented as sequence of expressions.
     */
    def cubeExprs(exprs: Seq[Expression]): Seq[Seq[Expression]] = {
      // `cubeExprs0` is recursive and returns a lazy Stream. Here we call `toIndexedSeq` to
      // materialize it and avoid serialization problems later on.
      cubeExprs0(exprs).toIndexedSeq
    }

    // Recursively builds the power set of `exprs`, preserving the original order
    // within each subset.
    def cubeExprs0(exprs: Seq[Expression]): Seq[Seq[Expression]] = exprs.toList match {
      case x :: xs =>
        val initial = cubeExprs0(xs)
        initial.map(x +: _) ++ initial
      case Nil =>
        Seq(Seq.empty)
    }

    // True if `e` contains a grouping() or grouping_id() call anywhere.
    private[analysis] def hasGroupingFunction(e: Expression): Boolean = {
      e.collectFirst {
        case g: Grouping => g
        case g: GroupingID => g
      }.isDefined
    }

    // Rewrites grouping()/grouping_id() into bit-arithmetic over the grouping-id
    // column `gid`; fails analysis if the referenced column is not a grouping column.
    private def replaceGroupingFunc(
        expr: Expression,
        groupByExprs: Seq[Expression],
        gid: Expression): Expression = {
      expr transform {
        case e: GroupingID =>
          if (e.groupByExprs.isEmpty ||
              e.groupByExprs.map(_.canonicalized) == groupByExprs.map(_.canonicalized)) {
            Alias(gid, toPrettySQL(e))()
          } else {
            throw new AnalysisException(
              s"Columns of grouping_id (${e.groupByExprs.mkString(",")}) does not match " +
                s"grouping columns (${groupByExprs.mkString(",")})")
          }
        case e @ Grouping(col: Expression) =>
          val idx = groupByExprs.indexWhere(_.semanticEquals(col))
          if (idx >= 0) {
            // Extract the bit for this column: bit i (from the left) of gid tells
            // whether grouping column i was nulled out for the current grouping set.
            Alias(Cast(BitwiseAnd(ShiftRight(gid, Literal(groupByExprs.length - 1 - idx)),
              Literal(1L)), ByteType), toPrettySQL(e))()
          } else {
            throw new AnalysisException(s"Column of grouping ($col) can't be found " +
              s"in grouping columns ${groupByExprs.mkString(",")}")
          }
      }
    }

    /*
     * Create new alias for all group by expressions for `Expand` operator.
     */
    private def constructGroupByAlias(groupByExprs: Seq[Expression]): Seq[Alias] = {
      groupByExprs.map {
        case e: NamedExpression => Alias(e, e.name)()
        case other => Alias(other, other.toString)()
      }
    }

    /*
     * Construct [[Expand]] operator with grouping sets.
     */
    private def constructExpand(
        selectedGroupByExprs: Seq[Seq[Expression]],
        child: LogicalPlan,
        groupByAliases: Seq[Alias],
        gid: Attribute): LogicalPlan = {
      // Change the nullability of group by aliases if necessary. For example, if we have
      // GROUPING SETS ((a,b), a), we do not need to change the nullability of a, but we
      // should change the nullability of b to be TRUE.
      // TODO: For Cube/Rollup just set nullability to be `true`.
      val expandedAttributes = groupByAliases.map { alias =>
        if (selectedGroupByExprs.exists(!_.contains(alias.child))) {
          alias.toAttribute.withNullability(true)
        } else {
          alias.toAttribute
        }
      }

      val groupingSetsAttributes = selectedGroupByExprs.map { groupingSetExprs =>
        groupingSetExprs.map { expr =>
          val alias = groupByAliases.find(_.child.semanticEquals(expr)).getOrElse(
            failAnalysis(s"$expr doesn't show up in the GROUP BY list $groupByAliases"))
          // Map alias to expanded attribute.
          expandedAttributes.find(_.semanticEquals(alias.toAttribute)).getOrElse(
            alias.toAttribute)
        }
      }

      Expand(groupingSetsAttributes, groupByAliases, expandedAttributes, gid, child)
    }

    /*
     * Construct new aggregate expressions by replacing grouping functions.
     */
    private def constructAggregateExprs(
        groupByExprs: Seq[Expression],
        aggregations: Seq[NamedExpression],
        groupByAliases: Seq[Alias],
        groupingAttrs: Seq[Expression],
        gid: Attribute): Seq[NamedExpression] = aggregations.map {
      // collect all the found AggregateExpression, so we can check an expression is part of
      // any AggregateExpression or not.
      val aggsBuffer = ArrayBuffer[Expression]()
      // Returns whether the expression belongs to any expressions in `aggsBuffer` or not.
      def isPartOfAggregation(e: Expression): Boolean = {
        aggsBuffer.exists(a => a.find(_ eq e).isDefined)
      }
      replaceGroupingFunc(_, groupByExprs, gid).transformDown {
        // AggregateExpression should be computed on the unmodified value of its argument
        // expressions, so we should not replace any references to grouping expression
        // inside it.
        case e: AggregateExpression =>
          aggsBuffer += e
          e
        case e if isPartOfAggregation(e) => e
        case e =>
          // Replace expression by expand output attribute.
          val index = groupByAliases.indexWhere(_.child.semanticEquals(e))
          if (index == -1) {
            e
          } else {
            groupingAttrs(index)
          }
      }.asInstanceOf[NamedExpression]
    }

    /*
     * Construct [[Aggregate]] operator from Cube/Rollup/GroupingSets.
     */
    private def constructAggregate(
        selectedGroupByExprs: Seq[Seq[Expression]],
        groupByExprs: Seq[Expression],
        aggregationExprs: Seq[NamedExpression],
        child: LogicalPlan): LogicalPlan = {

      // In case of ANSI-SQL compliant syntax for GROUPING SETS, groupByExprs is optional and
      // can be null. In such case, we derive the groupByExprs from the user supplied values for
      // grouping sets.
      val finalGroupByExpressions = if (groupByExprs == Nil) {
        selectedGroupByExprs.flatten.foldLeft(Seq.empty[Expression]) { (result, currentExpr) =>
          // Only unique expressions are included in the group by expressions and is determined
          // based on their semantic equality. Example. grouping sets ((a * b), (b * a)) results
          // in grouping expression (a * b)
          if (result.find(_.semanticEquals(currentExpr)).isDefined) {
            result
          } else {
            result :+ currentExpr
          }
        }
      } else {
        groupByExprs
      }

      // The grouping id is a bitmask, so the number of grouping columns is bounded by
      // the bit width of its data type.
      if (finalGroupByExpressions.size > GroupingID.dataType.defaultSize * 8) {
        throw new AnalysisException(
          s"Grouping sets size cannot be greater than ${GroupingID.dataType.defaultSize * 8}")
      }

      // Expand works by setting grouping expressions to null as determined by the
      // `selectedGroupByExprs`. To prevent these null values from being used in an aggregate
      // instead of the original value we need to create new aliases for all group by expressions
      // that will only be used for the intended purpose.
      val groupByAliases = constructGroupByAlias(finalGroupByExpressions)

      val gid = AttributeReference(VirtualColumn.groupingIdName, GroupingID.dataType, false)()
      val expand = constructExpand(selectedGroupByExprs, child, groupByAliases, gid)
      val groupingAttrs = expand.output.drop(child.output.length)

      val aggregations = constructAggregateExprs(
        finalGroupByExpressions, aggregationExprs, groupByAliases, groupingAttrs, gid)

      Aggregate(groupingAttrs, aggregations, expand)
    }

    // Finds the grouping expressions of the nearest Aggregate below `plan`, which must
    // have been produced by this rule (its last grouping key is the grouping id).
    private def findGroupingExprs(plan: LogicalPlan): Seq[Expression] = {
      plan.collectFirst {
        case a: Aggregate =>
          // this Aggregate should have grouping id as the last grouping key.
          val gid = a.groupingExpressions.last
          if (!gid.isInstanceOf[AttributeReference]
            || gid.asInstanceOf[AttributeReference].name != VirtualColumn.groupingIdName) {
            failAnalysis(s"grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup")
          }
          a.groupingExpressions.take(a.groupingExpressions.length - 1)
      }.getOrElse {
        failAnalysis(s"grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup")
      }
    }

    // This require transformUp to replace grouping()/grouping_id() in resolved Filter/Sort
    def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperatorsUp {
      case a if !a.childrenResolved => a // be sure all of the children are resolved.

      // Ensure group by expressions and aggregate expressions have been resolved.
      case Aggregate(Seq(c @ Cube(groupByExprs)), aggregateExpressions, child)
        if (groupByExprs ++ aggregateExpressions).forall(_.resolved) =>
        constructAggregate(cubeExprs(groupByExprs), groupByExprs, aggregateExpressions, child)
      case Aggregate(Seq(r @ Rollup(groupByExprs)), aggregateExpressions, child)
        if (groupByExprs ++ aggregateExpressions).forall(_.resolved) =>
        constructAggregate(rollupExprs(groupByExprs), groupByExprs, aggregateExpressions, child)
      // Ensure all the expressions have been resolved.
      case x: GroupingSets if x.expressions.forall(_.resolved) =>
        constructAggregate(x.selectedGroupByExprs, x.groupByExprs, x.aggregations, x.child)

      // We should make sure all expressions in condition have been resolved.
      case f @ Filter(cond, child) if hasGroupingFunction(cond) && cond.resolved =>
        val groupingExprs = findGroupingExprs(child)
        // The unresolved grouping id will be resolved by ResolveMissingReferences
        val newCond = replaceGroupingFunc(cond, groupingExprs, VirtualColumn.groupingIdAttribute)
        f.copy(condition = newCond)

      // We should make sure all [[SortOrder]]s have been resolved.
      case s @ Sort(order, _, child)
        if order.exists(hasGroupingFunction) && order.forall(_.resolved) =>
        val groupingExprs = findGroupingExprs(child)
        val gid = VirtualColumn.groupingIdAttribute
        // The unresolved grouping id will be resolved by ResolveMissingReferences
        val newOrder = order.map(replaceGroupingFunc(_, groupingExprs, gid).asInstanceOf[SortOrder])
        s.copy(order = newOrder)
    }
  }
  // Rewrites a Pivot operator into Aggregate (and optionally Project) operators.
  // Two strategies are used: a two-step aggregation via PivotFirst when the aggregate
  // result types support it, or one conditional aggregate per (pivot value, aggregate).
  object ResolvePivot extends Rule[LogicalPlan] {
    def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
      // Wait until children, aggregates, group-by, pivot column and values are resolved.
      case p: Pivot if !p.childrenResolved || !p.aggregates.forall(_.resolved)
        || (p.groupByExprsOpt.isDefined && !p.groupByExprsOpt.get.forall(_.resolved))
        || !p.pivotColumn.resolved || !p.pivotValues.forall(_.resolved) => p
      case Pivot(groupByExprsOpt, pivotColumn, pivotValues, aggregates, child) =>
        if (!RowOrdering.isOrderable(pivotColumn.dataType)) {
          throw new AnalysisException(
            s"Invalid pivot column '${pivotColumn}'. Pivot columns must be comparable.")
        }
        // Check all aggregate expressions.
        aggregates.foreach(checkValidAggregateExpression)
        // Check all pivot values are literal and match pivot column data type.
        val evalPivotValues = pivotValues.map { value =>
          val foldable = value match {
            case Alias(v, _) => v.foldable
            case _ => value.foldable
          }
          if (!foldable) {
            throw new AnalysisException(
              s"Literal expressions required for pivot values, found '$value'")
          }
          if (!Cast.canCast(value.dataType, pivotColumn.dataType)) {
            throw new AnalysisException(s"Invalid pivot value '$value': " +
              s"value data type ${value.dataType.simpleString} does not match " +
              s"pivot column data type ${pivotColumn.dataType.catalogString}")
          }
          // Fold the value to a constant in the pivot column's type.
          Cast(value, pivotColumn.dataType, Some(conf.sessionLocalTimeZone)).eval(EmptyRow)
        }
        // Group-by expressions coming from SQL are implicit and need to be deduced.
        val groupByExprs = groupByExprsOpt.getOrElse {
          val pivotColAndAggRefs = pivotColumn.references ++ AttributeSet(aggregates)
          child.output.filterNot(pivotColAndAggRefs.contains)
        }
        val singleAgg = aggregates.size == 1
        // Output column name: just the value when there is a single aggregate,
        // otherwise "<value>_<aggregate name>".
        def outputName(value: Expression, aggregate: Expression): String = {
          val stringValue = value match {
            case n: NamedExpression => n.name
            case _ =>
              val utf8Value =
                Cast(value, StringType, Some(conf.sessionLocalTimeZone)).eval(EmptyRow)
              Option(utf8Value).map(_.toString).getOrElse("null")
          }
          if (singleAgg) {
            stringValue
          } else {
            val suffix = aggregate match {
              case n: NamedExpression => n.name
              case _ => toPrettySQL(aggregate)
            }
            stringValue + "_" + suffix
          }
        }
        if (aggregates.forall(a => PivotFirst.supportsDataType(a.dataType))) {
          // Since evaluating |pivotValues| if statements for each input row can get slow this is an
          // alternate plan that instead uses two steps of aggregation.
          val namedAggExps: Seq[NamedExpression] = aggregates.map(a => Alias(a, a.sql)())
          val namedPivotCol = pivotColumn match {
            case n: NamedExpression => n
            case _ => Alias(pivotColumn, "__pivot_col")()
          }
          val bigGroup = groupByExprs :+ namedPivotCol
          val firstAgg = Aggregate(bigGroup, bigGroup ++ namedAggExps, child)
          val pivotAggs = namedAggExps.map { a =>
            Alias(PivotFirst(namedPivotCol.toAttribute, a.toAttribute, evalPivotValues)
              .toAggregateExpression()
            , "__pivot_" + a.sql)()
          }
          val groupByExprsAttr = groupByExprs.map(_.toAttribute)
          val secondAgg = Aggregate(groupByExprsAttr, groupByExprsAttr ++ pivotAggs, firstAgg)
          val pivotAggAttribute = pivotAggs.map(_.toAttribute)
          // Unpack the per-value results of PivotFirst into one output column each.
          val pivotOutputs = pivotValues.zipWithIndex.flatMap { case (value, i) =>
            aggregates.zip(pivotAggAttribute).map { case (aggregate, pivotAtt) =>
              Alias(ExtractValue(pivotAtt, Literal(i), resolver), outputName(value, aggregate))()
            }
          }
          Project(groupByExprsAttr ++ pivotOutputs, secondAgg)
        } else {
          // Fallback: one conditional aggregate per (pivot value, aggregate) pair.
          val pivotAggregates: Seq[NamedExpression] = pivotValues.flatMap { value =>
            def ifExpr(e: Expression) = {
              If(
                EqualNullSafe(
                  pivotColumn,
                  Cast(value, pivotColumn.dataType, Some(conf.sessionLocalTimeZone))),
                e, Literal(null))
            }
            aggregates.map { aggregate =>
              val filteredAggregate = aggregate.transformDown {
                // Assumption is the aggregate function ignores nulls. This is true for all current
                // AggregateFunction's with the exception of First and Last in their default mode
                // (which we handle) and possibly some Hive UDAF's.
                case First(expr, _) =>
                  First(ifExpr(expr), Literal(true))
                case Last(expr, _) =>
                  Last(ifExpr(expr), Literal(true))
                case a: AggregateFunction =>
                  a.withNewChildren(a.children.map(ifExpr))
              }.transform {
                // We are duplicating aggregates that are now computing a different value for each
                // pivot value.
                // TODO: Don't construct the physical container until after analysis.
                case ae: AggregateExpression => ae.copy(resultId = NamedExpression.newExprId)
              }
              Alias(filteredAggregate, outputName(value, aggregate))()
            }
          }
          Aggregate(groupByExprs, groupByExprs ++ pivotAggregates, child)
        }
    }

    // Support any aggregate expression that can appear in an Aggregate plan except Pandas UDF.
    // TODO: Support Pandas UDF.
    private def checkValidAggregateExpression(expr: Expression): Unit = expr match {
      case _: AggregateExpression => // OK and leave the argument check to CheckAnalysis.
      case expr: PythonUDF if PythonUDF.isGroupedAggPandasUDF(expr) =>
        failAnalysis("Pandas UDF aggregate expressions are currently not supported in pivot.")
      case e: Attribute =>
        failAnalysis(
          s"Aggregate expression required for pivot, but '${e.sql}' " +
          s"did not appear in any aggregate function.")
      case e => e.children.foreach(checkValidAggregateExpression)
    }
  }
  // Resolves UnresolvedNamespace nodes, substituting the current catalog/namespace
  // when none is specified (e.g. a bare SHOW TABLES).
  case class ResolveNamespace(catalogManager: CatalogManager)
    extends Rule[LogicalPlan] with LookupCatalog {
    def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
      case s @ ShowTables(UnresolvedNamespace(Seq()), _) =>
        s.copy(namespace = ResolvedNamespace(currentCatalog, catalogManager.currentNamespace))
      case s @ ShowViews(UnresolvedNamespace(Seq()), _) =>
        s.copy(namespace = ResolvedNamespace(currentCatalog, catalogManager.currentNamespace))
      case UnresolvedNamespace(Seq()) =>
        ResolvedNamespace(currentCatalog, Seq.empty[String])
      case UnresolvedNamespace(CatalogAndNamespace(catalog, ns)) =>
        ResolvedNamespace(catalog, ns)
    }
  }
private def isResolvingView: Boolean = AnalysisContext.get.catalogAndNamespace.nonEmpty
  /**
   * Resolve relations to temp views. This is not an actual rule, and is called by
   * [[ResolveTables]] and [[ResolveRelations]].
   */
  object ResolveTempViews extends Rule[LogicalPlan] {
    def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
      case u @ UnresolvedRelation(ident) =>
        lookupTempView(ident).getOrElse(u)
      case i @ InsertIntoStatement(UnresolvedRelation(ident), _, _, _, _) =>
        lookupTempView(ident)
          .map(view => i.copy(table = view))
          .getOrElse(i)
      // Commands that require a real table must fail if the name matches a temp view.
      case u @ UnresolvedTable(ident) =>
        lookupTempView(ident).foreach { _ =>
          u.failAnalysis(s"${ident.quoted} is a temp view not table.")
        }
        u
      case u @ UnresolvedTableOrView(ident) =>
        lookupTempView(ident).map(_ => ResolvedView(ident.asIdentifier)).getOrElse(u)
    }

    // One-part names are local temp views; two-part names are global temp views
    // (global-temp-database qualified). Anything longer cannot be a temp view.
    def lookupTempView(identifier: Seq[String]): Option[LogicalPlan] = {
      // Permanent View can't refer to temp views, no need to lookup at all.
      if (isResolvingView) return None

      identifier match {
        case Seq(part1) => v1SessionCatalog.lookupTempView(part1)
        case Seq(part1, part2) => v1SessionCatalog.lookupGlobalTempView(part1, part2)
        case _ => None
      }
    }
  }
// If we are resolving relations inside views, expand single-part relation names with
// the catalog and namespace that were current when the view was created.
private def expandRelationName(nameParts: Seq[String]): Seq[String] = {
  if (!isResolvingView) {
    nameParts
  } else if (nameParts.length == 1) {
    // Single-part name: qualify with the view's captured catalog + namespace.
    AnalysisContext.get.catalogAndNamespace :+ nameParts.head
  } else if (catalogManager.isCatalogRegistered(nameParts.head)) {
    // Already starts with a registered catalog name; nothing to expand.
    nameParts
  } else {
    // Multi-part name without a catalog prefix: prepend the view's catalog.
    AnalysisContext.get.catalogAndNamespace.head +: nameParts
  }
}
/**
 * Resolve table relations with concrete relations from v2 catalog.
 *
 * [[ResolveRelations]] still resolves v1 tables.
 */
object ResolveTables extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = ResolveTempViews(plan).resolveOperatorsUp {
    case unresolved: UnresolvedRelation =>
      lookupV2Relation(unresolved.multipartIdentifier) match {
        case Some(rel) =>
          // Qualify the relation with its full catalog.namespace.name for column resolution.
          val ident = rel.identifier.get
          SubqueryAlias(rel.catalog.get.name +: ident.namespace :+ ident.name, rel)
        case None => unresolved
      }

    case unresolved @ UnresolvedTable(NonSessionCatalogAndIdentifier(catalog, ident)) =>
      CatalogV2Util.loadTable(catalog, ident)
        .map(tbl => ResolvedTable(catalog.asTableCatalog, ident, tbl))
        .getOrElse(unresolved)

    case unresolved @ UnresolvedTableOrView(NonSessionCatalogAndIdentifier(catalog, ident)) =>
      CatalogV2Util.loadTable(catalog, ident)
        .map(tbl => ResolvedTable(catalog.asTableCatalog, ident, tbl))
        .getOrElse(unresolved)

    case insert @ InsertIntoStatement(u: UnresolvedRelation, _, _, _, _)
        if insert.query.resolved =>
      lookupV2Relation(u.multipartIdentifier)
        .map(rel => insert.copy(table = rel))
        .getOrElse(insert)

    case alter @ AlterTable(_, _, u: UnresolvedV2Relation, _) =>
      CatalogV2Util.loadRelation(u.catalog, u.tableName)
        .map(rel => alter.copy(table = rel))
        .getOrElse(alter)

    case unresolved: UnresolvedV2Relation =>
      CatalogV2Util.loadRelation(unresolved.catalog, unresolved.tableName).getOrElse(unresolved)
  }

  /**
   * Performs the lookup of DataSourceV2 Tables from v2 catalog.
   */
  private def lookupV2Relation(identifier: Seq[String]): Option[DataSourceV2Relation] =
    expandRelationName(identifier) match {
      case NonSessionCatalogAndIdentifier(catalog, ident) =>
        CatalogV2Util.loadTable(catalog, ident)
          .map(table => DataSourceV2Relation.create(table, Some(catalog), Some(ident)))
      case _ => None
    }
}
/**
 * Replaces [[UnresolvedRelation]]s with concrete relations from the catalog.
 */
object ResolveRelations extends Rule[LogicalPlan] {
  // The current catalog and namespace may be different from when the view was created, we must
  // resolve the view logical plan here, with the catalog and namespace stored in view metadata.
  // This is done by keeping the catalog and namespace in `AnalysisContext`, and analyzer will
  // look at `AnalysisContext.catalogAndNamespace` when resolving relations with single-part name.
  // If `AnalysisContext.catalogAndNamespace` is non-empty, analyzer will expand single-part names
  // with it, instead of current catalog and namespace.
  private def resolveViews(plan: LogicalPlan): LogicalPlan = plan match {
    // The view's child should be a logical plan parsed from the `desc.viewText`, the variable
    // `viewText` should be defined, or else we throw an error on the generation of the View
    // operator.
    case view @ View(desc, _, child) if !child.resolved =>
      // Resolve all the UnresolvedRelations and Views in the child.
      val newChild = AnalysisContext.withAnalysisContext(desc.viewCatalogAndNamespace) {
        // Guard against runaway recursion through mutually-referencing views.
        if (AnalysisContext.get.nestedViewDepth > conf.maxNestedViewDepth) {
          view.failAnalysis(s"The depth of view ${desc.identifier} exceeds the maximum " +
            s"view resolution depth (${conf.maxNestedViewDepth}). Analysis is aborted to " +
            s"avoid errors. Increase the value of ${SQLConf.MAX_NESTED_VIEW_DEPTH.key} to work " +
            "around this.")
        }
        executeSameContext(child)
      }
      view.copy(child = newChild)
    case p @ SubqueryAlias(_, view: View) =>
      p.copy(child = resolveViews(view))
    case _ => plan
  }

  def apply(plan: LogicalPlan): LogicalPlan = ResolveTempViews(plan).resolveOperatorsUp {
    case i @ InsertIntoStatement(table, _, _, _, _) if i.query.resolved =>
      val relation = table match {
        case u: UnresolvedRelation =>
          lookupRelation(u.multipartIdentifier).getOrElse(u)
        case other => other
      }
      // Inserting into a view is never allowed; unwrap subquery aliases to detect it.
      EliminateSubqueryAliases(relation) match {
        case v: View =>
          table.failAnalysis(s"Inserting into a view is not allowed. View: ${v.desc.identifier}.")
        case other => i.copy(table = other)
      }
    case u: UnresolvedRelation =>
      lookupRelation(u.multipartIdentifier).map(resolveViews).getOrElse(u)
    case u @ UnresolvedTable(identifier) =>
      lookupTableOrView(identifier).map {
        case v: ResolvedView =>
          u.failAnalysis(s"${v.identifier.quoted} is a view not table.")
        case table => table
      }.getOrElse(u)
    case u @ UnresolvedTableOrView(identifier) =>
      lookupTableOrView(identifier).getOrElse(u)
  }

  // Looks up a name in the session catalog, distinguishing views from tables.
  private def lookupTableOrView(identifier: Seq[String]): Option[LogicalPlan] = {
    expandRelationName(identifier) match {
      case SessionCatalogAndIdentifier(catalog, ident) =>
        CatalogV2Util.loadTable(catalog, ident).map {
          case v1Table: V1Table if v1Table.v1Table.tableType == CatalogTableType.VIEW =>
            ResolvedView(ident)
          case table =>
            ResolvedTable(catalog.asTableCatalog, ident, table)
        }
      case _ => None
    }
  }

  // Look up a relation from the session catalog with the following logic:
  // 1) If the resolved catalog is not session catalog, return None.
  // 2) If a relation is not found in the catalog, return None.
  // 3) If a v1 table is found, create a v1 relation. Otherwise, create a v2 relation.
  private def lookupRelation(identifier: Seq[String]): Option[LogicalPlan] = {
    expandRelationName(identifier) match {
      case SessionCatalogAndIdentifier(catalog, ident) =>
        // `loaded` is deliberately a `def`: the (potentially expensive) catalog load only
        // runs when the per-query relation cache has no entry for `key`.
        def loaded = CatalogV2Util.loadTable(catalog, ident).map {
          case v1Table: V1Table =>
            v1SessionCatalog.getRelation(v1Table.v1Table)
          case table =>
            SubqueryAlias(
              catalog.name +: ident.asMultipartIdentifier,
              DataSourceV2Relation.create(table, Some(catalog), Some(ident)))
        }
        val key = catalog.name +: ident.namespace :+ ident.name
        // Cached per-query so repeated references to the same table resolve consistently.
        Option(AnalysisContext.get.relationCache.getOrElseUpdate(key, loaded.orNull))
      case _ => None
    }
  }
}
// Rewrites INSERT INTO statements over v2 relations into the appropriate v2 write node
// (AppendData / OverwritePartitionsDynamic / OverwriteByExpression).
object ResolveInsertInto extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
    case i @ InsertIntoStatement(r: DataSourceV2Relation, _, _, _, _) if i.query.resolved =>
      // ifPartitionNotExists is append with validation, but validation is not supported
      if (i.ifPartitionNotExists) {
        throw new AnalysisException(
          s"Cannot write, IF NOT EXISTS is not supported for table: ${r.table.name}")
      }

      val partCols = partitionColumnNames(r.table)
      validatePartitionSpec(partCols, i.partitionSpec)

      // Static partitions (PARTITION (p = 1)) become literal columns; dynamic ones
      // (PARTITION (p)) are supplied by the query itself.
      val staticPartitions = i.partitionSpec.filter(_._2.isDefined).mapValues(_.get)
      val query = addStaticPartitionColumns(r, i.query, staticPartitions)
      // Dynamic overwrite applies when at least one partition column has no static value
      // and the session is configured for DYNAMIC partition-overwrite mode.
      val dynamicPartitionOverwrite = partCols.size > staticPartitions.size &&
        conf.partitionOverwriteMode == PartitionOverwriteMode.DYNAMIC

      if (!i.overwrite) {
        AppendData.byPosition(r, query)
      } else if (dynamicPartitionOverwrite) {
        OverwritePartitionsDynamic.byPosition(r, query)
      } else {
        OverwriteByExpression.byPosition(r, query, staticDeleteExpression(r, staticPartitions))
      }
  }

  // Returns names of partition columns backed by an identity transform; only these can be
  // set directly in a PARTITION clause.
  private def partitionColumnNames(table: Table): Seq[String] = {
    // get partition column names. in v2, partition columns are columns that are stored using an
    // identity partition transform because the partition values and the column values are
    // identical. otherwise, partition values are produced by transforming one or more source
    // columns and cannot be set directly in a query's PARTITION clause.
    table.partitioning.flatMap {
      case IdentityTransform(FieldReference(Seq(name))) => Some(name)
      case _ => None
    }
  }

  // Fails analysis when the PARTITION clause names a column that is not a partition column.
  private def validatePartitionSpec(
      partitionColumnNames: Seq[String],
      partitionSpec: Map[String, Option[String]]): Unit = {
    // check that each partition name is a partition column. otherwise, it is not valid
    partitionSpec.keySet.foreach { partitionName =>
      partitionColumnNames.find(name => conf.resolver(name, partitionName)) match {
        case Some(_) =>
        case None =>
          throw new AnalysisException(
            s"PARTITION clause cannot contain a non-partition column name: $partitionName")
      }
    }
  }

  // Rewrites the query so each statically-specified partition value appears as a literal
  // column, aligned positionally with the table's output.
  private def addStaticPartitionColumns(
      relation: DataSourceV2Relation,
      query: LogicalPlan,
      staticPartitions: Map[String, String]): LogicalPlan = {

    if (staticPartitions.isEmpty) {
      query

    } else {
      // add any static value as a literal column
      val withStaticPartitionValues = {
        // for each static name, find the column name it will replace and check for unknowns.
        val outputNameToStaticName = staticPartitions.keySet.map(staticName =>
          relation.output.find(col => conf.resolver(col.name, staticName)) match {
            case Some(attr) =>
              attr.name -> staticName
            case _ =>
              throw new AnalysisException(
                s"Cannot add static value for unknown column: $staticName")
          }).toMap

        // NOTE: this is a mutable iterator consumed in order by the flatMap below; the
        // traversal order of `relation.output` is what lines query columns up with the
        // non-static table columns.
        val queryColumns = query.output.iterator

        // for each output column, add the static value as a literal, or use the next input
        // column. this does not fail if input columns are exhausted and adds remaining columns
        // at the end. both cases will be caught by ResolveOutputRelation and will fail the
        // query with a helpful error message.
        relation.output.flatMap { col =>
          outputNameToStaticName.get(col.name).flatMap(staticPartitions.get) match {
            case Some(staticValue) =>
              Some(Alias(Cast(Literal(staticValue), col.dataType), col.name)())
            case _ if queryColumns.hasNext =>
              Some(queryColumns.next)
            case _ =>
              None
          }
        } ++ queryColumns
      }

      Project(withStaticPartitionValues, query)
    }
  }

  // Builds a conjunction of equality predicates selecting exactly the static partitions;
  // used as the delete condition for static overwrite.
  private def staticDeleteExpression(
      relation: DataSourceV2Relation,
      staticPartitions: Map[String, String]): Expression = {
    if (staticPartitions.isEmpty) {
      Literal(true)
    } else {
      staticPartitions.map { case (name, value) =>
        relation.output.find(col => conf.resolver(col.name, name)) match {
          case Some(attr) =>
            // the delete expression must reference the table's column names, but these attributes
            // are not available when CheckAnalysis runs because the relation is not a child of
            // the logical operation. instead, expressions are resolved after
            // ResolveOutputRelation runs, using the query's column names that will match the
            // table names at that point. because resolution happens after a future rule, create
            // an UnresolvedAttribute.
            EqualTo(UnresolvedAttribute(attr.name), Cast(Literal(value), attr.dataType))
          case None =>
            throw new AnalysisException(s"Unknown static partition column: $name")
        }
      }.reduce(And)
    }
  }
}
/**
 * Replaces [[UnresolvedAttribute]]s with concrete [[AttributeReference]]s from
 * a logical plan node's children.
 */
object ResolveReferences extends Rule[LogicalPlan] {

  /**
   * Generate a new logical plan for the right child with different expression IDs
   * for all conflicting attributes.
   */
  private def dedupRight (left: LogicalPlan, right: LogicalPlan): LogicalPlan = {
    val conflictingAttributes = left.outputSet.intersect(right.outputSet)
    logDebug(s"Conflicting attributes ${conflictingAttributes.mkString(",")} " +
      s"between $left and $right")

    /**
     * For LogicalPlan likes MultiInstanceRelation, Project, Aggregate, etc, whose output doesn't
     * inherit directly from its children, we could just stop collect on it. Because we could
     * always replace all the lower conflict attributes with the new attributes from the new
     * plan. Theoretically, we should do recursively collect for Generate and Window but we leave
     * it to the next batch to reduce possible overhead because this should be a corner case.
     */
    def collectConflictPlans(plan: LogicalPlan): Seq[(LogicalPlan, LogicalPlan)] = plan match {
      // Handle base relations that might appear more than once.
      case oldVersion: MultiInstanceRelation
          if oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty =>
        val newVersion = oldVersion.newInstance()
        Seq((oldVersion, newVersion))

      case oldVersion: SerializeFromObject
          if oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty =>
        Seq((oldVersion, oldVersion.copy(
          serializer = oldVersion.serializer.map(_.newInstance()))))

      // Handle projects that create conflicting aliases.
      case oldVersion @ Project(projectList, _)
          if findAliases(projectList).intersect(conflictingAttributes).nonEmpty =>
        Seq((oldVersion, oldVersion.copy(projectList = newAliases(projectList))))

      case oldVersion @ Aggregate(_, aggregateExpressions, _)
          if findAliases(aggregateExpressions).intersect(conflictingAttributes).nonEmpty =>
        Seq((oldVersion, oldVersion.copy(
          aggregateExpressions = newAliases(aggregateExpressions))))

      case oldVersion @ FlatMapGroupsInPandas(_, _, output, _)
          if oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty =>
        Seq((oldVersion, oldVersion.copy(output = output.map(_.newInstance()))))

      case oldVersion: Generate
          if oldVersion.producedAttributes.intersect(conflictingAttributes).nonEmpty =>
        val newOutput = oldVersion.generatorOutput.map(_.newInstance())
        Seq((oldVersion, oldVersion.copy(generatorOutput = newOutput)))

      case oldVersion: Expand
          if oldVersion.producedAttributes.intersect(conflictingAttributes).nonEmpty =>
        val producedAttributes = oldVersion.producedAttributes
        // Only re-instance the attributes that Expand itself produced; pass-through
        // attributes keep their original expression IDs.
        val newOutput = oldVersion.output.map { attr =>
          if (producedAttributes.contains(attr)) {
            attr.newInstance()
          } else {
            attr
          }
        }
        Seq((oldVersion, oldVersion.copy(output = newOutput)))

      case oldVersion @ Window(windowExpressions, _, _, child)
          if AttributeSet(windowExpressions.map(_.toAttribute)).intersect(conflictingAttributes)
            .nonEmpty =>
        Seq((oldVersion, oldVersion.copy(windowExpressions = newAliases(windowExpressions))))

      case _ => plan.children.flatMap(collectConflictPlans)
    }

    val conflictPlans = collectConflictPlans(right)

    /*
     * Note that it's possible `conflictPlans` can be empty which implies that there
     * is a logical plan node that produces new references that this rule cannot handle.
     * When that is the case, there must be another rule that resolves these conflicts.
     * Otherwise, the analysis will fail.
     */
    if (conflictPlans.isEmpty) {
      right
    } else {
      val attributeRewrites = AttributeMap(conflictPlans.flatMap {
        case (oldRelation, newRelation) => oldRelation.output.zip(newRelation.output)})
      val conflictPlanMap = conflictPlans.toMap
      // transformDown so that we can replace all the old Relations in one turn due to
      // the reason that `conflictPlans` are also collected in pre-order.
      right transformDown {
        case r => conflictPlanMap.getOrElse(r, r)
      } transformUp {
        case other => other transformExpressions {
          case a: Attribute =>
            dedupAttr(a, attributeRewrites)
          case s: SubqueryExpression =>
            s.withNewPlan(dedupOuterReferencesInSubquery(s.plan, attributeRewrites))
        }
      }
    }
  }

  // Rewrites `attr` to carry the deduplicated expression ID, if one was recorded in `attrMap`.
  private def dedupAttr(attr: Attribute, attrMap: AttributeMap[Attribute]): Attribute = {
    val exprId = attrMap.getOrElse(attr, attr).exprId
    attr.withExprId(exprId)
  }

  /**
   * The outer plan may have been de-duplicated and the function below updates the
   * outer references to refer to the de-duplicated attributes.
   *
   * For example (SQL):
   * {{{
   *   SELECT * FROM t1
   *   INTERSECT
   *   SELECT * FROM t1
   *   WHERE EXISTS (SELECT 1
   *                 FROM t2
   *                 WHERE t1.c1 = t2.c1)
   * }}}
   * Plan before resolveReference rule.
   *    'Intersect
   *    :- Project [c1#245, c2#246]
   *    :  +- SubqueryAlias t1
   *    :     +- Relation[c1#245,c2#246] parquet
   *    +- 'Project [*]
   *       +- Filter exists#257 [c1#245]
   *       :  +- Project [1 AS 1#258]
   *       :     +- Filter (outer(c1#245) = c1#251)
   *       :        +- SubqueryAlias t2
   *       :           +- Relation[c1#251,c2#252] parquet
   *       +- SubqueryAlias t1
   *          +- Relation[c1#245,c2#246] parquet
   * Plan after the resolveReference rule.
   *    Intersect
   *    :- Project [c1#245, c2#246]
   *    :  +- SubqueryAlias t1
   *    :     +- Relation[c1#245,c2#246] parquet
   *    +- Project [c1#259, c2#260]
   *       +- Filter exists#257 [c1#259]
   *       :  +- Project [1 AS 1#258]
   *       :     +- Filter (outer(c1#259) = c1#251) => Updated
   *       :        +- SubqueryAlias t2
   *       :           +- Relation[c1#251,c2#252] parquet
   *       +- SubqueryAlias t1
   *          +- Relation[c1#259,c2#260] parquet  => Outer plan's attributes are de-duplicated.
   */
  private def dedupOuterReferencesInSubquery(
      plan: LogicalPlan,
      attrMap: AttributeMap[Attribute]): LogicalPlan = {
    plan transformDown { case currentFragment =>
      currentFragment transformExpressions {
        case OuterReference(a: Attribute) =>
          OuterReference(dedupAttr(a, attrMap))
        case s: SubqueryExpression =>
          // Nested subqueries may also carry outer references; recurse into them.
          s.withNewPlan(dedupOuterReferencesInSubquery(s.plan, attrMap))
      }
    }
  }

  /**
   * Resolves the attribute and extract value expressions(s) by traversing the
   * input expression in top down manner. The traversal is done in top-down manner as
   * we need to skip over unbound lambda function expression. The lambda expressions are
   * resolved in a different rule [[ResolveLambdaVariables]]
   *
   * Example :
   * SELECT transform(array(1, 2, 3), (x, i) -> x + i)"
   *
   * In the case above, x and i are resolved as lambda variables in [[ResolveLambdaVariables]]
   *
   * Note : In this routine, the unresolved attributes are resolved from the input plan's
   * children attributes.
   */
  private def resolveExpressionTopDown(e: Expression, q: LogicalPlan): Expression = {
    if (e.resolved) return e
    e match {
      case f: LambdaFunction if !f.bound => f
      case u @ UnresolvedAttribute(nameParts) =>
        // Leave unchanged if resolution fails. Hopefully will be resolved next round.
        val result =
          withPosition(u) {
            q.resolveChildren(nameParts, resolver)
              .orElse(resolveLiteralFunction(nameParts, u, q))
              .getOrElse(u)
          }
        logDebug(s"Resolving $u to $result")
        result
      case UnresolvedExtractValue(child, fieldExpr) if child.resolved =>
        ExtractValue(child, fieldExpr, resolver)
      case _ => e.mapChildren(resolveExpressionTopDown(_, q))
    }
  }

  /**
   * Main resolution pass: expands stars, de-duplicates conflicting attribute IDs across
   * binary operators (Join/Intersect/Except/Union), and resolves the remaining
   * [[UnresolvedAttribute]]s top-down against each operator's children.
   */
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    case p: LogicalPlan if !p.childrenResolved => p

    // If the projection list contains Stars, expand it.
    case p: Project if containsStar(p.projectList) =>
      p.copy(projectList = buildExpandedProjectList(p.projectList, p.child))
    // If the aggregate function argument contains Stars, expand it.
    case a: Aggregate if containsStar(a.aggregateExpressions) =>
      if (a.groupingExpressions.exists(_.isInstanceOf[UnresolvedOrdinal])) {
        failAnalysis(
          "Star (*) is not allowed in select list when GROUP BY ordinal position is used")
      } else {
        a.copy(aggregateExpressions = buildExpandedProjectList(a.aggregateExpressions, a.child))
      }
    // If the script transformation input contains Stars, expand it.
    case t: ScriptTransformation if containsStar(t.input) =>
      t.copy(
        input = t.input.flatMap {
          case s: Star => s.expand(t.child, resolver)
          case o => o :: Nil
        }
      )
    case g: Generate if containsStar(g.generator.children) =>
      failAnalysis("Invalid usage of '*' in explode/json_tuple/UDTF")

    // To resolve duplicate expression IDs for Join and Intersect
    case j @ Join(left, right, _, _, _) if !j.duplicateResolved =>
      j.copy(right = dedupRight(left, right))
    case f @ FlatMapCoGroupsInPandas(leftAttributes, rightAttributes, _, _, left, right) =>
      val leftRes = leftAttributes
        .map(x => resolveExpressionBottomUp(x, left).asInstanceOf[Attribute])
      val rightRes = rightAttributes
        .map(x => resolveExpressionBottomUp(x, right).asInstanceOf[Attribute])
      f.copy(leftAttributes = leftRes, rightAttributes = rightRes)
    // intersect/except will be rewritten to join at the beginning of optimizer. Here we need to
    // deduplicate the right side plan, so that we won't produce an invalid self-join later.
    case i @ Intersect(left, right, _) if !i.duplicateResolved =>
      i.copy(right = dedupRight(left, right))
    case e @ Except(left, right, _) if !e.duplicateResolved =>
      e.copy(right = dedupRight(left, right))
    case u @ Union(children) if !u.duplicateResolved =>
      // Use projection-based de-duplication for Union to avoid breaking the checkpoint sharing
      // feature in streaming.
      val newChildren = children.foldRight(Seq.empty[LogicalPlan]) { (head, tail) =>
        head +: tail.map {
          case child if head.outputSet.intersect(child.outputSet).isEmpty =>
            child
          case child =>
            val projectList = child.output.map { attr =>
              Alias(attr, attr.name)()
            }
            Project(projectList, child)
        }
      }
      u.copy(children = newChildren)

    // When resolve `SortOrder`s in Sort based on child, don't report errors as
    // we still have chance to resolve it based on its descendants
    case s @ Sort(ordering, global, child) if child.resolved && !s.resolved =>
      val newOrdering =
        ordering.map(order => resolveExpressionBottomUp(order, child).asInstanceOf[SortOrder])
      Sort(newOrdering, global, child)

    // A special case for Generate, because the output of Generate should not be resolved by
    // ResolveReferences. Attributes in the output will be resolved by ResolveGenerate.
    case g @ Generate(generator, _, _, _, _, _) if generator.resolved => g

    case g @ Generate(generator, join, outer, qualifier, output, child) =>
      val newG = resolveExpressionBottomUp(generator, child, throws = true)
      if (newG.fastEquals(generator)) {
        g
      } else {
        Generate(newG.asInstanceOf[Generator], join, outer, qualifier, output, child)
      }

    // Skips plan which contains deserializer expressions, as they should be resolved by another
    // rule: ResolveDeserializer.
    case plan if containsDeserializer(plan.expressions) => plan

    // SPARK-25942: Resolves aggregate expressions with `AppendColumns`'s children, instead of
    // `AppendColumns`, because `AppendColumns`'s serializer might produce conflict attribute
    // names leading to ambiguous references exception.
    case a @ Aggregate(groupingExprs, aggExprs, appendColumns: AppendColumns) =>
      a.mapExpressions(resolveExpressionTopDown(_, appendColumns))

    case o: OverwriteByExpression if !o.outputResolved =>
      // do not resolve expression attributes until the query attributes are resolved against the
      // table by ResolveOutputRelation. that rule will alias the attributes to the table's names.
      o

    case m @ MergeIntoTable(targetTable, sourceTable, _, _, _)
        if !m.resolved && targetTable.resolved && sourceTable.resolved =>
      EliminateSubqueryAliases(targetTable) match {
        case r: NamedRelation if r.skipSchemaResolution =>
          // Do not resolve the expression if the target table accepts any schema.
          // This allows data sources to customize their own resolution logic using
          // custom resolution rules.
          m
        case _ =>
          val newMatchedActions = m.matchedActions.map {
            case DeleteAction(deleteCondition) =>
              val resolvedDeleteCondition = deleteCondition.map(resolveExpressionTopDown(_, m))
              DeleteAction(resolvedDeleteCondition)
            case UpdateAction(updateCondition, assignments) =>
              val resolvedUpdateCondition = updateCondition.map(resolveExpressionTopDown(_, m))
              // The update value can access columns from both target and source tables.
              UpdateAction(
                resolvedUpdateCondition,
                resolveAssignments(assignments, m, resolveValuesWithSourceOnly = false))
            case o => o
          }
          val newNotMatchedActions = m.notMatchedActions.map {
            case InsertAction(insertCondition, assignments) =>
              // The insert action is used when not matched, so its condition and value can only
              // access columns from the source table.
              val resolvedInsertCondition =
                insertCondition.map(resolveExpressionTopDown(_, Project(Nil, m.sourceTable)))
              InsertAction(
                resolvedInsertCondition,
                resolveAssignments(assignments, m, resolveValuesWithSourceOnly = true))
            case o => o
          }
          val resolvedMergeCondition = resolveExpressionTopDown(m.mergeCondition, m)
          m.copy(mergeCondition = resolvedMergeCondition,
            matchedActions = newMatchedActions,
            notMatchedActions = newNotMatchedActions)
      }

    case q: LogicalPlan =>
      logTrace(s"Attempting to resolve ${q.simpleString(SQLConf.get.maxToStringFields)}")
      q.mapExpressions(resolveExpressionTopDown(_, q))
  }

  /**
   * Resolves MERGE assignment keys against the target table and values against the source
   * table (or both tables for UPDATE). An empty assignment list expands to a positional
   * column-by-column mapping from source output to target output.
   */
  def resolveAssignments(
      assignments: Seq[Assignment],
      mergeInto: MergeIntoTable,
      resolveValuesWithSourceOnly: Boolean): Seq[Assignment] = {
    if (assignments.isEmpty) {
      val expandedColumns = mergeInto.targetTable.output
      val expandedValues = mergeInto.sourceTable.output
      expandedColumns.zip(expandedValues).map(kv => Assignment(kv._1, kv._2))
    } else {
      assignments.map { assign =>
        val resolvedKey = assign.key match {
          case c if !c.resolved =>
            resolveExpressionTopDown(c, Project(Nil, mergeInto.targetTable))
          case o => o
        }
        val resolvedValue = assign.value match {
          // The update values may contain target and/or source references.
          case c if !c.resolved =>
            if (resolveValuesWithSourceOnly) {
              resolveExpressionTopDown(c, Project(Nil, mergeInto.sourceTable))
            } else {
              resolveExpressionTopDown(c, mergeInto)
            }
          case o => o
        }
        Assignment(resolvedKey, resolvedValue)
      }
    }
  }

  /** Re-creates each Alias with a fresh expression ID; other expressions are untouched. */
  def newAliases(expressions: Seq[NamedExpression]): Seq[NamedExpression] = {
    expressions.map {
      case a: Alias => Alias(a.child, a.name)()
      case other => other
    }
  }

  /** Collects the attributes produced by the Aliases in a project list. */
  def findAliases(projectList: Seq[NamedExpression]): AttributeSet = {
    AttributeSet(projectList.collect { case a: Alias => a.toAttribute })
  }

  /**
   * Build a project list for Project/Aggregate and expand the star if possible
   */
  private def buildExpandedProjectList(
      exprs: Seq[NamedExpression],
      child: LogicalPlan): Seq[NamedExpression] = {
    exprs.flatMap {
      // Using Dataframe/Dataset API: testData2.groupBy($"a", $"b").agg($"*")
      case s: Star => s.expand(child, resolver)
      // Using SQL API without running ResolveAlias: SELECT * FROM testData2 group by a, b
      case UnresolvedAlias(s: Star, _) => s.expand(child, resolver)
      case o if containsStar(o :: Nil) => expandStarExpression(o, child) :: Nil
      case o => o :: Nil
    }.map(_.asInstanceOf[NamedExpression])
  }

  /**
   * Returns true if `exprs` contains a [[Star]].
   */
  def containsStar(exprs: Seq[Expression]): Boolean =
    exprs.exists(_.collect { case _: Star => true }.nonEmpty)

  /**
   * Expands the matching attribute.*'s in `child`'s output.
   */
  def expandStarExpression(expr: Expression, child: LogicalPlan): Expression = {
    expr.transformUp {
      case f1: UnresolvedFunction if containsStar(f1.arguments) =>
        f1.copy(arguments = f1.arguments.flatMap {
          case s: Star => s.expand(child, resolver)
          case o => o :: Nil
        })
      case c: CreateNamedStruct if containsStar(c.valExprs) =>
        // Children alternate key/value; only a star in value position is expanded.
        val newChildren = c.children.grouped(2).flatMap {
          case Seq(k, s : Star) => CreateStruct(s.expand(child, resolver)).children
          case kv => kv
        }
        c.copy(children = newChildren.toList )
      case c: CreateArray if containsStar(c.children) =>
        c.copy(children = c.children.flatMap {
          case s: Star => s.expand(child, resolver)
          case o => o :: Nil
        })
      case p: Murmur3Hash if containsStar(p.children) =>
        p.copy(children = p.children.flatMap {
          case s: Star => s.expand(child, resolver)
          case o => o :: Nil
        })
      case p: XxHash64 if containsStar(p.children) =>
        p.copy(children = p.children.flatMap {
          case s: Star => s.expand(child, resolver)
          case o => o :: Nil
        })
      // count(*) has been replaced by count(1)
      case o if containsStar(o.children) =>
        failAnalysis(s"Invalid usage of '*' in expression '${o.prettyName}'")
    }
  }
}
// Returns true when any expression in `exprs` still contains an UnresolvedDeserializer,
// in which case the plan must be handled by ResolveDeserializer instead.
private def containsDeserializer(exprs: Seq[Expression]): Boolean =
  exprs.exists { expr =>
    expr.find(_.isInstanceOf[UnresolvedDeserializer]).nonEmpty
  }
/**
 * Literal functions do not require the user to specify braces when calling them
 * When an attribute is not resolvable, we try to resolve it as a literal function.
 */
private def resolveLiteralFunction(
    nameParts: Seq[String],
    attribute: UnresolvedAttribute,
    plan: LogicalPlan): Option[Expression] = {
  if (nameParts.length != 1) {
    None
  } else {
    val name = nameParts.head
    // support CURRENT_DATE and CURRENT_TIMESTAMP
    val literalFunctions = Seq(CurrentDate(), CurrentTimestamp())
    val matched = literalFunctions.find(func => caseInsensitiveResolution(func.prettyName, name))
    // When the attribute occurs directly in a projection/aggregate/window list it must stay
    // a NamedExpression, so the resolved function is wrapped in an Alias.
    val isNamedExpression = plan match {
      case Aggregate(_, aggregateExpressions, _) => aggregateExpressions.contains(attribute)
      case Project(projectList, _) => projectList.contains(attribute)
      case Window(windowExpressions, _, _, _) => windowExpressions.contains(attribute)
      case _ => false
    }
    matched.map { func =>
      if (isNamedExpression) Alias(func, toPrettySQL(func))() else func
    }
  }
}
/**
 * Resolves the attribute, column value and extract value expressions(s) by traversing the
 * input expression in bottom-up manner. In order to resolve the nested complex type fields
 * correctly, this function makes use of `throws` parameter to control when to raise an
 * AnalysisException.
 *
 * Example :
 * SELECT a.b FROM t ORDER BY b[0].d
 *
 * In the above example, in b needs to be resolved before d can be resolved. Given we are
 * doing a bottom up traversal, it will first attempt to resolve d and fail as b has not
 * been resolved yet. If `throws` is false, this function will handle the exception by
 * returning the original attribute. In this case `d` will be resolved in subsequent passes
 * after `b` is resolved.
 */
protected[sql] def resolveExpressionBottomUp(
    expr: Expression,
    plan: LogicalPlan,
    throws: Boolean = false): Expression = {
  if (expr.resolved) {
    expr
  } else {
    // Resolve the expression in one round. If the desired attribute doesn't exist yet
    // (e.g. resolving `a.b` before `a` is known) and throws == false, fall back to the
    // original expression; otherwise let the AnalysisException propagate.
    try {
      expr transformUp {
        case GetColumnByOrdinal(ordinal, _) => plan.output(ordinal)
        case unresolved @ UnresolvedAttribute(nameParts) =>
          val resolved = withPosition(unresolved) {
            plan.resolve(nameParts, resolver)
              .orElse(resolveLiteralFunction(nameParts, unresolved, plan))
              .getOrElse(unresolved)
          }
          logDebug(s"Resolving $unresolved to $resolved")
          resolved
        case UnresolvedExtractValue(child, fieldName) if child.resolved =>
          ExtractValue(child, fieldName, resolver)
      }
    } catch {
      case _: AnalysisException if !throws => expr
    }
  }
}
/**
 * In many dialects of SQL it is valid to use ordinal positions in order/sort by and group by
 * clauses. This rule converts those ordinal positions to the corresponding expressions in the
 * select list. This support was introduced in Spark 2.0.
 *
 * - When the sort references or group by expressions are not integer but foldable expressions,
 *   just ignore them.
 * - When spark.sql.orderByOrdinal/spark.sql.groupByOrdinal is set to false, ignore the position
 *   numbers too.
 *
 * Before the release of Spark 2.0, the literals in order/sort by and group by clauses
 * have no effect on the results.
 */
object ResolveOrdinalInOrderByAndGroupBy extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    case p if !p.childrenResolved => p

    // ORDER BY: an ordinal is a 1-based position into the child's projection list.
    case Sort(orders, global, child)
        if orders.exists(_.child.isInstanceOf[UnresolvedOrdinal]) =>
      val newOrders = orders.map {
        case order @ SortOrder(UnresolvedOrdinal(index), direction, nullOrdering, _) =>
          if (index < 1 || index > child.output.size) {
            order.failAnalysis(
              s"ORDER BY position $index is not in select list " +
                s"(valid range is [1, ${child.output.size}])")
          } else {
            SortOrder(child.output(index - 1), direction, nullOrdering, Set.empty)
          }
        case other => other
      }
      Sort(newOrders, global, child)

    // GROUP BY: an ordinal is a 1-based position into aggregateExpressions, i.e. the
    // output columns of the select expression.
    case Aggregate(groups, aggs, child) if aggs.forall(_.resolved) &&
        groups.exists(_.isInstanceOf[UnresolvedOrdinal]) =>
      val newGroups = groups.map {
        case ordinal @ UnresolvedOrdinal(index) =>
          if (index > 0 && index <= aggs.size) {
            aggs(index - 1)
          } else {
            ordinal.failAnalysis(
              s"GROUP BY position $index is not in select list " +
                s"(valid range is [1, ${aggs.size}])")
          }
        case other => other
      }
      Aggregate(newGroups, aggs, child)
  }
}
/**
 * Replace unresolved expressions in grouping keys with resolved ones in SELECT clauses.
 * This rule is expected to run after [[ResolveReferences]] applied.
 */
object ResolveAggAliasInGroupBy extends Rule[LogicalPlan] {

  // Strict check: only fall back to aggregate-list aliases when the name cannot be
  // resolved from the child plan's own output.
  private def notResolvableByChild(attrName: String, child: LogicalPlan): Boolean =
    !child.output.exists(attr => resolver(attr.name, attrName))

  // Rewrites each unresolved attribute in `exprs` to the matching aliased aggregate
  // expression, when the child cannot resolve the name directly.
  private def mayResolveAttrByAggregateExprs(
      exprs: Seq[Expression], aggs: Seq[NamedExpression], child: LogicalPlan): Seq[Expression] = {
    exprs.map { expr =>
      expr.transform {
        case u: UnresolvedAttribute if notResolvableByChild(u.name, child) =>
          aggs.find(agg => resolver(agg.name, u.name)).getOrElse(u)
      }
    }
  }

  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    case agg @ Aggregate(groups, aggs, child)
        if conf.groupByAliases && child.resolved && aggs.forall(_.resolved) &&
          groups.exists(!_.resolved) =>
      agg.copy(groupingExpressions = mayResolveAttrByAggregateExprs(groups, aggs, child))

    case gs @ GroupingSets(selectedGroups, groups, child, aggs)
        if conf.groupByAliases && child.resolved && aggs.forall(_.resolved) &&
          groups.exists(_.isInstanceOf[UnresolvedAttribute]) =>
      gs.copy(
        selectedGroupByExprs = selectedGroups.map(mayResolveAttrByAggregateExprs(_, aggs, child)),
        groupByExprs = mayResolveAttrByAggregateExprs(groups, aggs, child))
  }
}
/**
* In many dialects of SQL it is valid to sort by attributes that are not present in the SELECT
* clause. This rule detects such queries and adds the required attributes to the original
* projection, so that they will be available during sorting. Another projection is added to
* remove these attributes after sorting.
*
* The HAVING clause could also use grouping columns that are not present in the SELECT.
*/
object ResolveMissingReferences extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    // Skip sort with aggregate. This will be handled in ResolveAggregateFunctions
    case sa @ Sort(_, _, child: Aggregate) => sa

    case s @ Sort(order, _, child)
        if (!s.resolved || s.missingInput.nonEmpty) && child.resolved =>
      val (newOrder, newChild) = resolveExprsAndAddMissingAttrs(order, child)
      val ordering = newOrder.map(_.asInstanceOf[SortOrder])
      if (child.output == newChild.output) {
        // No extra attributes were needed; just install the resolved ordering.
        s.copy(order = ordering)
      } else {
        // Add missing attributes and then project them away.
        val newSort = s.copy(order = ordering, child = newChild)
        Project(child.output, newSort)
      }
    case f @ Filter(cond, child) if (!f.resolved || f.missingInput.nonEmpty) && child.resolved =>
      val (newCond, newChild) = resolveExprsAndAddMissingAttrs(Seq(cond), child)
      if (child.output == newChild.output) {
        // No extra attributes were needed; just install the resolved condition.
        f.copy(condition = newCond.head)
      } else {
        // Add missing attributes and then project them away.
        val newFilter = Filter(newCond.head, newChild)
        Project(child.output, newFilter)
      }
  }

  /**
   * This method tries to resolve expressions and find missing attributes recursively. Specially,
   * when the expressions used in `Sort` or `Filter` contain unresolved attributes or resolved
   * attributes which are missed from child output. This method tries to find the missing
   * attributes out and add into the projection.
   *
   * Returns the (possibly partially) resolved expressions together with a rewritten plan whose
   * output has been widened to include any attributes the expressions need.
   */
  private def resolveExprsAndAddMissingAttrs(
      exprs: Seq[Expression], plan: LogicalPlan): (Seq[Expression], LogicalPlan) = {
    // Missing attributes can be unresolved attributes or resolved attributes which are not in
    // the output attributes of the plan.
    if (exprs.forall(e => e.resolved && e.references.subsetOf(plan.outputSet))) {
      // Nothing missing: return the inputs unchanged (recursion base case).
      (exprs, plan)
    } else {
      plan match {
        case p: Project =>
          // Resolving expressions against current plan.
          val maybeResolvedExprs = exprs.map(resolveExpressionBottomUp(_, p))
          // Recursively resolving expressions on the child of current plan.
          val (newExprs, newChild) = resolveExprsAndAddMissingAttrs(maybeResolvedExprs, p.child)
          // If some attributes used by expressions are resolvable only on the rewritten child
          // plan, we need to add them into original projection.
          val missingAttrs = (AttributeSet(newExprs) -- p.outputSet).intersect(newChild.outputSet)
          (newExprs, Project(p.projectList ++ missingAttrs, newChild))
        case a @ Aggregate(groupExprs, aggExprs, child) =>
          val maybeResolvedExprs = exprs.map(resolveExpressionBottomUp(_, a))
          val (newExprs, newChild) = resolveExprsAndAddMissingAttrs(maybeResolvedExprs, child)
          val missingAttrs = (AttributeSet(newExprs) -- a.outputSet).intersect(newChild.outputSet)
          if (missingAttrs.forall(attr => groupExprs.exists(_.semanticEquals(attr)))) {
            // All the missing attributes are grouping expressions, valid case.
            (newExprs, a.copy(aggregateExpressions = aggExprs ++ missingAttrs, child = newChild))
          } else {
            // Need to add non-grouping attributes, invalid case.
            // Return the original expressions untouched; analysis will fail later with a
            // clearer error than we could produce here.
            (exprs, a)
          }
        case g: Generate =>
          val maybeResolvedExprs = exprs.map(resolveExpressionBottomUp(_, g))
          val (newExprs, newChild) = resolveExprsAndAddMissingAttrs(maybeResolvedExprs, g.child)
          // Reset unrequiredChildIndex: attributes previously pruned from the child may now
          // be required by the expressions being resolved.
          (newExprs, g.copy(unrequiredChildIndex = Nil, child = newChild))

        // For `Distinct` and `SubqueryAlias`, we can't recursively resolve and add attributes
        // via its children.
        case u: UnaryNode if !u.isInstanceOf[Distinct] && !u.isInstanceOf[SubqueryAlias] =>
          val maybeResolvedExprs = exprs.map(resolveExpressionBottomUp(_, u))
          val (newExprs, newChild) = resolveExprsAndAddMissingAttrs(maybeResolvedExprs, u.child)
          (newExprs, u.withNewChildren(Seq(newChild)))

        // For other operators, we can't recursively resolve and add attributes via its children.
        case other =>
          (exprs.map(resolveExpressionBottomUp(_, other)), other)
      }
    }
  }
}
/**
* Checks whether a function identifier referenced by an [[UnresolvedFunction]] is defined in the
* function registry. Note that this rule doesn't try to resolve the [[UnresolvedFunction]]. It
* only performs simple existence check according to the function identifier to quickly identify
* undefined functions without triggering relation resolution, which may incur potentially
* expensive partition/schema discovery process in some cases.
* In order to avoid duplicate external function lookups, each external function identifier is
* stored in the local hash set externalFunctionNameSet.
* @see [[ResolveFunctions]]
* @see https://issues.apache.org/jira/browse/SPARK-19737
*/
object LookupFunctions extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = {
    // Remembers persistent functions already verified, so each external function is
    // looked up in the external catalog at most once per plan.
    val seenExternalFunctions = new mutable.HashSet[FunctionIdentifier]()
    plan.resolveExpressions {
      // Already verified as a persistent function earlier in this pass.
      case f: UnresolvedFunction
          if seenExternalFunctions.contains(normalizeFuncName(f.name)) => f
      // Built-in or temporary function registered in the session.
      case f: UnresolvedFunction if v1SessionCatalog.isRegisteredFunction(f.name) => f
      // Persistent (external catalog) function: cache it and keep going.
      case f: UnresolvedFunction if v1SessionCatalog.isPersistentFunction(f.name) =>
        seenExternalFunctions += normalizeFuncName(f.name)
        f
      // Unknown everywhere: fail fast without triggering relation resolution.
      case f: UnresolvedFunction =>
        withPosition(f) {
          throw new NoSuchFunctionException(
            f.name.database.getOrElse(v1SessionCatalog.getCurrentDatabase),
            f.name.funcName)
        }
    }
  }

  /** Canonicalizes a function identifier (case folding + default database) for cache keys. */
  def normalizeFuncName(name: FunctionIdentifier): FunctionIdentifier = {
    val funcName =
      if (conf.caseSensitiveAnalysis) name.funcName else name.funcName.toLowerCase(Locale.ROOT)
    val databaseName = name.database
      .map(formatDatabaseName)
      .getOrElse(v1SessionCatalog.getCurrentDatabase)
    FunctionIdentifier(funcName, Some(databaseName))
  }

  /** Case-folds a database name unless the analysis is case sensitive. */
  protected def formatDatabaseName(name: String): String =
    if (conf.caseSensitiveAnalysis) name else name.toLowerCase(Locale.ROOT)
}
/**
* Replaces [[UnresolvedFunction]]s with concrete [[Expression]]s.
*/
object ResolveFunctions extends Rule[LogicalPlan] {
  // Emit the two-parameter TRIM deprecation warning at most once per JVM.
  val trimWarningEnabled = new AtomicBoolean(true)
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    case q: LogicalPlan =>
      q transformExpressions {
        case u if !u.childrenResolved => u // Skip until children are resolved.
        // `grouping__id` (Hive compatibility): resolve it to GroupingID with no children;
        // ResolveGroupingAnalytics fills in the actual grouping expressions later.
        case u: UnresolvedAttribute if resolver(u.name, VirtualColumn.hiveGroupingIdName) =>
          withPosition(u) {
            Alias(GroupingID(Nil), VirtualColumn.hiveGroupingIdName)()
          }
        case u @ UnresolvedGenerator(name, children) =>
          withPosition(u) {
            v1SessionCatalog.lookupFunction(name, children) match {
              case generator: Generator => generator
              case other =>
                failAnalysis(s"$name is expected to be a generator. However, " +
                  s"its class is ${other.getClass.getCanonicalName}, which is not a generator.")
            }
          }
        case u @ UnresolvedFunction(funcId, arguments, isDistinct, filter) =>
          withPosition(u) {
            v1SessionCatalog.lookupFunction(funcId, arguments) match {
              // AggregateWindowFunctions are AggregateFunctions that can only be evaluated within
              // the context of a Window clause. They do not need to be wrapped in an
              // AggregateExpression.
              case wf: AggregateWindowFunction =>
                if (isDistinct || filter.isDefined) {
                  failAnalysis("DISTINCT or FILTER specified, " +
                    s"but ${wf.prettyName} is not an aggregate function")
                } else {
                  wf
                }
              // We get an aggregate function, we need to wrap it in an AggregateExpression.
              case agg: AggregateFunction =>
                // TODO: SPARK-30276 Support Filter expression allows simultaneous use of DISTINCT
                if (filter.isDefined) {
                  if (isDistinct) {
                    failAnalysis("DISTINCT and FILTER cannot be used in aggregate functions " +
                      "at the same time")
                  } else if (!filter.get.deterministic) {
                    failAnalysis("FILTER expression is non-deterministic, " +
                      "it cannot be used in aggregate functions")
                  }
                }
                AggregateExpression(agg, Complete, isDistinct, filter)
              // This function is not an aggregate function, just return the resolved one.
              // NOTE: this case intentionally precedes the String2TrimExpression case below,
              // so a trim call with DISTINCT/FILTER fails here rather than being warned about.
              case other if (isDistinct || filter.isDefined) =>
                failAnalysis("DISTINCT or FILTER specified, " +
                  s"but ${other.prettyName} is not an aggregate function")
              case e: String2TrimExpression if arguments.size == 2 =>
                if (trimWarningEnabled.get) {
                  log.warn("Two-parameter TRIM/LTRIM/RTRIM function signatures are deprecated." +
                    " Use SQL syntax `TRIM((BOTH | LEADING | TRAILING)? trimStr FROM str)`" +
                    " instead.")
                  trimWarningEnabled.set(false)
                }
                e
              case other =>
                other
            }
          }
      }
  }
}
/**
* This rule resolves and rewrites subqueries inside expressions.
*
* Note: CTEs are handled in CTESubstitution.
*/
object ResolveSubquery extends Rule[LogicalPlan] with PredicateHelper {
  /**
   * Resolve the correlated expressions in a subquery by using the outer plans' references. All
   * resolved outer references are wrapped in an [[OuterReference]].
   */
  private def resolveOuterReferences(plan: LogicalPlan, outer: LogicalPlan): LogicalPlan = {
    plan resolveOperatorsDown {
      case q: LogicalPlan if q.childrenResolved && !q.resolved =>
        q transformExpressions {
          case u @ UnresolvedAttribute(nameParts) =>
            withPosition(u) {
              try {
                outer.resolve(nameParts, resolver) match {
                  case Some(outerAttr) => OuterReference(outerAttr)
                  case None => u
                }
              } catch {
                // Resolution against the outer plan may itself fail (e.g. ambiguity);
                // leave the attribute unresolved in that case.
                case _: AnalysisException => u
              }
            }
        }
    }
  }

  /**
   * Resolves the subquery plan that is referenced in a subquery expression. The normal
   * attribute references are resolved using regular analyzer and the outer references are
   * resolved from the outer plans using the resolveOuterReferences method.
   *
   * Outer references from the correlated predicates are updated as children of
   * Subquery expression.
   */
  private def resolveSubQuery(
      e: SubqueryExpression,
      plans: Seq[LogicalPlan])(
      f: (LogicalPlan, Seq[Expression]) => SubqueryExpression): SubqueryExpression = {
    // Step 1: Resolve the outer expressions.
    var previous: LogicalPlan = null
    var current = e.plan
    do {
      // Try to resolve the subquery plan using the regular analyzer.
      previous = current
      current = executeSameContext(current)

      // Use the outer references to resolve the subquery plan if it isn't resolved yet.
      val i = plans.iterator
      val afterResolve = current
      // NOTE(review): once resolveOuterReferences changes `current`, fastEquals(afterResolve)
      // becomes false and the inner loop exits — presumably intentional, so that a full
      // analyzer pass runs after each outer-resolution step before further outer plans are
      // consulted. The outer do/while loops until a fixpoint (no progress) or resolution.
      while (!current.resolved && current.fastEquals(afterResolve) && i.hasNext) {
        current = resolveOuterReferences(current, i.next())
      }
    } while (!current.resolved && !current.fastEquals(previous))

    // Step 2: If the subquery plan is fully resolved, pull the outer references and record
    // them as children of SubqueryExpression.
    if (current.resolved) {
      // Record the outer references as children of subquery expression.
      f(current, SubExprUtils.getOuterReferences(current))
    } else {
      e.withNewPlan(current)
    }
  }

  /**
   * Resolves the subquery. Apart of resolving the subquery and outer references (if any)
   * in the subquery plan, the children of subquery expression are updated to record the
   * outer references. This is needed to make sure
   * (1) The column(s) referred from the outer query are not pruned from the plan during
   *     optimization.
   * (2) Any aggregate expression(s) that reference outer attributes are pushed down to
   *     outer plan to get evaluated.
   */
  private def resolveSubQueries(plan: LogicalPlan, plans: Seq[LogicalPlan]): LogicalPlan = {
    plan transformExpressions {
      case s @ ScalarSubquery(sub, _, exprId) if !sub.resolved =>
        resolveSubQuery(s, plans)(ScalarSubquery(_, _, exprId))
      case e @ Exists(sub, _, exprId) if !sub.resolved =>
        resolveSubQuery(e, plans)(Exists(_, _, exprId))
      case InSubquery(values, l @ ListQuery(_, _, exprId, _))
          if values.forall(_.resolved) && !l.resolved =>
        // Expand the outer reference in the subquery plan into the final expression.
        val expr = resolveSubQuery(l, plans)((plan, exprs) => {
          ListQuery(plan, exprs, exprId, plan.output)
        })
        InSubquery(values, expr.asInstanceOf[ListQuery])
    }
  }

  /**
   * Resolve and rewrite all subqueries in an operator tree.
   */
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    // In case of HAVING (a filter after an aggregate) we use both the aggregate and
    // its child for resolution.
    case f @ Filter(_, a: Aggregate) if f.childrenResolved =>
      resolveSubQueries(f, Seq(a, a.child))
    // Only a few unary nodes (Project/Filter/Aggregate) can contain subqueries.
    case q: UnaryNode if q.childrenResolved =>
      resolveSubQueries(q, q.children)
    case j: Join if j.childrenResolved =>
      resolveSubQueries(j, Seq(j, j.left, j.right))
    case s: SupportsSubquery if s.childrenResolved =>
      resolveSubQueries(s, s.children)
  }
}
/**
* Replaces unresolved column aliases for a subquery with projections.
*/
object ResolveSubqueryColumnAliases extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    case u @ UnresolvedSubqueryColumnAliases(columnNames, child) if child.resolved =>
      // Renames a subquery's output columns when the query supplies alias names,
      // e.g. SELECT * FROM (SELECT 1 AS a, 1 AS b) t(col1, col2).
      val outputAttrs = child.output
      // The alias list must cover the subquery's output exactly.
      if (columnNames.size != outputAttrs.size) {
        u.failAnalysis("Number of column aliases does not match number of columns. " +
          s"Number of column aliases: ${columnNames.size}; " +
          s"number of columns: ${outputAttrs.size}.")
      }
      val aliasedOutput = columnNames.zip(outputAttrs).map { case (aliasName, attr) =>
        Alias(attr, aliasName)()
      }
      Project(aliasedOutput, child)
  }
}
/**
* Turns projections that contain aggregate expressions into aggregations.
*/
object GlobalAggregates extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    // A projection containing a (non-windowed) aggregate is really a global aggregation.
    case Project(projectList, child) if containsAggregates(projectList) =>
      Aggregate(Nil, projectList, child)
  }

  def containsAggregates(exprs: Seq[Expression]): Boolean = {
    // Aggregates appearing inside a window expression are evaluated by the Window
    // operator, so they must not turn this Project into an Aggregate.
    val windowedAggExprs: Set[Expression] = exprs.flatMap { e =>
      e.collect {
        case WindowExpression(agg: AggregateExpression, _) => agg
        case WindowExpression(udf: PythonUDF, _) if PythonUDF.isGroupedAggPandasUDF(udf) => udf
      }
    }.toSet

    // True iff some expression contains an aggregate that is not windowed.
    exprs.exists { e =>
      e.collectFirst {
        case agg: AggregateExpression if !windowedAggExprs.contains(agg) => agg
        case udf: PythonUDF
            if PythonUDF.isGroupedAggPandasUDF(udf) && !windowedAggExprs.contains(udf) => udf
      }.isDefined
    }
  }
}
/**
* This rule finds aggregate expressions that are not in an aggregate operator. For example,
* those in a HAVING clause or ORDER BY clause. These expressions are pushed down to the
* underlying aggregate operator and then projected away after the original operator.
*/
object ResolveAggregateFunctions extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    // HAVING case: a Filter directly on top of a resolved Aggregate.
    case f @ Filter(cond, agg @ Aggregate(grouping, originalAggExprs, child)) if agg.resolved =>
      // Try resolving the condition of the filter as though it is in the aggregate clause
      try {
        // Re-analyze the condition as a single aliased aggregate expression over the
        // same grouping and child.
        val aggregatedCondition =
          Aggregate(
            grouping,
            Alias(cond, "havingCondition")() :: Nil,
            child)
        val resolvedOperator = executeSameContext(aggregatedCondition)
        def resolvedAggregateFilter =
          resolvedOperator
            .asInstanceOf[Aggregate]
            .aggregateExpressions.head

        // If resolution was successful and we see the filter has an aggregate in it, add it to
        // the original aggregate operator.
        if (resolvedOperator.resolved) {
          // Try to replace all aggregate expressions in the filter by an alias.
          val aggregateExpressions = ArrayBuffer.empty[NamedExpression]
          val transformedAggregateFilter = resolvedAggregateFilter.transform {
            case ae: AggregateExpression =>
              val alias = Alias(ae, ae.toString)()
              aggregateExpressions += alias
              alias.toAttribute
            // Grouping functions are handled in the rule [[ResolveGroupingAnalytics]].
            case e: Expression if grouping.exists(_.semanticEquals(e)) &&
                !ResolveGroupingAnalytics.hasGroupingFunction(e) &&
                !agg.output.exists(_.semanticEquals(e)) =>
              e match {
                case ne: NamedExpression =>
                  aggregateExpressions += ne
                  ne.toAttribute
                case _ =>
                  // Non-named grouping expression: wrap it in an alias so it can be
                  // referenced from the pushed-down filter.
                  val alias = Alias(e, e.toString)()
                  aggregateExpressions += alias
                  alias.toAttribute
              }
          }

          // Push the aggregate expressions into the aggregate (if any).
          if (aggregateExpressions.nonEmpty) {
            // Project back to the original output so the extra expressions stay internal.
            Project(agg.output,
              Filter(transformedAggregateFilter,
                agg.copy(aggregateExpressions = originalAggExprs ++ aggregateExpressions)))
          } else {
            f
          }
        } else {
          f
        }
      } catch {
        // Attempting to resolve in the aggregate can result in ambiguity.  When this happens,
        // just return the original plan.
        case ae: AnalysisException => f
      }

    // ORDER BY case: a Sort directly on top of a resolved Aggregate.
    case sort @ Sort(sortOrder, global, aggregate: Aggregate) if aggregate.resolved =>

      // Try resolving the ordering as though it is in the aggregate clause.
      try {
        // If a sort order is unresolved, containing references not in aggregate, or containing
        // `AggregateExpression`, we need to push down it to the underlying aggregate operator.
        val unresolvedSortOrders = sortOrder.filter { s =>
          !s.resolved || !s.references.subsetOf(aggregate.outputSet) || containsAggregate(s)
        }
        val aliasedOrdering =
          unresolvedSortOrders.map(o => Alias(o.child, "aggOrder")())
        val aggregatedOrdering = aggregate.copy(aggregateExpressions = aliasedOrdering)
        val resolvedAggregate: Aggregate =
          executeSameContext(aggregatedOrdering).asInstanceOf[Aggregate]
        val resolvedAliasedOrdering: Seq[Alias] =
          resolvedAggregate.aggregateExpressions.asInstanceOf[Seq[Alias]]

        // If we pass the analysis check, then the ordering expressions should only reference to
        // aggregate expressions or grouping expressions, and it's safe to push them down to
        // Aggregate.
        checkAnalysis(resolvedAggregate)

        val originalAggExprs = aggregate.aggregateExpressions.map(
          CleanupAliases.trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])

        // If the ordering expression is same with original aggregate expression, we don't need
        // to push down this ordering expression and can reference the original aggregate
        // expression instead.
        val needsPushDown = ArrayBuffer.empty[NamedExpression]
        val evaluatedOrderings = resolvedAliasedOrdering.zip(unresolvedSortOrders).map {
          case (evaluated, order) =>
            val index = originalAggExprs.indexWhere {
              case Alias(child, _) => child semanticEquals evaluated.child
              case other => other semanticEquals evaluated.child
            }

            if (index == -1) {
              // Not already present in the aggregate output: push it down.
              needsPushDown += evaluated
              order.copy(child = evaluated.toAttribute)
            } else {
              // Reuse the existing aggregate expression's output attribute.
              order.copy(child = originalAggExprs(index).toAttribute)
            }
        }

        // Map each pushed-down sort order (by node identity) to its rewritten form.
        val sortOrdersMap = unresolvedSortOrders
          .map(new TreeNodeRef(_))
          .zip(evaluatedOrderings)
          .toMap
        val finalSortOrders = sortOrder.map(s => sortOrdersMap.getOrElse(new TreeNodeRef(s), s))

        // Since we don't rely on sort.resolved as the stop condition for this rule,
        // we need to check this and prevent applying this rule multiple times
        if (sortOrder == finalSortOrders) {
          sort
        } else {
          Project(aggregate.output,
            Sort(finalSortOrders, global,
              aggregate.copy(aggregateExpressions = originalAggExprs ++ needsPushDown)))
        }
      } catch {
        // Attempting to resolve in the aggregate can result in ambiguity.  When this happens,
        // just return the original plan.
        case ae: AnalysisException => sort
      }
  }

  /** Returns true if the expression contains any [[AggregateExpression]]. */
  def containsAggregate(condition: Expression): Boolean = {
    condition.find(_.isInstanceOf[AggregateExpression]).isDefined
  }
}
/**
* Extracts [[Generator]] from the projectList of a [[Project]] operator and creates [[Generate]]
* operator under [[Project]].
*
* This rule will throw [[AnalysisException]] for following cases:
* 1. [[Generator]] is nested in expressions, e.g. `SELECT explode(list) + 1 FROM tbl`
* 2. more than one [[Generator]] is found in projectList,
* e.g. `SELECT explode(list), explode(list) FROM tbl`
* 3. [[Generator]] is found in other operators that are not [[Project]] or [[Generate]],
* e.g. `SELECT * FROM tbl SORT BY explode(list)`
*/
object ExtractGenerator extends Rule[LogicalPlan] {
  /** Returns true if the expression tree contains any [[Generator]] node. */
  private def hasGenerator(expr: Expression): Boolean = {
    expr.find(_.isInstanceOf[Generator]).isDefined
  }

  /**
   * Returns true if a generator appears nested inside another expression (e.g.
   * `explode(list) + 1`), which is not supported and must fail analysis.
   */
  private def hasNestedGenerator(expr: NamedExpression): Boolean = {
    def hasInnerGenerator(g: Generator): Boolean = g match {
      // Since `GeneratorOuter` is just a wrapper of generators, we skip it here
      case go: GeneratorOuter =>
        hasInnerGenerator(go.child)
      case _ =>
        g.children.exists { _.find {
          case _: Generator => true
          case _ => false
        }.isDefined }
    }
    CleanupAliases.trimNonTopLevelAliases(expr) match {
      case UnresolvedAlias(g: Generator, _) => hasInnerGenerator(g)
      case Alias(g: Generator, _) => hasInnerGenerator(g)
      case MultiAlias(g: Generator, _) => hasInnerGenerator(g)
      // Not a (possibly aliased) top-level generator: any generator found here is nested.
      case other => hasGenerator(other)
    }
  }

  /** Returns true if any generator's arguments contain an aggregate function. */
  private def hasAggFunctionInGenerator(ne: Seq[NamedExpression]): Boolean = {
    ne.exists(_.find {
      case g: Generator =>
        g.children.exists(_.find(_.isInstanceOf[AggregateFunction]).isDefined)
      case _ =>
        false
    }.nonEmpty)
  }

  /** Strips a single level of (multi-)alias from an expression, for error reporting. */
  private def trimAlias(expr: NamedExpression): Expression = expr match {
    case UnresolvedAlias(child, _) => child
    case Alias(child, _) => child
    case MultiAlias(child, _) => child
    case _ => expr
  }

  private object AliasedGenerator {
    /**
     * Extracts a [[Generator]] expression, any names assigned by aliases to the outputs
     * and the outer flag. The outer flag is used when joining the generator output.
     * @param e the [[Expression]]
     * @return (the [[Generator]], seq of output names, outer flag)
     */
    def unapply(e: Expression): Option[(Generator, Seq[String], Boolean)] = e match {
      case Alias(GeneratorOuter(g: Generator), name) if g.resolved => Some((g, name :: Nil, true))
      case MultiAlias(GeneratorOuter(g: Generator), names) if g.resolved => Some((g, names, true))
      case Alias(g: Generator, name) if g.resolved => Some((g, name :: Nil, false))
      case MultiAlias(g: Generator, names) if g.resolved => Some((g, names, false))
      case _ => None
    }
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    // Error cases first: nested generators and multiple generators per clause.
    case Project(projectList, _) if projectList.exists(hasNestedGenerator) =>
      val nestedGenerator = projectList.find(hasNestedGenerator).get
      throw new AnalysisException("Generators are not supported when it's nested in " +
        "expressions, but got: " + toPrettySQL(trimAlias(nestedGenerator)))

    case Project(projectList, _) if projectList.count(hasGenerator) > 1 =>
      val generators = projectList.filter(hasGenerator).map(trimAlias)
      throw new AnalysisException("Only one generator allowed per select clause but found " +
        generators.size + ": " + generators.map(toPrettySQL).mkString(", "))

    case Aggregate(_, aggList, _) if aggList.exists(hasNestedGenerator) =>
      val nestedGenerator = aggList.find(hasNestedGenerator).get
      throw new AnalysisException("Generators are not supported when it's nested in " +
        "expressions, but got: " + toPrettySQL(trimAlias(nestedGenerator)))

    case Aggregate(_, aggList, _) if aggList.count(hasGenerator) > 1 =>
      val generators = aggList.filter(hasGenerator).map(trimAlias)
      throw new AnalysisException("Only one generator allowed per aggregate clause but found " +
        generators.size + ": " + generators.map(toPrettySQL).mkString(", "))

    // Aggregate containing exactly one (top-level, aliased) generator: rewrite the
    // generator's inputs as aggregate outputs and apply the generator in a Project above.
    case agg @ Aggregate(groupList, aggList, child) if aggList.forall {
        case AliasedGenerator(_, _, _) => true
        case other => other.resolved
      } && aggList.exists(hasGenerator) =>
      // If generator in the aggregate list was visited, set the boolean flag true.
      var generatorVisited = false

      val projectExprs = Array.ofDim[NamedExpression](aggList.length)
      val newAggList = aggList
        .map(CleanupAliases.trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
        .zipWithIndex
        .flatMap {
          case (AliasedGenerator(generator, names, outer), idx) =>
            // It's a sanity check, this should not happen as the previous case will throw
            // exception earlier.
            assert(!generatorVisited, "More than one generator found in aggregate.")
            generatorVisited = true

            // Alias each non-foldable generator input so it can be computed by the Aggregate.
            val newGenChildren: Seq[Expression] = generator.children.zipWithIndex.map {
              case (e, idx) => if (e.foldable) e else Alias(e, s"_gen_input_${idx}")()
            }
            val newGenerator = {
              val g = generator.withNewChildren(newGenChildren.map { e =>
                if (e.foldable) e else e.asInstanceOf[Alias].toAttribute
              }).asInstanceOf[Generator]
              if (outer) GeneratorOuter(g) else g
            }
            val newAliasedGenerator = if (names.length == 1) {
              Alias(newGenerator, names(0))()
            } else {
              MultiAlias(newGenerator, names)
            }
            projectExprs(idx) = newAliasedGenerator
            newGenChildren.filter(!_.foldable).asInstanceOf[Seq[NamedExpression]]
          case (other, idx) =>
            projectExprs(idx) = other.toAttribute
            other :: Nil
        }

      val newAgg = Aggregate(groupList, newAggList, child)
      Project(projectExprs.toList, newAgg)

    case p @ Project(projectList, _) if hasAggFunctionInGenerator(projectList) =>
      // If a generator has any aggregate function, we need to apply the `GlobalAggregates` rule
      // first for replacing `Project` with `Aggregate`.
      p

    // Project containing exactly one (top-level, aliased) generator: move it into a
    // Generate operator under the Project.
    case p @ Project(projectList, child) =>
      // Holds the resolved generator, if one exists in the project list.
      var resolvedGenerator: Generate = null

      val newProjectList = projectList
        .map(CleanupAliases.trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
        .flatMap {
          case AliasedGenerator(generator, names, outer) if generator.childrenResolved =>
            // It's a sanity check, this should not happen as the previous case will throw
            // exception earlier.
            assert(resolvedGenerator == null, "More than one generator found in SELECT.")

            resolvedGenerator =
              Generate(
                generator,
                unrequiredChildIndex = Nil,
                outer = outer,
                qualifier = None,
                generatorOutput = ResolveGenerate.makeGeneratorOutput(generator, names),
                child)

            resolvedGenerator.generatorOutput
          case other => other :: Nil
        }

      if (resolvedGenerator != null) {
        Project(newProjectList, resolvedGenerator)
      } else {
        p
      }

    case g: Generate => g

    // Generators are only allowed in SELECT clauses (Project/Aggregate) handled above.
    case p if p.expressions.exists(hasGenerator) =>
      throw new AnalysisException("Generators are not supported outside the SELECT clause, but " +
        "got: " + p.simpleString(SQLConf.get.maxToStringFields))
  }
}
/**
* Rewrites table generating expressions that either need one or more of the following in order
* to be resolved:
* - concrete attribute references for their output.
* - to be relocated from a SELECT clause (i.e. from a [[Project]]) into a [[Generate]]).
*
* Names for the output [[Attribute]]s are extracted from [[Alias]] or [[MultiAlias]] expressions
* that wrap the [[Generator]].
*/
object ResolveGenerate extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    // Wait until both the child plan and the generator itself are resolved.
    case g: Generate if !g.child.resolved || !g.generator.resolved => g
    case g: Generate if !g.resolved =>
      g.copy(generatorOutput = makeGeneratorOutput(g.generator, g.generatorOutput.map(_.name)))
  }

  /**
   * Construct the output attributes for a [[Generator]], given a list of names. If the list
   * of names is empty, the attribute names come from the generator's element schema.
   */
  private[analysis] def makeGeneratorOutput(
      generator: Generator,
      names: Seq[String]): Seq[Attribute] = {
    val elementAttrs = generator.elementSchema.toAttributes

    if (names.isEmpty) {
      elementAttrs
    } else if (names.length == elementAttrs.length) {
      elementAttrs.zip(names).map { case (attr, name) => attr.withName(name) }
    } else {
      failAnalysis(
        "The number of aliases supplied in the AS clause does not match the number of columns " +
          s"output by the UDTF expected ${elementAttrs.size} aliases but got " +
          s"${names.mkString(",")} ")
    }
  }
}
/**
* Extracts [[WindowExpression]]s from the projectList of a [[Project]] operator and
* aggregateExpressions of an [[Aggregate]] operator and creates individual [[Window]]
* operators for every distinct [[WindowSpecDefinition]].
*
* This rule handles three cases:
* - A [[Project]] having [[WindowExpression]]s in its projectList;
* - An [[Aggregate]] having [[WindowExpression]]s in its aggregateExpressions.
* - A [[Filter]]->[[Aggregate]] pattern representing GROUP BY with a HAVING
* clause and the [[Aggregate]] has [[WindowExpression]]s in its aggregateExpressions.
* Note: If there is a GROUP BY clause in the query, aggregations and corresponding
* filters (expressions in the HAVING clause) should be evaluated before any
* [[WindowExpression]]. If a query has SELECT DISTINCT, the DISTINCT part should be
* evaluated after all [[WindowExpression]]s.
*
* For every case, the transformation works as follows:
* 1. For a list of [[Expression]]s (a projectList or an aggregateExpressions), partitions
* it two lists of [[Expression]]s, one for all [[WindowExpression]]s and another for
* all regular expressions.
* 2. For all [[WindowExpression]]s, groups them based on their [[WindowSpecDefinition]]s
* and [[WindowFunctionType]]s.
* 3. For every distinct [[WindowSpecDefinition]] and [[WindowFunctionType]], creates a
* [[Window]] operator and inserts it into the plan tree.
*/
object ExtractWindowExpressions extends Rule[LogicalPlan] {
private def hasWindowFunction(exprs: Seq[Expression]): Boolean =
exprs.exists(hasWindowFunction)
private def hasWindowFunction(expr: Expression): Boolean = {
expr.find {
case window: WindowExpression => true
case _ => false
}.isDefined
}
/**
* From a Seq of [[NamedExpression]]s, extract expressions containing window expressions and
* other regular expressions that do not contain any window expression. For example, for
* `col1, Sum(col2 + col3) OVER (PARTITION BY col4 ORDER BY col5)`, we will extract
* `col1`, `col2 + col3`, `col4`, and `col5` out and replace their appearances in
* the window expression as attribute references. So, the first returned value will be
* `[Sum(_w0) OVER (PARTITION BY _w1 ORDER BY _w2)]` and the second returned value will be
* [col1, col2 + col3 as _w0, col4 as _w1, col5 as _w2].
*
* @return (seq of expressions containing at least one window expression,
* seq of non-window expressions)
*/
private def extract(
    expressions: Seq[NamedExpression]): (Seq[NamedExpression], Seq[NamedExpression]) = {
  // First, we partition the input expressions to two part. For the first part,
  // every expression in it contain at least one WindowExpression.
  // Expressions in the second part do not have any WindowExpression.
  val (expressionsWithWindowFunctions, regularExpressions) =
    expressions.partition(hasWindowFunction)

  // Then, we need to extract those regular expressions used in the WindowExpression.
  // For example, when we have col1 - Sum(col2 + col3) OVER (PARTITION BY col4 ORDER BY col5),
  // we need to make sure that col1 to col5 are all projected from the child of the Window
  // operator.
  val extractedExprBuffer = new ArrayBuffer[NamedExpression]()

  // Rewrites `expr` into something the child projection can provide: named expressions are
  // recorded (when not already projected) and kept as-is; foldable expressions are left
  // alone; anything else is aliased as an internal "_wN" column and replaced by its
  // attribute reference.
  def extractExpr(expr: Expression): Expression = expr match {
    case ne: NamedExpression =>
      // If a named expression is not in regularExpressions, add it to
      // extractedExprBuffer and replace it with an AttributeReference.
      val missingExpr =
        AttributeSet(Seq(expr)) -- (regularExpressions ++ extractedExprBuffer)
      if (missingExpr.nonEmpty) {
        extractedExprBuffer += ne
      }
      // alias will be cleaned in the rule CleanupAliases
      ne
    case e: Expression if e.foldable =>
      e // No need to create an attribute reference if it will be evaluated as a Literal.
    case e: Expression =>
      // For other expressions, we extract it and replace it with an AttributeReference (with
      // an internal column name, e.g. "_w0").
      val withName = Alias(e, s"_w${extractedExprBuffer.length}")()
      extractedExprBuffer += withName
      withName.toAttribute
  }

  // Now, we extract regular expressions from expressionsWithWindowFunctions
  // by using extractExpr.
  // Windowed aggregates seen so far; consulted below so a windowed aggregate is not
  // re-extracted as a plain (non-windowed) AggregateExpression.
  val seenWindowAggregates = new ArrayBuffer[AggregateExpression]
  val newExpressionsWithWindowFunctions = expressionsWithWindowFunctions.map {
    _.transform {
      // Extracts children expressions of a WindowFunction (input parameters of
      // a WindowFunction).
      case wf: WindowFunction =>
        val newChildren = wf.children.map(extractExpr)
        wf.withNewChildren(newChildren)

      // Extracts expressions from the partition spec and order spec.
      case wsc @ WindowSpecDefinition(partitionSpec, orderSpec, _) =>
        val newPartitionSpec = partitionSpec.map(extractExpr)
        val newOrderSpec = orderSpec.map { so =>
          val newChild = extractExpr(so.child)
          so.copy(child = newChild)
        }
        wsc.copy(partitionSpec = newPartitionSpec, orderSpec = newOrderSpec)

      // FILTER (WHERE ...) on a windowed aggregate is not supported yet; fail early with a
      // clear message instead of producing an incorrect plan.
      case WindowExpression(ae: AggregateExpression, _) if ae.filter.isDefined =>
        failAnalysis(
          "window aggregate function with filter predicate is not supported yet.")

      // Extract Windowed AggregateExpression
      case we @ WindowExpression(
          ae @ AggregateExpression(function, _, _, _, _),
          spec: WindowSpecDefinition) =>
        val newChildren = function.children.map(extractExpr)
        val newFunction = function.withNewChildren(newChildren).asInstanceOf[AggregateFunction]
        val newAgg = ae.copy(aggregateFunction = newFunction)
        seenWindowAggregates += newAgg
        WindowExpression(newAgg, spec)

      case AggregateExpression(aggFunc, _, _, _, _) if hasWindowFunction(aggFunc.children) =>
        failAnalysis("It is not allowed to use a window function inside an aggregate " +
          "function. Please use the inner window function in a sub-query.")

      // Extracts AggregateExpression. For example, for SUM(x) - Sum(y) OVER (...),
      // we need to extract SUM(x).
      case agg: AggregateExpression if !seenWindowAggregates.contains(agg) =>
        val withName = Alias(agg, s"_w${extractedExprBuffer.length}")()
        extractedExprBuffer += withName
        withName.toAttribute

      // Extracts other attributes
      case attr: Attribute => extractExpr(attr)

    }.asInstanceOf[NamedExpression]
  }

  (newExpressionsWithWindowFunctions, regularExpressions ++ extractedExprBuffer)
} // end of extract
/**
 * Adds operators for Window Expressions. Every Window operator handles a single Window Spec.
 *
 * WindowExpressions are first pulled out of `expressionsWithWindowFunctions` (one buffer
 * entry per WindowExpression, reusing an existing alias when present) and replaced by
 * attribute references. The extracted expressions are then grouped by
 * (partition spec, order spec, window function type), one Window operator is stacked on top
 * of `child` per group, and a final Project restores the requested output expressions.
 */
private def addWindow(
    expressionsWithWindowFunctions: Seq[NamedExpression],
    child: LogicalPlan): LogicalPlan = {
  // First, we need to extract all WindowExpressions from expressionsWithWindowFunctions
  // and put those extracted WindowExpressions to extractedWindowExprBuffer.
  // This step is needed because it is possible that an expression contains multiple
  // WindowExpressions with different Window Specs.
  // After extracting WindowExpressions, we need to construct a project list to generate
  // expressionsWithWindowFunctions based on extractedWindowExprBuffer.
  // For example, for "sum(a) over (...) / sum(b) over (...)", we will first extract
  // "sum(a) over (...)" and "sum(b) over (...)" out, and assign "_we0" as the alias to
  // "sum(a) over (...)" and "_we1" as the alias to "sum(b) over (...)".
  // Then, the projectList will be [_we0/_we1].
  val extractedWindowExprBuffer = new ArrayBuffer[NamedExpression]()
  val newExpressionsWithWindowFunctions = expressionsWithWindowFunctions.map {
    // We need to use transformDown because we want to trigger
    // "case alias @ Alias(window: WindowExpression, _)" first.
    _.transformDown {
      case alias @ Alias(window: WindowExpression, _) =>
        // If a WindowExpression has an assigned alias, just use it.
        extractedWindowExprBuffer += alias
        alias.toAttribute
      case window: WindowExpression =>
        // If there is no alias assigned to the WindowExpressions. We create an
        // internal column.
        val withName = Alias(window, s"_we${extractedWindowExprBuffer.length}")()
        extractedWindowExprBuffer += withName
        withName.toAttribute
    }.asInstanceOf[NamedExpression]
  }

  // Second, we group extractedWindowExprBuffer based on their Partition and Order Specs.
  val groupedWindowExpressions = extractedWindowExprBuffer.groupBy { expr =>
    val distinctWindowSpec = expr.collect {
      case window: WindowExpression => window.windowSpec
    }.distinct

    // We do a final check and see if we only have a single Window Spec defined in an
    // expressions.
    if (distinctWindowSpec.isEmpty) {
      failAnalysis(s"$expr does not have any WindowExpression.")
    } else if (distinctWindowSpec.length > 1) {
      // newExpressionsWithWindowFunctions only have expressions with a single
      // WindowExpression. If we reach here, we have a bug.
      // Fix: a separating space was missing between the two concatenated sentences, so the
      // user-facing message used to read "...).Please file a bug report...".
      failAnalysis(s"$expr has multiple Window Specifications ($distinctWindowSpec). " +
        "Please file a bug report with this error message, stack trace, and the query.")
    } else {
      val spec = distinctWindowSpec.head
      (spec.partitionSpec, spec.orderSpec, WindowFunctionType.functionType(expr))
    }
  }.toSeq

  // Third, we aggregate them by adding each Window operator for each Window Spec and then
  // setting this to the child of the next Window operator.
  val windowOps =
    groupedWindowExpressions.foldLeft(child) {
      case (last, ((partitionSpec, orderSpec, _), windowExpressions)) =>
        Window(windowExpressions, partitionSpec, orderSpec, last)
    }

  // Finally, we create a Project to output windowOps's output
  // newExpressionsWithWindowFunctions.
  Project(windowOps.output ++ newExpressionsWithWindowFunctions, windowOps)
} // end of addWindow
// We have to use transformDown here to make sure the rule of
// "Aggregate with Having clause" will be triggered before the plain Aggregate case.
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperatorsDown {

  // Window functions cannot appear in filter predicates; reject them up front.
  case Filter(condition, _) if hasWindowFunction(condition) =>
    failAnalysis("It is not allowed to use window functions inside WHERE and HAVING clauses")

  // Aggregate with Having clause. This rule works with an unresolved Aggregate because
  // a resolved Aggregate will not have Window Functions.
  case f @ Filter(condition, a @ Aggregate(groupingExprs, aggregateExprs, child))
    if child.resolved &&
       hasWindowFunction(aggregateExprs) &&
       a.expressions.forall(_.resolved) =>
    val (windowExpressions, aggregateExpressions) = extract(aggregateExprs)
    // Create an Aggregate operator to evaluate aggregation functions.
    val withAggregate = Aggregate(groupingExprs, aggregateExpressions, child)
    // Add a Filter operator for conditions in the Having clause.
    val withFilter = Filter(condition, withAggregate)
    val withWindow = addWindow(windowExpressions, withFilter)

    // Finally, generate output columns according to the original projectList.
    val finalProjectList = aggregateExprs.map(_.toAttribute)
    Project(finalProjectList, withWindow)

  case p: LogicalPlan if !p.childrenResolved => p

  // Aggregate without Having clause.
  case a @ Aggregate(groupingExprs, aggregateExprs, child)
    if hasWindowFunction(aggregateExprs) &&
       a.expressions.forall(_.resolved) =>
    val (windowExpressions, aggregateExpressions) = extract(aggregateExprs)
    // Create an Aggregate operator to evaluate aggregation functions.
    val withAggregate = Aggregate(groupingExprs, aggregateExpressions, child)
    // Add Window operators.
    val withWindow = addWindow(windowExpressions, withAggregate)

    // Finally, generate output columns according to the original projectList.
    val finalProjectList = aggregateExprs.map(_.toAttribute)
    Project(finalProjectList, withWindow)

  // We only extract Window Expressions after all expressions of the Project
  // have been resolved.
  case p @ Project(projectList, child)
    if hasWindowFunction(projectList) && !p.expressions.exists(!_.resolved) =>
    val (windowExpressions, regularExpressions) = extract(projectList)
    // We add a project to get all needed expressions for window expressions from the child
    // of the original Project operator.
    val withProject = Project(regularExpressions, child)
    // Add Window operators.
    val withWindow = addWindow(windowExpressions, withProject)

    // Finally, generate output columns according to the original projectList.
    val finalProjectList = projectList.map(_.toAttribute)
    Project(finalProjectList, withWindow)
}
}
/**
 * Pulls out nondeterministic expressions from LogicalPlan which is not Project or Filter,
 * put them into an inner Project and finally project them away at the outer Project.
 */
object PullOutNondeterministic extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    case p if !p.resolved => p // Skip unresolved nodes.
    // Project and Filter are allowed to host nondeterministic expressions directly.
    case p: Project => p
    case f: Filter => f

    case a: Aggregate if a.groupingExpressions.exists(!_.deterministic) =>
      // Evaluate each nondeterministic grouping expression exactly once in a Project below
      // the Aggregate, then group on the resulting attribute instead.
      val nondeterToAttr = getNondeterToAttr(a.groupingExpressions)
      val newChild = Project(a.child.output ++ nondeterToAttr.values, a.child)
      a.transformExpressions { case e =>
        nondeterToAttr.get(e).map(_.toAttribute).getOrElse(e)
      }.copy(child = newChild)

    // Don't touch collect metrics. Top-level metrics are not supported (check analysis will fail)
    // and we want to retain them inside the aggregate functions.
    case m: CollectMetrics => m

    // todo: It's hard to write a general rule to pull out nondeterministic expressions
    // from LogicalPlan, currently we only do it for UnaryNode which has same output
    // schema with its child.
    case p: UnaryNode if p.output == p.child.output && p.expressions.exists(!_.deterministic) =>
      val nondeterToAttr = getNondeterToAttr(p.expressions)
      val newPlan = p.transformExpressions { case e =>
        nondeterToAttr.get(e).map(_.toAttribute).getOrElse(e)
      }
      val newChild = Project(p.child.output ++ nondeterToAttr.values, p.child)
      // Outer Project restores the original output, hiding the helper columns.
      Project(p.output, newPlan.withNewChildren(newChild :: Nil))
  }

  // Maps each leaf nondeterministic expression to a named expression (an existing
  // NamedExpression, or a fresh "_nondeterministic" alias) that can be projected below.
  private def getNondeterToAttr(exprs: Seq[Expression]): Map[Expression, NamedExpression] = {
    exprs.filterNot(_.deterministic).flatMap { expr =>
      val leafNondeterministic = expr.collect { case n: Nondeterministic => n }
      leafNondeterministic.distinct.map { e =>
        val ne = e match {
          case n: NamedExpression => n
          case _ => Alias(e, "_nondeterministic")()
        }
        e -> ne
      }
    }.toMap
  }
}
/**
 * Assigns a concrete seed to seedless random expressions (`Uuid`, `Shuffle`) during
 * analysis, so repeated evaluation of the analyzed plan uses a fixed seed.
 */
object ResolveRandomSeed extends Rule[LogicalPlan] {
  // Single shared generator; every seedless expression draws its own seed from it.
  private lazy val random = new Random()

  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    case p =>
      if (p.resolved) {
        // Fully resolved subtrees already had their seeds assigned; leave them untouched.
        p
      } else {
        p.transformExpressionsUp {
          case Uuid(None) => Uuid(Some(random.nextLong()))
          case Shuffle(child, None) => Shuffle(child, Some(random.nextLong()))
        }
      }
  }
}
/**
 * Correctly handle null primitive inputs for UDF by adding extra [[If]] expression to do the
 * null check. When user defines a UDF with primitive parameters, there is no way to tell if the
 * primitive parameter is null or not, so here we assume the primitive input is null-propagatable
 * and we should return null if the input is null.
 */
object HandleNullInputsForUDF extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    case p if !p.resolved => p // Skip unresolved nodes.

    case p => p transformExpressionsUp {

      case udf @ ScalaUDF(_, _, inputs, _, _, _, _)
          if udf.inputPrimitives.contains(true) =>
        // Otherwise, add special handling of null for fields that can't accept null.
        // The result of operations like this, when passed null, is generally to return null.
        assert(udf.inputPrimitives.length == inputs.length)

        val inputPrimitivesPair = udf.inputPrimitives.zip(inputs)
        // Null check is only needed for nullable primitive inputs; OR them together so the
        // whole UDF call short-circuits to null if any such input is null.
        val inputNullCheck = inputPrimitivesPair.collect {
          case (isPrimitive, input) if isPrimitive && input.nullable =>
            IsNull(input)
        }.reduceLeftOption[Expression](Or)

        if (inputNullCheck.isDefined) {
          // Once we add an `If` check above the udf, it is safe to mark those checked inputs
          // as null-safe (i.e., wrap with `KnownNotNull`), because the null-returning
          // branch of `If` will be called if any of these checked inputs is null. Thus we can
          // prevent this rule from being applied repeatedly.
          val newInputs = inputPrimitivesPair.map {
            case (isPrimitive, input) =>
              if (isPrimitive && input.nullable) {
                KnownNotNull(input)
              } else {
                input
              }
          }
          val newUDF = udf.copy(children = newInputs)
          If(inputNullCheck.get, Literal.create(null, udf.dataType), newUDF)
        } else {
          udf
        }
    }
  }
}
/**
 * Check and add proper window frames for all window functions.
 */
object ResolveWindowFrame extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
    // A function with a required frame must not be combined with a conflicting user frame.
    case WindowExpression(wf: WindowFunction, WindowSpecDefinition(_, _, f: SpecifiedWindowFrame))
        if wf.frame != UnspecifiedFrame && wf.frame != f =>
      failAnalysis(s"Window Frame $f must match the required frame ${wf.frame}")
    // No user frame given: adopt the frame the window function itself requires.
    case WindowExpression(wf: WindowFunction, s @ WindowSpecDefinition(_, _, UnspecifiedFrame))
        if wf.frame != UnspecifiedFrame =>
      WindowExpression(wf, s.copy(frameSpecification = wf.frame))
    // Otherwise fall back to the SQL-standard default frame:
    // RANGE UNBOUNDED PRECEDING .. CURRENT ROW with an ORDER BY,
    // ROWS over the whole partition without one.
    case we @ WindowExpression(e, s @ WindowSpecDefinition(_, o, UnspecifiedFrame))
        if e.resolved =>
      val frame = if (o.nonEmpty) {
        SpecifiedWindowFrame(RangeFrame, UnboundedPreceding, CurrentRow)
      } else {
        SpecifiedWindowFrame(RowFrame, UnboundedPreceding, UnboundedFollowing)
      }
      we.copy(windowSpec = s.copy(frameSpecification = frame))
  }
}
/**
 * Check and add order to [[AggregateWindowFunction]]s.
 */
object ResolveWindowOrder extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
    // Window functions (e.g. rank, lead) are meaningless without an ordering.
    case WindowExpression(wf: WindowFunction, spec) if spec.orderSpec.isEmpty =>
      failAnalysis(s"Window function $wf requires window to be ordered, please add ORDER BY " +
        s"clause. For example SELECT $wf(value_expr) OVER (PARTITION BY window_partition " +
        s"ORDER BY window_ordering) from table")
    // Rank-like functions need the ordering expressions themselves to compute ties.
    case WindowExpression(rank: RankLike, spec) if spec.resolved =>
      val order = spec.orderSpec.map(_.child)
      WindowExpression(rank.withOrder(order), spec)
  }
}
/**
 * Removes natural or using joins by calculating output columns based on output from two sides,
 * Then apply a Project on a normal Join to eliminate natural or using join.
 */
object ResolveNaturalAndUsingJoin extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    // USING join: the join columns are given explicitly by the user.
    case j @ Join(left, right, UsingJoin(joinType, usingCols), _, hint)
        if left.resolved && right.resolved && j.duplicateResolved =>
      commonNaturalJoinProcessing(left, right, joinType, usingCols, None, hint)
    // NATURAL join: the join columns are derived from the common column names.
    case j @ Join(left, right, NaturalJoin(joinType), condition, hint)
        if j.resolvedExceptNatural =>
      // find common column names from both sides
      val joinNames = left.output.map(_.name).intersect(right.output.map(_.name))
      commonNaturalJoinProcessing(left, right, joinType, joinNames, condition, hint)
  }
}
/**
 * Resolves columns of an output table from the data in a logical plan. This rule will:
 *
 * - Reorder columns when the write is by name
 * - Insert casts when data types do not match
 * - Insert aliases when column names do not match
 * - Detect plans that are not compatible with the output table and throw AnalysisException
 */
object ResolveOutputRelation extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    // The three write commands below share the same recipe: validate the store assignment
    // policy, resolve the query's output columns against the table schema, and replace the
    // query only when the resolver actually produced a new projection.
    case append @ AppendData(table, query, _, isByName)
        if table.resolved && query.resolved && !append.outputResolved =>
      validateStoreAssignmentPolicy()
      val projection =
        TableOutputResolver.resolveOutputColumns(table.name, table.output, query, isByName, conf)

      if (projection != query) {
        append.copy(query = projection)
      } else {
        append
      }

    case overwrite @ OverwriteByExpression(table, _, query, _, isByName)
        if table.resolved && query.resolved && !overwrite.outputResolved =>
      validateStoreAssignmentPolicy()
      val projection =
        TableOutputResolver.resolveOutputColumns(table.name, table.output, query, isByName, conf)

      if (projection != query) {
        overwrite.copy(query = projection)
      } else {
        overwrite
      }

    case overwrite @ OverwritePartitionsDynamic(table, query, _, isByName)
        if table.resolved && query.resolved && !overwrite.outputResolved =>
      validateStoreAssignmentPolicy()
      val projection =
        TableOutputResolver.resolveOutputColumns(table.name, table.output, query, isByName, conf)

      if (projection != query) {
        overwrite.copy(query = projection)
      } else {
        overwrite
      }
  }
}
// Rejects writes under the LEGACY store assignment policy, which data source v2 does not
// support. Throws AnalysisException pointing the user at the config key to change.
private def validateStoreAssignmentPolicy(): Unit = {
  // SPARK-28730: LEGACY store assignment policy is disallowed in data source v2.
  if (conf.storeAssignmentPolicy == StoreAssignmentPolicy.LEGACY) {
    val configKey = SQLConf.STORE_ASSIGNMENT_POLICY.key
    throw new AnalysisException(s"""
       |"LEGACY" store assignment policy is disallowed in Spark data source V2.
       |Please set the configuration $configKey to other values.""".stripMargin)
  }
}
// Shared rewrite for NATURAL and USING joins: resolves the join key names on both sides,
// builds an equi-join condition from the key pairs (ANDed with any existing condition),
// and wraps the plain Join in a Project that emits the de-duplicated output columns
// appropriate for the join type.
private def commonNaturalJoinProcessing(
    left: LogicalPlan,
    right: LogicalPlan,
    joinType: JoinType,
    joinNames: Seq[String],
    condition: Option[Expression],
    hint: JoinHint) = {
  val leftKeys = joinNames.map { keyName =>
    left.output.find(attr => resolver(attr.name, keyName)).getOrElse {
      throw new AnalysisException(s"USING column `$keyName` cannot be resolved on the left " +
        s"side of the join. The left-side columns: [${left.output.map(_.name).mkString(", ")}]")
    }
  }
  val rightKeys = joinNames.map { keyName =>
    right.output.find(attr => resolver(attr.name, keyName)).getOrElse {
      throw new AnalysisException(s"USING column `$keyName` cannot be resolved on the right " +
        s"side of the join. The right-side columns: [${right.output.map(_.name).mkString(", ")}]")
    }
  }
  val joinPairs = leftKeys.zip(rightKeys)

  // Equality over each key pair, ANDed together with the user's extra condition (if any).
  val newCondition = (condition ++ joinPairs.map(EqualTo.tupled)).reduceOption(And)

  // columns not in joinPairs
  val lUniqueOutput = left.output.filterNot(att => leftKeys.contains(att))
  val rUniqueOutput = right.output.filterNot(att => rightKeys.contains(att))

  // the output list looks like: join keys, columns from left, columns from right
  // The non-preserved side's columns become nullable for outer joins.
  val projectList = joinType match {
    case LeftOuter =>
      leftKeys ++ lUniqueOutput ++ rUniqueOutput.map(_.withNullability(true))
    case LeftExistence(_) =>
      leftKeys ++ lUniqueOutput
    case RightOuter =>
      rightKeys ++ lUniqueOutput.map(_.withNullability(true)) ++ rUniqueOutput
    case FullOuter =>
      // in full outer join, joinCols should be non-null if there is.
      val joinedCols = joinPairs.map { case (l, r) => Alias(Coalesce(Seq(l, r)), l.name)() }
      joinedCols ++
        lUniqueOutput.map(_.withNullability(true)) ++
        rUniqueOutput.map(_.withNullability(true))
    case _ : InnerLike =>
      leftKeys ++ lUniqueOutput ++ rUniqueOutput
    case _ =>
      sys.error("Unsupported natural join type " + joinType)
  }
  // use Project to trim unnecessary fields
  Project(projectList, Join(left, right, joinType, newCondition, hint))
}
/**
 * Replaces [[UnresolvedDeserializer]] with the deserialization expression that has been resolved
 * to the given input attributes.
 */
object ResolveDeserializer extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    case p if !p.childrenResolved => p
    case p if p.resolved => p

    case p => p transformExpressions {
      case UnresolvedDeserializer(deserializer, inputAttributes) =>
        // When no explicit input attributes were recorded, resolve against the
        // children's output.
        val inputs = if (inputAttributes.isEmpty) {
          p.children.flatMap(_.output)
        } else {
          inputAttributes
        }

        validateTopLevelTupleFields(deserializer, inputs)
        val resolved = resolveExpressionBottomUp(
          deserializer, LocalRelation(inputs), throws = true)
        // Turn the still-unresolved object-mapping placeholders into concrete expressions
        // now that their input data is resolved and its type is known.
        val result = resolved transformDown {
          case UnresolvedMapObjects(func, inputData, cls) if inputData.resolved =>
            inputData.dataType match {
              case ArrayType(et, cn) =>
                MapObjects(func, inputData, et, cn, cls) transformUp {
                  case UnresolvedExtractValue(child, fieldName) if child.resolved =>
                    ExtractValue(child, fieldName, resolver)
                }
              case other =>
                throw new AnalysisException("need an array field but got " + other.catalogString)
            }
          case u: UnresolvedCatalystToExternalMap if u.child.resolved =>
            u.child.dataType match {
              case _: MapType =>
                CatalystToExternalMap(u) transformUp {
                  case UnresolvedExtractValue(child, fieldName) if child.resolved =>
                    ExtractValue(child, fieldName, resolver)
                }
              case other =>
                throw new AnalysisException("need a map field but got " + other.catalogString)
            }
        }
        validateNestedTupleFields(result)
        result
    }
  }

  // Common error for a Tuple arity / column count mismatch.
  private def fail(schema: StructType, maxOrdinal: Int): Unit = {
    throw new AnalysisException(s"Try to map ${schema.catalogString} to Tuple${maxOrdinal + 1}" +
      ", but failed as the number of fields does not line up.")
  }

  /**
   * For each top-level Tuple field, we use [[GetColumnByOrdinal]] to get its corresponding column
   * by position. However, the actual number of columns may be different from the number of Tuple
   * fields. This method is used to check the number of columns and fields, and throw an
   * exception if they do not match.
   */
  private def validateTopLevelTupleFields(
      deserializer: Expression, inputs: Seq[Attribute]): Unit = {
    val ordinals = deserializer.collect {
      case GetColumnByOrdinal(ordinal, _) => ordinal
    }.distinct.sorted

    if (ordinals.nonEmpty && ordinals != inputs.indices) {
      fail(inputs.toStructType, ordinals.last)
    }
  }

  /**
   * For each nested Tuple field, we use [[GetStructField]] to get its corresponding struct field
   * by position. However, the actual number of struct fields may be different from the number
   * of nested Tuple fields. This method is used to check the number of struct fields and nested
   * Tuple fields, and throw an exception if they do not match.
   */
  private def validateNestedTupleFields(deserializer: Expression): Unit = {
    val structChildToOrdinals = deserializer
      // There are 2 kinds of `GetStructField`:
      // 1. resolved from `UnresolvedExtractValue`, and it will have a `name` property.
      // 2. created when we build deserializer expression for nested tuple, no `name` property.
      // Here we want to validate the ordinals of nested tuple, so we should only catch
      // `GetStructField` without the name property.
      .collect { case g: GetStructField if g.name.isEmpty => g }
      .groupBy(_.child)
      .mapValues(_.map(_.ordinal).distinct.sorted)

    structChildToOrdinals.foreach { case (expr, ordinals) =>
      val schema = expr.dataType.asInstanceOf[StructType]
      if (ordinals != schema.indices) {
        fail(schema, ordinals.last)
      }
    }
  }
}
/**
 * Resolves [[NewInstance]] by finding and adding the outer scope to it if the object being
 * constructed is an inner class.
 */
object ResolveNewInstance extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    case p if !p.childrenResolved => p
    case p if p.resolved => p

    case p => p transformExpressions {
      case n: NewInstance if n.childrenResolved && !n.resolved =>
        // An inner class needs a reference to its enclosing instance; look it up from the
        // registered outer scopes and fail with guidance when it is unavailable.
        val outer = OuterScopes.getOuterScope(n.cls)
        if (outer == null) {
          throw new AnalysisException(
            s"Unable to generate an encoder for inner class `${n.cls.getName}` without " +
              "access to the scope that this class was defined in.\n" +
              "Try moving this class out of its parent class.")
        }
        n.copy(outerPointer = Some(outer))
    }
  }
}
/**
 * Replace the [[UpCast]] expression by [[Cast]], and throw exceptions if the cast may truncate.
 */
object ResolveUpCast extends Rule[LogicalPlan] {
  // Builds the error for a cast that could lose information, including the type path that
  // led to the target field so the user can locate it.
  private def fail(from: Expression, to: DataType, walkedTypePath: Seq[String]) = {
    val fromStr = from match {
      case l: LambdaVariable => "array element"
      case e => e.sql
    }
    throw new AnalysisException(s"Cannot up cast $fromStr from " +
      s"${from.dataType.catalogString} to ${to.catalogString}.\n" +
      "The type path of the target object is:\n" + walkedTypePath.mkString("", "\n", "\n") +
      "You can either add an explicit cast to the input data or choose a higher precision " +
      "type of the field in the target object")
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    case p if !p.childrenResolved => p
    case p if p.resolved => p

    case p => p transformExpressions {
      case u @ UpCast(child, _, _) if !child.resolved => u

      // Legacy mode: allow the historically-permitted loose string-to-atomic cast.
      case UpCast(child, dt: AtomicType, _)
          if SQLConf.get.getConf(SQLConf.LEGACY_LOOSE_UPCAST) &&
            child.dataType == StringType =>
        Cast(child, dt.asNullable)

      case UpCast(child, dataType, walkedTypePath) if !Cast.canUpCast(child.dataType, dataType) =>
        fail(child, dataType, walkedTypePath)

      case UpCast(child, dataType, _) => Cast(child, dataType.asNullable)
    }
  }
}
/** Rule to mostly resolve, normalize and rewrite column names based on case sensitivity. */
object ResolveAlterTableChanges extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    case a @ AlterTable(_, _, t: NamedRelation, changes) if t.resolved =>
      // 'colsToAdd' keeps track of new columns being added. It stores a mapping from a
      // normalized parent name of fields to field names that belong to the parent.
      // For example, if we add columns "a.b.c", "a.b.d", and "a.c", 'colsToAdd' will become
      // Map(Seq("a", "b") -> Seq("c", "d"), Seq("a") -> Seq("c")).
      val colsToAdd = mutable.Map.empty[Seq[String], Seq[String]]
      val schema = t.schema
      // Each change is normalized individually; unresolvable ones are passed through
      // unchanged so CheckAnalysis can report a proper error later.
      val normalizedChanges = changes.flatMap {
        case add: AddColumn =>
          // Builds the normalized AddColumn once the parent struct has been located.
          def addColumn(
              parentSchema: StructType,
              parentName: String,
              normalizedParentName: Seq[String]): TableChange = {
            val fieldsAdded = colsToAdd.getOrElse(normalizedParentName, Nil)
            val pos = findColumnPosition(add.position(), parentName, parentSchema, fieldsAdded)
            val field = add.fieldNames().last
            colsToAdd(normalizedParentName) = fieldsAdded :+ field
            TableChange.addColumn(
              (normalizedParentName :+ field).toArray,
              add.dataType(),
              add.isNullable,
              add.comment,
              pos)
          }
          val parent = add.fieldNames().init
          if (parent.nonEmpty) {
            // Adding a nested field, need to normalize the parent column and position
            val target = schema.findNestedField(parent, includeCollections = true, conf.resolver)
            if (target.isEmpty) {
              // Leave unresolved. Throws error in CheckAnalysis
              Some(add)
            } else {
              val (normalizedName, sf) = target.get
              sf.dataType match {
                case struct: StructType =>
                  Some(addColumn(struct, parent.quoted, normalizedName :+ sf.name))
                case other =>
                  // Parent exists but is not a struct; leave for CheckAnalysis.
                  Some(add)
              }
            }
          } else {
            // Adding to the root. Just need to normalize position
            Some(addColumn(schema, "root", Nil))
          }

        case typeChange: UpdateColumnType =>
          // Hive style syntax provides the column type, even if it may not have changed
          val fieldOpt = schema.findNestedField(
            typeChange.fieldNames(), includeCollections = true, conf.resolver)

          if (fieldOpt.isEmpty) {
            // We couldn't resolve the field. Leave it to CheckAnalysis
            Some(typeChange)
          } else {
            val (fieldNames, field) = fieldOpt.get
            if (field.dataType == typeChange.newDataType()) {
              // The user didn't want the field to change, so remove this change
              None
            } else {
              Some(TableChange.updateColumnType(
                (fieldNames :+ field.name).toArray, typeChange.newDataType()))
            }
          }
        case n: UpdateColumnNullability =>
          // Need to resolve column
          resolveFieldNames(
            schema,
            n.fieldNames(),
            TableChange.updateColumnNullability(_, n.nullable())).orElse(Some(n))

        case position: UpdateColumnPosition =>
          position.position() match {
            case after: After =>
              // Need to resolve column as well as position reference
              val fieldOpt = schema.findNestedField(
                position.fieldNames(), includeCollections = true, conf.resolver)

              if (fieldOpt.isEmpty) {
                Some(position)
              } else {
                val (normalizedPath, field) = fieldOpt.get
                val targetCol = schema.findNestedField(
                  normalizedPath :+ after.column(), includeCollections = true, conf.resolver)
                if (targetCol.isEmpty) {
                  // Leave unchanged to CheckAnalysis
                  Some(position)
                } else {
                  Some(TableChange.updateColumnPosition(
                    (normalizedPath :+ field.name).toArray,
                    ColumnPosition.after(targetCol.get._2.name)))
                }
              }
            case _ =>
              // Need to resolve column
              resolveFieldNames(
                schema,
                position.fieldNames(),
                TableChange.updateColumnPosition(_, position.position())).orElse(Some(position))
          }

        case comment: UpdateColumnComment =>
          resolveFieldNames(
            schema,
            comment.fieldNames(),
            TableChange.updateColumnComment(_, comment.newComment())).orElse(Some(comment))

        case rename: RenameColumn =>
          resolveFieldNames(
            schema,
            rename.fieldNames(),
            TableChange.renameColumn(_, rename.newName())).orElse(Some(rename))

        case delete: DeleteColumn =>
          resolveFieldNames(schema, delete.fieldNames(), TableChange.deleteColumn)
            .orElse(Some(delete))

        case column: ColumnChange =>
          // This is informational for future developers
          throw new UnsupportedOperationException(
            "Please add an implementation for a column change here")

        // Non-column changes (e.g. table properties) need no normalization.
        case other => Some(other)
      }

      a.copy(changes = normalizedChanges)
  }

  /**
   * Returns the table change if the field can be resolved, returns None if the column is not
   * found. An error will be thrown in CheckAnalysis for columns that can't be resolved.
   */
  private def resolveFieldNames(
      schema: StructType,
      fieldNames: Array[String],
      copy: Array[String] => TableChange): Option[TableChange] = {
    val fieldOpt = schema.findNestedField(
      fieldNames, includeCollections = true, conf.resolver)
    fieldOpt.map { case (path, field) => copy((path :+ field.name).toArray) }
  }

  // Normalizes an AFTER reference against the existing struct fields plus the columns added
  // so far in this statement; other positions (FIRST / null) pass through unchanged.
  private def findColumnPosition(
      position: ColumnPosition,
      parentName: String,
      struct: StructType,
      fieldsAdded: Seq[String]): ColumnPosition = {
    position match {
      case null => null
      case after: After =>
        (struct.fieldNames ++ fieldsAdded).find(n => conf.resolver(n, after.column())) match {
          case Some(colName) =>
            ColumnPosition.after(colName)
          case None =>
            throw new AnalysisException("Couldn't find the reference column for " +
              s"$after at $parentName")
        }
      case other => other
    }
  }
}
}
/**
 * Strips [[SubqueryAlias]] operators out of the plan. After analysis they only carry
 * attribute-scoping information, so they can be safely dropped.
 */
object EliminateSubqueryAliases extends Rule[LogicalPlan] {
  // This rule is also invoked at the beginning of the optimization phase, which is why it
  // uses a plain transformUp (wrapped so the analyzer-phase guard permits it) instead of
  // resolveOperators.
  def apply(plan: LogicalPlan): LogicalPlan = AnalysisHelper.allowInvokingTransformsInAnalyzer {
    plan.transformUp {
      case alias: SubqueryAlias => alias.child
    }
  }
}
/**
 * Collapses a [[Union]] operator into its child when it has exactly one child,
 * since a single-child union is a no-op.
 */
object EliminateUnions extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
    case u: Union if u.children.lengthCompare(1) == 0 => u.children.head
  }
}
/**
 * Cleans up unnecessary Aliases inside the plan. Basically we only need Alias as a top level
 * expression in Project(project list) or Aggregate(aggregate expressions) or
 * Window(window expressions). Notice that if an expression has other expression parameters which
 * are not in its `children`, e.g. `RuntimeReplaceable`, the transformation for Aliases in this
 * rule can't work for those parameters.
 */
object CleanupAliases extends Rule[LogicalPlan] {
  // Strips every Alias/MultiAlias wrapper inside the expression tree.
  def trimAliases(e: Expression): Expression = {
    e.transformDown {
      case Alias(child, _) => child
      case MultiAlias(child, _) => child
    }
  }

  // Keeps a top-level Alias/MultiAlias (preserving exprId, qualifier and metadata) but
  // removes any aliases nested below it.
  def trimNonTopLevelAliases(e: Expression): Expression = e match {
    case a: Alias =>
      a.copy(child = trimAliases(a.child))(
        exprId = a.exprId,
        qualifier = a.qualifier,
        explicitMetadata = Some(a.metadata))
    case a: MultiAlias =>
      a.copy(child = trimAliases(a.child))
    case other => trimAliases(other)
  }

  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    // Operators that legitimately name their outputs keep top-level aliases only.
    case Project(projectList, child) =>
      val cleanedProjectList =
        projectList.map(trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
      Project(cleanedProjectList, child)

    case Aggregate(grouping, aggs, child) =>
      val cleanedAggs = aggs.map(trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
      Aggregate(grouping.map(trimAliases), cleanedAggs, child)

    case Window(windowExprs, partitionSpec, orderSpec, child) =>
      val cleanedWindowExprs =
        windowExprs.map(e => trimNonTopLevelAliases(e).asInstanceOf[NamedExpression])
      Window(cleanedWindowExprs, partitionSpec.map(trimAliases),
        orderSpec.map(trimAliases(_).asInstanceOf[SortOrder]), child)

    case CollectMetrics(name, metrics, child) =>
      val cleanedMetrics = metrics.map {
        e => trimNonTopLevelAliases(e).asInstanceOf[NamedExpression]
      }
      CollectMetrics(name, cleanedMetrics, child)

    // Operators that operate on objects should only have expressions from encoders, which should
    // never have extra aliases.
    case o: ObjectConsumer => o
    case o: ObjectProducer => o
    case a: AppendColumns => a

    // Everything else needs no aliases at all.
    case other =>
      other transformExpressionsDown {
        case Alias(child, _) => child
      }
  }
}
/**
 * Drops event-time watermark operators in batch queries; watermarks only have meaning in
 * Structured Streaming.
 * TODO: add this rule into analyzer rule list.
 */
object EliminateEventTimeWatermark extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
    case watermark: EventTimeWatermark if !watermark.child.isStreaming =>
      watermark.child
  }
}
/**
* Maps a time column to multiple time windows using the Expand operator. Since it's non-trivial to
* figure out how many windows a time column can map to, we over-estimate the number of windows and
* filter out the rows where the time column is not inside the time window.
*/
object TimeWindowing extends Rule[LogicalPlan] {
  import org.apache.spark.sql.catalyst.dsl.expressions._

  // Name of the generated struct column and of its two nested fields.
  private final val WINDOW_COL_NAME = "window"
  private final val WINDOW_START = "start"
  private final val WINDOW_END = "end"

  /**
   * Generates the logical plan for generating window ranges on a timestamp column. Without
   * knowing what the timestamp value is, it's non-trivial to figure out deterministically how many
   * window ranges a timestamp will map to given all possible combinations of a window duration,
   * slide duration and start time (offset). Therefore, we express and over-estimate the number of
   * windows there may be, and filter the valid windows. We use last Project operator to group
   * the window columns into a struct so they can be accessed as `window.start` and `window.end`.
   *
   * The windows are calculated as below:
   * maxNumOverlapping <- ceil(windowDuration / slideDuration)
   * for (i <- 0 until maxNumOverlapping)
   *   windowId <- ceil((timestamp - startTime) / slideDuration)
   *   windowStart <- windowId * slideDuration + (i - maxNumOverlapping) * slideDuration + startTime
   *   windowEnd <- windowStart + windowDuration
   *   return windowStart, windowEnd
   *
   * This behaves as follows for the given parameters for the time: 12:05. The valid windows are
   * marked with a +, and invalid ones are marked with a x. The invalid ones are filtered using the
   * Filter operator.
   * window: 12m, slide: 5m, start: 0m :: window: 12m, slide: 5m, start: 2m
   *     11:55 - 12:07 +                      11:52 - 12:04 x
   *     12:00 - 12:12 +                      11:57 - 12:09 +
   *     12:05 - 12:17 +                      12:02 - 12:14 +
   *
   * @param plan The logical plan
   * @return the logical plan that will generate the time windows using the Expand operator, with
   *         the Filter operator for correctness and Project for usability.
   */
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
    case p: LogicalPlan if p.children.size == 1 =>
      val child = p.children.head
      // Every distinct TimeWindow expression appearing in this operator.
      val windowExpressions =
        p.expressions.flatMap(_.collect { case t: TimeWindow => t }).toSet
      val numWindowExpr = windowExpressions.size
      // Only support a single window expression for now
      if (numWindowExpr == 1 &&
        windowExpressions.head.timeColumn.resolved &&
        windowExpressions.head.checkInputDataTypes().isSuccess) {
        val window = windowExpressions.head

        // Propagate the time column's metadata (if any) onto the window column.
        val metadata = window.timeColumn match {
          case a: Attribute => a.metadata
          case _ => Metadata.empty
        }

        // Builds the struct for the i-th of `overlappingWindows` candidate windows,
        // following the windowId/windowStart formulas in the scaladoc above.
        def getWindow(i: Int, overlappingWindows: Int): Expression = {
          val division = (PreciseTimestampConversion(
            window.timeColumn, TimestampType, LongType) - window.startTime) / window.slideDuration
          val ceil = Ceil(division)
          // if the division is equal to the ceiling, our record is the start of a window
          val windowId = CaseWhen(Seq((ceil === division, ceil + 1)), Some(ceil))
          val windowStart = (windowId + i - overlappingWindows) *
            window.slideDuration + window.startTime
          val windowEnd = windowStart + window.windowDuration
          CreateNamedStruct(
            Literal(WINDOW_START) ::
              PreciseTimestampConversion(windowStart, LongType, TimestampType) ::
              Literal(WINDOW_END) ::
              PreciseTimestampConversion(windowEnd, LongType, TimestampType) ::
              Nil)
        }

        val windowAttr = AttributeReference(
          WINDOW_COL_NAME, window.dataType, metadata = metadata)()

        if (window.windowDuration == window.slideDuration) {
          // Tumbling windows: each row maps to exactly one window, so a plain
          // Project suffices (no Expand needed).
          val windowStruct = Alias(getWindow(0, 1), WINDOW_COL_NAME)(
            exprId = windowAttr.exprId, explicitMetadata = Some(metadata))
          val replacedPlan = p transformExpressions {
            case t: TimeWindow => windowAttr
          }
          // For backwards compatibility we add a filter to filter out nulls
          val filterExpr = IsNotNull(window.timeColumn)
          replacedPlan.withNewChildren(
            Filter(filterExpr,
              Project(windowStruct +: child.output, child)) :: Nil)
        } else {
          // Sliding windows: over-generate `overlappingWindows` candidates per row
          // via Expand, then keep only the windows containing the timestamp.
          val overlappingWindows =
            math.ceil(window.windowDuration * 1.0 / window.slideDuration).toInt
          val windows =
            Seq.tabulate(overlappingWindows)(i => getWindow(i, overlappingWindows))
          val projections = windows.map(_ +: child.output)
          // Row is kept only when start <= timeColumn < end for that candidate.
          val filterExpr =
            window.timeColumn >= windowAttr.getField(WINDOW_START) &&
              window.timeColumn < windowAttr.getField(WINDOW_END)
          val substitutedPlan = Filter(filterExpr,
            Expand(projections, windowAttr +: child.output, child))
          val renamedPlan = p transformExpressions {
            case t: TimeWindow => windowAttr
          }
          renamedPlan.withNewChildren(substitutedPlan :: Nil)
        }
      } else if (numWindowExpr > 1) {
        p.failAnalysis("Multiple time window expressions would result in a cartesian product " +
          "of rows, therefore they are currently not supported.")
      } else {
        p // Return unchanged. Analyzer will throw exception later
      }
  }
}
/**
* Resolve a [[CreateNamedStruct]] if it contains [[NamePlaceholder]]s.
*/
object ResolveCreateNamedStruct extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveExpressions {
    case e: CreateNamedStruct if !e.resolved =>
      // Children alternate (name, value). Replace each NamePlaceholder with the
      // resolved value expression's own name; leave every other pair untouched.
      val filledChildren = e.children.grouped(2).toList.flatMap {
        case Seq(NamePlaceholder, value: NamedExpression) if value.resolved =>
          Literal(value.name) :: value :: Nil
        case pair =>
          pair
      }
      CreateNamedStruct(filledChildren)
  }
}
/**
* The aggregate expressions from subquery referencing outer query block are pushed
* down to the outer query block for evaluation. This rule below updates such outer references
* as AttributeReference referring attributes from the parent/outer query block.
*
* For example (SQL):
* {{{
* SELECT l.a FROM l GROUP BY 1 HAVING EXISTS (SELECT 1 FROM r WHERE r.d < min(l.b))
* }}}
* Plan before the rule.
* Project [a#226]
* +- Filter exists#245 [min(b#227)#249]
* : +- Project [1 AS 1#247]
* : +- Filter (d#238 < min(outer(b#227))) <-----
* : +- SubqueryAlias r
* : +- Project [_1#234 AS c#237, _2#235 AS d#238]
* : +- LocalRelation [_1#234, _2#235]
* +- Aggregate [a#226], [a#226, min(b#227) AS min(b#227)#249]
* +- SubqueryAlias l
* +- Project [_1#223 AS a#226, _2#224 AS b#227]
* +- LocalRelation [_1#223, _2#224]
* Plan after the rule.
* Project [a#226]
* +- Filter exists#245 [min(b#227)#249]
* : +- Project [1 AS 1#247]
* : +- Filter (d#238 < outer(min(b#227)#249)) <-----
* : +- SubqueryAlias r
* : +- Project [_1#234 AS c#237, _2#235 AS d#238]
* : +- LocalRelation [_1#234, _2#235]
* +- Aggregate [a#226], [a#226, min(b#227) AS min(b#227)#249]
* +- SubqueryAlias l
* +- Project [_1#223 AS a#226, _2#224 AS b#227]
* +- LocalRelation [_1#223, _2#224]
*/
object UpdateOuterReferences extends Rule[LogicalPlan] {

  /**
   * Returns the child of an [[Alias]], or the expression unchanged when it is not
   * aliased. (Previously this match covered only Alias and threw a MatchError for
   * any other input; made total for safety — callers currently pass Aliases only.)
   */
  private def stripAlias(expr: Expression): Expression = expr match {
    case a: Alias => a.child
    case e => e
  }

  /**
   * Rewrites expressions in `plan` (a subquery plan): whenever an expression whose
   * outer reference is stripped is semantically equal to one of `refExprs` — the
   * aliases taken from the outer Aggregate's output — it is replaced by an
   * OuterReference to that alias' attribute, so the subquery points at the value
   * already computed by the outer query block.
   */
  private def updateOuterReferenceInSubquery(
      plan: LogicalPlan,
      refExprs: Seq[Expression]): LogicalPlan = {
    plan resolveExpressions { case e =>
      val outerAlias =
        refExprs.find(stripAlias(_).semanticEquals(stripOuterReference(e)))
      outerAlias match {
        case Some(a: Alias) => OuterReference(a.toAttribute)
        case _ => e
      }
    }
  }

  def apply(plan: LogicalPlan): LogicalPlan = {
    plan resolveOperators {
      case f @ Filter(_, a: Aggregate) if f.resolved =>
        f transformExpressions {
          case s: SubqueryExpression if s.children.nonEmpty =>
            // Collect the aliases from output of aggregate.
            val outerAliases = a.aggregateExpressions collect { case a: Alias => a }
            // Update the subquery plan to record the OuterReference to point to outer query plan.
            s.withNewPlan(updateOuterReferenceInSubquery(s.plan, outerAliases))
        }
    }
  }
}
| skonto/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala | Scala | apache-2.0 | 157,622 |
package scife
package enumeration
package benchmarks
package test
import dependent._
import memoization._
import scife.{ enumeration => e }
import scife.util._
import logging._
import org.scalatest._
import org.scalatest.prop._
import org.scalameter.api._
import scala.language.existentials
class BinarySearchTreeBenchmarkTest2 extends FunSuite with Matchers with GeneratorDrivenPropertyChecks
  with ProfileLogger {
  import e.common.enumdef.BinarySearchTreeEnum._
  import structures._
  import BSTrees._
  import Common._
  import Checks._
  import Util.CheckerHelper
  import Math._

  test("correctness") {
    // Scope that records every memoized sub-enumeration; the exact cache sizes
    // asserted below pin down how the enumerator grows its memo table.
    val ms = new scope.AccumulatingScope
    val enum = constructEnumeratorBenchmark(ms)
    ms.memoizations.size should be(1)
    val helper = new CheckerHelper[Tree]
    import helper._
    withLazyClue("Elements are: " + clue) {
      // Size-1 trees over {1,2,3}: one single-node tree per key.
      res = enum.getEnum(1, 1 to 3)
      ms.memoizations.size should be(1)
      res.size should be(3)
      elements should contain theSameElementsAs (1 to 3).map(
        Node(Leaf, _, Leaf))
      // Size-2 trees over {1,2}: the two possible shapes.
      res = enum.getEnum(2, 1 to 2)
      ms.memoizations.size should be(2)
      res.size should be(2)
      elements should contain allOf (
        Node(Leaf, 1, Node(Leaf, 2, Leaf)),
        Node(Node(Leaf, 1, Leaf), 2, Leaf))
      // Size-3 trees over {1,2,3}: 5 shapes (3rd Catalan number).
      res = enum.getEnum(3, 1 to 3)
      ms.memoizations.size should be(7)
      res.size should be(5)
      elements should contain allOf (
        Node(Node(Leaf, 1, Leaf), 2, Node(Leaf, 3, Leaf)),
        Node(Leaf, 1, Node(Node(Leaf, 2, Leaf), 3, Leaf)))
      // Size-3 trees over a 4-key range: 5 shapes times C(4,3) key choices.
      res = enum.getEnum(3, 1 to 4)
      ms.memoizations.size should be(12)
      res.size should be(5 * Binomial.binomialCoefficient(4, 3))
      elements should contain allOf (
        Node(Node(Leaf, 1, Leaf), 2, Node(Leaf, 3, Leaf)),
        Node(Leaf, 1, Node(Node(Leaf, 2, Leaf), 3, Leaf)))
      // Degenerate inputs: an empty range yields no trees; size 0 yields only Leaf.
      for (size <- 1 to 3) {
        res = enum.getEnum((size, Range(size, size - 1)))
        res.size should be(0)
        elements should be('empty)
        res = enum.getEnum((0, 1 to size))
        res(0) should be(Leaf)
        res.size should be(1)
      }
    }

    // Profile enumeration for sizes 1..6: the number of BSTs over n distinct keys
    // is the n-th Catalan number, and every enumerated tree must satisfy the
    // BST invariant.
    val profileRange = 1 to 6
    for (size <- profileRange) {
      ms.clear
      profile("Getting stream for BST of size %d".format(size)) {
        res = enum.getEnum(size, 1 to size)
      }
      profile("Claculating size for BST of size %d".format(size)) {
        res.size should be(Catalan.catalan(size))
      }
      profile("Getting elements for BST of size %d".format(size)) {
        for (ind <- 0 until res.size) res(ind)
      }
      assert((for (ind <- 0 until res.size) yield res(ind)).forall(invariant(_)))
    }
  }

  test("correctness, bigger sizes", scife.util.tags.SlowTest) {
    val ms = new scope.AccumulatingScope
    val enum = constructEnumeratorBenchmark(ms)
    ms.memoizations.size should be(1)
    // some confirmed counts: 208012 is the 12th Catalan number
    val res = enum.getEnum(12, 1 to 12)
    res.size should be (208012)
  }
}
| kaptoxic/SciFe | src/bench/test/scala/scife/enumeration/benchmarks/test/BinarySearchTreeBenchmarkTest.scala | Scala | gpl-2.0 | 2,958 |
package pl.touk.nussknacker.engine.api.signal
/**
 * Marker trait (no members) for process signal senders.
 *
 * IMPORTANT lifecycle notice:
 * Implementations of this trait *must not* allocate resources (connections, file handles etc.)
 */
trait ProcessSignalSender
| TouK/nussknacker | components-api/src/main/scala/pl/touk/nussknacker/engine/api/signal/ProcessSignalSender.scala | Scala | apache-2.0 | 208 |
package edu.osu.cse.groenkeb.logic.model.rules
import edu.osu.cse.groenkeb.logic.Absurdity
import edu.osu.cse.groenkeb.logic.AtomicSentence
import edu.osu.cse.groenkeb.logic.Sentence
import edu.osu.cse.groenkeb.logic.model.FirstOrderModel
import edu.osu.cse.groenkeb.logic.proof.rules.CompleteResult
import edu.osu.cse.groenkeb.logic.proof.rules.EmptyArgs
import edu.osu.cse.groenkeb.logic.proof.rules.NullResult
import edu.osu.cse.groenkeb.logic.proof.rules.Rule
import edu.osu.cse.groenkeb.logic.proof.rules.RuleArgs
import edu.osu.cse.groenkeb.logic.proof.CompleteProof
import edu.osu.cse.groenkeb.logic.proof.Conclusion
import edu.osu.cse.groenkeb.logic.proof.NullProof
import edu.osu.cse.groenkeb.logic.proof.Proof
import edu.osu.cse.groenkeb.logic.proof.ProudPremise
import edu.osu.cse.groenkeb.logic.proof.rules.IdentityRule
import edu.osu.cse.groenkeb.logic.proof.rules.UnaryArgs
import edu.osu.cse.groenkeb.logic.proof.Assumption
import edu.osu.cse.groenkeb.logic.utils.Empty
/**
 * Proof rule backed by a concrete first-order model: atomic sentences the model
 * verifies are derivable directly, and sentences the model refutes yield Absurdity.
 */
case class ModelRule(val model: FirstOrderModel) extends Rule {
  // Accepts as "major" either a trivially-derived atomic sentence (a complete
  // proof via IdentityRule with no open premises) or no proof at all.
  def major(proof: Proof) = proof match {
    case CompleteProof(Conclusion(AtomicSentence(_), IdentityRule(), _), Empty()) => true
    case NullProof() => true
    case _ => false
  }

  // This rule never takes minor premises.
  def minor(proof: Proof) = false

  // An atomic sentence is a candidate conclusion iff the model verifies it;
  // Absurdity is always a candidate (derived from a model-refuted sentence).
  def yields(conc: Sentence) = conc match {
    case AtomicSentence(atom) => model.verify(conc)
    case Absurdity => true
    case _ => false
  }

  def infer(conc: Sentence)(args: RuleArgs) = conc match {
    case AtomicSentence(atom) => args match {
      // A model-verified atomic sentence is proved outright, with no premises.
      case EmptyArgs() if model.verify(conc) => CompleteResult(CompleteProof(Conclusion(conc, this, args), Set()))
      //case EmptyArgs() if !model.verify(conc) => CompleteResult(CompleteProof(Conclusion(Absurdity(), this, args), Nil))
      case _ => NullResult()
    }
    case Absurdity => args match {
      // A premise-free proof of a sentence the model refutes yields Absurdity,
      // discharging that sentence as an assumption.
      case UnaryArgs(CompleteProof(c, Empty())) if !model.verify(c.sentence) =>
        CompleteResult(CompleteProof(Conclusion(Absurdity, this, args), Set(Assumption(c.sentence))))
      case _ => NullResult()
    }
    case _ => NullResult()
  }

  override def toString = "M"
}
| bgroenks96/PropLogic | modelvf/src/main/scala/edu/osu/cse/groenkeb/logic/model/rules/ModelRule.scala | Scala | mit | 2,122 |
package util
import java.util.concurrent.TimeUnit
import com.typesafe.config.Config
/**
 * Mixin for reading values from a Typesafe `Config` with explicit fallbacks when a
 * path is absent.
 */
trait ConfigWithDefault {

  /** Root of the configuration tree that all paths below are resolved against. */
  def rootConfig: Config

  def getBoolean(path: String, default: Boolean): Boolean = ifHasPath(path, default) { _.getBoolean(path) }

  def getString(path: String, default: String): String = ifHasPath(path, default) { _.getString(path) }

  def getInt(path: String, default: Int): Int = ifHasPath(path, default) { _.getInt(path) }

  def getConfig(path: String, default: Config): Config = ifHasPath(path, default) { _.getConfig(path) }

  def getMilliseconds(path: String, default: Long): Long = ifHasPath(path, default) {
    _.getDuration(path, TimeUnit.MILLISECONDS)
  }

  /**
   * Reads an optional string, yielding `default` when the path is absent.
   * Fixes a bug where the `default` argument was ignored (it was not forwarded to
   * `getOptional`), so `None` was always returned for missing paths.
   */
  def getOptionalString(path: String, default: Option[String] = None): Option[String] =
    getOptional(path, default) { _.getString(path) }

  /** Runs `get` against `rootConfig` when `path` exists, otherwise returns `default`. */
  private def ifHasPath[T](path: String, default: T)(get: Config => T): T =
    if (rootConfig.hasPath(path)) get(rootConfig) else default

  /** Like `ifHasPath`, but for optional values. */
  private def getOptional[T](fullPath: String, default: Option[T] = None)(get: Config => T): Option[T] =
    if (rootConfig.hasPath(fullPath)) {
      Some(get(rootConfig))
    } else {
      default
    }
}
| softwaremill/scala-clippy | ui/app/util/ConfigWithDefault.scala | Scala | apache-2.0 | 1,112 |
package com.datastax.spark.connector
import java.nio.ByteBuffer
import com.datastax.driver.core.{ProtocolVersion, Row, UDTValue => DriverUDTValue}
import com.datastax.spark.connector.types.TypeConverter.StringConverter
import org.apache.cassandra.utils.ByteBufferUtil
import scala.collection.JavaConversions._
trait AbstractGettableData {

  /** Column names, in column order. */
  protected def fieldNames: IndexedSeq[String]

  /** Column values, aligned with `fieldNames`; Cassandra nulls appear as `null`. */
  protected def fieldValues: IndexedSeq[AnyRef]

  /** Maps a column name to its index; unknown names map to -1. */
  @transient
  private[connector] lazy val _indexOf =
    fieldNames.zipWithIndex.toMap.withDefaultValue(-1)

  /** Like `_indexOf`, but throws [[ColumnNotFoundException]] for unknown names. */
  @transient
  private[connector] lazy val _indexOfOrThrow = _indexOf.withDefault { name =>
    throw new ColumnNotFoundException(
      s"Column not found: $name. " +
        s"Available columns are: ${fieldNames.mkString("[", ", ", "]")}")
  }

  /** Total number of columns in this row. Includes columns with null values. */
  def length = fieldValues.size

  /** Total number of columns in this row. Includes columns with null values. */
  def size = fieldValues.size

  /** Returns true if column value is Cassandra null */
  def isNullAt(index: Int): Boolean =
    fieldValues(index) == null

  /** Returns true if column value is Cassandra null */
  def isNullAt(name: String): Boolean = {
    fieldValues(_indexOfOrThrow(name)) == null
  }

  /** Returns index of column with given name or -1 if column not found */
  def indexOf(name: String): Int =
    _indexOf(name)

  /** Returns the name of the i-th column. */
  def nameOf(index: Int): String =
    fieldNames(index)

  /** Returns true if column with given name is defined and has an
    * entry in the underlying value array, i.e. was requested in the result set.
    * For columns having null value, returns true. */
  def contains(name: String): Boolean =
    _indexOf(name) != -1

  /** Displays the content in human readable form, including the names and values of the columns */
  def dataAsString = fieldNames
    .zip(fieldValues)
    .map(kv => kv._1 + ": " + StringConverter.convert(kv._2))
    .mkString("{", ", ", "}")

  override def toString = dataAsString

  /** Two rows are equal when they have the same number of columns and pairwise
    * equal values; column names are deliberately not compared. */
  override def equals(o: Any) = o match {
    case o: AbstractGettableData =>
      if (this.fieldValues.length == o.length) {
        this.fieldValues.zip(o.fieldValues).forall { case (mine, yours) => mine == yours}
      } else
        false
    case _ => false
  }

  /** Added to honour the `equals`/`hashCode` contract (previously only `equals`
    * was overridden): like `equals`, it depends solely on the column values. */
  override def hashCode: Int = fieldValues.hashCode
}
object AbstractGettableData {

  /* ByteBuffers are not serializable, so we need to convert them to something that is serializable.
     Array[Byte] seems reasonable candidate. Additionally converts Java collections to Scala ones.
     Applied recursively to the elements of lists, sets and maps. */
  private[connector] def convert(obj: Any)(implicit protocolVersion: ProtocolVersion): AnyRef = {
    obj match {
      case bb: ByteBuffer => ByteBufferUtil.getArray(bb)
      case list: java.util.List[_] => list.view.map(convert).toList
      case set: java.util.Set[_] => set.view.map(convert).toSet
      case map: java.util.Map[_, _] => map.view.map { case (k, v) => (convert(k), convert(v))}.toMap
      case udtValue: DriverUDTValue => UDTValue.fromJavaDriverUDTValue(udtValue)
      case other => other.asInstanceOf[AnyRef]
    }
  }

  /** Deserializes given field from the DataStax Java Driver `Row` into appropriate Java type.
    * If the field is null, returns null (not Scala Option). */
  def get(row: Row, index: Int)(implicit protocolVersion: ProtocolVersion): AnyRef = {
    val columnDefinitions = row.getColumnDefinitions
    val columnType = columnDefinitions.getType(index)
    // getBytesUnsafe returns the raw buffer without a defensive copy;
    // a null buffer denotes a Cassandra null.
    val bytes = row.getBytesUnsafe(index)
    if (bytes != null)
      convert(columnType.deserialize(bytes, protocolVersion))
    else
      null
  }

  /** Same as `get(row, index)`, resolving the column index by name first. */
  def get(row: Row, name: String)(implicit protocolVersion: ProtocolVersion): AnyRef = {
    val index = row.getColumnDefinitions.getIndexOf(name)
    get(row, index)
  }

  /** Deserializes the named field of a UDT value; a null field yields null. */
  def get(value: DriverUDTValue, name: String)(implicit protocolVersion: ProtocolVersion): AnyRef = {
    val valueType = value.getType.getFieldType(name)
    val bytes = value.getBytesUnsafe(name)
    if (bytes != null)
      convert(valueType.deserialize(bytes, protocolVersion))
    else
      null
  }
}
/** Thrown when the requested column does not exist in the result set
  * (raised by `AbstractGettableData._indexOfOrThrow` for unknown column names). */
class ColumnNotFoundException(message: String) extends Exception(message)
| nvoron23/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/AbstractGettableData.scala | Scala | apache-2.0 | 4,271 |
package cgta.oscala
package util.debugging
//////////////////////////////////////////////////////////////
// Copyright (c) 2014 Ben Jackman
// All Rights Reserved
// please contact ben@jackman.biz
// for licensing inquiries
// Created by bjackman @ 8/28/14 9:05 PM
//////////////////////////////////////////////////////////////
/** Debug-print sink with pipe syntax: implementations receive the message via `|`. */
trait PRINT {
  // Emits `msg` (any value) to the implementation's output.
  def |(msg : Any) : Unit
}
object PRINT extends PRINT with PrintPlat {
} | cgta/open | oscala/shared/src/main/scala/cgta/oscala/util/debugging/PRINT.scala | Scala | mit | 420 |
package org.gbif.population.spark
/**
* Performs Simple Linear Regression.
* Given a 2D matrix of points, will provide the best fit equation in the form y = mx+c along with the sum of squares
* error (SSE), sum of squares regression (SSR) and the total sum of squares (SSTO).
*
* @see http://eric-mariacher.blogspot.dk/2011/12/here-is-how-to-compute-simple-linear.html
*/
/**
 * Performs Simple Linear Regression.
 * Given a list of (x, y) points, provides the best fit equation in the form y = m*x + c
 * along with the sum of squares error (SSE), sum of squares regression (SSR) and the
 * total sum of squares (SSTO).
 *
 * @see http://eric-mariacher.blogspot.dk/2011/12/here-is-how-to-compute-simple-linear.html
 */
object LinearRegression {

  /**
   * Container of the result of the regression.
   *
   * @param m    slope of the fitted line
   * @param c    intercept of the fitted line
   * @param SSTO total sum of squares
   * @param SSR  sum of squares due to regression
   * @param SSE  sum of squares of the residual error
   */
  class Result(val m: Double, val c: Double, val SSTO: Double, val SSR: Double, val SSE: Double)

  /**
   * Performs the regression.
   * Precondition: `pairs` is non-empty and contains at least two distinct x values;
   * otherwise the slope degenerates to NaN (division by a zero sum of squares).
   */
  def process(pairs: List[(Double, Double)]): Result = {
    val size = pairs.size
    // first pass: read in data, compute xbar and ybar
    val sums = pairs.foldLeft(new X_X2_Y(0D, 0D, 0D))(_ + new X_X2_Y(_))
    val bars = (sums.x / size, sums.y / size)
    // second pass: compute summary statistics
    val sumstats = pairs.foldLeft(new X2_Y2_XY(0D, 0D, 0D))(_ + new X2_Y2_XY(_, bars))
    val beta1 = sumstats.xy / sumstats.x2
    val beta0 = bars._2 - (beta1 * bars._1)
    val betas = (beta0, beta1)
    // third pass: accumulate SSE (residual) and SSR (regression) around the fit
    val correlation = pairs.foldLeft(new RSS_SSR(0D, 0D))(_ + RSS_SSR.build(_, bars, betas))
    // SSTO = sumstats.y2, SSE = correlation.rss, SSR = correlation.ssr
    new Result(beta1, beta0, sumstats.y2, correlation.rss, correlation.ssr)
  }

  /** Accumulator for the residual (rss/SSE) and regression (ssr/SSR) sums of squares. */
  class RSS_SSR(val rss: Double, val ssr: Double) {
    def +(p: RSS_SSR): RSS_SSR = new RSS_SSR(rss + p.rss, ssr + p.ssr)
  }

  object RSS_SSR {
    /** Squared residual and squared deviation-from-mean of the fitted value for one point. */
    def build(p: (Double, Double), bars: (Double, Double), betas: (Double, Double)): RSS_SSR = {
      val fit = (betas._2 * p._1) + betas._1
      val rss = (fit - p._2) * (fit - p._2)
      val ssr = (fit - bars._2) * (fit - bars._2)
      new RSS_SSR(rss, ssr)
    }
  }

  /** First-pass accumulator: sums of x, x squared and y. */
  class X_X2_Y(val x: Double, val x2: Double, val y: Double) {
    def this(p: (Double, Double)) = this(p._1, p._1 * p._1, p._2)
    def +(p: X_X2_Y): X_X2_Y = new X_X2_Y(x + p.x, x2 + p.x2, y + p.y)
  }

  /** Second-pass accumulator: centered sums of (x-xbar)^2, (y-ybar)^2 and (x-xbar)(y-ybar). */
  class X2_Y2_XY(val x2: Double, val y2: Double, val xy: Double) {
    def this(p: (Double, Double), bars: (Double, Double)) =
      this((p._1 - bars._1) * (p._1 - bars._1), (p._2 - bars._2) * (p._2 - bars._2), (p._1 - bars._1) * (p._2 - bars._2))
    def +(p: X2_Y2_XY): X2_Y2_XY = new X2_Y2_XY(x2 + p.x2, y2 + p.y2, xy + p.xy)
  }
}
| gbif/species-population | spark-process/src/main/scala/org/gbif/population/spark/LinnearRegression.scala | Scala | apache-2.0 | 2,518 |
package com.productfoundry.akka.cqrs
import akka.actor.ActorRef
import scala.reflect.ClassTag
/**
* Abstract entity supervisor factory.
*/
abstract class EntitySupervisorFactory[E <: Entity : EntityFactory : ClassTag] {
  /**
   * Gets the entity supervisor for type `E`, creating it if it does not exist yet.
   * @return Reference to the (possibly newly created) supervisor actor.
   */
  def getOrCreate: ActorRef
/**
* The supervisor name is based on the entity type and can be used in the actor name.
* @return Supervisor name.
*/
def supervisorName: String = implicitly[ClassTag[E]].runtimeClass.getSimpleName
} | Product-Foundry/akka-cqrs | core/src/main/scala/com/productfoundry/akka/cqrs/EntitySupervisorFactory.scala | Scala | apache-2.0 | 579 |
package io.getquill.context.finagle.postgres
import java.nio.charset.Charset
import java.time.{ LocalDate, LocalDateTime, ZoneId }
import java.util.{ Date, UUID }
import com.twitter.finagle.postgres.values.ValueDecoder
import com.twitter.util.Return
import com.twitter.util.Throw
import com.twitter.util.Try
import io.getquill.FinaglePostgresContext
import io.getquill.util.Messages.fail
import io.netty.buffer.ByteBuf
/**
 * quill decoders for the finagle-postgres backend, built on top of
 * finagle-postgres [[ValueDecoder]]s.
 */
trait FinaglePostgresDecoders {
  this: FinaglePostgresContext[_] =>
  import ValueDecoder._

  type Decoder[T] = FinaglePostgresDecoder[T]

  /**
   * Column decoder backed by a [[ValueDecoder]].
   *
   * @param vd      decoder applied to the raw column value
   * @param default handler invoked when decoding fails; the default handler
   *                aborts with the failure message via `fail`
   */
  case class FinaglePostgresDecoder[T](
    vd: ValueDecoder[T],
    default: Throwable => T = (e: Throwable) => fail(e.getMessage)
  ) extends BaseDecoder[T] {
    override def apply(index: Index, row: ResultRow, session: Session): T =
      row.getTry[T](index)(vd) match {
        case Return(r) => r
        case Throw(e) => default(e)
      }

    /**
     * Builds a decoder that first tries `vd` and, on failure, decodes the wire
     * value as `U` and maps it with `f`. Used below to accept alternative
     * Postgres column types (e.g. an int8 column read into Int).
     */
    def orElse[U](f: U => T)(implicit vdu: ValueDecoder[U]): FinaglePostgresDecoder[T] = {
      val mappedVd = vdu.map[T](f)
      FinaglePostgresDecoder[T](
        new ValueDecoder[T] {
          def decodeText(recv: String, text: String): Try[T] = {
            val t = vd.decodeText(recv, text)
            if (t.isReturn) t
            else mappedVd.decodeText(recv, text)
          }
          def decodeBinary(recv: String, bytes: ByteBuf, charset: Charset): Try[T] = {
            val t = vd.decodeBinary(recv, bytes, charset)
            if (t.isReturn) t
            else mappedVd.decodeBinary(recv, bytes, charset)
          }
        }
      )
    }
  }

  /** Lifts an in-scope [[ValueDecoder]] directly into a quill decoder. */
  implicit def decoderDirectly[T](implicit vd: ValueDecoder[T]): Decoder[T] = FinaglePostgresDecoder(vd)

  /** Decodes as `U`, then converts to `T` with `f`. */
  def decoderMapped[U, T](f: U => T)(implicit vd: ValueDecoder[U]): Decoder[T] = FinaglePostgresDecoder(vd.map[T](f))

  /** Wraps a decoder to produce Option. NOTE(review): ANY decode failure (not only
    * SQL NULL) is mapped to `None` rather than surfaced as an error — confirm. */
  implicit def optionDecoder[T](implicit d: Decoder[T]): Decoder[Option[T]] =
    FinaglePostgresDecoder[Option[T]](
      new ValueDecoder[Option[T]] {
        def decodeText(recv: String, text: String): Try[Option[T]] = Return(d.vd.decodeText(recv, text).toOption)
        def decodeBinary(recv: String, bytes: ByteBuf, charset: Charset): Try[Option[T]] = Return(d.vd.decodeBinary(recv, bytes, charset).toOption)
      },
      _ => None
    )

  implicit def mappedDecoder[I, O](implicit mapped: MappedEncoding[I, O], d: Decoder[I]): Decoder[O] =
    decoderMapped[I, O](mapped.f)(d.vd)

  implicit val stringDecoder: Decoder[String] = decoderDirectly[String]
  implicit val bigDecimalDecoder: Decoder[BigDecimal] = decoderDirectly[BigDecimal]
  implicit val booleanDecoder: Decoder[Boolean] = decoderDirectly[Boolean]
  implicit val shortDecoder: Decoder[Short] = decoderDirectly[Short]
  // NOTE(review): decodes a Short and narrows with toByte; values outside the
  // Byte range wrap silently — confirm this is acceptable.
  implicit val byteDecoder: Decoder[Byte] = decoderMapped[Short, Byte](_.toByte)
  // Numeric decoders fall back to the neighbouring wire type via orElse.
  implicit val intDecoder: Decoder[Int] = decoderDirectly[Int].orElse[Long](_.toInt)
  implicit val longDecoder: Decoder[Long] = decoderDirectly[Long].orElse[Int](_.toLong)
  implicit val floatDecoder: Decoder[Float] = decoderDirectly[Float].orElse[Double](_.toFloat)
  implicit val doubleDecoder: Decoder[Double] = decoderDirectly[Double]
  implicit val byteArrayDecoder: Decoder[Array[Byte]] = decoderDirectly[Array[Byte]]
  // NOTE(review): converts LocalDateTime to java.util.Date using the JVM default
  // time zone — confirm this matches session/server time zone expectations.
  implicit val dateDecoder: Decoder[Date] = decoderMapped[LocalDateTime, Date](d => Date.from(d.atZone(ZoneId.systemDefault()).toInstant))
  implicit val localDateDecoder: Decoder[LocalDate] = decoderDirectly[LocalDate].orElse[LocalDateTime](_.toLocalDate)
  implicit val localDateTimeDecoder: Decoder[LocalDateTime] = decoderDirectly[LocalDateTime].orElse[LocalDate](_.atStartOfDay)
  implicit val uuidDecoder: Decoder[UUID] = decoderDirectly[UUID]
}
| getquill/quill | quill-finagle-postgres/src/main/scala/io/getquill/context/finagle/postgres/FinaglePostgresDecoders.scala | Scala | apache-2.0 | 3,661 |
/**
* Created by ak on 12/20/2016.
*/
/**
 * Scratch class that only constructs a [[User]].
 *
 * NOTE(review): `main` is defined on a class, not an `object`, so it cannot serve
 * as a JVM entry point — confirm whether that is intentional.
 */
class Euuet {
  // Replaced deprecated procedure syntax (`def main(...) { }`) with `: Unit =`.
  def main(args: Array[String]): Unit = {
    // Instantiated only for its construction; the reference itself is unused.
    val p = new User
  }
}
| aksish/dukka | src/main/scala/Euuet.scala | Scala | mit | 121 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package toolkit.neuralnetwork.function
import libcog._
import toolkit.neuralnetwork.DifferentiableField
import DifferentiableField.GradientPort
import toolkit.neuralnetwork.operator.sumSpray
/**
 * Differentiable cross-entropy loss of the softmax of `inference` against
 * reference `labels`. Both inputs must be zero-dimensional fields with matching
 * field types and batch sizes (enforced by the `require`s below).
 */
class CrossEntropySoftmax private[CrossEntropySoftmax] (inference: DifferentiableField, labels: DifferentiableField)
  extends DifferentiableField {
  require(inference.batchSize == labels.batchSize,
    s"inference batch size (${inference.batchSize}) must match labels batch size ${labels.batchSize}")
  require(inference.forward.fieldType == labels.forward.fieldType,
    s"inference field type (${inference.forward.fieldType}) must match labels field type ${labels.forward.fieldType}")
  require(inference.forward.fieldShape.dimensions == 0,
    s"only zero-dimensional fields are supported, got ${inference.forward.fieldShape.dimensions}-dimension input")

  // (field, batchSize) pairs for the two inputs; the helpers below take them in this form.
  private val x1 = (inference.forward, inference.batchSize)
  private val x2 = (labels.forward, labels.batchSize)

  // Each input gets a forward Jacobian and a Jacobian-adjoint (backprop) mapping.
  override val inputs: Map[Symbol, GradientPort] = Map(
    'inference -> GradientPort(inference, dx1 => jacobian1(dx1, x1, x2), grad => jacobianAdjoint1(grad, x1, x2)),
    'labels -> GradientPort(labels, dx2 => jacobian2(dx2, x1, x2), grad => jacobianAdjoint2(grad, x1, x2)))

  // The forward pass reduces everything to a single scalar loss value.
  override val batchSize: Int = 1

  override val forward: libcog.Field = _forward(x1, x2)._1

  /** Cross-entropy of softmax(x1) against the reference labels x2.
    * The `max(..., 1e-4f)` clamps guard against division by zero and log(0). */
  private def _forward(x1: (Field, Int), x2: (Field, Int)): (Field, Int) = {
    val in = x1._1
    val ref = x2._1
    require(in.fieldType == ref.fieldType, "The field types of both inputs must be equal")
    require(in.fieldShape.dimensions == 0, "Only defined for zero dimensional fields")
    require(x1._2 == x2._2, "The batch sizes of both inputs must be equal")
    val batchSize = x1._2
    val softmax = exp(in) / max(sumSpray(exp(in), batchSize), 1e-4f)
    val logSoftmax = -log(max(softmax, 1e-4f))
    val crossEntropy = reduceSum(fieldReduceSum(ref * logSoftmax))
    (crossEntropy, batchSize)
  }

  /** Directional derivative of the loss w.r.t. the inference input, applied to dx1. */
  private def jacobian1(dx1: Field, x1: (Field, Int), x2: (Field, Int)): Field = {
    val in = x1._1
    val ref = x2._1
    val batchSize = x1._2
    val logSoftmaxJacobian = sumSpray(exp(in) * dx1, batchSize) / sumSpray(exp(in), batchSize) - dx1
    reduceSum(fieldReduceSum(ref * logSoftmaxJacobian))
  }

  /** Adjoint (backprop) of jacobian1: maps the upstream gradient back to the inference input. */
  private def jacobianAdjoint1(grad: Field, x1: (Field, Int), x2: (Field, Int)): Field = {
    val in = x1._1
    val ref = x2._1
    val batchSize = x1._2
    sumSpray(ref * grad / sumSpray(exp(in), batchSize), batchSize) * exp(in) - ref * grad
  }

  /** The loss is linear in the labels, so the Jacobian w.r.t. x2 is the forward
    * computation with the labels replaced by dx2. */
  private def jacobian2(dx2: Field, x1: (Field, Int), x2: (Field, Int)): Field = {
    val in = x1._1
    val batchSize = x1._2
    _forward((in, batchSize), (dx2, batchSize))._1
  }

  // NOTE(review): the upstream gradient `grad` is ignored here; that is only
  // correct if the incoming gradient is identically 1 (loss at the top of the
  // network) — confirm.
  private def jacobianAdjoint2(grad: Field, x1: (Field, Int), x2: (Field, Int)): Field = {
    val in = x1._1
    val batchSize = x1._2
    val softmax = exp(in) / max(sumSpray(exp(in), batchSize), 1e-4f)
    val logSoftmax = -log(max(softmax, 1e-4f))
    logSoftmax
  }

  // If you add/remove constructor parameters, you should alter the toString() implementation. */
  /** A string description of the instance in the "case class" style. */
  override def toString = this.getClass.getName +
    (inference, labels)
}
/** Factory for [[CrossEntropySoftmax]]; hides the `new` keyword at call sites. */
object CrossEntropySoftmax {
  def apply(inference: DifferentiableField, labels: DifferentiableField): CrossEntropySoftmax =
    new CrossEntropySoftmax(inference, labels)
}
| hpe-cct/cct-nn | src/main/scala/toolkit/neuralnetwork/function/CrossEntropySoftmax.scala | Scala | apache-2.0 | 4,065 |
package tests
import so.eval.{ EvaluationRequest, Router }
import so.eval.SandboxedLanguage.Result
import akka.pattern.ask
import scala.concurrent.Await
import scala.util.{ Failure, Try, Success }
class C extends LanguageTest {
  // Fixture program: prints a line to stdout and a message to stderr, exits 0.
  val code = """#include <stdio.h>
               |int main() {
               | printf("hello world!\\n");
               | fprintf(stderr, "hi from stderr");
               | return 0;
               |}""".stripMargin

  describe("The C implementation") {
    it("should be able to successfully compile and run C code") {
      val evaluation = Router.route("c", EvaluationRequest(code))
      evaluation should not be (None)
      val future = router ? evaluation.get
      val futureResult = Await.result(future, timeout.duration).asInstanceOf[Try[Result]]
      futureResult should be('success)
      val Success(result) = futureResult
      // Both streams captured, sane wall time, clean exit and a recorded
      // compilation step.
      inside(result) {
        case Result(stdout, stderr, wallTime, exitCode, compilationResult, outputFiles) =>
          stdout.trim should be("hello world!")
          stderr.trim should be("hi from stderr")
          wallTime should be < 1000L
          exitCode should be(0)
          compilationResult should not be (None)
      }
    }

    it("should compile multiple input files with .c extensions") {
      val evaluation = Router.route(
        "c",
        EvaluationRequest(
          code,
          files = Some(
            Map(
              "foo.c" -> "bar",
              "baz.c" -> "buz"))))
      evaluation should not be (None)
      // Both auxiliary .c files must appear on the compiler command line.
      val compileCommand = evaluation.get.compileCommand.get.mkString(" ")
      compileCommand should include("foo.c")
      compileCommand should include("baz.c")
      // Clean up the sandbox directory created for this evaluation.
      evaluation.get.deleteHomeDirectory()
    }

    it("should be able to run with compilationOnly set") {
      val evaluation = Router.route(
        "c",
        EvaluationRequest(
          code,
          compilationOnly = true))
      evaluation should not be (None)
      val future = router ? evaluation.get
      val futureResult = Await.result(future, timeout.duration).asInstanceOf[Try[Result]]
      futureResult should be('success)
      val Success(result) = futureResult
      // With compilationOnly the program is never executed: both streams are
      // empty and no separate compilation result payload is attached.
      inside(result) {
        case Result(stdout, stderr, wallTime, exitCode, compilationResult, outputFiles) =>
          stdout.trim should be("")
          stderr.trim should be("")
          wallTime should be < 1000L
          exitCode should be(0)
          compilationResult should be(None)
      }
    }
  }
}
| eval-so/minibcs | src/test/scala/languages/C.scala | Scala | apache-2.0 | 2,507 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.lang.scala
import org.junit.{Assert, Test}
import org.junit.Assert
import org.scalatest.junit.JUnitSuite
import scala.concurrent.duration._
import scala.language.postfixOps
import org.mockito.Mockito._
import org.mockito.Matchers._
import org.junit.Assert.assertEquals
import org.junit.Assert.assertTrue
import org.junit.Assert.assertFalse
import rx.lang.scala.subscriptions.{SerialSubscription, MultipleAssignmentSubscription, CompositeSubscription}
/**
 * Tests for the Scala wrappers around Rx subscriptions: plain subscriptions,
 * composite subscriptions, and the two single-slot containers —
 * MultipleAssignmentSubscription (replacing does NOT unsubscribe the old
 * subscription) vs. SerialSubscription (replacing DOES unsubscribe it).
 */
class SubscriptionTests extends JUnitSuite {
  // A fresh subscription starts subscribed and flips on unsubscribe().
  @Test
  def subscriptionCreate() {
    val subscription = Subscription()
    assertFalse(subscription.isUnsubscribed)
    subscription.unsubscribe()
    assertTrue(subscription.isUnsubscribed)
  }
  // The unsubscribe action must run exactly once: a second unsubscribe()
  // must not toggle `called` back to false.
  @Test
  def subscriptionUnsubscribeIdempotent() {
    var called = false
    val subscription = Subscription{ called = !called }
    assertFalse(called)
    assertFalse(subscription.isUnsubscribed)
    subscription.unsubscribe()
    assertTrue(called)
    assertTrue(subscription.isUnsubscribed)
    subscription.unsubscribe()
    assertTrue(called)
    assertTrue(subscription.isUnsubscribed)
  }
  // Unsubscribing a composite unsubscribes all members, and any subscription
  // added afterwards is unsubscribed immediately.
  @Test
  def compositeSubscriptionAdd() {
    val s0 = Subscription()
    val s1 = Subscription()
    val composite = CompositeSubscription()
    assertFalse(composite.isUnsubscribed)
    composite += s0
    composite += s1
    composite.unsubscribe()
    assertTrue(composite.isUnsubscribed)
    assertTrue(s0.isUnsubscribed)
    assertTrue(s1.isUnsubscribed)
    val s2 = Subscription{}
    assertFalse(s2.isUnsubscribed)
    composite += s2
    assertTrue(s2.isUnsubscribed)
  }
  // Removing a member from a composite unsubscribes that member on removal.
  @Test
  def compositeSubscriptionRemove() {
    val s0 = Subscription()
    val composite = CompositeSubscription()
    composite += s0
    assertFalse(s0.isUnsubscribed)
    composite -= s0
    assertTrue(s0.isUnsubscribed)
    composite.unsubscribe()
    assertTrue(composite.isUnsubscribed)
    assertTrue(s0.isUnsubscribed)
  }
  // Replacing the held subscription does NOT unsubscribe the previous one
  // (s0 stays subscribed even after the container is unsubscribed).
  @Test
  def multiAssignmentSubscriptionAdd() {
    val s0 = Subscription()
    val s1 = Subscription()
    val multiple = MultipleAssignmentSubscription()
    assertFalse(multiple.isUnsubscribed)
    assertFalse(s0.isUnsubscribed)
    assertFalse(s1.isUnsubscribed)
    multiple.subscription = s0
    assertFalse(s0.isUnsubscribed)
    assertFalse(s1.isUnsubscribed)
    multiple.subscription = s1
    assertFalse(s0.isUnsubscribed) // difference with SerialSubscription
    assertFalse(s1.isUnsubscribed)
    multiple.unsubscribe()
    assertTrue(multiple.isUnsubscribed)
    assertFalse(s0.isUnsubscribed)
    assertTrue(s1.isUnsubscribed)
    val s2 = Subscription()
    assertFalse(s2.isUnsubscribed)
    multiple.subscription = s2
    assertTrue(s2.isUnsubscribed)
    assertFalse(s0.isUnsubscribed)
  }
  // Replacing the held subscription DOES unsubscribe the previous one, and
  // assignments after unsubscribe() are unsubscribed immediately.
  @Test
  def serialSubscriptionAdd() {
    val s0 = Subscription()
    val s1 = Subscription()
    val serial = SerialSubscription()
    assertFalse(serial.isUnsubscribed)
    assertFalse(s0.isUnsubscribed)
    assertFalse(s1.isUnsubscribed)
    serial.subscription = s0
    assertFalse(s0.isUnsubscribed)
    assertFalse(s1.isUnsubscribed)
    serial.subscription = s1
    assertTrue(s0.isUnsubscribed) // difference with MultipleAssignmentSubscription
    assertFalse(s1.isUnsubscribed)
    serial.unsubscribe()
    assertTrue(serial.isUnsubscribed)
    assertTrue(s1.isUnsubscribed)
    val s2 = Subscription()
    assertFalse(s2.isUnsubscribed)
    serial.subscription = s2
    assertTrue(s2.isUnsubscribed)
  }
}
| samuelgruetter/RxScala | src/test/scala/rx/lang/scala/SubscriptionTests.scala | Scala | apache-2.0 | 4,244 |
package com.twitter.finagle.addr
import com.twitter.concurrent.{Offer, Broker}
import com.twitter.finagle.{Addr, Address}
import com.twitter.finagle.stats.{StatsReceiver, NullStatsReceiver}
import com.twitter.finagle.util.DefaultTimer
import com.twitter.util.{Duration, Future, Time, Timer}
import scala.collection.immutable.Queue
private[finagle] object StabilizingAddr {
  private[finagle /*(testing*/ ] object State extends Enumeration {
    type Health = Value
    // explicitly define intuitive ids as they
    // are exported for stats.
    val Healthy = Value(1)
    val Unknown = Value(0)
    val Unhealthy = Value(-1)
  }
  // True iff `elem` appears as the first component of any pair in `q`.
  private def qcontains[T](q: Queue[(T, _)], elem: T): Boolean =
    q exists { case (e, _) => e == elem }
  /**
   * A StabilizingAddr conservatively removes elements from a bound
   * Addr depending on the source health (exposed through `pulse`).
   * More specifically, removes are delayed until the source is in a
   * healthy state for at least `grace` period.
   *
   * @param addr An offer for underlying address updates.
   * @param pulse An offer for communicating group health.
   * Invariant: The offer should communicate Health in FIFO order
   * with respect to time.
   * @param grace The duration that must elapse before an element
   * is removed from the group.
   */
  def apply(
    addr: Offer[Addr],
    pulse: Offer[State.Health],
    grace: Duration,
    statsReceiver: StatsReceiver = NullStatsReceiver,
    timer: Timer = DefaultTimer
  ): Offer[Addr] = new Offer[Addr] {
    import State._
    implicit val injectTimer = timer
    // Gauges exported for observability: `nq` is the size of the pending
    // removal queue, `healthStat` mirrors the last Health id received.
    @volatile var nq = 0
    @volatile var healthStat = Healthy.id
    val health = statsReceiver.addGauge("health") { healthStat }
    val limbo = statsReceiver.addGauge("limbo") { nq }
    // Stabilized address updates are communicated to readers via this broker.
    val stabilized = new Broker[Addr]
    /**
     * Exclusively maintains the elements in current
     * based on adds, removes, and health transitions.
     * Removes are delayed for grace period and each health
     * transition resets the grace period.
     */
    def loop(
      remq: Queue[(Address, Time)],
      h: Health,
      active: Set[Address],
      needPush: Boolean,
      srcAddr: Addr
    ): Future[Unit] = {
      nq = remq.size
      // Select over four concurrent events: health pulses, address updates,
      // removal-queue expiry, and pushing a stabilized snapshot to readers.
      Offer.select(
        pulse map { newh =>
          healthStat = newh.id
          // If our health transitions into healthy, reset removal
          // times foreach elem in remq.
          newh match {
            case newh if h == newh =>
              loop(remq, newh, active, needPush, srcAddr)
            case Healthy =>
              // Transitioned to healthy: push back
              val newTime = Time.now + grace
              val newq = remq map { case (elem, _) => (elem, newTime) }
              loop(newq, Healthy, active, needPush, srcAddr)
            case newh =>
              loop(remq, newh, active, needPush, srcAddr)
          }
        },
        addr map {
          case addr @ Addr.Bound(newSet, _) =>
            // Update our pending queue so that newly added
            // entries aren't later removed.
            var q = remq filter { case (e, _) => !(newSet contains e) }
            // Add newly removed elements to the remove queue.
            val until = Time.now + grace
            for (el <- active &~ newSet if !qcontains(q, el))
              q = q.enqueue((el, until))
            loop(q, h, active ++ newSet, true, addr)
          case addr =>
            // A nonbound address will enqueue all active members
            // for removal, so that if we become bound again, we can
            // continue on merrily.
            val until = Time.now + grace
            val q = remq.enqueue(active.map(el => (el, until)))
            loop(q, h, active, true, addr)
        },
        // Only expire queued removals while healthy; an unhealthy source
        // freezes the removal clock.
        if (h != Healthy || remq.isEmpty) Offer.never
        else {
          // Note: remq is ordered by 'until' time.
          val ((elem, until), nextq) = remq.dequeue
          Offer.timeout(until - Time.now) map { _ => loop(nextq, h, active - elem, true, srcAddr) }
        },
        if (!needPush) Offer.never
        else {
          // We always bind if active is nonempty. Otherwise we
          // pass through the current active address.
          val attrs = srcAddr match {
            case Addr.Bound(_, attrs) => attrs
            case _ => Addr.Metadata.empty
          }
          val addr =
            if (active.nonEmpty) Addr.Bound(active, attrs)
            else srcAddr
          stabilized.send(addr) map { _ => loop(remq, h, active, false, srcAddr) }
        }
      )
    }
    // Start with an empty removal queue, assumed-healthy source, and a
    // pending address until the first update arrives.
    loop(Queue.empty, Healthy, Set.empty, false, Addr.Pending)
    // Defer to the underlying Offer.
    def prepare() = stabilized.recv.prepare()
  }
}
| twitter/finagle | finagle-core/src/main/scala/com/twitter/finagle/addr/StabilizingAddr.scala | Scala | apache-2.0 | 4,739 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.comm
import org.apache.toree.comm.CommCallbacks.{CloseCallback, OpenCallback}
import org.apache.toree.kernel.protocol.v5
import org.apache.toree.kernel.protocol.v5.UUID
import org.apache.toree.kernel.protocol.v5.content.{CommClose, CommOpen}
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.scalatest.mock.MockitoSugar
import org.scalatest.{BeforeAndAfter, Matchers, FunSpec}
import org.mockito.Mockito._
import org.mockito.Matchers.{eq => mockEq, _}
/**
 * Unit tests for CommManager: target (un)registration, the link/unlink
 * callbacks it installs on open/close events, and comm opening.
 */
class CommManagerSpec extends FunSpec with Matchers with BeforeAndAfter
  with MockitoSugar
{
  private val TestTargetName = "some target"
  private val TestCommId = java.util.UUID.randomUUID().toString
  /** Creates a new Comm Manager, filling in the Comm writer method. */
  private def newCommManager(
    commRegistrar: CommRegistrar,
    commWriter: CommWriter
  ): CommManager = new CommManager(commRegistrar) {
    override protected def newCommWriter(commId: UUID): CommWriter = commWriter
  }
  private var mockCommWriter: CommWriter = _
  private var mockCommRegistrar: CommRegistrar = _
  private var commManager: CommManager = _
  // Fresh mocks per test; the registrar mock returns itself from its builder
  // methods so that chained calls inside CommManager keep working.
  before {
    mockCommWriter = mock[CommWriter]
    mockCommRegistrar = mock[CommRegistrar]
    doReturn(mockCommRegistrar).when(mockCommRegistrar)
      .register(anyString())
    doReturn(mockCommRegistrar).when(mockCommRegistrar)
      .addOpenHandler(any(classOf[OpenCallback]))
    doReturn(mockCommRegistrar).when(mockCommRegistrar)
      .addCloseHandler(any(classOf[CloseCallback]))
    doReturn(mockCommRegistrar).when(mockCommRegistrar)
      .withTarget(anyString())
    commManager = newCommManager(mockCommRegistrar, mockCommWriter)
  }
  describe("CommManager") {
    describe("#withTarget") {
      it("should return a registrar using the target name provided") {
        val commRegistrar = commManager.withTarget(TestTargetName)
        verify(commRegistrar).withTarget(TestTargetName)
      }
    }
    describe("#register") {
      it("should register the target name provided") {
        commManager.register(TestTargetName)
        verify(mockCommRegistrar).register(TestTargetName)
      }
      // TODO: Is there a better/cleaner way to assert the contents of the callback?
      it("should add a link callback to the received open events") {
        var linkFunc: OpenCallback = null
        // Setup used to extract the function of the callback
        doAnswer(new Answer[CommRegistrar]() {
          override def answer(p1: InvocationOnMock): CommRegistrar = {
            linkFunc = p1.getArguments.head.asInstanceOf[OpenCallback]
            mockCommRegistrar
          }
        }).when(mockCommRegistrar).addOpenHandler(any(classOf[OpenCallback]))
        // Call register and verify that the underlying registrar method called
        commManager.register(TestTargetName)
        verify(mockCommRegistrar).addOpenHandler(any(classOf[OpenCallback]))
        // Trigger the callback to test what it does
        linkFunc(mock[CommWriter], TestCommId, TestTargetName, v5.MsgData.Empty)
        verify(mockCommRegistrar).link(TestTargetName, TestCommId)
      }
      // TODO: Is there a better/cleaner way to assert the contents of the callback?
      it("should add an unlink callback to the received close events") {
        var unlinkFunc: CloseCallback = null
        // Setup used to extract the function of the callback
        doAnswer(new Answer[CommRegistrar]() {
          override def answer(p1: InvocationOnMock): CommRegistrar = {
            unlinkFunc = p1.getArguments.head.asInstanceOf[CloseCallback]
            mockCommRegistrar
          }
        }).when(mockCommRegistrar).addCloseHandler(any(classOf[CloseCallback]))
        // Call register and verify that the underlying registrar method called
        commManager.register(TestTargetName)
        verify(mockCommRegistrar).addCloseHandler(any(classOf[CloseCallback]))
        // Trigger the callback to test what it does
        unlinkFunc(mock[CommWriter], TestCommId, v5.MsgData.Empty)
        verify(mockCommRegistrar).unlink(TestCommId)
      }
    }
    // The next three groups use a real registrar/storage pair (not the mock)
    // so that registration state can be observed through isRegistered.
    describe("#unregister") {
      it("should remove the target from the collection of targets") {
        val commManager = newCommManager(
          new CommRegistrar(new CommStorage()),
          mockCommWriter
        )
        commManager.register(TestTargetName)
        commManager.unregister(TestTargetName)
        commManager.isRegistered(TestTargetName) should be (false)
      }
    }
    describe("#isRegistered") {
      it("should return true if the target is currently registered") {
        val commManager = newCommManager(
          new CommRegistrar(new CommStorage()),
          mockCommWriter
        )
        commManager.register(TestTargetName)
        commManager.isRegistered(TestTargetName) should be (true)
      }
      it("should return false if the target is not currently registered") {
        val commManager = newCommManager(
          new CommRegistrar(new CommStorage()),
          mockCommWriter
        )
        commManager.register(TestTargetName)
        commManager.unregister(TestTargetName)
        commManager.isRegistered(TestTargetName) should be (false)
      }
      it("should return false if the target has never been registered") {
        val commManager = newCommManager(
          new CommRegistrar(new CommStorage()),
          mockCommWriter
        )
        commManager.isRegistered(TestTargetName) should be (false)
      }
    }
    describe("#open") {
      it("should return a new CommWriter instance that links during open") {
        val commWriter = commManager.open(TestTargetName, v5.MsgData.Empty)
        commWriter.writeOpen(TestTargetName)
        // Should have been executed once during commManager.open(...) and
        // another time with the call above
        verify(mockCommRegistrar, times(2))
          .link(mockEq(TestTargetName), any[v5.UUID])
      }
      it("should return a new CommWriter instance that unlinks during close") {
        val commWriter = commManager.open(TestTargetName, v5.MsgData.Empty)
        commWriter.writeClose(v5.MsgData.Empty)
        verify(mockCommRegistrar).unlink(any[v5.UUID])
      }
      it("should initiate a comm_open") {
        commManager.open(TestTargetName, v5.MsgData.Empty)
        verify(mockCommWriter).writeOpen(TestTargetName, v5.MsgData.Empty)
      }
    }
  }
}
| hmost1/incubator-toree | protocol/src/test/scala/org/apache/toree/comm/CommManagerSpec.scala | Scala | apache-2.0 | 7,277 |
/* NSC -- new Scala compiler
* Copyright 2005-2013 LAMP/EPFL
* @author Martin Odersky
*/
package dotty.tools
package dotc
import core.Contexts.Context
import reporting.Reporter
/* To do:
*/
object Main extends Driver {
  // Resident (keep-alive) compilation is not implemented: calling this aborts
  // via unsupported(). The commented-out block sketches the intended
  // line-by-line compile loop.
  def resident(compiler: Compiler): Reporter = unsupported("resident") /*loop { line =>
    val command = new CompilerCommand(line split "\\\\s+" toList, new Settings(scalacError))
    compiler.reporter.reset()
    new compiler.Run() compile command.files
  }*/
  // One fresh Compiler instance per driver run.
  override def newCompiler(): Compiler = new Compiler
  // Dispatch on the `resident` setting: resident mode (unsupported) vs. the
  // normal one-shot compile provided by Driver.
  override def doCompile(compiler: Compiler, fileNames: List[String], reporter: Option[Reporter] = None)(implicit ctx: Context): Reporter = {
    if (new config.Settings.Setting.SettingDecorator[Boolean](ctx.base.settings.resident).value(ctx))
      resident(compiler)
    else
      super.doCompile(compiler, fileNames, reporter)
  }
}
| spetz911/dotty | src/dotty/tools/dotc/Main.scala | Scala | bsd-3-clause | 879 |
package chandu0101.scalajs.react.components.semanticui
import chandu0101.macros.tojs.JSMacro
import scala.scalajs.js
// Enumeration-style value class for the Semantic UI button "animated" variant.
// The private constructor restricts instances to the companion's values.
case class ButtonAnimatedType private (value: String) extends AnyVal
object ButtonAnimatedType {
  val fade = ButtonAnimatedType("fade")
  val vertical = ButtonAnimatedType("vertical")
  // All valid animation variants.
  val values = List(fade, vertical)
}
// Enumeration-style value class for the direction a pointing element faces.
// The private constructor restricts instances to the companion's values.
case class PointingDirection private (value: String) extends AnyVal
object PointingDirection {
  val left = PointingDirection("left")
  val right = PointingDirection("right")
  val top = PointingDirection("top")
  val top_left = PointingDirection("top left")
  val top_right = PointingDirection("top right")
  val bottom = PointingDirection("bottom")
  val bottom_left = PointingDirection("bottom left")
  val bottom_right = PointingDirection("bottom right")
  // All valid directions — added for consistency with the other
  // enumeration-style companions in this file (e.g. ButtonAnimatedType).
  val values = List(left, right, top, top_left, top_right,
    bottom, bottom_left, bottom_right)
}
}
// Value-class wrapper for an icon name string — presumably a Semantic UI icon
// identifier; the valid value set is not visible here.
case class SuiIconType(value: String) extends AnyVal
// Value-class wrapper for a country name/code string used by flag components
// (assumption based on name — TODO confirm against callers).
case class SuiCountry(value: String) extends AnyVal
// Scala.js facade for the plain JS object passed by the rating component's
// callbacks: the current rating and the maximum rating.
@js.native
trait SuiRateObject extends js.Object {
  def rating: Int = js.native
  def maxRating: Int = js.native
}
// Scala.js facade for pagination event data: the page that became active.
@js.native
trait PaginationEventData extends js.Object {
  def activePage: Int = js.native
}
// Scala.js facade for a dropdown option object: unique key, display text,
// and the value submitted on selection.
@js.native
trait SuiDropDownOption extends js.Object {
  def key: String = js.native
  def text: String = js.native
  def value: String = js.native
}
// Scala.js facade for the data object delivered by dropdown change events;
// exposes only the newly selected value.
@js.native
trait SuiDropdownChangeObject extends js.Object {
  def value: String = js.native
}
object SuiDropDownOption {
  // Builds the JS option object via a dynamic literal and casts it to the
  // facade type (standard Scala.js pattern for constructing js.native traits).
  def apply(key: String, text: String, value: String): SuiDropDownOption = {
    js.Dynamic.literal(key = key, text = text, value = value).asInstanceOf[SuiDropDownOption]
  }
  // Convenience overload: use the same string for key, text, and value.
  def apply(str: String): SuiDropDownOption = {
    js.Dynamic.literal(key = str, text = str, value = str).asInstanceOf[SuiDropDownOption]
  }
}
// Enumeration-style value class: whether an element is divided/oriented
// horizontally or vertically.
class SuiHorizontallyOrVertically(val value: String) extends AnyVal
object SuiHorizontallyOrVertically {
  // Fixed: these were previously built with `new Left(...)` (scala.Left from
  // Predef), which gave them type Left[String, Nothing] rather than the
  // declared wrapper type.
  val horizontally = new SuiHorizontallyOrVertically("horizontally")
  val vertically = new SuiHorizontallyOrVertically("vertically")
  val values = List(horizontally, vertically)
}
// Enumeration-style value class for the HTML control rendered by a form field.
class SuiFormFieldControlType(val value: String) extends AnyVal
object SuiFormFieldControlType {
  // Fixed: these were previously built with `new Left(...)` (scala.Left from
  // Predef), so their static type was Left[String, Nothing] instead of
  // SuiFormFieldControlType.
  val button = new SuiFormFieldControlType("button")
  val input = new SuiFormFieldControlType("input")
  val select = new SuiFormFieldControlType("select")
  val textarea = new SuiFormFieldControlType("textarea")
  val values = List(button, input, select, textarea)
}
// Enumeration-style value class for the dimmer variant used behind a modal.
class SuiModalDimmer(val value: String) extends AnyVal
object SuiModalDimmer {
  // Fixed: previously constructed as `new Left(...)` (scala.Left), which made
  // the members Left[String, Nothing] instead of SuiModalDimmer.
  val inverted = new SuiModalDimmer("inverted")
  val blurring = new SuiModalDimmer("blurring")
  val values = List(inverted, blurring)
}
// Enumeration-style value class for the four cardinal attachment directions.
class SuiFourDirections(val value: String) extends AnyVal
object SuiFourDirections {
  // Fixed: previously constructed as `new Left(...)` (scala.Left), which made
  // the members Left[String, Nothing] instead of SuiFourDirections.
  val left = new SuiFourDirections("left")
  val right = new SuiFourDirections("right")
  val bottom = new SuiFourDirections("bottom")
  val top = new SuiFourDirections("top")
  val values = List(left, right, bottom, top)
}
| aparo/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/semanticui/types.scala | Scala | apache-2.0 | 2,712 |
/* Copyright 2009-2021 EPFL, Lausanne */
import stainless.annotation._
object While1 {
  // Verification benchmark: the imperative vars and while-loop are the point
  // of the test (Stainless proves the `ensuring` postcondition), so this must
  // not be rewritten in a functional style.
  def foo(): Int = {
    var a = 0
    var i = 0
    // Loop maintains a == i; terminates with i == 10, hence a == 10.
    while(i < 10) {
      a = a + 1
      i = i + 1
    }
    a
  } ensuring(_ == 10)
}
| epfl-lara/stainless | frontends/benchmarks/imperative/valid/While1.scala | Scala | apache-2.0 | 227 |
// AORTA is copyright (C) 2012 Dustin Carlino, Mike Depinet, and Piyush
// Khandelwal of UT Austin
// License: GNU GPL v2
package utexas.aorta.sim.intersections
import utexas.aorta.map.{Turn, Line, Vertex}
import utexas.aorta.sim.drivers.Agent
import utexas.aorta.sim.make.IntersectionType
import utexas.aorta.common.{Physics, cfg, Util}
import scala.collection.mutable
/**
 * AIM-style (Autonomous Intersection Management) reservation policy: drivers
 * request slots through the intersection, and requests are rejected when the
 * predicted crossing times at precomputed turn-conflict points overlap within
 * a slack window. end_step() additionally asserts that no two accepted agents
 * actually crossed their shared conflict point in the same tick.
 */
class AIMPolicy(vertex: Vertex, ordering: IntersectionOrdering[Ticket])
  extends ReservationPolicy(vertex, ordering)
{
  // We can't model when drivers will cross conflict points exactly
  private val slack = 15 * cfg.dt_s
  // For every pair of conflicting turns, where along each turn the conflict
  // point lies. Symmetric: conflict_map(t1)(t2) == conflict_map(t2)(t1).
  private val conflict_map: Map[Turn, Map[Turn, Conflict]] = find_conflicts()
  // These're for conflict detection
  // How far along their turn each agent was during last tick
  private val dist_last_tick = new mutable.HashMap[Agent, Double]()
  private val exited_this_tick = new mutable.HashSet[Ticket]()
  // TODO serialization
  // Agents must pause a moment, be the head of their queue, and be close enough
  // to us (in case they looked ahead over small edges).
  override def candidates =
    request_queue.filter(ticket =>
      (ticket.a.is_stopped &&
       ticket.a.cur_queue.head.get == ticket.a &&
       ticket.a.how_far_away(intersection) <= cfg.end_threshold &&
       !ticket.turn_blocked)
    )
  // Accept only if the requester's earliest arrival at each shared conflict
  // point cannot overlap (within `slack`) with any already-accepted ticket.
  override def can_accept(ticket: Ticket): Boolean = {
    // See if there's a predicted conflict with any agent that's been accepted
    for (t <- accepted if t.turn.conflicts_with(ticket.turn)) {
      // Factor in slack. These times are the earliest possible.
      val our_time = sim.tick + conflict_map(t.turn)(ticket.turn).time(ticket.turn, 0)
      val their_time = t.accept_tick + conflict_map(t.turn)(ticket.turn).time(t.turn, 0)
      val range1 = our_time to our_time + slack by cfg.dt_s
      val range2 = their_time to their_time + slack by cfg.dt_s
      if (range1.intersect(range2).nonEmpty) {
        //println(s"clash (slack=$slack): ${ticket.a} @ $our_time vs ${t.a} @ $their_time")
        return false
      }
    }
    return true
  }
  // Check for collisions by seeing how close agents are to pre-defined collision point
  override def end_step() {
    val in_intersection = accepted.filter(t => t.a.at.on == t.turn) ++ exited_this_tick
    // O(n^2 / 2), n = agents doing turns
    for (t1 <- in_intersection; t2 <- in_intersection if t1.a.id.int < t2.a.id.int) {
      if (t1.turn.conflicts_with(t2.turn)) {
        val conflict = conflict_map(t1.turn)(t2.turn)
        // Are the agents near the collision point? Negative means before point, positive means
        // after it
        val delta1 =
          if (exited_this_tick.contains(t1)) t1.turn.length else t1.a.at.dist - conflict.dist(t1.turn)
        val delta2 =
          if (exited_this_tick.contains(t2)) t2.turn.length else t2.a.at.dist - conflict.dist(t2.turn)
        // If the agents cross the point (neg -> pos) the same tick, then they definitely hit!
        // If they're not in dist_last_tick, then this is their first tick in the intersection. If
        // they're already positive (past the point), then they crossed it! Hence default value of
        // 0.
        // TODO weird type inference issues here, hence intermediates
        //val old_delta1 = dist_last_tick.getOrElse(t1.a, 0) - conflict.dist(t1.turn)
        //val old_delta2 = dist_last_tick.getOrElse(t2.a, 0) - conflict.dist(t2.turn)
        val old_dist1: Double = dist_last_tick.getOrElse(t1.a, 0)
        val old_delta1 = old_dist1 - conflict.dist(t1.turn)
        val old_dist2: Double = dist_last_tick.getOrElse(t2.a, 0)
        val old_delta2 = old_dist2 - conflict.dist(t2.turn)
        if ((old_delta1 < 0 && delta1 > 0) && (old_delta2 < 0 && delta2 > 0)) {
          throw new Exception(s"${t1.a} and ${t2.a} crossed at an AIM intersection at ${sim.tick}!")
        }
      }
      // Update distances, just for people actually still in the intersection
      for (t <- accepted.filter(t => t.a.at.on == t.turn)) {
        dist_last_tick(t.a) = t.a.at.dist
      }
    }
    // Cleanup
    dist_last_tick --= exited_this_tick.map(_.a)
    exited_this_tick.clear()
  }
  override def handle_exit(t: Ticket) {
    super.handle_exit(t)
    exited_this_tick += t
    // We want to run end_step the tick agents leave, even if none are left in the intersection,
    // since there's a case where exiting agents collide.
    sim.active_intersections += intersection
    // Also check that our slack value is sufficient.
    val projected = Physics.simulate_steps(t.turn.length + cfg.end_threshold, 0, t.turn.speed_limit)
    if (t.duration < projected || t.duration > projected + slack) {
      throw new Exception(s"Actual duration ${t.duration} isn't in [$projected, ${projected + slack}]")
    }
  }
  override def policy_type = IntersectionType.AIM
  // Where the conflict point lies along each of the two conflicting turns.
  case class Conflict(turn1: Turn, collision_dist1: Double, turn2: Turn, collision_dist2: Double) {
    // TODO awkward that all methods are duped for 1,2
    def dist(turn: Turn) = turn match {
      case `turn1` => collision_dist1
      case `turn2` => collision_dist2
      case _ => throw new IllegalArgumentException(s"$turn doesn't belong to $this")
    }
    // Includes end_threshold, so assumes we're starting right at that mark...
    def time(turn: Turn, initial_speed: Double) = turn match {
      case `turn1` => Physics.simulate_steps(collision_dist1 + cfg.end_threshold, initial_speed, turn1.speed_limit)
      case `turn2` => Physics.simulate_steps(collision_dist2 + cfg.end_threshold, initial_speed, turn2.speed_limit)
      case _ => throw new IllegalArgumentException(s"$turn doesn't belong to $this")
    }
  }
  // Builds the symmetric conflict map over all pairs of this vertex's turns.
  private def find_conflicts(): Map[Turn, Map[Turn, Conflict]] = {
    val all_conflicts =
      for (t1 <- intersection.v.turns; t2 <- intersection.v.turns if t1 != t2)
        yield make_conflict(t1, t2)
    val map = intersection.v.turns.map(t => t -> new mutable.HashMap[Turn, Conflict]).toMap
    for (c <- all_conflicts.flatten) {
      map(c.turn1)(c.turn2) = c
      map(c.turn2)(c.turn1) = c
    }
    return intersection.v.turns.map(t => t -> map(t).toMap).toMap
  }
  // Intersect the two turns' conflict lines; if they share a destination lane
  // but the lines don't cross, force a conflict at the turns' ends.
  private def make_conflict(turn1: Turn, turn2: Turn): Option[Conflict] =
    turn1.conflict_line.segment_intersection(turn2.conflict_line) match {
      case Some(pt) => Some(Conflict(
        turn1, new Line(turn1.conflict_line.start, pt).length,
        turn2, new Line(turn2.conflict_line.start, pt).length
      ))
      // if same destination lane and doesnt conflict normally by the line, force collision at end
      case None if turn1.to == turn2.to => Some(Conflict(turn1, turn1.length, turn2, turn2.length))
      case None => None
    }
}
| dabreegster/aorta | utexas/aorta/sim/intersections/AIMPolicy.scala | Scala | gpl-2.0 | 6,710 |
// Copyright (c) 2011 Paul Butcher
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package com.borachio.scalatest
import com.borachio.AbstractMockFactory
import org.scalatest.{BeforeAndAfterEach, Suite}
/** Trait that can be mixed into a [[http://www.scalatest.org/ ScalaTest]] suite to provide
* mocking support.
*
* See [[com.borachio]] for overview documentation.
*/
trait MockFactory extends AbstractMockFactory with BeforeAndAfterEach { this: Suite =>

  /** When true (the default), expectations are verified after each test. */
  protected var autoVerify = true

  /** Clears any expectations carried over from a previous test. */
  override def beforeEach() { resetExpectations }

  /** Verifies all recorded expectations unless auto-verification is off. */
  override def afterEach() {
    if (autoVerify) { verifyExpectations }
  }
}
| paulbutcher/borachio | frameworks/scalatest/src/main/scala/MockFactory.scala | Scala | mit | 1,677 |
/*
* Licensed to Intel Corporation under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Intel Corporation licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import breeze.numerics.{abs, pow}
import com.intel.analytics.bigdl.nn.abstractnn.TensorModule
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.RandomGenerator._
import scala.reflect.ClassTag
/**
* a convolution of width 1, commonly used for word embeddings;
*/
@SerialVersionUID( - 4832171200145114633L)
class LookupTable[T: ClassTag]
(val nIndex: Int, val nOutput: Int, val paddingValue: Double = 0,
  val maxNorm: Double = Double.MaxValue,
  val normType: Double = 2.0, shouldScaleGradByFreq: Boolean = false)
(implicit ev: TensorNumeric[T]) extends TensorModule[T] {

  // Embedding matrix: one row of nOutput weights per index, plus its gradient.
  val weight = Tensor[T](nIndex, nOutput)
  val gradWeight = Tensor[T](nIndex, nOutput).zero()

  // Scratch buffers reused between calls to avoid reallocation.
  private var inputBuffer = Tensor[T]()
  private var normBuffer = Tensor[T]()
  private val countBuffer = Tensor[T]()

  reset()

  override def reset(): Unit = {
    // todo: stdv = stdv or 1
    weight.apply1(_ => ev.fromType[Double](RNG.normal(0, 1)))
  }

  /**
   * Renormalizes the weight rows referenced by `input` so that each row's
   * `normType`-norm does not exceed `maxNorm`. No-op when maxNorm is unset.
   * Indices in `input` are 1-based and must lie in [1, nIndex].
   */
  private def renorm(input : Tensor[T]): Unit = {
    if (Double.MaxValue == maxNorm) {
      return
    }
    normBuffer.resize(input.size()).copy(input)
    if (normBuffer.dim() == 2) {
      normBuffer = normBuffer.view(normBuffer.nElement())
    }
    require(weight.isContiguous(), "weight must be contiguous")
    require(normBuffer.isContiguous(), "input must be contiguous")
    require(normBuffer.nDimension() == 1, "idx must be a vector")
    require(normType > 0, "non-positive-norm not supported")
    val rowIdx = normBuffer.storage().array()
    val rowOffset = normBuffer.storageOffset() - 1
    var numEle = normBuffer.nElement()
    val stride = weight.stride(1)
    val gw = weight.storage().array()
    val gw_offset = weight.storageOffset() - 1
    var i = 0
    while (i < numEle) {
      require(ev.isGreater(ev.fromType(weight.size(1) + 1), rowIdx(i + rowOffset)),
        "elements of input should be little than or equal to nIndex+1")
      require(ev.isGreaterEq(rowIdx(i + rowOffset), ev.one),
        "elements of input should be greater than or equal to 1")
      i += 1
    }
    // Sort then deduplicate indices so each referenced row is renormed once.
    implicit val ord = Ordering.fromLessThan[T]((e1, e2) => (ev.isGreater(e1, e2)))
    scala.util.Sorting.quickSort(rowIdx)
    var ptr = 0
    i = 0
    while (i < numEle) {
      if (i == 0 || rowIdx(i + rowOffset) != rowIdx(i - 1 + rowOffset)) {
        rowIdx(ptr + rowOffset) = rowIdx(i + rowOffset)
        ptr += 1
      }
      i += 1
    }
    numEle = ptr
    i = 0
    while (i < numEle) {
      val k = ev.toType[Int](rowIdx(i + rowOffset)) - 1
      renormRow(gw, k * stride + gw_offset, stride, maxNorm, normType)
      i += 1
    }
  }

  // Scales one weight row in place when its normType-norm exceeds maxNorm.
  private def renormRow(row_data: Array[T], offset: Int, stride: Int,
    maxNorm: Double, normType: Double): Unit = {
    var norm = 0.0
    var j = 0
    while (j < stride) {
      if (normType == 1) {
        norm += ev.toType[Double](ev.abs(row_data(j + offset)))
      } else if (normType == 2) {
        norm += ev.toType[Double](ev.times(row_data(j + offset), row_data(j + offset)))
      } else {
        norm += math.pow(abs(ev.toType[Double](row_data(j + offset))), normType)
      }
      j += 1
    }
    norm = pow(norm, 1.0 / normType)
    if (norm > maxNorm) {
      // Small epsilon guards against division blow-up when norm ~ maxNorm.
      val new_norm = maxNorm / (norm + 1e-7)
      j = 0
      while (j < stride) {
        row_data(j + offset) = ev.times(row_data(j + offset), ev.fromType(new_norm))
        j += 1
      }
    }
  }

  // Tallies, into `count` (1-indexed by row), how many times each index
  // occurs in `input`. `input` must be a vector of 1-based indices.
  private def resetCount(count: Tensor[T], input: Tensor[T]): Unit = {
    var i = 1
    val numEle = input.nElement()
    while (i <= numEle) {
      val k = ev.toType[Int](input.valueAt(i))
      count.update(k, ev.zero)
      i += 1
    }
    i = 1
    while (i <= numEle) {
      val k = ev.toType[Int](input.valueAt(i))
      count.update(k, ev.plus(count.valueAt(k), ev.one))
      i += 1
    }
  }

  /**
   * Looks up the weight rows for each (1-based) index in `input`.
   * A 1-D input of n indices yields an n x nOutput output; a 2-D batch of
   * b x n indices yields b x n x nOutput. Applies max-norm renormalization
   * to the referenced rows first.
   */
  override def updateOutput(input: Tensor[T]): Tensor[T] = {
    require(input.dim() == 1 || input.dim() == 2,
      "LookupTable: " + ErrorInfo.constrainInputAsVectorOrBatch)
    renorm(input)
    inputBuffer = input.contiguous()
    if (inputBuffer.dim() == 1) {
      output.index(1, inputBuffer, weight)
    } else if (inputBuffer.dim() == 2) {
      output.index(1, inputBuffer.view(inputBuffer.nElement()), weight)
      output = output.view(inputBuffer.size(1), inputBuffer.size(2), weight.size(2))
    }
    output
  }

  // Indices are discrete, so no gradient flows back to the input: the
  // gradient w.r.t. input is identically zero.
  override def updateGradInput(input: Tensor[T], gradOutput: Tensor[T]): Tensor[T] = {
    if (!gradInput.isSameSizeAs(input)) {
      gradInput.resizeAs(input).zero()
    }
    gradInput
  }

  /**
   * Accumulates `scale * gradOutput` into the gradWeight rows addressed by
   * `input`, skipping rows equal to `paddingValue`. When
   * `shouldScaleGradByFreq` is set, each row's contribution is divided by how
   * often that index occurs in the batch.
   */
  override def accGradParameters(input: Tensor[T], gradOutput: Tensor[T],
    scale: Double = 1.0): Unit = {
    inputBuffer = input.contiguous()
    require(gradWeight.isContiguous(), "gradWeight must be contiguous")
    require(inputBuffer.dim() == 1 || inputBuffer.dim() == 2, "input must be a vector or matrix")
    if (inputBuffer.dim() == 2) {
      // Flatten to a vector so element access below is one-dimensional.
      // (Fix: the result of view() was previously discarded, leaving the
      // buffer 2-D, which would break resetCount's single-index access.)
      inputBuffer = inputBuffer.view(inputBuffer.nElement())
    }
    val _gradOutput = gradOutput.contiguous()
    // Fix: count_data was previously declared null and never assigned, so the
    // shouldScaleGradByFreq flag silently had no effect. Now the per-index
    // occurrence counts are actually captured.
    val count_data: Array[T] = if (shouldScaleGradByFreq) {
      countBuffer.resize(gradWeight.size(1))
      resetCount(countBuffer, inputBuffer)
      countBuffer.storage().array()
    } else {
      null
    }
    val input_data = inputBuffer.storage().array()
    val input_offset = inputBuffer.storageOffset() - 1
    val numEle = inputBuffer.nElement()
    var i = 0
    while (i < numEle) {
      require(ev.isGreater(ev.fromType(gradWeight.size(1) + 1), input_data(i + input_offset)),
        "elements of input should be little than or equal to nIndex+1")
      require(ev.isGreaterEq(input_data(i + input_offset), ev.one),
        "elements of input should be greater than or equal to 1")
      i += 1
    }
    val gw = gradWeight.storage().array()
    val go = _gradOutput.storage().array()
    val stride = gradWeight.stride(1)
    i = 0
    while (i < numEle) {
      if (input_data(i + input_offset) != paddingValue) {
        val k = ev.toType[Int](input_data(i + input_offset)) - 1
        // Divide by this index's batch frequency when frequency-scaling.
        val scale_ = if (null != count_data) scale / ev.toType[Double](count_data(k)) else scale
        ev.axpy(stride, ev.fromType(scale_), go, i*stride + _gradOutput.storageOffset() - 1, 1,
          gw, k*stride + gradWeight.storageOffset() - 1, 1)
      }
      i += 1
    }
  }

  override def toString(): String = {
    s"nn.LookupTable($nIndex, $nOutput, $paddingValue, $maxNorm, $normType)"
  }

  override def zeroGradParameters(): Unit = {
    gradWeight.zero()
  }

  override def parameters(): (Array[Tensor[T]], Array[Tensor[T]]) = {
    (Array(this.weight), Array(this.gradWeight))
  }

  // Release the scratch buffers along with the inherited state.
  override def clearState() : this.type = {
    super.clearState()
    inputBuffer.set()
    countBuffer.set()
    normBuffer.set()
    this
  }

  override def canEqual(other: Any): Boolean = other.isInstanceOf[LookupTable[T]]

  override def equals(other: Any): Boolean = other match {
    case that: LookupTable[T] =>
      super.equals(that) &&
        (that canEqual this) &&
        weight == that.weight &&
        gradWeight == that.gradWeight &&
        nIndex == that.nIndex &&
        nOutput == that.nOutput &&
        paddingValue == that.paddingValue &&
        maxNorm == that.maxNorm &&
        normType == that.normType
    case _ => false
  }

  override def hashCode(): Int = {
    def getHashCode(a: Any): Int = if (a == null) 0 else a.hashCode()
    val state = Seq(super.hashCode(), weight, gradWeight, nIndex, nOutput,
      paddingValue, maxNorm, normType)
    state.map(getHashCode).foldLeft(0)((a, b) => 31 * a + b)
  }
}
/**
 * Factory for `LookupTable`.
 *
 * @param nIndex number of rows in the weight table; valid input indices are
 *               1-based and bounded by it (see the require checks in the layer)
 * @param nOutput size of each looked-up row (embedding dimension) — presumably;
 *                confirm against the layer's weight allocation
 * @param paddingValue input index whose entries are skipped during gradient
 *                     accumulation
 * @param maxNorm maximum allowed norm per row — TODO confirm exact semantics
 * @param normType p-norm used together with maxNorm (2.0 = Euclidean)
 * @param shouldScaleGradByFreq when true, gradients are scaled by the inverse
 *                              frequency of each index in the batch
 */
object LookupTable {
  def apply[@specialized(Float, Double)T: ClassTag](
    nIndex: Int, nOutput: Int,
    paddingValue: Double = 0, maxNorm: Double = Double.MaxValue,
    normType: Double = 2.0, shouldScaleGradByFreq: Boolean = false)
    (implicit ev: TensorNumeric[T]): LookupTable[T] =
    new LookupTable[T](nIndex, nOutput, paddingValue, maxNorm, normType, shouldScaleGradByFreq)
}
| SeaOfOcean/BigDL | dl/src/main/scala/com/intel/analytics/bigdl/nn/LookupTable.scala | Scala | apache-2.0 | 8,810 |
/*
* Copyright (c) 2016. <jason.zou@gmail.com>
*
* FieldTest.scala is part of marc4scala.
*
* marc4scala is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* marc4scala is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with marc4scala; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package org.marc4scala
import org.scalatest.FlatSpec
/**
* Created by jason on 3/1/16.
*/
class FieldTest extends FlatSpec {
  // No test cases implemented yet — placeholder spec; TODO add Field tests.
}
| jasonzou/marc4scala | src/test/scala/org/marc4scala/FieldTest.scala | Scala | gpl-3.0 | 963 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package system.basic
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import common.JsHelpers
import common.WskTestHelpers
import common.Wsk
/**
 * Runs the shared tests defined in WskUnicodeTests against a "python:2"
 * action (source file unicode2.py), driving OpenWhisk through the Wsk client.
 */
@RunWith(classOf[JUnitRunner])
class WskCliUnicodePython2Tests extends WskUnicodeTests with WskTestHelpers with JsHelpers {
  // Client implementation used by the inherited test cases.
  override val wsk: common.Wsk = new Wsk
  override lazy val actionKind: String = "python:2"
  override lazy val actionSource: String = "unicode2.py"
}
| duynguyen/incubator-openwhisk | tests/src/test/scala/system/basic/WskCliUnicodePython2Tests.scala | Scala | apache-2.0 | 1,247 |
/*
* Accio is a platform to launch computer science experiments.
* Copyright (C) 2016-2018 Vincent Primault <v.primault@ucl.ac.uk>
*
* Accio is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Accio is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Accio. If not, see <http://www.gnu.org/licenses/>.
*/
package fr.cnrs.liris.accio.cli
import java.io.File
import com.twitter.util.Future
import fr.cnrs.liris.accio.domain.thrift.ThriftAdapter
import fr.cnrs.liris.accio.dsl.json.JsonWorkflowParser
import fr.cnrs.liris.accio.server.ValidateWorkflowRequest
import fr.cnrs.liris.infra.cli.app.{Environment, ExitCode, Reporter}
import fr.cnrs.liris.infra.thriftserver.FieldViolation
import fr.cnrs.liris.util.FileUtils
/**
 * CLI command that validates Accio job definition files: each file is parsed
 * locally and the resulting workflow is sent to the server for validation.
 * All reported violations are printed through the environment's reporter.
 *
 * Improvement over the original: the early `return` statements (discouraged
 * in Scala) are replaced with equivalent if/else expressions; behavior and
 * interface are unchanged.
 */
final class ValidateCommand extends AccioCommand {
  override def name = "validate"

  override def help = "Validate the syntax and semantics of Accio job definition files."

  // Files to validate are passed as positional (residue) arguments.
  override def allowResidue = true

  override def execute(residue: Seq[String], env: Environment): Future[ExitCode] = {
    if (residue.isEmpty) {
      env.reporter.error("You must specify at least one file to validate as argument")
      Future.value(ExitCode.CommandLineError)
    } else {
      // Validate every file; individual exit codes are aggregated via ExitCode.select.
      val fs = residue.map(path => validate(path, env))
      Future.collect(fs).map(ExitCode.select)
    }
  }

  /** Validates a single file, reporting warnings/errors and yielding an exit code. */
  private def validate(uri: String, env: Environment): Future[ExitCode] = {
    val file = FileUtils.expandPath(uri).toFile
    if (!file.canRead) {
      env.reporter.error(s"Cannot read file ${file.getAbsolutePath}")
      Future.value(ExitCode.DefinitionError)
    } else {
      val client = createAccioClient(env)
      JsonWorkflowParser.default
        .parse(file)
        .flatMap(workflow => client.validateWorkflow(ValidateWorkflowRequest(ThriftAdapter.toThrift(workflow))))
        .map(resp => handleResponse(resp.errors, resp.warnings, file, env.reporter))
    }
  }

  /** Prints all violations; success only when there are no errors (warnings are tolerated). */
  private def handleResponse(errors: Seq[FieldViolation], warnings: Seq[FieldViolation], file: File, reporter: Reporter) = {
    warnings.foreach { violation =>
      reporter.warn(s"${violation.message} (at ${violation.field})")
    }
    errors.foreach { violation =>
      reporter.error(s"${violation.message} (at ${violation.field})")
    }
    if (errors.isEmpty) {
      reporter.info(s"Validated file ${file.getAbsolutePath}")
      ExitCode.Success
    } else {
      ExitCode.DefinitionError
    }
  }
}
/* checkAll and friends were copied from the scalaz-specs2 project.
* Source file: src/main/scala/Spec.scala
* Project address: https://github.com/typelevel/scalaz-specs2
* Copyright (C) 2013 Lars Hupel
* License: MIT. https://github.com/typelevel/scalaz-specs2/blob/master/LICENSE.txt
* Commit df921e18cf8bf0fd0bb510133f1ca6e1caea512b
* Copied on. 11/1/2015
*/
package org.http4s
import cats.implicits._
import fs2._
import fs2.interop.cats._
import fs2.text._
import org.http4s.testing._
import org.http4s.util.threads.newDaemonPool
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck._
import org.scalacheck.util.{FreqMap, Pretty}
import org.specs2.ScalaCheck
import org.specs2.matcher.{TaskMatchers => _, _}
import org.specs2.mutable.Specification
import org.specs2.scalacheck.Parameters
import org.specs2.specification.core.Fragments
import org.specs2.specification.create.{DefaultFragmentFactory => ff}
import org.specs2.specification.dsl.FragmentsDsl
import org.typelevel.discipline.specs2.mutable.Discipline
import scala.concurrent.ExecutionContext
/**
* Common stack for http4s' own specs.
*
* Not published in testing's main, because it doesn't depend on specs2.
*/
trait Http4sSpec extends Specification
  with ScalaCheck
  with AnyMatchers
  with OptionMatchers
  with Http4s
  with ArbitraryInstances
  with FragmentsDsl
  with Discipline
  with TaskMatchers
  with Http4sMatchers
{
  // Shared daemon-threaded execution context / strategy / scheduler for all specs.
  implicit def testExecutionContext: ExecutionContext = Http4sSpec.TestExecutionContext
  implicit def testStrategy: Strategy = Http4sSpec.TestStrategy
  implicit def testScheduler: Scheduler = Http4sSpec.TestScheduler

  // Keep generated ScalaCheck inputs small so property checks stay fast.
  implicit val params = Parameters(maxSize = 20)

  implicit class ParseResultSyntax[A](self: ParseResult[A]) {
    /** Unwraps a successful parse; aborts via sys.error on a parse failure. */
    def yolo: A = self.valueOr(e => sys.error(e.toString))
  }

  /** This isn't really ours to provide publicly in implicit scope */
  implicit lazy val arbitraryByteChunk: Arbitrary[Chunk[Byte]] =
    Arbitrary {
      Gen.containerOf[Array, Byte](arbitrary[Byte])
        .map { b => Chunk.bytes(b) }
    }

  /** Encodes `a` with its EntityEncoder and decodes the entity body back to a UTF-8 string. */
  def writeToString[A](a: A)(implicit W: EntityEncoder[A]): String =
    Stream.eval(W.toEntity(a))
      .flatMap { case Entity(body, _ ) => body }
      .through(utf8Decode)
      .foldMonoid
      .runLast
      .map(_.getOrElse(""))
      .unsafeRun

  /** Encodes `a` with its EntityEncoder and collects the raw body bytes into one chunk. */
  def writeToByteVector[A](a: A)(implicit W: EntityEncoder[A]): Chunk[Byte] =
    Stream.eval(W.toEntity(a))
      .flatMap { case Entity(body, _ ) => body }
      .bufferAll
      .chunks
      .runLast
      .map(_.getOrElse(Chunk.empty))
      .unsafeRun

  // Registers each property of a ScalaCheck Properties bundle as its own specs2
  // example, prefixed with `name`. (Adapted from scalaz-specs2, see file header.)
  def checkAll(name: String, props: Properties)(implicit p: Parameters, f: FreqMap[Set[Any]] => Pretty): Fragments = {
    addFragment(ff.text(s"$name ${props.name} must satisfy"))
    addFragments(Fragments.foreach(props.properties) { case (name, prop) =>
      Fragments(name in check(prop, p, f))
    })
  }

  // Same as above, but labelled only by the bundle's own name.
  def checkAll(props: Properties)(implicit p: Parameters, f: FreqMap[Set[Any]] => Pretty): Fragments = {
    addFragment(ff.text(s"${props.name} must satisfy"))
    addFragments(Fragments.foreach(props.properties) { case (name, prop) =>
      Fragments(name in check(prop, p, f))
    })
  }

  // NOTE(review): this returns a structural type, which is resolved reflectively
  // at every call site; an implicit class would avoid the reflection — confirm
  // no caller relies on the method name before changing.
  implicit def enrichProperties(props: Properties) = new {
    // Returns a copy of `props` with one extra named property appended.
    def withProp(propName: String, prop: Prop) = new Properties(props.name) {
      for {(name, p) <- props.properties} property(name) = p
      property(propName) = prop
    }
  }

  /** Matcher asserting that a Response carries exactly the given status. */
  def beStatus(status: Status): Matcher[Response] = { resp: Response =>
    (resp.status == status) -> s" doesn't have status ${status}"
  }
}
object Http4sSpec {
  // Daemon threads so test JVMs can exit without an explicit pool shutdown.
  val TestExecutionContext: ExecutionContext =
    ExecutionContext.fromExecutor(newDaemonPool("http4s-spec", timeout = true))

  // fs2 Strategy backed by the same execution context.
  val TestStrategy: Strategy =
    Strategy.fromExecutionContext(TestExecutionContext)

  // Fixed 4-thread scheduler for timed operations in tests.
  val TestScheduler: Scheduler =
    Scheduler.fromFixedDaemonPool(4)
}
| ZizhengTai/http4s | testing/src/test/scala/org/http4s/Http4sSpec.scala | Scala | apache-2.0 | 3,914 |
package locals
import com.github.nscala_time.time.Imports._
object Constants {
  // Seconds in a 24-hour day.
  val SecondsInDay = 86400
  val DuskOrDawn = 2700 //Dusk or dawn roughly starts / ends 45 mins before or after sunset / sunrise
  // Flow polygons are quadrilaterals (4 vertices).
  val FlowPolygonVertices = 4
  // Maximum positional shift per step — presumably decimal degrees; confirm units.
  val MaxLatitudeShift = 0.005
  val MaxLongitudeShift = 0.005
  // Upper bound on larvae tracked at one site — confirm against usage.
  val LarvaeCapacityAtSite = 10000
  // Sentinel habitat code, apparently marking open ocean — verify against habitat data.
  val Ocean = -27
  // Earliest supported simulation date.
  val MinimumDate = new DateTime(1976, 1, 1, 0, 0)
  // Earth's equatorial radius in metres (WGS84 value).
  val EarthsRadius = 6378137.0
  // Need to make this dynamic

  /** (column index, attribute name) pairs describing the habitat shapefile schema. */
  object ShapeAttribute {
    val Geometry = (0, "the_geom")
    val Habitat = (1, "HABITAT")
    val State = (2, "STATE")
    val Group = (3, "GROUPID")
    val Patch = (4, "PATCH_NUM")
    val XCoordinate = (5, "X_COORD")
    val YCoordinate = (6, "Y_COORD")
    val Area = (7, "AREA")
    val Perimeter = (8, "PERIMETER")
  }

  /** Negative sentinel codes used in place of throwing real exceptions. */
  object LightWeightException {
    val NoReefToSettleException = -1
    val UndefinedVelocityException = -2
    val UndefinedCoordinateException = -3
    val CoordinateNotFoundException = -4
    val NoReefSensedException = -5
    val NoSwimmingAngleException = -6
  }

  /** Sample counts for cubic interpolation: 4^d points in d dimensions. */
  object Interpolation {
    val CubicPoints = 4
    val BicubicPoints = 16
    val TricubicPoints = 64
  }

  /** Dimension ordering (indices) of the NetCDF variables: x, y, z, time. */
  object NetcdfIndex {
    val X = 0
    val Y = 1
    val Z = 2
    val Time = 3
  }
}
| shawes/zissou | src/main/scala/locals/Constants.scala | Scala | mit | 1,269 |
package org.helgoboss.domino.configuration_watching
import org.helgoboss.capsule.Capsule
import org.osgi.service.metatype.{MetaTypeProvider => JMetaTypeProvider}
import org.helgoboss.scala_osgi_metatype.adapters.MetaTypeProviderAdapter
import org.helgoboss.scala_osgi_metatype.interfaces.MetaTypeProvider
/**
* Contains some common methods for both the configuration and factory configuration capsules.
*
* @constructor Initializes the capsule.
* @param metaTypeProvider Optional meta type provider
*/
abstract class AbstractConfigurationWatcherCapsule(
    metaTypeProvider: Option[MetaTypeProvider]) extends Capsule with JMetaTypeProvider {

  /** Bridges the optional Scala metatype provider to the native OSGi interface. */
  protected lazy val metaTypeProviderAdapter =
    metaTypeProvider.map(new MetaTypeProviderAdapter(_))

  // Delegates to the adapter when present; OSGi callers expect null otherwise.
  def getObjectClassDefinition(id: String, locale: String) =
    metaTypeProviderAdapter.map(_.getObjectClassDefinition(id, locale)).orNull

  def getLocales = metaTypeProviderAdapter.map(_.getLocales).orNull
}
| lefou/domino | src/main/scala/org/helgoboss/domino/configuration_watching/AbstractConfigurationWatcherCapsule.scala | Scala | mit | 1,111 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.oap.execution
import java.nio.ByteBuffer
import java.util.concurrent.TimeUnit._
import com.intel.oap.vectorized._
import com.intel.oap.ColumnarPluginConfig
import org.apache.spark.{broadcast, TaskContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.util.{Utils, UserAddedJarUtils}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{
Attribute,
Expression,
SortOrder,
UnsafeProjection
}
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics}
import scala.collection.JavaConverters._
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.expressions.BoundReference
import org.apache.spark.sql.catalyst.expressions.BindReferences.bindReference
import org.apache.spark.sql.vectorized.{ColumnarBatch, ColumnVector}
import scala.collection.mutable.ListBuffer
import org.apache.arrow.vector.ipc.message.ArrowFieldNode
import org.apache.arrow.vector.ipc.message.ArrowRecordBatch
import org.apache.arrow.vector.types.pojo.ArrowType
import org.apache.arrow.vector.types.pojo.Field
import org.apache.arrow.vector.types.pojo.Schema
import org.apache.arrow.gandiva.expression._
import org.apache.arrow.gandiva.evaluator._
import io.netty.buffer.ArrowBuf
import io.netty.buffer.ByteBuf
import com.google.common.collect.Lists;
import com.intel.oap.expression._
import com.intel.oap.vectorized.ExpressionEvaluator
import org.apache.spark.sql.execution.joins.BroadcastHashJoinExec
import org.apache.spark.sql.execution.joins.{BuildLeft, BuildRight, BuildSide}
/**
 * Reads a broadcast ColumnarHashedRelation produced by `child` and exposes it
 * either as internal rows (row-based execution) or as columnar batches.
 *
 * NOTE(review): the previous doc described "a hash join by shuffling", which
 * does not match the visible behavior — this node only re-reads broadcast data.
 */
case class DataToArrowColumnarExec(child: SparkPlan, numPartitions: Int) extends UnaryExecNode {
  override def output: Seq[Attribute] = child.output

  // Partition count is fixed by the constructor; no known data placement.
  override def outputPartitioning: Partitioning = UnknownPartitioning(numPartitions)

  override def outputOrdering: Seq[SortOrder] = child.outputOrdering

  /** Row-based path: converts each broadcast batch back into UnsafeRows. */
  override def doExecute(): RDD[InternalRow] = {
    val numOutputRows = longMetric("numOutputRows")
    val numOutputBatches = longMetric("numOutputBatches")
    val inputRdd = BroadcastColumnarRDD(
      sparkContext,
      metrics,
      numPartitions,
      child.executeBroadcast[ColumnarHashedRelation]())
    inputRdd.mapPartitions { batches =>
      // Projection to copy rows out of the (reused) batch-backed row iterator.
      val toUnsafe = UnsafeProjection.create(output, output)
      batches.flatMap { batch =>
        numOutputBatches += 1
        numOutputRows += batch.numRows()
        batch.rowIterator().asScala.map(toUnsafe)
      }
    }
  }

  // Broadcast execution is delegated unchanged to the child.
  override def doExecuteBroadcast[T](): broadcast.Broadcast[T] = {
    child.executeBroadcast()
  }

  override def supportsColumnar: Boolean = true

  override lazy val metrics: Map[String, SQLMetric] = Map(
    "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
    "numOutputBatches" -> SQLMetrics.createMetric(sparkContext, "output_batches"),
    "processTime" -> SQLMetrics.createTimingMetric(sparkContext, "totaltime_datatoarrowcolumnar"))

  /** Columnar path: serves the broadcast batches directly, one RDD per partition. */
  override def doExecuteColumnar(): RDD[ColumnarBatch] = {
    val numOutputRows = longMetric("numOutputRows")
    val numOutputBatches = longMetric("numOutputBatches")
    BroadcastColumnarRDD(
      sparkContext,
      metrics,
      numPartitions,
      child.executeBroadcast[ColumnarHashedRelation]())
  }

  override def canEqual(other: Any): Boolean = other.isInstanceOf[DataToArrowColumnarExec]

  override def equals(other: Any): Boolean = other match {
    case that: DataToArrowColumnarExec =>
      (that canEqual this) && super.equals(that)
    case _ => false
  }
}
| Intel-bigdata/OAP | oap-native-sql/core/src/main/scala/com/intel/oap/execution/DataToArrowColumnarExec.scala | Scala | apache-2.0 | 4,630 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import cats.effect.ExitCase
import monix.execution.Callback
import monix.eval.Task
import monix.execution.Ack.{Continue, Stop}
import monix.execution.atomic.{Atomic, AtomicBoolean}
import monix.execution.internal.Platform
import monix.execution.schedulers.TrampolineExecutionContext.immediate
import monix.execution.schedulers.TrampolinedRunnable
import monix.execution.{Ack, Cancelable, FutureUtils, Scheduler}
import monix.reactive.Observable
import monix.reactive.observers.Subscriber
import scala.concurrent.Future
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}
/**
 * Mirrors `source`, guaranteeing that the finalizer `f` is evaluated exactly
 * once with the [[ExitCase]] describing how streaming terminated: Completed,
 * Error, or Canceled.
 *
 * The `isActive` atomic flag is the single-shot guard: whichever code path
 * flips it from `true` to `false` is the one allowed to run the finalizer.
 */
private[reactive] class GuaranteeCaseObservable[A](source: Observable[A], f: ExitCase[Throwable] => Task[Unit])
  extends Observable[A] {

  def unsafeSubscribeFn(out: Subscriber[A]): Cancelable = {
    implicit val s = out.scheduler
    val isActive = Atomic(true)

    try {
      val out2 = new GuaranteeSubscriber(out, isActive)
      val c = source.unsafeSubscribeFn(out2)
      // Cancelling must both cancel the upstream (c) and trigger the finalizer (out2).
      Cancelable.collection(c, out2)
    } catch {
      case NonFatal(e) =>
        // The subscription itself failed: run the finalizer with Error, then report.
        fireAndForget(isActive, ExitCase.Error(e))
        s.reportFailure(e)
        Cancelable.empty
    }
  }

  // Runs the finalizer at most once, fire-and-forget on the scheduler;
  // errors raised by the finalizer are only reported, never propagated.
  private def fireAndForget(isActive: AtomicBoolean, ec: ExitCase[Throwable])(implicit s: Scheduler): Unit = {
    if (isActive.getAndSet(false))
      s.execute(new TrampolinedRunnable {
        def run(): Unit =
          try {
            f(ec).runAsyncAndForget
          } catch {
            case NonFatal(e) =>
              s.reportFailure(e)
          }
      })
  }

  private final class GuaranteeSubscriber(out: Subscriber[A], isActive: AtomicBoolean)
    extends Subscriber[A] with Cancelable {

    implicit val scheduler: Scheduler = out.scheduler

    // Last downstream acknowledgement; terminal events back-pressure on it.
    private[this] var ack: Future[Ack] = Continue

    def onNext(elem: A): Future[Ack] = {
      // catchErrors distinguishes exceptions thrown by out.onNext itself from
      // exceptions raised later inside detectStopOrFailure.
      var catchErrors = true
      try {
        val fa = out.onNext(elem)
        ack = fa
        catchErrors = false
        detectStopOrFailure(fa)
      } catch {
        case NonFatal(e) if catchErrors =>
          detectStopOrFailure(Future.failed(e))
      }
    }

    def onError(ex: Throwable): Unit =
      signalComplete(ex)

    def onComplete(): Unit =
      signalComplete(null)

    def cancel(): Unit =
      fireAndForget(isActive, ExitCase.Canceled)

    // Inspects an ack: Stop or failure means the downstream is done, so the
    // finalizer must run before the resulting Stop is handed back upstream.
    private def detectStopOrFailure(ack: Future[Ack]): Future[Ack] =
      ack match {
        case Continue => Continue
        case Stop =>
          stopAsFuture(ExitCase.Canceled)
        case async =>
          FutureUtils.transformWith(async, asyncTransformRef)(immediate)
      }

    // Reused transformer so async acks don't allocate a new closure each time.
    private[this] val asyncTransformRef: Try[Ack] => Future[Ack] = {
      case Success(value) =>
        detectStopOrFailure(value)
      case Failure(e) =>
        stopAsFuture(ExitCase.Error(e))
    }

    // Runs the finalizer (if this call wins the guard) and always resolves to
    // Stop; finalizer errors are reported to the scheduler, not propagated.
    private def stopAsFuture(e: ExitCase[Throwable]): Future[Ack] = {
      // Thread-safety guard
      if (isActive.getAndSet(false)) {
        Task
          .suspend(f(e))
          .redeem(e => { scheduler.reportFailure(e); Stop }, _ => Stop)
          .runToFuture
      } else {
        Stop
      }
    }

    // Terminal event (e == null means completion, otherwise an upstream error):
    // back-pressures on the last ack, runs the finalizer once, and only then
    // forwards the completion/error to the downstream subscriber.
    private def signalComplete(e: Throwable): Unit = {
      def composeError(e: Throwable, e2: Throwable) = {
        if (e != null) Platform.composeErrors(e, e2)
        else e2
      }
      // We have to back-pressure the final acknowledgement, otherwise
      // the implementation is broken
      val task = Task
        .fromFuture(ack)
        .redeemWith(
          e2 => {
            if (isActive.getAndSet(false)) {
              // Combine the terminal error (if any) with the failed ack's error.
              val error = composeError(e, e2)
              f(ExitCase.Error(error)).map(_ => Stop)
            } else {
              scheduler.reportFailure(e2)
              Task.now(Stop)
            }
          },
          ack => {
            if (isActive.getAndSet(false)) {
              val code = if (e != null) ExitCase.Error(e) else ExitCase.Completed
              f(code).map(_ => ack)
            } else {
              Task.now(Stop)
            }
          }
        )

      task.runAsyncUncancelable(new Callback[Throwable, Ack] {
        def onSuccess(value: Ack): Unit = {
          // Forward the terminal event only when the stream was still live.
          if (value == Continue) {
            if (e != null) out.onError(e)
            else out.onComplete()
          }
        }

        def onError(e2: Throwable): Unit =
          out.onError(composeError(e, e2))
      })
    }
  }
}
| alexandru/monifu | monix-reactive/shared/src/main/scala/monix/reactive/internal/operators/GuaranteeCaseObservable.scala | Scala | apache-2.0 | 5,079 |
package io.iohk.ethereum.vm
import io.iohk.ethereum.vm.utils.EvmTestEnv
import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.matchers.should.Matchers
// scalastyle:off magic.number
/** Verifies that a throwing contract call surfaces as an InvalidOpCode(0xfe) error. */
class ThrowSpec extends AnyFreeSpec with Matchers {
  "EVM running Throw contract" - {
    "should handle throwing" in new EvmTestEnv {
      // Deploy the Throw contract and invoke its always-throwing entry point.
      val (_, contract) = deployContract("Throw")
      val result = contract.justThrow().call()
      // The call is expected to fail with the 0xfe (invalid) opcode error.
      result.error shouldBe Some(InvalidOpCode(0xfe.toByte))
    }
  }
}
| input-output-hk/etc-client | src/evmTest/scala/io/iohk/ethereum/vm/ThrowSpec.scala | Scala | mit | 504 |
package ua.ds.persistent.iteration5
/**
 * An immutable, persistent singly linked list.
 *
 * Every operation returns a new list and never mutates existing nodes, so
 * earlier versions of a list remain valid after any "update".
 *
 * Fix over the original: `take` now treats any `n <= 0` as "take nothing".
 * Previously the guard was `n == 0`, so a negative `n` never matched and
 * `take(-1)` returned the entire list instead of the empty one.
 */
sealed trait PersistentList[+E] {
  import PersistentList._

  /** Longest prefix of elements satisfying the predicate. */
  def takeWhile()(predicate: E => Boolean): PersistentList[E] = this match {
    case Cons(head, tail) if predicate(head) => head :: tail.takeWhile()(predicate)
    case _ => Empty
  }

  /** First `n` elements; the whole list when `n` exceeds its length, empty when `n <= 0`. */
  def take(n: Int): PersistentList[E] = this match {
    case Empty => Empty
    case Cons(_, _) if n <= 0 => Empty // <= guards against negative n as well
    case Cons(head, tail) => head :: tail.take(n - 1)
  }

  /** Combines both lists element-wise via `zipper`, stopping at the shorter list. */
  def zip[A, B](that: PersistentList[A])(zipper: (E, A) => B): PersistentList[B] = (this, that) match {
    case (Cons(head1, tail1), Cons(head2, tail2)) => zipper(head1, head2) :: tail1.zip(tail2)(zipper)
    case _ => Empty
  }

  /** Drops the longest prefix of elements satisfying the predicate. */
  def dropWhile()(predicate: E => Boolean): PersistentList[E] = this match {
    case Cons(head, tail) if predicate(head) => tail.dropWhile()(predicate)
    case _ => this
  }

  /** List without its first `n` elements; `this` when `n <= 0`. */
  def drop(n: Int): PersistentList[E] = this match {
    case Cons(_, tail) if n > 0 => tail.drop(n - 1)
    case _ => this
  }

  /** Appends an element; O(n) because the whole spine is rebuilt. */
  def :+[A >: E](elem: A): PersistentList[A] = this match {
    case Cons(head, tail) => head :: (tail :+ elem)
    case Empty => Cons(elem, this)
  }

  /** Concatenates `that` after this list; O(length of this). */
  def ++[A >: E](that: PersistentList[A]): PersistentList[A] = this match {
    case Cons(head, tail) => Cons(head, tail ++ that)
    case Empty => that
  }

  /** All elements but the first, or None for the empty list. */
  def tail: Option[PersistentList[E]] = this match {
    case Cons(_, next) => Some(next)
    case Empty => None
  }

  /** First element, or None for the empty list. */
  def head: Option[E] = this match {
    case Cons(elem, _) => Some(elem)
    case Empty => None
  }

  /** Prepends an element in O(1). */
  def ::[A >: E](e: A): PersistentList[A] = Cons(e, this)

  /** Renders the list as "[e1, e2, ...]". */
  override def toString: String = {
    import scala.annotation.tailrec
    // Tail-recursive so very long lists cannot overflow the stack.
    @tailrec
    def loop(list: PersistentList[E], builder: StringBuilder): String = {
      list match {
        case Cons(head, tail) =>
          if (builder.nonEmpty) {
            builder.append(", ")
          }
          loop(tail, builder.append(head))
        case Empty => builder.toString()
      }
    }
    "[" + loop(this, new StringBuilder) + "]"
  }
}

object PersistentList {
  /** The empty list. */
  def apply[T](): PersistentList[T] = Empty

  private case object Empty extends PersistentList[Nothing]

  private case class Cons[+E](elem: E, next: PersistentList[E]) extends PersistentList[E]
}
| Alex-Diez/persistent-data-sturctures | data-structures-practices/persistent-list/src/main/scala/ua/ds/persistent/iteration5/PersistentList.scala | Scala | mit | 2,204 |
/*
* Copyright 2014 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.scrooge.linter
import com.twitter.scrooge.ast._
import java.io.{ObjectInputStream, ByteArrayInputStream, ObjectOutputStream, ByteArrayOutputStream}
import java.nio.ByteBuffer
import org.apache.thrift.protocol._
import org.apache.thrift.transport.TMemoryBuffer
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.WordSpec
import org.scalatest.junit.JUnitRunner
import org.scalatest.matchers.MustMatchers
@RunWith(classOf[JUnitRunner])
/**
 * Unit tests for the thrift linter rules. Each rule gets a passing case built
 * from a well-formed AST and a failing case asserting the emitted message text.
 */
class LinterSpec extends WordSpec with MustMatchers {

  // A rule passes when it emits no lint messages at all.
  def mustPass(errors: Iterable[LintMessage]) =
    errors.size must equal(0)

  "Linter" should {

    "pass Namespaces" in {
      mustPass(
        LintRule.Namespaces(Document(Seq(
          Namespace("java", Identifier("com.twitter.oatmeal")),
          Namespace("scala", Identifier("com.twitter.oatmeal"))
        ), Nil))
      )
    }

    "fail Namespaces" in {
      val errors = LintRule.Namespaces(Document(Seq(Namespace("java", SimpleID("asdf"))), Nil)).toSeq
      errors.length must be(1)
      assert(errors(0).msg contains("Missing namespace"))
    }

    "pass RelativeIncludes" in {
      mustPass(
        LintRule.RelativeIncludes(
          Document(
            Seq(
              Namespace("java", SimpleID("asdf")),
              Include("com.twitter.oatmeal", Document(Seq(), Seq()))),
            Nil))
      )
    }

    "fail RelativeIncludes" in {
      // An include path containing ".." must be rejected.
      val errors = LintRule.RelativeIncludes(Document(
        Seq(
          Namespace("java", SimpleID("asdf")),
          Include("./dir1/../dir1/include1.thrift", Document(Seq(), Seq()))),
        Nil)).toSeq
      errors.size must be(1)
      assert(errors(0).msg contains("Relative include path found"))
    }

    "pass CamelCase" in {
      mustPass(
        LintRule.CamelCase(Document(
          Seq(),
          Seq(Struct(
            SimpleID("SomeType"),
            "SomeType",
            Seq(Field(1,
              SimpleID("camelCaseFieldName"),
              "camelCaseFieldName",
              TString)),
            None))))
      )
    }

    "fail CamelCase" in {
      val errors = LintRule.CamelCase(Document(
        Seq(),
        Seq(Struct(
          SimpleID("SomeType"),
          "SomeType",
          Seq(Field(1,
            SimpleID("non_camel_case"),
            "non_camel_case",
            TString)),
          None)))).toSeq
      errors.length must be(1)
      assert(errors(0).msg contains("lowerCamelCase"))
    }

    // Builds a Struct AST node with auto-numbered fields; `persisted` adds
    // the "persisted" -> "true" annotation used by the persistence rules.
    def struct(name: String, fields: Map[String, FieldType], persisted: Boolean = false) =
      Struct(
        SimpleID(name),
        name,
        fields.zipWithIndex.map {
          case ((fieldName, fieldType), i) => Field(i, SimpleID(fieldName), fieldName, fieldType)
        }.toSeq,
        None,
        if (persisted) Map("persisted" -> "true") else Map.empty
      )

    "fail TransitivePersistence" in {
      // A persisted struct referencing a non-persisted struct must be flagged.
      val errors = LintRule.TransitivePersistence(
        Document(
          Seq(),
          Seq(
            struct(
              "SomeType",
              Map(
                "foo" -> TString,
                "bar" -> StructType(struct("SomeOtherType", Map.empty))
              ),
              true
            )
          )
        )).toSeq
      errors.length must be(1)
      val error = errors(0).msg
      assert(error.contains("persisted"))
      assert(error.contains("SomeType"))
      assert(error.contains("SomeOtherType"))
    }

    "pass TransitivePersistence" in {
      mustPass(LintRule.TransitivePersistence(
        Document(
          Seq(),
          Seq(
            struct(
              "SomeType",
              Map(
                "foo" -> TString,
                "bar" -> StructType(struct("SomeOtherType", Map.empty, true))
              ),
              true
            )
          )
        )))
    }

    "fail DocumentedPersisted" in {
      // An undocumented persisted struct yields one message for the struct
      // and one for its undocumented field.
      val errors = LintRule.DocumentedPersisted(
        Document(
          Seq(),
          Seq(
            struct(
              "SomeType",
              Map(
                "foo" -> TString
              ),
              true
            )
          )
        )).toSeq
      errors.length must be(2)
      val structError = errors(0).msg
      assert(structError.contains("SomeType"))
      val fieldError = errors(1).msg
      assert(fieldError.contains("foo"))
      assert(fieldError.contains("SomeType"))
    }

    "pass DocumentedPersisted" in {
      mustPass(LintRule.DocumentedPersisted(
        Document(
          Seq(),
          Seq(
            Struct(
              SimpleID("SomeType"),
              "SomeType",
              Seq(Field(1,
                SimpleID("foo"),
                "foo",
                TString,
                docstring = Some("blah blah"))),
              docstring = Some("documented struct is documented"),
              Map("persisted" -> "true"))
          )
        )))
    }

    "pass RequiredFieldDefault" in {
      mustPass(
        LintRule.RequiredFieldDefault(Document(
          Seq(),
          Seq(Struct(
            SimpleID("SomeType"),
            "SomeType",
            Seq(
              Field(
                1,
                SimpleID("f1"),
                "f1",
                TString,
                default = Some(StringLiteral("v1")),
                requiredness = Requiredness.Optional),
              Field(
                2,
                SimpleID("f2"),
                "f2",
                TString,
                default = None,
                requiredness = Requiredness.Required)),
            None))))
      )
    }

    "fail RequiredFieldDefault" in {
      // A required field is not allowed to carry a default value.
      val errors = LintRule.RequiredFieldDefault(Document(
        Seq(),
        Seq(Struct(
          SimpleID("SomeType"),
          "SomeType",
          Seq(
            Field(
              1,
              SimpleID("f1"),
              "f1",
              TString,
              default = Some(StringLiteral("v1")),
              requiredness = Requiredness.Required)),
          None)))).toSeq
      errors.length must be(1)
      assert(errors(0).msg contains("Required field"))
    }

    "pass Keywords" in {
      mustPass(
        LintRule.Keywords(Document(
          Seq(),
          Seq(Struct(
            SimpleID("SomeType"),
            "SomeType",
            Seq(
              Field(
                1,
                SimpleID("klass"),
                "klass",
                TString,
                default = Some(StringLiteral("v1")),
                requiredness = Requiredness.Optional),
              Field(
                2,
                SimpleID("notAKeyWord"),
                "notAKeyWord",
                TString,
                default = None,
                requiredness = Requiredness.Required)),
            None))))
      )
    }

    "fail Keywords" in {
      // "val" is a language keyword and must be rejected as a field name.
      val errors = LintRule.Keywords(Document(
        Seq(),
        Seq(Struct(
          SimpleID("SomeType"),
          "SomeType",
          Seq(
            Field(
              1,
              SimpleID("val"),
              "val",
              TString,
              default = Some(StringLiteral("v1")),
              requiredness = Requiredness.Optional)
          ),
          None)))).toSeq
      errors.length must be(1)
      assert(errors(0).msg contains("Avoid using keywords"))
    }
  }
}
| travisbrown/scrooge | scrooge-linter/src/test/scala/com/twitter/scrooge/linter/LinterSpec.scala | Scala | apache-2.0 | 7,948 |
package xmlrpc.protocol
import java.util.Date
import xmlrpc.protocol.Deserializer.{DeserializationError, Deserialized}
import scala.language.postfixOps
import java.time.{LocalDateTime, ZoneId, ZoneOffset}
import scala.xml.{Node, NodeSeq}
import scalaz.Scalaz._
trait BasicTypes extends Protocol {
import Deserializer.StringToError
/**
* In all the deserialize methods, we query \\ "value" instead of \\ "param" \ "value"
* because the array and struct contains elements only inside a value tag
*/
implicit object Base64Xmlrpc extends Datatype[Array[Byte]] {
override def serialize(value: Array[Byte]): Node = <base64>{value.map(_.toChar).mkString}</base64>.inValue
override def deserialize(from: NodeSeq): Deserialized[Array[Byte]] =
from \\ "value" headOption match {
case Some(<value><base64>{content}</base64></value>) => content.text.getBytes.success
case _ => s"Expected base64 structure in $from".toError.failures
}
}
implicit object DatetimeXmlrpc extends Datatype[Date] {
override def serialize(value: Date): Node = {
val localDate = value.toInstant.atZone(serverTimezone).toLocalDateTime
<dateTime.iso8601>{ISO8601Format.format(localDate)}</dateTime.iso8601>.inValue
}
override def deserialize(from: NodeSeq): Deserialized[Date] =
from \\ "value" headOption match {
case Some(<value><dateTime.iso8601>{date}</dateTime.iso8601></value>) =>
try {
val localDateTime = LocalDateTime.parse(date.text, ISO8601Format)
val d = Date.from(localDateTime.atZone(serverTimezone).toInstant)
d.success
} catch {
case e: Exception => DeserializationError(s"The date ${from.text} has not been parsed correctly", Some(e)).failures
}
case _ => s"Expected datetime structure in $from".toError.failures
}
}
implicit object DoubleXmlrpc extends Datatype[Double] {
override def serialize(value: Double): Node = <double>{value}</double>.inValue
override def deserialize(from: NodeSeq): Deserialized[Double] =
from \\ "value" headOption match {
case Some(<value><double>{double}</double></value>) =>
makeNumericConversion(_.toDouble, double.text)
case _ => "Expected double structure in $from".toError.failures
}
}
implicit object IntegerXmlrpc extends Datatype[Int] {
override def serialize(value: Int): Node = <int>{value}</int>.inValue
override def deserialize(from: NodeSeq): Deserialized[Int] =
from \\ "value" headOption match {
case Some(<value><int>{integer}</int></value>) =>
makeNumericConversion(_.toInt, integer.text)
case Some(<value><i4>{integer}</i4></value>) =>
makeNumericConversion(_.toInt, integer.text)
case _ => s"Expected int structure in $from".toError.failures
}
}
  implicit object LogicalValueXmlrpc extends Datatype[Boolean] {
    /** Serializes a Boolean as <value><boolean>1|0</boolean></value>. */
    override def serialize(value: Boolean): Node = <boolean>{if(value) 1 else 0}</boolean>.inValue
    /** Deserializes <value><boolean>…</boolean></value>. Only the literal texts
      * "1" and "0" are accepted; any other payload is reported as an error. */
    override def deserialize(from: NodeSeq): Deserialized[Boolean] =
      from \\ "value" headOption match {
        case Some(<value><boolean>{logicalValue}</boolean></value>) =>
          logicalValue.text match {
            case "1" => true.success
            case "0" => false.success
            case _ => "No logical value in boolean structure".toError.failures
          }
        case _ => s"Expected boolean structure in $from".toError.failures
      }
  }
implicit object StringXmlrpc extends Datatype[String] {
override def serialize(value: String): Node = {
def encodeSpecialCharacters(content: String) =
content.replace("&", "&").replace("<", "<")
<string>{encodeSpecialCharacters(value)}</string>.inValue
}
override def deserialize(from: NodeSeq): Deserialized[String] = {
def decodeSpecialCharacters(content: String) =
content.replace("&", "&").replace("<", "<")
from \\ "value" headOption match {
case Some(<value><string>{content}</string></value>) => decodeSpecialCharacters(content.text).success
case Some(<value>{content}</value>) => decodeSpecialCharacters(content.text).success
case _ => s"Expected string structure in $from".toError.failures
}
}
}
  // Marker for methods with no meaningful return value.
  object Void
  type Empty = Void.type
  /** Datatype for void results: serializes to an empty NodeSeq and deserializes
    * successfully only when the response carries no <param> tag at all. */
  implicit object VoidXmlrpc extends Datatype[Empty] {
    override def serialize(value: Empty): NodeSeq = NodeSeq.Empty
    // If there is no param tag, then it is a void
    override def deserialize(from: NodeSeq): Deserialized[Empty] =
      from \\ "param" headOption match {
        case Some(_) => s"Expected void (without any param tag) in $from".toError.failures
        case _ => Void.success
      }
  }
  // Reuse scala.xml.Null as the value type for the XML-RPC <nil/> extension.
  type Null = scala.xml.Null.type
  /** Datatype mapping scala.xml.Null to and from <value><nil/></value>. */
  implicit object NilXmlrpc extends Datatype[Null] {
    override def serialize(value: Null): Node = <nil/>.inValue
    override def deserialize(from: NodeSeq): Deserialized[Null] =
      from \\ "value" headOption match {
        case Some(<value><nil/></value>) => scala.xml.Null.success
        case _ => s"Expected nil structure in $from".toError.failures
      }
  }
}
| jvican/xmlrpc | src/main/scala/xmlrpc/protocol/BasicTypes.scala | Scala | mit | 5,207 |
package code.snippet
import net.liftweb._
import http._
import SHtml._
import util._
import Helpers._
import common.{Box, Full, Empty, Failure}
import mapper._
import js._
import JsCmds._
import JE._
import scala.xml._
import code.comet.ActivityViewer
import code.model._
import code.snippet._
import java.util.Date
import java.text.SimpleDateFormat
import java.util.Calendar
import java.util.GregorianCalendar
class ProfileActivityViewer extends ActivityViewer {
  /**
   * Renders the activity list of the current user.
   * Clears the snippet markup when no user can be resolved from
   * UserHelpers.userId, or when the resolved user has no activities.
   */
  def render = {
    // Selector that removes the template markup entirely.
    def clearAll = "*" #> ClearNodes
    UserHelpers.getUser(UserHelpers.userId) match {
      case Full(user) =>
        val posts = user.getActivities()
        if (posts.length > 0)
          ".activity *" #> posts.map(renderActivity(user, _))
        else
          clearAll
      case _ =>
        clearAll
    }
  }
}
| Cerovec/LiftSocial | src/main/scala/code/snippet/ProfileActivityViewer.scala | Scala | apache-2.0 | 854 |
package scales.el
import scala.util.Success
import com.greencatsoft.greenlight.TestSuite
object IdentifierTest extends TestSuite {

  "Identifier.evaluate(context)" should "return an instance matching the identifier from the given context" in {
    // Two known bindings; each identifier should resolve to its own value.
    val ctx = TestExpressionContext(
      "Kenny" -> "You've got to know when to hold 'em.", "Rogers" -> "Know when to fold 'em.")

    Identifier("Kenny").evaluate(ctx) should be (Success("You've got to know when to hold 'em."))
    Identifier("Rogers").evaluate(ctx) should be (Success("Know when to fold 'em."))
  }

  It should "return EvaluationFailureException when there's no object matching the specified identifier" in {
    // "Rogers" is deliberately absent from this context.
    val ctx = TestExpressionContext("Ginger" -> "You like tomato and I like tomahto.")
    val outcome = Identifier("Rogers").evaluate(ctx)

    outcome.isFailure should be (true)

    outcome.failed foreach { error =>
      error.getClass should be (classOf[EvaluationFailureException])
    }
  }
}
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Websudos Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.column
import com.websudos.phantom.builder.query.CQLQuery
import scala.reflect.runtime.{currentMirror => cm}
/** Serialisation contract for writing a value of type T into CQL text. */
sealed trait CassandraWrites[T] {

  /**
   * Provides the serialisation mechanism of a value to a CQL string.
   * The vast majority of serializers are fed in via the Primitives mechanism.
   *
   * Primitive columns will automatically override and define "asCql" based on the
   * serialization of specific primitives. When T is context bounded by a primitive:
   *
   * {{{
   *   def asCql(v: T): String = implicitly[Primitive[T]].asCql(value)
   * }}}
   *
   * @param v The value of the object to convert to a string.
   * @return A string that can be directly appended to a CQL query.
   */
  def asCql(v: T): String

  /** The CQL type name emitted for this column in schema queries. */
  def cassandraType: String
}
/** Base trait for all phantom columns: carries the schema-role flags and
  * derives the column name from the declaring member via runtime reflection. */
private[phantom] trait AbstractColumn[@specialized(Int, Double, Float, Long, Boolean, Short) T] extends CassandraWrites[T] {

  // The Scala value type stored in this column.
  type Value = T

  // Schema-role flags, all false by default; concrete column/key mixins
  // are expected to override the ones that apply to them.
  private[phantom] val isPrimary = false
  private[phantom] val isSecondaryKey = false
  private[phantom] val isPartitionKey = false
  private[phantom] val isCounterColumn = false
  private[phantom] val isStaticColumn = false
  private[phantom] val isClusteringKey = false
  private[phantom] val isAscending = false
  private[phantom] val isMapKeyIndex = false

  // Reflects on this instance to recover the (decoded) member name under which
  // the column was declared; lazy because reflection is comparatively costly
  // and the name never changes.
  private[this] lazy val _name: String = {
    cm.reflect(this).symbol.name.toTypeName.decodedName.toString
  }

  /** The column name as it appears in CQL statements. */
  def name: String = _name

  /** Builds the "name cassandraType" CQL fragment for this column. */
  def qb: CQLQuery = {
    CQLQuery(name).forcePad.append(cassandraType)
  }
}
| analytically/phantom | phantom-dsl/src/main/scala/com/websudos/phantom/column/AbstractColumn.scala | Scala | bsd-2-clause | 3,020 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ly.stealth.mesos.kafka
import ly.stealth.mesos.kafka.executor.{Executor, LaunchConfig}
import ly.stealth.mesos.kafka.json.JsonUtil
import org.junit.Test
import org.junit.Assert._
import org.apache.mesos.Protos.{Status, TaskState}
import scala.collection.JavaConversions._
/** Exercises the executor lifecycle against the mock driver/broker provided by
  * KafkaMesosTestCase. Assertions depend on the exact order of status updates. */
class ExecutorTest extends KafkaMesosTestCase {
  // Happy path: STARTING then RUNNING are reported, and stopping the broker
  // produces a final FINISHED update.
  @Test(timeout = 5000)
  def startBroker_success {
    val data = JsonUtil.toJson(LaunchConfig(0))
    Executor.startBroker(executorDriver, task("id", "task", "slave", data))

    executorDriver.waitForStatusUpdates(2)
    assertEquals(2, executorDriver.statusUpdates.size())
    assertEquals(
      Seq(TaskState.TASK_STARTING, TaskState.TASK_RUNNING),
      executorDriver.statusUpdates.map(_.getState))
    assertTrue(Executor.server.isStarted)

    Executor.server.stop()
    executorDriver.waitForStatusUpdates(3)
    assertEquals(3, executorDriver.statusUpdates.size())

    val status = executorDriver.statusUpdates.get(2)
    assertEquals(TaskState.TASK_FINISHED, status.getState)
    assertFalse(Executor.server.isStarted)
  }

  // A broker that fails on start must report TASK_FAILED and stay stopped.
  @Test(timeout = 5000)
  def startBroker_failure {
    Executor.server.asInstanceOf[TestBrokerServer].failOnStart = true
    Executor.startBroker(executorDriver, task())

    executorDriver.waitForStatusUpdates(1)
    assertEquals(1, executorDriver.statusUpdates.size())

    val status = executorDriver.statusUpdates.get(0)
    assertEquals(TaskState.TASK_FAILED, status.getState)
    assertFalse(Executor.server.isStarted)
  }

  // stopExecutor stops both server and driver, and is idempotent.
  @Test
  def stopExecutor {
    Executor.server.start(null, null)
    assertTrue(Executor.server.isStarted)
    assertEquals(Status.DRIVER_RUNNING, executorDriver.status)

    Executor.stopExecutor(executorDriver)
    assertFalse(Executor.server.isStarted)
    assertEquals(Status.DRIVER_STOPPED, executorDriver.status)

    Executor.stopExecutor(executorDriver) // no error
    assertEquals(Status.DRIVER_STOPPED, executorDriver.status)
  }

  // launchTask starts the broker server once a status update arrives.
  @Test(timeout = 5000)
  def launchTask {
    val data = JsonUtil.toJson(LaunchConfig(0))
    Executor.launchTask(executorDriver, task("id", "task", "slave", data))
    executorDriver.waitForStatusUpdates(1)
    assertTrue(Executor.server.isStarted)
  }

  // killTask stops a running broker server.
  @Test(timeout = 5000)
  def killTask {
    Executor.server.start(null, null)
    Executor.killTask(executorDriver, taskId())
    Executor.server.waitFor()
    assertFalse(Executor.server.isStarted)
  }

  // shutdown stops the broker server.
  @Test
  def shutdown {
    Executor.server.start(null, null)
    Executor.shutdown(executorDriver)
    assertFalse(Executor.server.isStarted)
  }
}
| tc-dc/kafka-mesos | src/test/ly/stealth/mesos/kafka/ExecutorTest.scala | Scala | apache-2.0 | 3,359 |
package deburnat.transade.gui.center
import swing.Component
import javax.swing.tree.{DefaultTreeCellRenderer, DefaultMutableTreeNode}
import javax.swing.event.{TreeSelectionEvent, TreeSelectionListener}
import javax.swing.JTree
import xml.Node
import collection.mutable.Map
import deburnat.transade.gui.admins.GuiAdmin._
/**
* Project name: transade
* @author Patrick Meppe (tapmeppe@gmail.com)
* Description:
* An algorithm for the transfer of selected/adapted data
* from one repository to another.
*
* Date: 1/1/14
* Time: 12:00 AM
*
* This case class is used to create the tree presentable on the GUI from the given [transfer] node.
* @param transNode The given [transfer] node.
*/
protected[center] case class TransTree(transNode: Node) extends Component{
  /**
   * This method is used to restructure the node strings.
   * @param node The given node.
   * @param tbn the tab sequence to remove.
   * @return The restructured node string.
   */
  private def replace(node: Node, tbn: String) = node.mkString.replaceAll(tbn+"<(/?)", "<$1")

  /**
   * This class is an (modified) extended JTree object enabling
   * the presentation and identification of xml nodes.
   * @note Since the peer's scope is public, this class has to be public as well.
   * @param node_popupMap The tree node and the identifier repository.
   */
  class _TransTree(node_popupMap: (DefaultMutableTreeNode, Map[String, String])) extends JTree(node_popupMap._1)
  with TreeSelectionListener{
    addTreeSelectionListener(this) //listenTo
    val popupMap = node_popupMap._2
    def valueChanged(e: TreeSelectionEvent) { //reactions
      TreeNodePopupMenu(popupMap(e.getPath.toString)).show(TransTree.this)
    }
    //the setCellRenderer method helps setting the icons
    //in this case the default icons (folder- & file cons) are simply being removed
    setCellRenderer(new DefaultTreeCellRenderer{
      setOpenIcon(null)
      setClosedIcon(null)
      setLeafIcon(null)
    })
  }

  override lazy val peer = new _TransTree({
    //the repository of the xml nodes to be presented once the adequate tree node click
    val popupMap = Map[String, String]()

    //the root tree node
    val labTransfer = transfer + " [%s=%s]".format(id, (transNode \\_id).text)
    val transferTreeNode = new DefaultMutableTreeNode(labTransfer)
    popupMap("[%s]".format(labTransfer)) = replace(transNode, tb1)

    (transNode \\\\ source).foreach{ sourceNode =>
      //the source tree node
      val labSource = source + " [%s=%s & %s=%s]".format(id, (sourceNode \\_id).text, format, (sourceNode \\_format).text)
      val sourceTreeNode = new DefaultMutableTreeNode(labSource)
      popupMap += "[%s, %s]".format(labTransfer, labSource) -> replace(sourceNode, tb2)

      //the source.definitions tree node
      try{
        //Fix: indexing an empty NodeSeq with (0) throws IndexOutOfBoundsException,
        //not NullPointerException, so the previous catch clause never fired when
        //no definitions node was present. Both are caught now for safety.
        val sourceDefsNode = (sourceNode \\\\ defs)(0)
        val defNodes = sourceDefsNode \\_def
        if(defNodes.length > 0){
          val labSourceDefs = defs + " [%s]".format(defNodes.map(defNode =>
            (defNode \\_key).text +"="+ (defNode \\_val).text
          ).mkString(" & "))
          sourceTreeNode.add(new DefaultMutableTreeNode(labSourceDefs, false)) //add the def to the source tree node
          popupMap += "[%s, %s, %s]".format(labTransfer, labSource, labSourceDefs) -> replace(sourceDefsNode, tb3)
        }
      }catch{case _: IndexOutOfBoundsException | _: NullPointerException => }

      //the target tree nodes
      (sourceNode \\\\ target).foreach{ targetNode =>
        val labTarget = target + " [%s=%s & %s=%s]".format(
          id, (targetNode \\_id).text, format, (targetNode \\_format).text
        )
        val targetTreeNode = new DefaultMutableTreeNode(labTarget)
        popupMap("[%s, %s, %s]".format(labTransfer, labSource, labTarget)) = replace(targetNode, tb3)

        //the target.definitions tree node
        try{
          //Fix: same as above — an absent definitions node raises
          //IndexOutOfBoundsException, which is now caught as intended.
          val targetDefsNode = (targetNode \\\\ defs)(0)
          val defNodes = targetDefsNode \\_def
          if(defNodes.length > 0){
            val labTargetDefs = defs + " [%s]".format((targetDefsNode \\_def).map(defNode =>
              (defNode \\_key).text +"="+ (defNode \\_val).text
            ).mkString(" & "))
            targetTreeNode.add(new DefaultMutableTreeNode(labTargetDefs, false)) //add the def to the target tree ode
            popupMap("[%s, %s, %s, %s]".format(labTransfer, labSource, labTarget, labTargetDefs)) =
              replace(targetDefsNode, tb4)
          }
        }catch{case _: IndexOutOfBoundsException | _: NullPointerException => }

        //the parse tree nodes
        (targetNode \\\\ parse).foreach{parseNode =>
          val row = new StringBuilder((parseNode \\_sName).text) //source name as an attribute
          if(row.mkString.isEmpty) row.append((parseNode \\ sName).text) //source name as a child node

          val parseLabel = parse + " [%s=%s & %s=%s]".format(tName, (parseNode \\_tName).text, sName, "%s")
          val labParse = if(row.mkString.nonEmpty) parseLabel.format(row) else parseLabel.format(view.read("treenodeclick"))
          targetTreeNode.add(new DefaultMutableTreeNode(labParse, false)) //add the parse to the target tree node
          popupMap("[%s, %s, %s, %s]".format(labTransfer, labSource, labTarget, labParse)) =
            replace(parseNode, tb4)
        }
        sourceTreeNode.add(targetTreeNode) //add the target to the source tree node
      }
      transferTreeNode.add(sourceTreeNode) //add the source to the root tree node
    }
    (transferTreeNode, popupMap) //return
  })
}
| deburnatshazem/transade | gui/src/main/scala/deburnat/transade/gui/center/TransTree.scala | Scala | apache-2.0 | 5,629 |
package adapter.bitflyer
import domain.ProductCode
/** Endpoint paths of the bitFlyer HTTP API used by this adapter.
  * All relative paths below are appended to [[Path.BASE]]. */
object Path {
  // API root.
  val BASE = "https://api.bitflyer.jp"

  // Market data.
  val EXECUTIONS = "/v1/executions"

  // Account ("me") endpoints.
  val COLLATERAL = "/v1/me/getcollateral"
  val CHILD_ORDER = "/v1/me/sendchildorder"
  val ME_EXECUTIONS = "/v1/me/getexecutions"
  // Positions are queried for the BTC-FX product code.
  val POSITIONS = "/v1/me/getpositions?product_code=" + ProductCode.btcFx
}
| rysh/scalatrader | scalatrader/app/adapter/bitflyer/Path.scala | Scala | mit | 350 |
package io.circe.jackson
import com.fasterxml.jackson.core.JsonParser
import com.fasterxml.jackson.databind.{ DeserializationContext, JsonNode, ObjectMapper, ObjectWriter }
import com.fasterxml.jackson.databind.node.ObjectNode
// Thin delegation layer over Jackson APIs. NOTE(review): judging by the
// module layout this appears to isolate version-specific Jackson calls —
// confirm against the sibling source trees before relying on that.
private[jackson] trait JacksonCompat {
  // Writer that pretty-prints using Jackson's default pretty printer.
  protected def makeWriter(mapper: ObjectMapper): ObjectWriter = mapper.writerWithDefaultPrettyPrinter()

  // Delegates unexpected-token handling to the deserialization context.
  protected def handleUnexpectedToken(context: DeserializationContext)(
    klass: Class[_],
    parser: JsonParser
  ): Unit =
    context.handleUnexpectedToken(klass, parser)

  // Copies all given fields into the object node and returns it as a JsonNode.
  protected def objectNodeSetAll(node: ObjectNode, fields: java.util.Map[String, JsonNode]): JsonNode =
    node.setAll[JsonNode](fields)
}
| circe/circe-jackson | 210/src/main/scala/io/circe/jackson/JacksonCompat.scala | Scala | apache-2.0 | 692 |
package mesosphere.marathon
package raml
import mesosphere.UnitTest
import mesosphere.marathon.api.serialization.ContainerSerializer
import org.apache.mesos.{ Protos => Mesos }
/** Round-trip conversion tests between state.Container, its protobuf form and
  * the RAML representation, for each container/engine flavour. */
class ContainerConversionTest extends UnitTest {

  // Shared behaviour: state -> protobuf -> RAML must equal the direct
  // state -> RAML conversion. Parameters are by-name so construction happens
  // inside the registered test, not at suite wiring time.
  def convertToProtobufThenToRAML(container: => state.Container, raml: => Container): Unit = {
    "convert to protobuf, then to RAML" in {
      val proto = ContainerSerializer.toProto(container)
      val proto2Raml = proto.toRaml
      proto2Raml should be(raml)
    }
  }

  "A Mesos Plain container is converted" when {
    "a mesos container" should {
      val container = state.Container.Mesos(
        volumes = Seq(coreHostVolume),
        portMappings = Seq(corePortMapping))
      val raml = container.toRaml[Container]

      behave like convertToProtobufThenToRAML(container, raml)

      "convert to a RAML container" in {
        raml.`type` should be(EngineType.Mesos)
        raml.appc should be(empty)
        raml.docker should be(empty)
        raml.volumes should be(Seq(ramlHostVolume))
        raml.portMappings should contain(Seq(ramlPortMapping))
      }
    }
    "a RAML container" should {
      "convert to a mesos container" in {
        val container = Container(EngineType.Mesos, portMappings = Option(Seq(ramlPortMapping)), volumes = Seq(ramlHostVolume))
        val mc = Some(container.fromRaml).collect {
          case c: state.Container.Mesos => c
        }.getOrElse(fail("expected Container.Mesos"))
        mc.portMappings should be(Seq(corePortMapping))
        mc.volumes should be(Seq(coreHostVolume))
      }
    }
  }

  "A Mesos Docker container is converted" when {
    "a mesos-docker container" should {
      val container = state.Container.MesosDocker(Seq(coreHostVolume), "test", Seq(corePortMapping), Some(credentials))
      val raml = container.toRaml[Container]

      behave like convertToProtobufThenToRAML(container, raml)

      "convert to a RAML container" in {
        raml.`type` should be(EngineType.Mesos)
        raml.appc should be(empty)
        raml.volumes should be(Seq(ramlHostVolume))
        raml.portMappings should contain(Seq(ramlPortMapping))
        raml.docker should be(defined)
        raml.docker.get.image should be("test")
        raml.docker.get.credential should be(defined)
        raml.docker.get.credential.get.principal should be(credentials.principal)
        raml.docker.get.credential.get.secret should be(credentials.secret)
      }
    }
    // Regression shape: empty port mappings must also survive the round trip.
    "a mesos-docker container w/o port mappings" should {
      val container = state.Container.MesosDocker(Seq(coreHostVolume), "test", portMappings = Seq.empty, Some(credentials))
      val raml = container.toRaml[Container]

      behave like convertToProtobufThenToRAML(container, raml)
    }
    "a RAML container" should {
      "convert to a mesos-docker container" in {
        val container = Container(EngineType.Mesos, portMappings = Option(Seq(ramlPortMapping)), docker = Some(DockerContainer(
          image = "foo", credential = Some(DockerCredentials(credentials.principal, credentials.secret)))),
          volumes = Seq(ramlHostVolume))
        val mc = Some(container.fromRaml).collect {
          case c: state.Container.MesosDocker => c
        }.getOrElse(fail("expected Container.MesosDocker"))
        mc.portMappings should be(Seq(corePortMapping))
        mc.volumes should be(Seq(coreHostVolume))
        mc.image should be("foo")
        mc.credential should be(Some(credentials))
        mc.forcePullImage should be(container.docker.head.forcePullImage)
      }
    }
  }

  "A Mesos AppC container is created correctly" when {
    "a mesos-appc container" should {
      val container = state.Container.MesosAppC(Seq(coreHostVolume), "test", Seq(corePortMapping), Some("id"))
      val raml = container.toRaml[Container]

      behave like convertToProtobufThenToRAML(container, raml)

      "convert to a RAML container" in {
        raml.`type` should be(EngineType.Mesos)
        raml.volumes should be(Seq(ramlHostVolume))
        raml.portMappings should contain(Seq(ramlPortMapping))
        raml.docker should be(empty)
        raml.appc should be(defined)
        raml.appc.get.image should be("test")
        raml.appc.get.id should be(Some("id"))
      }
    }
    "a RAML container" should {
      "convert to a mesos-appc container" in {
        val container = Container(
          EngineType.Mesos, portMappings = Option(Seq(ramlPortMapping)), appc = Some(AppCContainer(image = "foo")),
          volumes = Seq(ramlHostVolume))
        val mc = Some(container.fromRaml).collect {
          case c: state.Container.MesosAppC => c
        }.getOrElse(fail("expected Container.MesosAppC"))
        mc.portMappings should be(Seq(corePortMapping))
        mc.volumes should be(Seq(coreHostVolume))
        mc.image should be("foo")
        mc.forcePullImage should be(container.appc.head.forcePullImage)
        mc.id should be(container.appc.head.id)
        mc.labels should be(empty)
      }
    }
  }

  "A Docker Docker container is created correctly" when {
    // The three legacy-protobuf cases cover the obsolete per-docker network
    // field (HOST/USER/BRIDGE) still present in stored state.
    "a legacy docker protobuf container (host)" should {
      "convert to RAML" in {
        val legacyProto = Protos.ExtendedContainerInfo.newBuilder()
          .setType(Mesos.ContainerInfo.Type.DOCKER)
          .setDocker(Protos.ExtendedContainerInfo.DockerInfo.newBuilder()
            .setImage("image0")
            .setOBSOLETENetwork(Mesos.ContainerInfo.DockerInfo.Network.HOST)
          )
          .build

        val expectedRaml = Container(
          `type` = EngineType.Docker,
          docker = Option(DockerContainer(
            image = "image0",
            network = Option(DockerNetwork.Host),
            portMappings = None
          )),
          portMappings = Option(Seq.empty)
        )

        legacyProto.toRaml[Container] should be(expectedRaml)
      }
    }
    "a legacy docker protobuf container (user)" should {
      "convert to RAML" in {
        val legacyProto = Protos.ExtendedContainerInfo.newBuilder()
          .setType(Mesos.ContainerInfo.Type.DOCKER)
          .setDocker(Protos.ExtendedContainerInfo.DockerInfo.newBuilder()
            .setImage("image0")
            .setOBSOLETENetwork(Mesos.ContainerInfo.DockerInfo.Network.USER)
          )
          .build

        val expectedRaml = Container(
          `type` = EngineType.Docker,
          docker = Option(DockerContainer(
            image = "image0",
            network = Option(DockerNetwork.User),
            portMappings = Option(Seq.empty)
          )),
          portMappings = Option(Seq.empty)
        )

        legacyProto.toRaml[Container] should be(expectedRaml)
      }
    }
    "a legacy docker protobuf container (bridge)" should {
      "convert to RAML" in {
        val legacyProto = Protos.ExtendedContainerInfo.newBuilder()
          .setType(Mesos.ContainerInfo.Type.DOCKER)
          .setDocker(Protos.ExtendedContainerInfo.DockerInfo.newBuilder()
            .setImage("image0")
            .setOBSOLETENetwork(Mesos.ContainerInfo.DockerInfo.Network.BRIDGE)
            .addOBSOLETEPortMappings(
              Protos.ExtendedContainerInfo.DockerInfo.ObsoleteDockerPortMapping.newBuilder()
                .setName("http").setContainerPort(1).setHostPort(2).setServicePort(3)
                .addLabels(Mesos.Label.newBuilder().setKey("foo").setValue("bar"))
                .build
            )
          )
          .build

        val expectedRaml = Container(
          `type` = EngineType.Docker,
          docker = Option(DockerContainer(
            image = "image0",
            network = Option(DockerNetwork.Bridge),
            portMappings = Option(Seq(
              ContainerPortMapping(
                containerPort = 1,
                hostPort = Option(2),
                labels = Map("foo" -> "bar"),
                name = Option("http"),
                servicePort = 3
              ))
            )
          )),
          portMappings = None
        )

        legacyProto.toRaml[Container] should be(expectedRaml)
      }
    }
    "a docker-docker container" should {
      val container = state.Container.Docker(Seq(coreHostVolume), "test", Seq(corePortMapping))
      val raml = container.toRaml[Container]

      behave like convertToProtobufThenToRAML(container, raml)

      "convert to a RAML container" in {
        raml.`type` should be(EngineType.Docker)
        raml.appc should be(empty)
        raml.volumes should be(Seq(ramlHostVolume))
        raml.docker should be(defined)
        raml.docker.get.image should be("test")
        raml.docker.get.credential should be(empty)
        raml.docker.get.network should be(empty)
        raml.portMappings should contain(Seq(ramlPortMapping))
      }
    }
    "a docker-docker container w/o port mappings" should {
      val container = state.Container.Docker(Seq(coreHostVolume), "test")
      val raml = container.toRaml[Container]

      behave like convertToProtobufThenToRAML(container, raml)
    }
    "a RAML container" should {
      "convert to a docker-docker container" in {
        val container = Container(EngineType.Docker, portMappings = Option(Seq(ramlPortMapping)), docker = Some(DockerContainer(
          image = "foo", parameters = Seq(DockerParameter("qws", "erf")))), volumes = Seq(ramlHostVolume))
        val mc = Some(container.fromRaml).collect {
          case c: state.Container.Docker => c
        }.getOrElse(fail("expected Container.Docker"))
        mc.portMappings should be(Seq(corePortMapping))
        mc.volumes should be(Seq(coreHostVolume))
        mc.image should be("foo")
        mc.forcePullImage should be(container.docker.head.forcePullImage)
        mc.parameters should be(Seq(state.Parameter("qws", "erf")))
        mc.privileged should be(container.docker.head.privileged)
      }
    }
  }

  // Shared fixtures: one equivalent port mapping and host volume in both the
  // core (state) and RAML representations.
  private lazy val credentials = state.Container.Credential("principal", Some("secret"))
  private lazy val ramlPortMapping = ContainerPortMapping(
    containerPort = 80,
    hostPort = Some(90),
    servicePort = 100,
    name = Some("pok"),
    labels = Map("wer" -> "rty")
  )
  private lazy val corePortMapping = state.Container.PortMapping(
    containerPort = 80,
    hostPort = Some(90),
    servicePort = 100,
    name = Some("pok"),
    labels = Map("wer" -> "rty")
  )
  private lazy val coreHostVolume = state.DockerVolume("cpath", "/host/path", Mesos.Volume.Mode.RW)
  private lazy val ramlHostVolume = AppVolume("cpath", Option("/host/path"), mode = ReadMode.Rw)
}
| natemurthy/marathon | src/test/scala/mesosphere/marathon/raml/ContainerConversionTest.scala | Scala | apache-2.0 | 10,497 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.test.SharedSQLContext
class FileFormatWriterSuite extends QueryTest with SharedSQLContext {

  test("empty file should be skipped while write to file") {
    withTempPath { path =>
      // Repartition into 10 partitions but keep only one matching row, so
      // most partitions are empty when the parquet files are written out.
      spark.range(100).repartition(10).where("id = 50").write.parquet(path.toString)
      // Count only data part files: skip hidden ("." prefix) and
      // metadata ("_" prefix) entries.
      val dataFiles = path.listFiles().filter { f =>
        val name = f.getName
        f.isFile && !name.startsWith(".") && !name.startsWith("_")
      }
      assert(dataFiles.length === 2)
    }
  }
}
| minixalpha/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileFormatWriterSuite.scala | Scala | apache-2.0 | 1,366 |
/*
*/
package see
import org.junit._
/** Tests vector handling.
* For now, we only test vectors of dim 1, although see will put no limits
*/
//@Ignore
class VectorTest extends TestCase {
  @Before
  override def setUp() {
    //TestCase.
    super.setUp()
    // Seed the shared scope with the two vectors every test relies on.
    scope.eval(parse("va = (1,2,3)"))
    scope.eval(parse("vb = (4,5,6)"))
  }
  // Valid vector literals must parse; malformed ones must raise a syntax error.
  @Test
  def testParsing() {
    println("Vector Parsing")
    parse("(a, b, c)")
    parse("((a, b, c))")
    parse("((a, b, c), (1))")
    parse("((a, true, 1.0), (1))")
    shouldFail("Failed to catch syntax error.") {
      node = parse("(1, 2, 2, )")
      println(node)
    }
    // actually, this should be made to work, too
    shouldFail("Failed to catch syntax error.") {
      parse("(1 ; 2)")
    }
  }
  // Building vectors from literals, expressions, nested vectors and variables;
  // elements must be defined before a vector containing them evaluates.
  @Test
  def testComposition() {
    println("Vector Composition")
    expect("va", IntVector(1, 2, 3))
    expect("vb", IntVector(4, 5, 6))
    expect("(4, 2 + 3, yy = 6)", IntVector(4, 5, 6))
    testVar("yy", 6)
    expectTrue("v1 = (1, true, 1.0)")
    expect("len(v1)", 3)
    expectFalse("v2 = ((1, 1), (true), (1.0, (1, 2), false))")
    expect("len(v2)", 3)
    expectTrue("v3 = (1, va, vb, 6)")
    expect("len(v3)", 4)
    shouldFail("Did not catch undefined.") {
      expectTrue("(1 , x)")
    }
    scope.set("x", 1)
    expect(IntVector(1, 1))
    expectTrue("defined((1, x))")
    expectFalse("defined((1, y))")
  }
  // Applying a vector to arguments: non-callable elements pass through
  // unchanged, callable elements are invoked with the arguments.
  @Test
  def testCall() {
    println("Vector Calling")
    // Calling an empty vector with any arguments results in empty vector
    // However, arguments must be still defined.
    expectTrue("()(1,2,Int) == ()")
    // If elements are not callable, arguments are ignored
    expect("(1,2,3)(1,true, Value)", IntVector(1, 2, 3))
    expect("va(1,true, Value)", IntVector(1, 2, 3))
    expectFalse("f(x) := { 2 * x}")
    expect("(1,f,3)(5)", IntVector(1, 10, 3))
    expectFalse("g(x,y) := { y * x}")
    expect("(1,g)(5,6)", IntVector(1, 30))
    expectTrue("(1,f,3)(va) == (1,(2,4,6),3)")
  }
  // defined(...) on vectors of functions: a vector call is only defined once
  // every free variable of its callable elements is bound (here: "a").
  @Test
  def testDefine() {
    println("Vector, Defined")
    val prog = """
      f(x) := { x * a};
      g(x) := { x * 2};
      vf = (1, f);
      vg = (1, g)
    """
    expectFalse(prog)
    expectTrue(" defined(va)")
    expectTrue(" defined(vf)")
    expectTrue(" defined(vg)")
    expectTrue(" defined(va(1))")
    expectFalse(" defined(vf(1))")
    expectTrue(" defined(vg(1))")
    scope.set("a", 1)
    expectTrue(" defined(vf(1))")
  }
  // Element access with @: positive and negative indices, bounds checks,
  // defined(...) interaction, and nested multi-level subscripts.
  @Test
  def testSubscript() {
    println("Vector Subscripting")
    expect("va@0", 1)
    expect("va@1", 2)
    expect("vb@2", 6)
    shouldFail("Bounds check failed.") {
      expect("va@3", 0)
    }
    expect("va@-1", 3)
    expect("va@-2", 2)
    expect("va@-3", 1)
    shouldFail("Bounds check failed.") {
      expect("va@-4", 0)
    }
    expectTrue("defined((1, va)@1)")
    expectFalse("defined((1, y)@0)")
    expectFalse("defined((1, y)@1)")
    expectFalse("defined(va@4)")
    expectFalse("defined(va@ -4)")
    expectTrue("defined(va@1)")
    expectTrue("defined(va@ -1)")
    expectFalse("defined(()@0)")
    expectFalse("vz = ()")
    expectTrue("defined(vz)")
    expectFalse("defined(vz@ -1)")
    expectFalse("defined(va@index)")
    scope.set("index", 2)
    expectTrue("defined(va@index)")
    expectTrue("vx = (1, (2, 3), 4)")
    expectTrue("len(vx@1) == 2")
    expect("vx@1", IntVector(2, 3))
    expect("vx@(1,1)", 3)
    // subscripting the first element should work, no matter what
    expect("1@(0,0,0,0)", 1)
    expect("(2,3,4)@(0,0,0,0)", 2)
    expect("((9,8),3,4)@(0,0,0,0)", 9)
    expect( """ "abc"@(0,0,0,0)""", "a")
    expect( """ ("abc", 5)@(0,0,0,0)""", "a")
    // ... except for an empty vector/string
    shouldFail("Bounds check failed.") {
      expectTrue( """ ()@(0,0,0,0) == ()""")
    }
    shouldFail("Bounds check failed.") {
      expect( """ ""@(0,0,0,0)""", "")
    }
    // ... or if we hit some undefined on the way
    shouldFail("Bounds check failed.") {
      expect("xx@(0,0,0,0)", 42)
    }
    scope.set("xx", 42)
    expect(42)
    shouldFail("Bounds check failed.") {
      expect("(yy,0)@(0,0,0,0)", 43)
    }
    scope.set("yy", 43)
    expect(43)
    // same for last element
    scope.clear()
    expect("1@(-1,-1,-1,-1)", 1)
    expect("(2,3,4)@(-1,-1,-1,-1)", 4)
    expect("((9,8),3,(4,5))@(-1,-1,-1,-1)", 5)
    expect( """ "abc"@(-1,-1,-1,-1)""", "c")
    expect( """ ("abc", "def")@(-1,-1,-1,-1)""", "f")
    shouldFail("Bounds check failed.") {
      expectTrue( """ ()@(-1,-1,-1,-1) == ()""")
    }
    shouldFail("Bounds check failed.") {
      expect( """ ""@(-1,-1,-1,-1)""", "")
    }
    // ... or if we hit some undefined on the way
    shouldFail("Bounds check failed.") {
      expect("xx@(-1,-1,-1,-1)", 42)
    }
    scope.set("xx", 42)
    expect(42)
  }
def testSlice() {
println("Vector Slicing")
// slicing, produces [start , end[ !
expect("va@@(0, 2)", IntVector(1, 2))
expect("va@@(1, 3)", IntVector(2, 3))
expect("va@@(3, 1)", IntVector(3, 2))
// A slice shall always produce a vector, even id only a single ele,ent is selected
expect("va@@(1, 2)", IntVector(2))
// out of bounds shall produce as much as possible
expect("va@@(1,4)", IntVector(2, 3))
expect("va@@(-2, -5)", IntVector(2, 1))
// .. and an empty vector, if it is totally invalid
expect("len(va@@(4, 100))", 0)
expect("len(va@@(-5, -10))", 0)
expect("len(va@@(-5, 10))", 0)
// Although both indices are out of bounds, the described range includes
// the whole vector reversed. Strange, but makes sense, if you think about it.
// Probably best to avoid such slice indices...
expect("va@@(10, -10)", IntVector(3, 2, 1))
expect("va@@(0, len(va))", IntVector(1, 2, 3))
expectTrue("va@@(0, len(va)) == va")
expectTrue("va@@(1, 1) == ()")
expectTrue("defined( va@@(0, 2) )")
expectTrue("defined( va@@(-1, 0) )")
// .. consequently, it will be always defined, even if out of bounds.
expectTrue("defined( va@@(0, 4) )")
expectTrue("defined( va@@(-1, -5) )")
// whole vector
expect("va@@(0, -1)", IntVector(1, 2, 3))
// reversed
expect("va@@(-1, 0)", IntVector(3, 2, 1))
expect("va@@(-1, -2)", IntVector(3))
expect("va@@(-1, -3)", IntVector(3, 2))
expect("va@@(-1, -4)", IntVector(3, 2, 1))
// There's more about that, but that is too esoteric
}
// Vector equality is element-wise (va == (1,2,3) holds); per the comments below,
// the ordering operators < and > compare by length rather than by content.
@Test
def testComparison() {
println("Vector Comparison")
expectFalse("va == vb")
expectFalse("va == (4,5,6)")
expectFalse("va == ()")
expectTrue("va == (1,2,3)")
expectTrue("va + 3 == vb")
expectTrue("va != vb")
expectTrue("va != (4,5,6)")
expectTrue("va != ()")
expectFalse("va != (1,2,3)")
expectFalse("va + 3 != vb")
// other relations are not very well defined
// but at least size comparisons should give expected results:
expectTrue("va < (0,0,0,0)")
expectTrue("va > (0,0)")
expectTrue("va > (1,2)")
expectFalse("va < (1000,1000)")
}
// Element-wise arithmetic: scalar ops broadcast over the vector, vector-vector
// ops pair corresponding elements; `*+` is the scalar (dot) product.
@Test
def testArith() {
println("Vector Arithmetics")
expect("va + 1", IntVector(2, 3, 4))
expect("va - 1", IntVector(0, 1, 2))
expect("vx = va * 2", IntVector(2, 4, 6))
expect("vx / 2", IntVector(1, 2, 3))
expect("-va", IntVector(-1, -2, -3))
expect("va + vb", IntVector(5, 7, 9))
expect("vb - va", IntVector(3, 3, 3))
expect("vb * va", IntVector(4, 10, 18))
// scalar product: 4*1 + 5*2 + 6*3 = 32
expect("vb *+ va", 32)
}
// zip pairs corresponding elements: zip((1,2,3), (4,5,6)) -> ((1,4), (2,5), (3,6)).
@Test
def testZip() {
println("Vector Zipping")
expectTrue("vx = zip(va, vb)")
expect("vx@0", IntVector(1, 4))
expect("vx@1", IntVector(2, 5))
expect("vx@2", IntVector(3, 6))
}
// rep(n, expr) builds an n-element vector. As the paired cases below show, a bare
// expression is evaluated once and repeated, while a {...} block is re-evaluated
// for every element. pad(v, n, fill) truncates or extends v to length n.
@Test
def testGenerators() {
println("Vector Generators")
// Generators
//expectFalse("gen = {$ += 1}")
expect("rep(4, 66)", IntVector(66, 66, 66, 66))
expect("5; rep(4, $+1)", IntVector(6, 6, 6, 6))
// without braces: $+=1 runs once -> four 9s; with braces: runs per element -> 9..13
expect("8; vx = rep(5, $+=1 )", IntVector(9, 9, 9, 9, 9))
expect("8; vx = rep(5, {$+=1} )", IntVector(9, 10, 11, 12, 13))
expect("vy = pad(vx, 3, 0)", IntVector(9, 10, 11))
expect("vy = pad(vy, 6, 1)", IntVector(9, 10, 11, 1, 1, 1))
expect("vy = pad(vy, 2, 0)", IntVector(9, 10))
expect("99; vy = pad(vy, 3, $)", IntVector(9, 10, 99))
expect("88; vy = pad(vy, 4, vy@1)", IntVector(9, 10, 99, 10))
expect("vy = pad(vx, 7, vx@ -1)", IntVector(9, 10, 11, 12, 13, 13, 13))
expect("-1; vy = pad(va, 5, $-1)", IntVector(1, 2, 3, -2, -2))
// All elements should be the same...
expectTrue("vx = rep(4, rnd(0.5) ); true")
println(result("vx"))
expectTrue("vx@0 == vx@1 == vx@2")
// ... while this would be an extreme coincidence here...
expectTrue("vx = rep(4, {rnd(0.5)} ); true")
println(result("vx"))
expectFalse("vx@0 == vx@1 == vx@2")
}
// `++` flattens both operands into a single vector (always yields a vector,
// even for scalars); `+++` nests them instead, producing a 2-element vector
// of vectors -- hence len(() +++ ()) == 2.
@Test
def testConcatenation() {
println("Vector Concatenation")
// Concatenation
expect("va ++ ()", IntVector(1, 2, 3))
expect("vx = vb ++ va", IntVector(4, 5, 6, 1, 2, 3))
expect("4 ++ 5 ++ 7", IntVector(4, 5, 7))
expect("va ++ 7", IntVector(1, 2, 3, 7))
// .. shall always produce a vector
expect("5 ++ ()", IntVector(5))
expect("() ++ 5", IntVector(5))
expect("len( () ++ () )", 0)
expect("vx = va +++ vb; len(vx)", 2)
expect("vx@0", IntVector(1, 2, 3))
expect("vx@1", IntVector(4, 5, 6))
expect("len(() +++ ())", 2) // -> ((),())
}
// Built-in reductions over the fixture vectors va = (1, 2, 3) and vb = (4, 5, 6).
@Test
def testReduction() {
println("Vector Reduction")
// Reduction
expect("min(va)", 1)
expect("max(vb)", 6)
expect("sum(vb)", 15)
expect("prod(va)", 6)
expect("mean(va)", 2)
}
// fold(seed, f, v) left-folds the user-defined function f over v: an empty
// vector yields the seed unchanged, and a scalar argument acts as a one-element
// vector. Mixing int and float operands in a bitwise fold is rejected.
// (`name(args) := {body}` defines a function; the definition expression itself
// evidently evaluates to a falsy value, hence the expectFalse wrappers.)
@Test
def testFold() {
println("Vector Folding")
expectFalse("plus(x,y) := {x + y}")
expect("fold(1, plus, va)", 7)
expectFalse("or_(x,y) := {x | y}")
expect("fold(1, or_, (0x80, 0x20, 0x180))", 0x1a1)
shouldFail("Invalid operands.") {
expect("fold(0, or_, (10, 1.5))", 0)
}
expect("fold(1, or_, ())", 1)
expect("fold(1, or_, 4)", 5)
scope.set("a", 1)
scope.set("b", 2)
expectFalse("bor(x,y) := {x || y}") // note that this will not shortcut!
expectTrue("fold(false, bor, (0, 1.5, \\"a\\", {a > b} ) )")
}
} | acruise/see | src/test/scala/see/VectorTest.scala | Scala | bsd-3-clause | 10,218 |
package mesosphere.marathon.integration
import java.lang.{ Double => JDouble }
import mesosphere.marathon.Protos.HealthCheckDefinition.Protocol
import mesosphere.marathon.api.v2.json.{ V2AppDefinition, V2AppUpdate }
import mesosphere.marathon.health.HealthCheck
import mesosphere.marathon.integration.setup._
import mesosphere.marathon.state.{ Command, PathId }
import org.scalatest.{ BeforeAndAfter, GivenWhenThen, Matchers }
import org.slf4j.LoggerFactory
import play.api.libs.json.JsArray
import spray.httpx.UnsuccessfulResponseException
import scala.concurrent.duration._
/**
 * Integration tests for Marathon's v2 app REST API: create (with and without
 * HTTP/TCP/COMMAND health checks), update, scale, restart, versioning, task
 * killing, deletion, and deployment stop/rollback. Outcomes are verified via
 * callback events from the Marathon event bus ("deployment_success",
 * "status_update_event", "deployment_failed", ...), so every test is
 * order-dependent on the event stream of a running Marathon instance.
 */
class AppDeployIntegrationTest
extends IntegrationFunSuite
with SingleMarathonIntegrationTest
with Matchers
with BeforeAndAfter
with GivenWhenThen {
private[this] val log = LoggerFactory.getLogger(getClass)
//clean up state before running the test case
before(cleanUp())
test("create a simple app without health checks") {
Given("a new app")
val app = v2AppProxy(testBasePath / "app", "v1", instances = 1, withHealth = false)
When("The app is deployed")
val result = marathon.createAppV2(app)
Then("The app is created")
result.code should be (201) //Created
extractDeploymentIds(result) should have size 1
waitForEvent("deployment_success")
waitForTasks(app.id, 1) //make sure, the app has really started
}
test("increase the app count metric when an app is created") {
Given("a new app")
val app = v2AppProxy(testBasePath / "app", "v1", instances = 1, withHealth = false)
var appCount = (marathon.metrics().entityJson \ "gauges" \ "service.mesosphere.marathon.app.count" \ "value").as[Int]
appCount should be (0)
When("The app is deployed")
val result = marathon.createAppV2(app)
Then("The app count metric should increase")
result.code should be (201) // Created
appCount = (marathon.metrics().entityJson \ "gauges" \ "service.mesosphere.marathon.app.count" \ "value").as[Int]
appCount should be (1)
}
test("create a simple app without health checks via secondary (proxying)") {
// only meaningful when this suite manages its own Marathon setup
if (!config.useExternalSetup) {
Given("a new app")
val app = v2AppProxy(testBasePath / "app", "v1", instances = 1, withHealth = false)
When("The app is deployed")
val result = marathonProxy.createAppV2(app)
Then("The app is created")
result.code should be (201) //Created
extractDeploymentIds(result) should have size 1
waitForEvent("deployment_success")
waitForTasks(app.id, 1) //make sure, the app has really started
}
}
test("create a simple app with http health checks") {
Given("a new app")
val app = v2AppProxy(testBasePath / "http-app", "v1", instances = 1, withHealth = false).
copy(healthChecks = Set(healthCheck))
val check = appProxyCheck(app.id, "v1", true)
When("The app is deployed")
val result = marathon.createAppV2(app)
Then("The app is created")
result.code should be (201) //Created
extractDeploymentIds(result) should have size 1
waitForEvent("deployment_success")
check.pingSince(5.seconds) should be (true) //make sure, the app has really started
}
test("create a simple app with tcp health checks") {
Given("a new app")
val app = v2AppProxy(testBasePath / "tcp-app", "v1", instances = 1, withHealth = false).
copy(healthChecks = Set(healthCheck.copy(protocol = Protocol.TCP)))
When("The app is deployed")
val result = marathon.createAppV2(app)
Then("The app is created")
result.code should be (201) //Created
extractDeploymentIds(result) should have size 1
waitForEvent("deployment_success")
}
test("create a simple app with command health checks") {
Given("a new app")
val app = v2AppProxy(testBasePath / "command-app", "v1", instances = 1, withHealth = false).
copy(healthChecks = Set(healthCheck.copy(protocol = Protocol.COMMAND, command = Some(Command("true")))))
When("The app is deployed")
val result = marathon.createAppV2(app)
Then("The app is created")
result.code should be (201) //Created
extractDeploymentIds(result) should have size 1
waitForEvent("deployment_success")
}
test("list running apps and tasks") {
Given("a new app is deployed")
val appId = testBasePath / "app"
val app = v2AppProxy(appId, "v1", instances = 2, withHealth = false)
marathon.createAppV2(app).code should be (201) //Created
When("the deployment has finished")
waitForEvent("deployment_success")
Then("the list of running app tasks can be fetched")
val apps = marathon.listAppsInBaseGroup
apps.code should be(200)
apps.value should have size 1
val tasks = marathon.tasks(appId)
tasks.code should be(200)
tasks.value should have size 2
}
test("create an app that fails to deploy") {
Given("a new app that is not healthy")
val appId = testBasePath / "failing"
val check = appProxyCheck(appId, "v1", state = false)
val app = v2AppProxy(appId, "v1", instances = 1, withHealth = true)
When("The app is deployed")
val create = marathon.createAppV2(app)
Then("The deployment can not be finished")
create.code should be (201) //Created
extractDeploymentIds(create) should have size 1
// the health check never passes, so deployment_success must NOT arrive
intercept[AssertionError] {
waitForEvent("deployment_success")
}
When("The app is deleted")
val delete = marathon.deleteApp(appId, force = true)
delete.code should be (200)
waitForChange(delete)
marathon.listAppsInBaseGroup.value should have size 0
}
test("update an app") {
Given("a new app")
val appId = testBasePath / "app"
val v1 = v2AppProxy(appId, "v1", instances = 1, withHealth = true)
marathon.createAppV2(v1).code should be (201)
waitForEvent("deployment_success")
val before = marathon.tasks(appId)
When("The app is updated")
val check = appProxyCheck(appId, "v2", state = true)
val update = marathon.updateApp(v1.id, V2AppUpdate(cmd = v2AppProxy(appId, "v2", 1).cmd))
Then("The app gets updated")
update.code should be (200)
waitForEvent("deployment_success")
waitForTasks(appId, before.value.size)
check.pingSince(5.seconds) should be (true) //make sure, the new version is alive
}
test("scale an app up and down") {
Given("a new app")
val app = v2AppProxy(testBasePath / "app", "v1", instances = 1, withHealth = false)
marathon.createAppV2(app)
waitForEvent("deployment_success")
When("The app gets an update to be scaled up")
val scaleUp = marathon.updateApp(app.id, V2AppUpdate(instances = Some(2)))
Then("New tasks are launched")
scaleUp.code should be (200) //OK
waitForEvent("deployment_success")
waitForTasks(app.id, 2)
When("The app gets an update to be scaled down")
val scaleDown = marathon.updateApp(app.id, V2AppUpdate(instances = Some(1)))
Then("Tasks are killed")
scaleDown.code should be (200) //OK
waitForEventWith("status_update_event", _.info("taskStatus") == "TASK_KILLED")
waitForTasks(app.id, 1)
}
test("restart an app") {
Given("a new app")
val appId = testBasePath / "app"
val v1 = v2AppProxy(appId, "v1", instances = 1, withHealth = false)
marathon.createAppV2(v1).code should be (201)
waitForEvent("deployment_success")
val before = marathon.tasks(appId)
When("The app is restarted")
val restart = marathon.restartApp(v1.id)
Then("All instances of the app get restarted")
restart.code should be (200)
waitForChange(restart)
val after = marathon.tasks(appId)
waitForTasks(appId, before.value.size)
// restart keeps the instance count but replaces every task
before.value.toSet should not be after.value.toSet
}
test("list app versions") {
Given("a new app")
val v1 = v2AppProxy(testBasePath / "app", "v1", instances = 1, withHealth = false)
val createResponse = marathon.createAppV2(v1)
createResponse.code should be (201)
waitForEvent("deployment_success")
When("The list of versions is fetched")
val list = marathon.listAppVersions(v1.id)
Then("The response should contain all the versions")
list.code should be (200)
list.value.versions should have size 1
list.value.versions.head should be (createResponse.value.version)
}
test("correctly version apps") {
Given("a new app")
val v1 = v2AppProxy(testBasePath / "app", "v1", instances = 1, withHealth = false)
val createResponse = marathon.createAppV2(v1)
createResponse.code should be (201)
val originalVersion = createResponse.value.version
waitForEvent("deployment_success")
When("A resource specification is updated")
val updatedDisk: JDouble = v1.disk + 1.0
val appUpdate = V2AppUpdate(Option(v1.id), disk = Option(updatedDisk))
val updateResponse = marathon.updateApp(v1.id, appUpdate)
updateResponse.code should be (200)
waitForEvent("deployment_success")
Then("It should create a new version with the right data")
val responseOriginalVersion = marathon.appVersion(v1.id, originalVersion)
responseOriginalVersion.code should be (200)
responseOriginalVersion.value.disk should be (v1.disk)
val updatedVersion = updateResponse.value.version
val responseUpdatedVersion = marathon.appVersion(v1.id, updatedVersion)
responseUpdatedVersion.code should be (200)
responseUpdatedVersion.value.disk should be (updatedDisk)
}
test("kill a task of an App") {
Given("a new app")
val app = v2AppProxy(testBasePath / "app", "v1", instances = 1, withHealth = false)
marathon.createAppV2(app).code should be (201)
waitForEvent("deployment_success")
val taskId = marathon.tasks(app.id).value.head.id
When("a task of an app is killed")
marathon.killTask(app.id, taskId)
waitForEventWith("status_update_event", _.info("taskStatus") == "TASK_KILLED")
Then("All instances of the app get restarted")
waitForTasks(app.id, 1)
// NOTE(review): this compares a task object against a task-id string, so the
// `should not be` assertion can never fail; `.head.id` was probably intended — verify.
marathon.tasks(app.id).value.head should not be taskId
}
test("kill a task of an App with scaling") {
Given("a new app")
val app = v2AppProxy(testBasePath / "app", "v1", instances = 2, withHealth = false)
marathon.createAppV2(app).code should be (201)
waitForEvent("deployment_success")
val taskId = marathon.tasks(app.id).value.head.id
When("a task of an app is killed and scaled")
marathon.killTask(app.id, taskId, scale = true)
waitForEventWith("status_update_event", _.info("taskStatus") == "TASK_KILLED")
Then("All instances of the app get restarted")
waitForTasks(app.id, 1)
marathon.app(app.id).value.app.instances should be (1)
}
test("kill all tasks of an App") {
Given("a new app with multiple tasks")
val app = v2AppProxy(testBasePath / "app", "v1", instances = 2, withHealth = false)
marathon.createAppV2(app).code should be (201)
waitForEvent("deployment_success")
When("all task of an app are killed")
marathon.killAllTasks(app.id)
// one TASK_KILLED event per instance
waitForEventWith("status_update_event", _.info("taskStatus") == "TASK_KILLED")
waitForEventWith("status_update_event", _.info("taskStatus") == "TASK_KILLED")
Then("All instances of the app get restarted")
waitForTasks(app.id, 2)
}
test("kill all tasks of an App with scaling") {
Given("a new app with multiple tasks")
val app = v2AppProxy(testBasePath / "tokill", "v1", instances = 2, withHealth = false)
marathon.createAppV2(app).code should be (201)
waitForEvent("deployment_success")
marathon.app(app.id).value.app.instances should be (2)
When("all task of an app are killed")
val result = marathon.killAllTasksAndScale(app.id)
result.value.version should not be empty
waitForEventWith("status_update_event", _.info("taskStatus") == "TASK_KILLED")
waitForEventWith("status_update_event", _.info("taskStatus") == "TASK_KILLED")
waitForEvent("deployment_success")
Then("All instances of the app get restarted")
waitForTasks(app.id, 0)
marathon.app(app.id).value.app.instances should be (0)
}
test("delete an application") {
Given("a new app with one task")
val app = v2AppProxy(testBasePath / "app", "v1", instances = 1, withHealth = false)
marathon.createAppV2(app).code should be (201)
waitForEvent("deployment_success")
When("the app is deleted")
val delete = marathon.deleteApp(app.id)
delete.code should be (200)
waitForChange(delete)
// NOTE(review): the Then-description below looks copy-pasted from the restart
// test; the assertion actually verifies that no apps remain.
Then("All instances of the app get restarted")
marathon.listAppsInBaseGroup.value should have size 0
}
test("create and deploy an app with two tasks") {
Given("a new app")
log.info("new app")
val appIdPath: PathId = testBasePath / "/test/app"
val appId: String = appIdPath.toString
val app = v2AppProxy(appIdPath, "v1", instances = 2, withHealth = false)
When("the app gets posted")
log.info("new app")
val createdApp: RestResult[V2AppDefinition] = marathon.createAppV2(app)
Then("the app is created and a success event arrives eventually")
log.info("new app")
createdApp.code should be(201) // created
Then("we get various events until deployment success")
val deploymentIds: Seq[String] = extractDeploymentIds(createdApp)
deploymentIds.length should be(1)
val deploymentId = deploymentIds.head
log.info("waiting for deployment success")
val events: Map[String, Seq[CallbackEvent]] = waitForEvents(
"api_post_event", "group_change_success", "deployment_info",
"status_update_event", "status_update_event",
"deployment_success")(30.seconds)
val Seq(apiPostEvent) = events("api_post_event")
apiPostEvent.info("appDefinition").asInstanceOf[Map[String, Any]]("id").asInstanceOf[String] should
be(appId)
val Seq(groupChangeSuccess) = events("group_change_success")
groupChangeSuccess.info("groupId").asInstanceOf[String] should be(appIdPath.parent.toString)
val Seq(taskUpdate1, taskUpdate2) = events("status_update_event")
taskUpdate1.info("appId").asInstanceOf[String] should be(appId)
taskUpdate2.info("appId").asInstanceOf[String] should be(appId)
val Seq(deploymentSuccess) = events("deployment_success")
deploymentSuccess.info("id") should be(deploymentId)
Then("after that deployments should be empty")
val event: RestResult[List[ITDeployment]] = marathon.listDeploymentsForBaseGroup()
event.value should be('empty)
Then("Both tasks respond to http requests")
// Pings the app mock on the host/port reported in the task's status-update event.
def pingTask(taskInfo: CallbackEvent): RestResult[String] = {
val host: String = taskInfo.info("host").asInstanceOf[String]
val port: Int = taskInfo.info("ports").asInstanceOf[Seq[Int]].head
appMock.ping(host, port)
}
pingTask(taskUpdate1).entityString should be(s"Pong $appId\n")
pingTask(taskUpdate2).entityString should be(s"Pong $appId\n")
}
test("stop (forcefully delete) a deployment") {
Given("a new app that is not healthy")
val appId = testBasePath / "failing"
val app = v2AppProxy(appId, "v1", instances = 1, withHealth = true)
appProxyCheck(appId, "v1", state = false)
val create = marathon.createAppV2(app)
create.code should be (201) // Created
val deploymentId = extractDeploymentIds(create).head
Then("the deployment can not be finished")
marathon.listDeploymentsForBaseGroup().value should have size 1
When("the deployment is forcefully removed")
val delete = marathon.deleteDeployment(deploymentId, force = true)
delete.code should be (202)
Then("the deployment should be gone")
waitForEvent("deployment_failed")
marathon.listDeploymentsForBaseGroup().value should have size 0
Then("the app should still be there")
marathon.app(appId).code should be (200)
}
test("rollback a deployment") {
Given("a new app that is not healthy")
val appId = testBasePath / "failing"
val app = v2AppProxy(appId, "v1", instances = 1, withHealth = true)
appProxyCheck(appId, "v1", state = false)
val create = marathon.createAppV2(app)
create.code should be (201) // Created
val deploymentId = extractDeploymentIds(create).head
Then("the deployment can not be finished")
marathon.listDeploymentsForBaseGroup().value should have size 1
When("the deployment is rolled back")
val delete = marathon.deleteDeployment(deploymentId, force = false)
delete.code should be (200)
Then("the deployment should be gone")
waitForEvent("deployment_failed")
marathon.listDeploymentsForBaseGroup().value should have size 0
Then("the app should also be gone")
val result = intercept[UnsuccessfulResponseException] {
marathon.app(appId).code should be (404)
}
result.response.status.intValue should be(404)
}
// Shared health-check template: 20s grace period, 1s interval, at most 10 consecutive failures.
def healthCheck = HealthCheck(gracePeriod = 20.second, interval = 1.second, maxConsecutiveFailures = 10)
// Extracts the ids of all deployments listed in a create/update response body.
def extractDeploymentIds(app: RestResult[V2AppDefinition]): Seq[String] = {
for (deployment <- (app.entityJson \ "deployments").as[JsArray].value)
yield (deployment \ "id").as[String]
}
}
| spacejam/marathon | src/test/scala/mesosphere/marathon/integration/AppDeployIntegrationTest.scala | Scala | apache-2.0 | 17,059 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.unicomplex
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.model.Uri.Path
import akka.http.scaladsl.model.Uri.Path.{Empty, Segment, Slash}
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers
/**
 * Exercises akka-http `Uri.Path` semantics (Empty / Slash / Segment) that context
 * registration relies on: construction, length vs. character count, head/tail
 * decomposition, and prefix matching with `startsWith`.
 * Symbol matchers such as `'startsWithSlash` reflectively check the boolean
 * property of the same name on the target object.
 */
class RegisterContextSpec extends AnyFlatSpecLike with Matchers {
"Path matching" should "work" in {
// "" parses to Empty: zero path elements, zero characters
val emptyPath = Path("")
emptyPath shouldBe empty
emptyPath should have length 0
emptyPath.charCount should be (0)
println(emptyPath.getClass.getName)
emptyPath should be (Empty)
emptyPath should not be 'startsWithSegment
emptyPath should not be 'startsWithSlash
emptyPath.startsWith(Empty) should be (true)
// "/" is a single Slash with an Empty tail
val root = Path("/")
root should not be empty
root should have length 1
root.charCount should be (1)
println(root.getClass.getName)
root shouldBe a [Slash]
root should not be 'startsWithSegment
root shouldBe 'startsWithSlash
root.startsWith(Empty) should be (true)
root.head should be ('/')
root.tail should be (Empty)
// "/abc": a Slash followed by one Segment (length counts path elements, not chars)
val single = Path("/abc")
single should not be empty
single should have length 2
single.charCount should be (4)
println(single.getClass.getName)
single shouldBe a[Slash]
single should not be 'startsWithSegment
single shouldBe 'startsWithSlash
single.startsWith(Path("/")) should be (true)
single.startsWith(Path("")) should be (true)
single.startsWith(Path("abc")) should be (false)
single.head should be ('/')
single.tail should be (Path("abc"))
// "abc": a bare Segment without a leading slash
val simple = Path("abc")
simple should not be empty
simple should have length 1
simple.charCount should be (3)
println(simple.getClass.getName)
simple shouldBe a[Segment]
simple shouldBe 'startsWithSegment
simple should not be 'startsWithSlash
simple.startsWith(Path("/")) should be (false)
simple.startsWith(Path("")) should be (true)
simple.startsWith(Path("abc")) should be (true)
simple.head should be ("abc")
simple.tail should be (Empty)
// "abc/def": Segment, Slash, Segment
val multi = Path("abc/def")
multi should not be empty
multi should have length 3
multi.charCount should be (7)
println(multi.getClass.getName)
multi shouldBe a[Segment]
multi shouldBe 'startsWithSegment
multi should not be 'startsWithSlash
multi.startsWith(Path("/")) should be (false)
multi.startsWith(Path("")) should be (true)
multi.startsWith(Path("abc")) should be (true)
multi.head should be ("abc")
multi.tail shouldBe a [Slash]
// startsWith matches on the flattened character sequence, so "abc/de" is a prefix
multi.startsWith(Path("abc/de")) should be (true)
}
"request path matching" should "work" in {
// a URI without a trailing slash has an Empty path; with one, a Slash path
Uri("http://www.ebay.com").path should not be 'startsWithSlash
Uri("http://www.ebay.com").path should not be 'startsWithSegment
Uri("http://www.ebay.com").path.startsWith(Empty) should be (true)
Uri("http://www.ebay.com/").path shouldBe 'startsWithSlash
Uri("http://www.ebay.com/").path should not be 'startsWithSegment
Uri("http://www.ebay.com/").path.startsWith(Empty) should be (true)
Uri("http://www.ebay.com").path should be (Path(""))
Uri("http://www.ebay.com/").path should be (Path("/"))
Uri("http://127.0.0.1:8080/abc").path shouldBe 'startsWithSlash
// every path starts with the empty path
Uri("http://www.ebay.com/").path.startsWith(Path("")) should be (true)
Uri("http://www.ebay.com").path.startsWith(Path("")) should be (true)
Uri("http://www.ebay.com/abc").path.startsWith(Path("")) should be (true)
// dropping the leading Slash leaves the Segment, which prefix-matches itself
Uri("http://www.ebay.com/abc").path.tail.startsWith(Path("")) should be (true)
Uri("http://www.ebay.com/abc").path.tail.startsWith(Path("abc")) should be (true)
Uri("http://www.ebay.com/abc/def").path.tail.startsWith(Path("abc")) should be (true)
Uri("http://www.ebay.com/abc/def").path.tail.startsWith(Path("abc/def")) should be (true)
}
}
| paypal/squbs | squbs-unicomplex/src/test/scala/org/squbs/unicomplex/RegisterContextSpec.scala | Scala | apache-2.0 | 4,450 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.utils.instrumentation
import org.apache.spark.rdd.MetricsContext._
import org.bdgenomics.utils.misc.SparkFunSuite
import scala.collection.JavaConversions._
/**
 * Verifies that instrumenting an RDD records timings only for the operations the
 * caller invokes directly (keyBy, its function call, sortByKey, count) and not for
 * any operations sortByKey performs internally.
 */
class InstrumentedOrderedRDDFunctionsSuite extends SparkFunSuite {
sparkBefore("Before") {
Metrics.initialize(sc)
}
sparkAfter("After") {
Metrics.stopRecording()
}
sparkTest("Nested timings are not recorded for sortByKey operation") {
val rdd = sc.parallelize(Seq(1, 2, 3, 4, 5), 2).instrument().keyBy(e => e).sortByKey()
assert(rdd.count() === 5)
// NOTE(review): if no recorder is registered, this foreach body — and all of its
// assertions — silently does nothing; verify that is the intended behavior.
Metrics.Recorder.value.foreach(recorder => {
val timerMap = recorder.accumulable.value.timerMap
// We should have timings for 4 timers: keyBy, keyBy's function call, sortByKey, and count,
// but not for any nested operations within sortByKey
val timerNames = timerMap.keys().map(_.timerName).toSet
assert(timerNames.size === 4)
assertContains(timerNames, "keyBy")
assertContains(timerNames, "sortByKey")
assertContains(timerNames, "function call")
assertContains(timerNames, "count")
})
}
// Asserts that exactly one recorded timer name starts with the given prefix.
private def assertContains(names: Set[String], nameStartsWith: String) = {
assert(names.count(_.startsWith(nameStartsWith)) === 1,
"Timer names [" + names + "] did not contain [" + nameStartsWith + "]")
}
}
| tdanford/bdg-utils | utils-metrics/src/test/scala/org/bdgenomics/utils/instrumentation/InstrumentedOrderedRDDFunctionsSuite.scala | Scala | apache-2.0 | 2,132 |
import scala.language.implicitConversions
// Single-Int-field fixture; Test.unwrap below provides an implicit Class1 => Int view.
case class Class1(field:Int)
// Fixture object: `main` exercises the implicit unwrap conversion (the only
// type-correct path), while `neverCalled` deliberately contains ill-typed code
// (wrong constructor arities, unknown classes) — presumably so the scaladyno
// plugin can demonstrate deferring type errors to runtime. Do NOT "fix" those lines.
object Test {
// Implicit view Class1 => Int: makes `x + y` in main resolve to Int addition.
implicit def unwrap(c1:Class1):Int = {
c1.field
}
def main(args: Array[String]) {
val x = new Class1(2)
val y = new Class1(3)
println("type correct paths can be executed: "+(x+y))
}
// Intentionally broken code paths; this method is never invoked at runtime.
def neverCalled() {
new SimpleClass(8, "str")
new SimpleClass(6)
new SimpleClass() //not enough parameters
new SimpleClass(5, 5) //too many parameters
new SimpleClass[Clazz](8) //type parameters not allowed
// no such class
new NSC()
NSC().field
NSC().method(4, "str")
CaseClass()
CaseClass(5)
CaseClass(5, 5)
}
} | scaladyno/scaladyno-plugin | examples/Test4compilesAndRuns.scala | Scala | bsd-3-clause | 669 |
package com.alexitc.coinalerts.data.async
import javax.inject.Inject
import com.alexitc.coinalerts.config.DatabaseExecutionContext
import com.alexitc.coinalerts.data.{ExchangeCurrencyBlockingDataHandler, ExchangeCurrencyDataHandler}
import com.alexitc.coinalerts.models._
import com.alexitc.playsonify.core.FutureApplicationResult
import scala.concurrent.Future
/**
 * Asynchronous facade over [[ExchangeCurrencyBlockingDataHandler]]: every operation
 * is delegated unchanged to the blocking handler and scheduled as a [[Future]] on
 * the injected database execution context.
 */
class ExchangeCurrencyFutureDataHandler @Inject()(blockingDataHandler: ExchangeCurrencyBlockingDataHandler)(
    implicit ec: DatabaseExecutionContext)
    extends ExchangeCurrencyDataHandler[FutureApplicationResult] {

  // Runs the given blocking database call asynchronously on the database dispatcher.
  private def dispatch[A](blockingCall: => A): Future[A] = Future(blockingCall)

  override def create(createModel: CreateExchangeCurrencyModel): FutureApplicationResult[ExchangeCurrency] =
    dispatch(blockingDataHandler.create(createModel))

  override def getBy(exchangeCurrencyId: ExchangeCurrencyId): FutureApplicationResult[Option[ExchangeCurrency]] =
    dispatch(blockingDataHandler.getBy(exchangeCurrencyId))

  override def getBy(
      exchange: Exchange,
      market: Market,
      currency: Currency,
      currencyName: CurrencyName): FutureApplicationResult[Option[ExchangeCurrency]] =
    dispatch(blockingDataHandler.getBy(exchange, market, currency, currencyName))

  override def getBy(exchange: Exchange, market: Market): FutureApplicationResult[List[ExchangeCurrency]] =
    dispatch(blockingDataHandler.getBy(exchange, market))

  override def getMarkets(exchange: Exchange): FutureApplicationResult[List[Market]] =
    dispatch(blockingDataHandler.getMarkets(exchange))

  override def getAll(): FutureApplicationResult[List[ExchangeCurrency]] =
    dispatch(blockingDataHandler.getAll())
}
| AlexITC/crypto-coin-alerts | alerts-server/app/com/alexitc/coinalerts/data/async/ExchangeCurrencyFutureDataHandler.scala | Scala | gpl-3.0 | 1,647 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters the dataset and returns a sample of code snippets that meet specific criteria, giving a quick overview of the dataset's contents without deeper analysis.