code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package org.jetbrains.plugins.scala
package highlighter
import _root_.org.jetbrains.plugins.scala.lang.psi.api.toplevel.{ScEarlyDefinitions, ScModifierListOwner}
import com.intellij.extapi.psi.StubBasedPsiElementBase
import com.intellij.psi.util.PsiTreeUtil
import lang.psi.api.base.patterns._
import lang.psi.api.statements._
import com.intellij.psi._
import lang.psi.api.statements.params.{ScParameter, ScTypeParam}
import lang.psi.api.base.{ScConstructor, ScReferenceElement, ScStableCodeReferenceElement}
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScReferenceExpression
import lang.psi.api.toplevel.typedef.{ScClass, ScTypeDefinition, ScTrait, ScObject}
import lang.psi.impl.toplevel.synthetic.ScSyntheticClass
import com.intellij.lang.annotation.AnnotationHolder
import lang.psi.api.base.types.ScSimpleTypeElement
import lang.psi.api.toplevel.templates.ScTemplateBody
import lang.lexer.ScalaTokenTypes
import stubs.StubElement
import lang.psi.{ScalaPsiUtil, ScalaStubBasedElementImpl}
import lang.psi.api.expr._
import com.intellij.openapi.editor.colors.TextAttributesKey
import lang.psi.impl.ScalaPsiManager
import lang.psi.impl.ScalaPsiManager.ClassCategory
import lang.psi.types.{ScFunctionType, ScType}
import lang.psi.types.result.TypingContext
import lang.psi.api.toplevel.imports.ScImportExpr
import lang.refactoring.util.ScalaNamesUtil
import com.intellij.psi.codeStyle.CodeStyleSettingsManager
import lang.formatting.settings.ScalaCodeStyleSettings
import settings.ScalaProjectSettings
import org.jetbrains.plugins.scala.extensions.toPsiClassExt
/**
 * Annotator-based semantic highlighter: assigns text attributes
 * (class/trait/object, val/var, method, type-parameter, collection-kind
 * colours, etc.) to resolved references and identifiers via the
 * [[com.intellij.lang.annotation.AnnotationHolder]] API.
 *
 * User: Alexander Podkhalyuzin
 * Date: 17.07.2008
 */
object AnnotatorHighlighter {
  // Java collection roots: a reference whose type conforms to one of these is
  // highlighted as a Java collection.
  private val JAVA_COLLECTIONS_BASES = List("java.util.Map", "java.util.Collection")
  // Factory-method names that keep their normal method colour instead of a
  // collection colour (see simpleAnnotate below).
  private val SCALA_FACTORY_METHODS_NAMES = Set("make", "apply")
  // Canonical-text prefixes identifying the Scala collection hierarchies.
  private val SCALA_COLLECTION_MUTABLE_BASE = "_root_.scala.collection.mutable."
  private val SCALA_COLLECTION_IMMUTABLE_BASE = "_root_.scala.collection.immutable."
  private val SCALA_COLLECTION_GENERIC_BASE = "_root_.scala.collection.generic."
  private val SCALA_PREDEFINED_OBJECTS = Set("scala", "scala.Predef")
  // Canonical texts of Predef aliases that denote immutable collections.
  private val SCALA_PREDEF_IMMUTABLE_BASES = Set("_root_.scala.PredefMap", "_root_.scala.PredefSet", "scalaList",
    "scalaNil", "scalaStream", "scalaVector", "scalaSeq")

  /** Parent of `el` taken from the stub tree when a stub exists, otherwise the AST parent. */
  private def getParentStub(el: StubBasedPsiElement[_ <: StubElement[_ <: PsiElement]]): PsiElement = {
    val stub: StubElement[_ <: PsiElement] = el.getStub
    if (stub != null) {
      stub.getParentStub.getPsi
    } else el.getParent
  }

  /** Stub-aware parent lookup: stub parent for stub-based elements, context otherwise. */
  private def getParentByStub(x: PsiElement): PsiElement = {
    x match {
      case el: ScalaStubBasedElementImpl[_] => getParentStub(el)
      case _ => x.getContext
    }
  }

  /**
   * Highlights a reference element according to what it resolves to: classes,
   * traits, objects, type parameters/aliases, val/var bindings, parameters and
   * methods each get their own text attributes; references whose type is a
   * collection additionally get mutable/immutable/Java-collection colours.
   */
  def highlightReferenceElement(refElement: ScReferenceElement, holder: AnnotationHolder) {
    // Annotates the reference as mutable/immutable/Java collection based on
    // `resolvedType`, honouring the project-level collection-highlighting setting.
    def annotateCollectionByType(resolvedType: ScType) {
      // Operator-named types are skipped entirely.
      if (ScalaNamesUtil.isOperatorName(
        resolvedType.presentableText.substring(0, resolvedType.presentableText.prefixLength(_ != '.')))) return
      val scalaProjectSettings: ScalaProjectSettings = ScalaProjectSettings.getInstance(refElement.getProject)
      scalaProjectSettings.getCollectionTypeHighlightingLevel match {
        case ScalaProjectSettings.COLLECTION_TYPE_HIGHLIGHTING_NONE => return
        case ScalaProjectSettings.COLLECTION_TYPE_HIGHLIGHTING_NOT_QUALIFIED =>
          // In this mode only unqualified references are highlighted.
          refElement.qualifier match {
            case None =>
            case _ => return
          }
        case ScalaProjectSettings.COLLECTION_TYPE_HIGHLIGHTING_ALL =>
      }
      // True if `tp` conforms to any of the classes named in `qn`.
      def conformsByNames(tp: ScType, qn: List[String]): Boolean = {
        qn.exists(textName => {
          val cachedClass = ScalaPsiManager.instance(refElement.getProject).getCachedClass(textName, refElement.getResolveScope, ClassCategory.TYPE)
          if (cachedClass == null) false
          else tp.conforms(ScType.designator(cachedClass))
        })
      }
      // Attaches an info annotation with the given attributes — except on
      // factory-method names ("make"/"apply"), which keep their method colour.
      def simpleAnnotate(annotationText: String, annotationAttributes: TextAttributesKey) {
        if (SCALA_FACTORY_METHODS_NAMES.contains(refElement.nameId.getText)) {
          return
        }
        val annotation = holder.createInfoAnnotation(refElement.nameId, annotationText)
        annotation.setTextAttributes(annotationAttributes)
      }
      val text = resolvedType.canonicalText
      if (text == null) return
      if (text.startsWith(SCALA_COLLECTION_IMMUTABLE_BASE) || SCALA_PREDEF_IMMUTABLE_BASES.contains(text)) {
        simpleAnnotate(ScalaBundle.message("scala.immutable.collection"), DefaultHighlighter.IMMUTABLE_COLLECTION)
      } else if (text.startsWith(SCALA_COLLECTION_MUTABLE_BASE)) {
        simpleAnnotate(ScalaBundle.message("scala.mutable.collection"), DefaultHighlighter.MUTABLE_COLLECTION)
      } else if (conformsByNames(resolvedType, JAVA_COLLECTIONS_BASES)) {
        simpleAnnotate(ScalaBundle.message("java.collection"), DefaultHighlighter.JAVA_COLLECTION)
      } else if (resolvedType.canonicalText.startsWith(SCALA_COLLECTION_GENERIC_BASE) && refElement.isInstanceOf[ScReferenceExpression]) {
        // For references of collection.generic types, colour by the collection
        // kind of the function's return type instead.
        refElement.asInstanceOf[ScReferenceExpression].getType(TypingContext.empty).foreach(_ match {
          case f@ScFunctionType(returnType, params) => Option(returnType).foreach(a =>
            if (a.canonicalText.startsWith(SCALA_COLLECTION_MUTABLE_BASE)) {
              simpleAnnotate(ScalaBundle.message("scala.mutable.collection"), DefaultHighlighter.MUTABLE_COLLECTION)
            } else if (a.canonicalText.startsWith(SCALA_COLLECTION_IMMUTABLE_BASE)) {
              simpleAnnotate(ScalaBundle.message("scala.immutable.collection"), DefaultHighlighter.IMMUTABLE_COLLECTION)
            })
          case _ =>
        })
      }
    }
    def annotateCollection(resolvedClazz: PsiClass) {
      annotateCollectionByType(ScType.designator(resolvedClazz))
    }
    // References inside annotation expressions are not highlighted.
    val c = ScalaPsiUtil.getParentOfType(refElement, classOf[ScConstructor])
    c match {
      case null =>
      case c => if (c.getParent.isInstanceOf[ScAnnotationExpr]) return
    }
    val resolvedElement = refElement.resolve()
    // Collection colouring is skipped inside import expressions.
    if (PsiTreeUtil.getParentOfType(refElement, classOf[ScImportExpr]) == null && resolvedElement.isInstanceOf[PsiClass]) {
      annotateCollection(resolvedElement.asInstanceOf[PsiClass])
    }
    val annotation = holder.createInfoAnnotation(refElement.nameId, null)
    // Choose the text attributes by the kind of the resolve target.
    resolvedElement match {
      case c: PsiClass if ScType.baseTypesQualMap.contains(c.qualifiedName) => //this is td, it's important!
        annotation.setTextAttributes(DefaultHighlighter.PREDEF)
      case x: ScClass if x.getModifierList.has(ScalaTokenTypes.kABSTRACT) =>
        annotation.setTextAttributes(DefaultHighlighter.ABSTRACT_CLASS)
      case _: ScTypeParam =>
        annotation.setTextAttributes(DefaultHighlighter.TYPEPARAM)
      case x: ScTypeAlias =>
        // Aliases of collection types get the aliased collection's colour too.
        x.getOriginalElement match {
          case originalElement: ScTypeAliasDefinition =>
            originalElement.aliasedType.foreach(annotateCollectionByType(_))
          case _ =>
        }
        annotation.setTextAttributes(DefaultHighlighter.TYPE_ALIAS)
      case c: ScClass if referenceIsToCompanionObjectOfClass(refElement) =>
        annotation.setTextAttributes(DefaultHighlighter.OBJECT)
      case _: ScClass =>
        annotation.setTextAttributes(DefaultHighlighter.CLASS)
      case _: ScObject =>
        annotation.setTextAttributes(DefaultHighlighter.OBJECT)
      case _: ScTrait =>
        annotation.setTextAttributes(DefaultHighlighter.TRAIT)
      case x: PsiClass if x.isInterface =>
        annotation.setTextAttributes(DefaultHighlighter.TRAIT)
      case x: PsiClass if x.getModifierList != null && x.getModifierList.hasModifierProperty("abstract") =>
        annotation.setTextAttributes(DefaultHighlighter.ABSTRACT_CLASS)
      case _: PsiClass if refElement.isInstanceOf[ScStableCodeReferenceElement] =>
        annotation.setTextAttributes(DefaultHighlighter.CLASS)
      case _: PsiClass if refElement.isInstanceOf[ScReferenceExpression] =>
        annotation.setTextAttributes(DefaultHighlighter.OBJECT)
      case x: ScBindingPattern =>
        val parent = x.nameContext
        parent match {
          case r@(_: ScValue | _: ScVariable) =>
            // Members of `scala`/`scala.Predef` additionally get collection colouring.
            Option(x.containingClass).foreach(a => if (SCALA_PREDEFINED_OBJECTS.contains(a.qualifiedName)) {
              x.getType(TypingContext.empty).foreach(annotateCollectionByType(_))
            })
            // Template members and local bindings use different attribute sets.
            getParentByStub(parent) match {
              case _: ScTemplateBody | _: ScEarlyDefinitions =>
                r match {
                  case mod: ScModifierListOwner if mod.hasModifierProperty("lazy") =>
                    annotation.setTextAttributes(DefaultHighlighter.LAZY)
                  case _: ScValue => annotation.setTextAttributes(DefaultHighlighter.VALUES)
                  case _: ScVariable => annotation.setTextAttributes(DefaultHighlighter.VARIABLES)
                  case _ =>
                }
              case _ =>
                r match {
                  case mod: ScModifierListOwner if mod.hasModifierProperty("lazy") =>
                    annotation.setTextAttributes(DefaultHighlighter.LOCAL_LAZY)
                  case _: ScValue => annotation.setTextAttributes(DefaultHighlighter.LOCAL_VALUES)
                  case _: ScVariable => annotation.setTextAttributes(DefaultHighlighter.LOCAL_VARIABLES)
                  case _ =>
                }
            }
          case _: ScCaseClause =>
            annotation.setTextAttributes(DefaultHighlighter.PATTERN)
          case _ =>
        }
      case x: PsiField =>
        // Java fields: non-final is coloured as a variable, final as a value.
        if (!x.hasModifierProperty("final")) annotation.setTextAttributes(DefaultHighlighter.VARIABLES)
        else annotation.setTextAttributes(DefaultHighlighter.VALUES)
      case x: ScParameter =>
        annotation.setTextAttributes(DefaultHighlighter.PARAMETER)
      case x@(_: ScFunctionDefinition | _: ScFunctionDeclaration | _: ScMacroDefinition) =>
        // Factory methods ("make"/"apply") and constructors inherit the
        // collection colour of their enclosing class, if any.
        if (SCALA_FACTORY_METHODS_NAMES.contains(x.asInstanceOf[PsiMethod].getName) || x.asInstanceOf[PsiMethod].isConstructor) {
          val clazz = PsiTreeUtil.getParentOfType(x, classOf[PsiClass])
          if (clazz != null) {
            annotateCollection(clazz)
          }
        }
        // NOTE(review): x cannot be null here — it was just bound by the
        // pattern above — so this guard is redundant.
        if (x != null) {
          val fun = x.asInstanceOf[ScFunction]
          val clazz = fun.containingClass
          clazz match {
            case o: ScObject if o.objectSyntheticMembers.contains(fun) =>
              annotation.setTextAttributes(DefaultHighlighter.OBJECT_METHOD_CALL)
              return
            case _ =>
          }
          // Colour depends on whether the method is a class/trait member, an
          // object member, or a local definition.
          getParentByStub(x) match {
            case _: ScTemplateBody | _: ScEarlyDefinitions =>
              getParentByStub(getParentByStub(getParentByStub(x))) match {
                case _: ScClass | _: ScTrait =>
                  annotation.setTextAttributes(DefaultHighlighter.METHOD_CALL)
                case _: ScObject =>
                  annotation.setTextAttributes(DefaultHighlighter.OBJECT_METHOD_CALL)
                case _ =>
              }
            case _ =>
              annotation.setTextAttributes(DefaultHighlighter.LOCAL_METHOD_CALL)
          }
        }
      case x: PsiMethod =>
        if (x.isConstructor) {
          annotateCollection(PsiTreeUtil.getParentOfType(x, classOf[PsiClass]))
        }
        // Java methods: static maps to the object-method colour.
        if (x.getModifierList != null && x.getModifierList.hasModifierProperty("static")) {
          annotation.setTextAttributes(DefaultHighlighter.OBJECT_METHOD_CALL)
        } else {
          annotation.setTextAttributes(DefaultHighlighter.METHOD_CALL)
        }
      case x => //println("" + x + " " + x.getText)
    }
  }

  /**
   * Highlights a bare PSI element (not necessarily a reference): annotations,
   * parameters, case clauses, type aliases, and declaration-site identifiers.
   */
  def highlightElement(element: PsiElement, holder: AnnotationHolder) {
    element match {
      case x: ScAnnotation => visitAnnotation(x, holder)
      case x: ScParameter => visitParameter(x, holder)
      case x: ScCaseClause => visitCaseClause(x, holder)
      case x: ScTypeAlias => visitTypeAlias(x, holder)
      case _ if element.getNode.getElementType == ScalaTokenTypes.tIDENTIFIER =>
        // Declaration-site identifier: colour by the kind of its parent element.
        getParentByStub(element) match {
          case _: ScNameValuePair =>
            val annotation = holder.createInfoAnnotation(element, null)
            annotation.setTextAttributes(DefaultHighlighter.ANNOTATION_ATTRIBUTE)
          case _: ScTypeParam =>
            val annotation = holder.createInfoAnnotation(element, null)
            annotation.setTextAttributes(DefaultHighlighter.TYPEPARAM)
          case clazz: ScClass =>
            if (clazz.getModifierList.has(ScalaTokenTypes.kABSTRACT)) {
              val annotation = holder.createInfoAnnotation(clazz.nameId, null)
              annotation.setTextAttributes(DefaultHighlighter.ABSTRACT_CLASS)
            } else {
              val annotation = holder.createInfoAnnotation(clazz.nameId, null)
              annotation.setTextAttributes(DefaultHighlighter.CLASS)
            }
          case _: ScObject =>
            val annotation = holder.createInfoAnnotation(element, null)
            annotation.setTextAttributes(DefaultHighlighter.OBJECT)
          case _: ScTrait =>
            val annotation = holder.createInfoAnnotation(element, null)
            annotation.setTextAttributes(DefaultHighlighter.TRAIT)
          case x: ScBindingPattern =>
            // Walk up to the enclosing value/variable definition, if any.
            var parent: PsiElement = x
            while (parent != null && !(parent.isInstanceOf[ScValue] || parent.isInstanceOf[ScVariable]))
              parent = getParentByStub(parent)
            parent match {
              case r@(_: ScValue | _: ScVariable) =>
                // Template members and locals use different attribute sets.
                getParentByStub(parent) match {
                  case _: ScTemplateBody | _: ScEarlyDefinitions =>
                    val annotation = holder.createInfoAnnotation(element, null)
                    r match {
                      case mod: ScModifierListOwner if mod.hasModifierProperty("lazy") =>
                        annotation.setTextAttributes(DefaultHighlighter.LAZY)
                      case _: ScValue => annotation.setTextAttributes(DefaultHighlighter.VALUES)
                      case _: ScVariable => annotation.setTextAttributes(DefaultHighlighter.VARIABLES)
                      case _ =>
                    }
                  case _ =>
                    val annotation = holder.createInfoAnnotation(element, null)
                    r match {
                      case mod: ScModifierListOwner if mod.hasModifierProperty("lazy") =>
                        annotation.setTextAttributes(DefaultHighlighter.LOCAL_LAZY)
                      case _: ScValue => annotation.setTextAttributes(DefaultHighlighter.LOCAL_VALUES)
                      case _: ScVariable => annotation.setTextAttributes(DefaultHighlighter.LOCAL_VARIABLES)
                      case _ =>
                    }
                }
              case _ =>
            }
          case _: ScFunctionDefinition | _: ScFunctionDeclaration =>
            val annotation = holder.createInfoAnnotation(element, null)
            annotation.setTextAttributes(DefaultHighlighter.METHOD_DECLARATION)
          case _ =>
        }
      case _ =>
    }
  }

  /** Colours both the annotation marker (first child) and its type element. */
  private def visitAnnotation(annotation: ScAnnotation, holder: AnnotationHolder): Unit = {
    val annotation1 = holder.createInfoAnnotation(annotation.getFirstChild, null)
    annotation1.setTextAttributes(DefaultHighlighter.ANNOTATION)
    val element = annotation.annotationExpr.constr.typeElement
    val annotation2 = holder.createInfoAnnotation(element, null)
    annotation2.setTextAttributes(DefaultHighlighter.ANNOTATION)
  }

  /** Colours the name of a type alias declaration. */
  private def visitTypeAlias(typeAlias: ScTypeAlias, holder: AnnotationHolder): Unit = {
    val annotation = holder.createInfoAnnotation(typeAlias.nameId, null)
    annotation.setTextAttributes(DefaultHighlighter.TYPE_ALIAS)
  }

  /** Colours a class name, distinguishing abstract from concrete classes. */
  private def visitClass(clazz: ScClass, holder: AnnotationHolder): Unit = {
    if (clazz.getModifierList.has(ScalaTokenTypes.kABSTRACT)) {
      val annotation = holder.createInfoAnnotation(clazz.nameId, null)
      annotation.setTextAttributes(DefaultHighlighter.ABSTRACT_CLASS)
    } else {
      val annotation = holder.createInfoAnnotation(clazz.nameId, null)
      annotation.setTextAttributes(DefaultHighlighter.CLASS)
    }
  }

  /** Colours a parameter name. */
  private def visitParameter(param: ScParameter, holder: AnnotationHolder): Unit = {
    val annotation = holder.createInfoAnnotation(param.nameId, null)
    annotation.setTextAttributes(DefaultHighlighter.PARAMETER)
  }

  /** Colours every named (non-wildcard) binding introduced by the pattern. */
  private def visitPattern(pattern: ScPattern, holder: AnnotationHolder): Unit = {
    for (binding <- pattern.bindings if !binding.isWildcard) {
      val annotation = holder.createInfoAnnotation(binding.nameId, null)
      annotation.setTextAttributes(DefaultHighlighter.PATTERN)
    }
  }

  /** Colours the bindings of a case clause's pattern, if present. */
  private def visitCaseClause(clause: ScCaseClause, holder: AnnotationHolder): Unit = {
    clause.pattern match {
      case Some(x) => visitPattern(x, holder)
      case None =>
    }
  }

  private def referenceIsToCompanionObjectOfClass(r: ScReferenceElement): Boolean = {
    Option(r.getContext) exists {
      case _: ScMethodCall | _: ScReferenceExpression => true // These references to 'Foo' should be 'object' references: case class Foo(a: Int); Foo(1); Foo.apply(1).
      case _ => false
    }
  }
}
| consulo/consulo-scala | src/org/jetbrains/plugins/scala/highlighter/AnnotatorHighlighter.scala | Scala | apache-2.0 | 17,261 |
package com.sksamuel.elastic4s.analyzers
import org.scalatest.{Matchers, WordSpec}
/** Verifies the JSON produced by the keyword tokenizer builder. */
class KeywordTokenizerTest extends WordSpec with TokenizerDsl with Matchers {

  "KeywordTokenizer builder" should {
    "set buffer size" in {
      // Build a tokenizer with an explicit buffer size and compare its JSON form.
      val tokenizer = keywordTokenizer("testy").bufferSize(123)
      val expectedJson = """{"type":"keyword","bufferSize":123}"""
      tokenizer.json.string shouldBe expectedJson
    }
  }
}
| k4200/elastic4s | elastic4s-core-tests/src/test/scala/com/sksamuel/elastic4s/analyzers/KeywordTokenizerTest.scala | Scala | apache-2.0 | 352 |
package com.lynbrookrobotics.potassium
import squants.Time
import squants.time.{Milliseconds, Seconds}
import org.scalatest.FunSuite
/**
 * Exercises the mocked clock ticker: periodic scheduling, one-shot executions,
 * cancellation, reported dt values, and behaviour when tick boundaries do not
 * line up exactly with the scheduled times.
 */
class ClockMockingTest extends FunSuite {

  test("Single execution of periodic statement") {
    val (mockedClock, trigger) = ClockMocking.mockedClockTicker
    var wasRun = false
    mockedClock(Milliseconds(5)) { _ =>
      wasRun = true
    }
    trigger(Milliseconds(5))
    assert(wasRun)
  }

  test("Cancelled periodic statement does not run") {
    val (mockedClock, trigger) = ClockMocking.mockedClockTicker
    var wasRun = false
    val cancel = mockedClock(Milliseconds(5)) { _ =>
      wasRun = true
    }
    // Cancel before the first tick — the callback must never fire.
    cancel()
    trigger(Milliseconds(5))
    assert(!wasRun)
  }

  test("Single tick produces correct dt") {
    val (mockedClock, trigger) = ClockMocking.mockedClockTicker
    var observedDt: Option[Time] = None
    mockedClock(Milliseconds(5)) { dt =>
      observedDt = Some(dt)
    }
    trigger(Milliseconds(5))
    assert(observedDt.contains(Milliseconds(5)))
  }

  test("Single execution is correctly executed") {
    val (mockedClock, trigger) = ClockMocking.mockedClockTicker
    var fired = false
    mockedClock.singleExecution(Milliseconds(5)) {
      fired = true
    }
    trigger(Milliseconds(1))
    assert(!fired)
    trigger(Milliseconds(5))
    assert(fired)
  }

  test("Single execution can be canceled correctly") {
    val (mockedClock, trigger) = ClockMocking.mockedClockTicker
    var fired = false
    val cancelExecution = mockedClock.singleExecution(Milliseconds(5)) {
      fired = true
    }
    trigger(Milliseconds(1))
    assert(!fired)
    cancelExecution()
    trigger(Milliseconds(5))
    assert(!fired)
  }

  test("Periodic update does in fact update periodically") {
    val (mockedClock, trigger) = ClockMocking.mockedClockTicker
    var tickCount = 0
    mockedClock(Seconds(1)) { _ =>
      tickCount += 1
    }
    trigger(Seconds(1))
    assert(tickCount == 1)
    trigger(Seconds(1))
    assert(tickCount == 2)
  }

  test("Single execution still functions when clock update don't exactly coincide with scheduled time") {
    val (mockedClock, trigger) = ClockMocking.mockedClockTicker
    var fired = false
    mockedClock.singleExecution(Seconds(10)) {
      fired = true
    }
    trigger(Seconds(5))
    assert(!fired)
    trigger(Seconds(6))
    // At time 11 seconds, thunk scheduled for 10 seconds should be executed
    assert(fired)
  }

  test("Periodic update still functions when clock update don't exactly coincide with scheduled time") {
    val (mockedClock, trigger) = ClockMocking.mockedClockTicker
    var fired = false
    mockedClock(Seconds(1)) { _ =>
      fired = true
    }
    trigger(Seconds(0.5))
    assert(!fired)
    trigger(Seconds(0.6))
    // At time 1.1 seconds, thunk scheduled for 1 seconds should be executed
    assert(fired)

    fired = false
    trigger(Seconds(0.4))
    // At time 1.5 seconds, thunk scheduled for 2 seconds shouldn't be executed
    assert(!fired)
    trigger(Seconds(0.6))
    // At time 2.1 seconds, thunk scheduled for 2 seconds should be executed
    assert(fired)
  }
}
| Team846/potassium | core/shared/src/test/scala/com/lynbrookrobotics/potassium/ClockMockingTest.scala | Scala | mit | 3,337 |
/*
active-learning-scala: Active Learning library for Scala
Copyright (c) 2014 Davi Pereira dos Santos
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package al.strategies
import ml.Pattern
import ml.models.Model
/**
 * Margin-based uncertainty measure: the gap between the two highest class
 * probabilities. A small margin means the model is uncertain about the pattern.
 */
trait MarginMeasure {
  // Difference between the largest and second-largest values of `a`.
  // Assumes `a` has at least two elements (one per class) — TODO confirm with callers.
  protected def margin0(a: Array[Double]) = {
    val descending = a.sorted(Ordering[Double].reverse)
    descending(0) - descending(1)
  }

  // Margin of the model's predicted class distribution for pattern `pa`.
  protected def margin(current_model: Model)(pa: Pattern) = margin0(current_model.distribution(pa))
}
| active-learning/active-learning-scala | src/main/scala/al/strategies/MarginMeasure.scala | Scala | gpl-2.0 | 1,054 |
package beam.router.gtfs
import java.io._
import java.nio.file.{Files, Path, Paths}
import java.util.zip.ZipFile
import beam.router.gtfs.FareCalculator._
import beam.sim.config.BeamConfig
import com.conveyal.gtfs.GTFSFeed
import javax.inject.Inject
/**
 * Computes transit fares from GTFS fare data. On construction it loads (and
 * caches to disk) the fare rules of every GTFS feed found in the configured
 * R5 routing directory, keyed by agency id.
 */
class FareCalculator @Inject()(beamConfig: BeamConfig) {

  // GTFS feeds are read from the configured R5 routing directory.
  private val dataDirectory: Path = Paths.get(beamConfig.beam.routing.r5.directory)
  // Serialized rule cache so the GTFS zips are only parsed once per directory.
  private val cacheFile: File = dataDirectory.resolve("fares.dat").toFile

  /**
   * agencies is a Map of FareRule by agencyId
   */
  val agencies: Map[String, Vector[BeamFareRule]] = loadBeamFares

  /**
   * Reads the fare rules from the cache file when present; otherwise parses the
   * GTFS feeds and writes the result to the cache. Streams are closed even when
   * (de)serialization fails (the original leaked the ObjectInputStream).
   */
  private def loadBeamFares = {
    if (cacheFile.exists()) {
      val in = new ObjectInputStream(new FileInputStream(cacheFile))
      try {
        in.readObject().asInstanceOf[Map[String, Vector[BeamFareRule]]]
      } finally {
        in.close()
      }
    } else {
      val agencies = fromDirectory(dataDirectory)
      val stream = new ObjectOutputStream(new FileOutputStream(cacheFile))
      try {
        stream.writeObject(agencies)
      } finally {
        stream.close()
      }
      agencies
    }
  }

  /**
   * Use to initialize the calculator by loading GTFS feeds and populates agencies map.
   *
   * @param directory Path of the directory that contains gtfs files to load
   */
  private def fromDirectory(directory: Path): Map[String, Vector[BeamFareRule]] = {
    var agencies: Map[String, Vector[BeamFareRule]] = Map()

    /**
     * Checks whether its a valid gtfs feed and has fares data.
     */
    val hasFares: FileFilter = file => {
      var isFareExist = false
      if (file.getName.endsWith(".zip")) {
        try {
          val zip = new ZipFile(file)
          isFareExist = zip.getEntry("fare_attributes.txt") != null
          zip.close()
        } catch {
          // Broad catch is deliberate: an unreadable zip is treated as having no fares.
          case _: Throwable => // do nothing
        }
      }
      isFareExist
    }

    /**
     * Takes GTFSFeed and loads agencies map with fare and its rules.
     *
     * @param feed GTFSFeed
     */
    def loadFares(feed: GTFSFeed): Unit = {
      var fares: Map[String, BeamFare] = Map()
      var routes: Map[String, Vector[BeamFareRule]] = Map()
      var agencyRules: Vector[BeamFareRule] = Vector()
      val agencyId = feed.agency.values().stream().findFirst().get().agency_id

      feed.fares.forEach((id, fare) => {
        val attr = fare.fare_attribute
        // transfer_duration > 0 with transfers == 0 means "unlimited transfers
        // within the duration" — encoded here as Int.MaxValue.
        fares += (id -> BeamFare(
          attr.fare_id,
          attr.price,
          attr.currency_type,
          attr.payment_method,
          if (attr.transfer_duration > 0 && attr.transfers == 0) Int.MaxValue else attr.transfers,
          attr.transfer_duration
        ))

        fare.fare_rules.forEach(r => {
          // Assumes fare_attributes defines every fare_id referenced by fare_rules.
          val rule: BeamFareRule = BeamFareRule(
            fares.get(r.fare_id).head,
            agencyId,
            r.route_id,
            r.origin_id,
            r.destination_id,
            r.contains_id
          )
          // Rules without a route apply agency-wide; route rules are grouped by route.
          if (r.route_id == null) {
            agencyRules = agencyRules :+ rule
          } else {
            var rules = routes.getOrElse(r.route_id, Vector())
            rules = rules :+ rule
            routes += (r.route_id -> rules)
          }
        })
      })

      feed.agency.forEach((id, _) => {
        // Attach every route-level rule of this agency's routes to the agency entry.
        feed.routes
          .values()
          .stream()
          .filter(_.agency_id == id)
          .forEach(route => {
            agencyRules ++= routes.getOrElse(route.route_id, Vector())
          })
        agencies += id -> agencyRules
      })
    }

    if (Files.isDirectory(directory)) {
      directory.toFile
        .listFiles(hasFares)
        .map(_.getAbsolutePath)
        .foreach(p => {
          val feed = GTFSFeed.fromFile(p)
          loadFares(feed)
          feed.close()
        })
    }
    agencies
  }

  /**
   * Fare segments of `agencyId` applicable to the given route, origin and
   * destination. When `containsIds` is null/empty, the origin/destination pair
   * is used as the contained-zone set.
   */
  def getFareSegments(
    agencyId: String,
    routeId: String,
    fromId: String,
    toId: String,
    containsIds: Set[String] = null
  ): Vector[BeamFareSegment] = {
    val _containsIds =
      if (containsIds == null || containsIds.isEmpty) Set(fromId, toId) else containsIds
    // Split into simple rules (no contains_id) and zone-containment rule groups.
    val rules = agencies.getOrElse(agencyId, Vector()).partition(_.containsId == null)
    (rules._1.filter(baseRule(_, routeId, fromId, toId)) ++
      rules._2.groupBy(_.fare).view.filter(containsRule(_, routeId, _containsIds)).map(_._2.last))
      .map(f => BeamFareSegment(f.fare, agencyId))
  }

  /** Total price of all fare segments matching the given trip parameters. */
  def calcFare(
    agencyId: String,
    routeId: String,
    fromId: String,
    toId: String,
    containsIds: Set[String] = null
  ): Double = {
    sumFares(getFareSegments(agencyId, routeId, fromId, toId, containsIds))
  }
}
/**
 * Companion: fare/rule data model plus the pure fare-combination logic
 * (matching rules and applying GTFS transfer semantics across segments).
 */
object FareCalculator {

  /**
   * A FareAttribute (defined in fare_attributes.txt) defines a fare class. A FareAttribute has a price,
   * currency and whether it must be purchased on board the service or before boarding.
   * It also defines the number of transfers it can be used for, and the duration it is valid.
   *
   * @param fareId Contains an ID that uniquely identifies a fare class. The fare_id is dataset unique. Its a required attribute.
   * @param price Contains the fare price, in the unit specified by currency_type. Its a required attribute.
   * @param currencyType Defines the currency used to pay the fare. Its a required attribute.
   * @param paymentMethod The payment_method field indicates when the fare must be paid. Its a required attribute. Valid values for this field are:
   *                      0: Fare is paid on board.
   *                      1: Fare must be paid before boarding.
   * @param transfers Specifies the number of transfers permitted on this fare. Its a required attribute. Valid values for this field are:
   *                  0: No transfers permitted on this fare.
   *                  1: Passenger may transfer once.
   *                  2: Passenger may transfer twice.
   *                  Int.MaxValue/(empty in gtfs): If this field is empty, unlimited transfers are permitted.
   * @param transferDuration Specifies the length of time in seconds before a transfer expires.
   */
  case class BeamFare(
    fareId: String,
    price: Double,
    currencyType: String,
    paymentMethod: Int,
    transfers: Int,
    transferDuration: Int
  )

  /**
   * The FareRule lets you specify how fares in fare_attributes.txt apply to an itinerary.
   * Most fare structures use some combination of the following rules:
   * Fare depends on origin or destination stations.
   * Fare depends on which zones the itinerary passes through.
   * Fare depends on which route the itinerary uses.
   *
   * @param fare Contains a fare object from fare_attributes.
   * @param agencyId Defines an agency for the specified route. This value is referenced from the agency.txt file.
   * @param routeId Associates the fare ID with a route. Route IDs are referenced from the routes.txt file.
   * @param originId Associates the fare ID with an origin zone ID (referenced from the stops.txt file).
   * @param destinationId Associates the fare ID with a destination zone ID (referenced from the stops.txt file).
   * @param containsId Associates the fare ID with a zone ID (referenced from the stops.txt file.
   *                   The fare ID is then associated with itineraries that pass through every contains_id zone.
   */
  case class BeamFareRule(
    fare: BeamFare,
    agencyId: String,
    routeId: String,
    originId: String,
    destinationId: String,
    containsId: String
  )

  /**
   * @param fare Contains a fare object from fare_attributes.
   * @param agencyId Defines an agency for the specified route. This value is referenced from the agency.txt file.
   * @param patternIndex Represents the pattern index from TransitJournyID to locate SegmentPattern from a specific TransitSegment
   * @param segmentDuration Defines the leg duration from start of itinerary to end of segment leg
   */
  case class BeamFareSegment(
    fare: BeamFare,
    agencyId: String,
    patternIndex: Int,
    segmentDuration: Long
  )

  // Convenience constructors: build a segment with default pattern/duration, or
  // copy an existing segment with a new pattern index and/or duration.
  object BeamFareSegment {

    def apply(fare: BeamFare, agencyId: String): BeamFareSegment =
      new BeamFareSegment(fare, agencyId, 0, 0)

    def apply(
      fareSegment: BeamFareSegment,
      patternIndex: Int,
      segmentDuration: Long
    ): BeamFareSegment =
      new BeamFareSegment(fareSegment.fare, fareSegment.agencyId, patternIndex, segmentDuration)

    def apply(fareSegment: BeamFareSegment, segmentDuration: Long): BeamFareSegment =
      new BeamFareSegment(
        fareSegment.fare,
        fareSegment.agencyId,
        fareSegment.patternIndex,
        segmentDuration
      )
  }

  // lazy val containRules = agencies.map(a => a._1 -> a._2.filter(r => r.containsId != null).groupBy(_.fare))

  // Fare depends on which route the itinerary uses AND Fare depends on origin or destination stations
  // BUT Fare depends on which zones the itinerary passes through, is group rule and apply separately
  // A null field in a rule acts as a wildcard and matches anything.
  private def baseRule(r: BeamFareRule, routeId: String, fromId: String, toId: String): Boolean =
    (r.routeId == routeId || r.routeId == null) &&
    (r.originId == fromId || r.originId == null) &&
    (r.destinationId == toId || r.destinationId == null)

  //Fare depends on which zones the itinerary passes through
  // Matches when every rule of the fare is for this route (or route-agnostic)
  // and the rules' contains_id zones are exactly the itinerary's zone set.
  private def containsRule(
    t: (BeamFare, Vector[BeamFareRule]),
    routeId: String,
    containsIds: Set[String]
  ) =
    t._2.view.map(_.routeId).distinct.forall(id => id == routeId || id == null) &&
    t._2.view.map(_.containsId).toSet.equals(containsIds)

  /**
   * Take an itinerary specific collection of @BeamFareSegment and apply transfer rules
   * across segment fares based of GTFS specs (https://developers.google.com/transit/gtfs/reference/#fare_attributestxt)
   *
   * @param fareSegments collection of all @BeamFareSegment for a specific itinerary
   * @return collection of @BeamFareSegment for an itinerary after applying transfer rules
   */
  def filterFaresOnTransfers(
    fareSegments: IndexedSeq[BeamFareSegment]
  ): IndexedSeq[BeamFareSegment] = {

    /**
     * Apply filter on fare segments, agency by agency in order
     *
     * @param fareSegments collection of all @BeamFareSegment for a specific itinerary
     * @return a resultant collection of @BeamFareSegment
     */
    def groupFaresByAgencyAndProceed(
      fareSegments: IndexedSeq[BeamFareSegment]
    ): IndexedSeq[BeamFareSegment] = {
      if (fareSegments.isEmpty)
        Vector()
      else {
        // span keeps consecutive segments of the same agency together.
        val agencyRules = fareSegments.span(_.agencyId == fareSegments.head.agencyId)
        // for first agency fare/rules start filter iteration
        // and for rest of fares continue grouping and processing
        iterateTransfers(agencyRules._1) ++ groupFaresByAgencyAndProceed(agencyRules._2)
      }
    }

    /**
     * A helper method to iterate different parts of fare segment collection
     *
     * @param fareSegments collection of @BeamFareSegment to apply transfer rule/filter
     * @param trans transfer number under processing
     * @return processed collection of @BeamFareSegment
     */
    def iterateTransfers(
      fareSegments: IndexedSeq[BeamFareSegment],
      trans: Int = 0
    ): IndexedSeq[BeamFareSegment] = {

      /**
       * Generate a next transfer number /option
       *
       * 0 - No transfers permitted on this fare.
       * 1 - Passenger may transfer once.
       * 2 - Passenger may transfer twice.
       * (empty) - If this field is empty, unlimited transfers are permitted
       * Int.MaxValue is used to represent empty
       *
       * @return next transfer option
       */
      def next: Int =
        if (trans == Int.MaxValue) 0
        else
          trans match {
            case 0 | 1 => trans + 1
            case 2 => Int.MaxValue
            case _ => 0
          }

      /**
       * Apply transfer rules on fare segments
       * @param lhs takes fare segments
       * @return
       */
      def applyTransferRules(lhs: IndexedSeq[BeamFareSegment]): IndexedSeq[BeamFareSegment] = {
        // when permitted transfers are 0, then return as is
        // otherwise take the first segment and reiterate for the rest
        // having higher segment duration from permitted transfer duration
        // or transfer limit exceeded
        trans match {
          case 0 => lhs
          case _ =>
            Vector(lhs.head) ++ iterateTransfers(
              lhs.view.tail.zipWithIndex
                .filter(
                  fst => fst._1.segmentDuration > lhs.head.fare.transferDuration || fst._2 > trans
                )
                .map(s => BeamFareSegment(s._1, s._1.segmentDuration - lhs.head.segmentDuration))
                .toVector
            )
        }
      }

      // separate fare segments with current transfer number as lhs then apply transfer rules
      // and reiterate for rest of the fare segments (rhs) with next iteration number
      fareSegments.span(_.fare.transfers == trans) match {
        case (IndexedSeq(), IndexedSeq()) => Vector()
        case (IndexedSeq(), rhs) => iterateTransfers(rhs, next)
        case (lhs, IndexedSeq()) => applyTransferRules(lhs)
        case (lhs, rhs) => applyTransferRules(lhs) ++ iterateTransfers(rhs, next)
      }
    }

    groupFaresByAgencyAndProceed(fareSegments)
  }

  /** Total price of the segments after transfer rules have been applied. */
  def sumFares(rules: Vector[BeamFareSegment]): Double = {
    filterFaresOnTransfers(rules).view.map(_.fare.price).sum
  }
}
| colinsheppard/beam | src/main/scala/beam/router/gtfs/FareCalculator.scala | Scala | gpl-3.0 | 13,538 |
package scala.macros
/** Exposes the members of [[inputs.Api]] as top-level members of the `scala.macros.inputs` package. */
package object inputs extends inputs.Api
| xeno-by/scalamacros | core/src/main/scala/scala/macros/inputs/package.scala | Scala | bsd-3-clause | 63 |
package io.aos.ebnf.spl.driver.es
import org.elasticsearch.common.xcontent.XContentFactory
import org.elasticsearch.search.builder.SearchSourceBuilder
import io.aos.ebnf.spl.ast.SplQuery
import io.aos.ebnf.spl.driver.GeneratedQuery
/**
 * Implementation of Elastic Search query object: a [[GeneratedQuery]] backed
 * by an Elasticsearch `SearchSourceBuilder`.
 */
trait ElasticSearchQuery extends GeneratedQuery {
  /** The Elasticsearch search source (query DSL) for this query. */
  def searchSourceBuilder: SearchSourceBuilder

  /** Name of the index the query targets. */
  def indexName: String

  /** Caps the number of hits returned by the search (delegates to the search source's size). */
  def setResultLimit(limit: Int): Unit

  /** Fields requested with the results. */
  def fields: Seq[String]
}
object ElasticSearchQuery {
  // Sentinel terms used in query results.
  val OtherTerm = "__other__"
  val NullTerm = "__null__"
  val TotalsTerm = "__totals__"
  val ConversionsTerm = "__conversions__"

  /** Builds an [[ElasticSearchQuery]] backed by the given search source. */
  def apply(
    searchSource: SearchSourceBuilder,
    index: String,
    fields: Seq[String],
    ast: SplQuery,
    returnRawResults: Boolean = false): ElasticSearchQuery = {
    // Rebind parameters that would otherwise be shadowed by the trait members
    // of the anonymous implementation below.
    val fieldSeq = fields
    val rawResults = returnRawResults
    new ElasticSearchQuery {
      override val searchSourceBuilder: SearchSourceBuilder = searchSource
      override val indexName = index
      override val fields = fieldSeq
      override val returnRawResults = rawResults
      override def setResultLimit(limit: Int): Unit = searchSource.size(limit)
      override def queryAst: SplQuery = ast
      // Serializes the search source to its JSON string form.
      override def asString(): String = {
        val jsonFactory = XContentFactory.jsonBuilder()
        searchSource.toXContent(jsonFactory, null).string()
      }
    }
  }
}
| echalkpad/t4f-data | parser/ebnf/src/main/scala/io/aos/ebnf/spl/driver/es/ElasticSearchQuery.scala | Scala | apache-2.0 | 1,559 |
package com.codechef.problems.holes
object Main {

  /**
   * Number of enclosed "holes" in each uppercase letter (CodeChef problem HOLES01):
   * 'B' has two, 'A', 'D', 'O', 'P', 'Q', 'R' have one, all other letters have none.
   */
  private val Holes: Map[Char, Int] = Map(
    'A' -> 1, 'B' -> 2, 'C' -> 0, 'D' -> 1, 'E' -> 0, 'F' -> 0, 'G' -> 0, 'H' -> 0, 'I' -> 0,
    'J' -> 0, 'K' -> 0, 'L' -> 0, 'M' -> 0, 'N' -> 0, 'O' -> 1, 'P' -> 1, 'Q' -> 1, 'R' -> 1,
    'S' -> 0, 'T' -> 0, 'U' -> 0, 'V' -> 0, 'W' -> 0, 'X' -> 0, 'Y' -> 0, 'Z' -> 0)

  /**
   * Reads the number of test cases from stdin, then for each case reads one
   * uppercase word and prints the total number of holes in its letters.
   */
  def main(args: Array[String]): Unit = {
    import java.io._
    val reader = new BufferedReader(new InputStreamReader(System.in))
    val tests = reader.readLine.toInt
    for (_ <- 1 to tests) {
      // Sum the hole count of every character; iterator avoids an intermediate collection.
      println(reader.readLine.iterator.map(Holes).sum)
    }
  }
}
package com.thecookiezen.business.containers.control
import java.time.LocalDateTime
import java.util.UUID
import akka.actor.{FSM, Props, Terminated}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.pattern.ask
import akka.routing.{ActorRefRoutee, RoundRobinRoutingLogic, Router}
import akka.stream.ActorMaterializer
import akka.util
import akka.util.Timeout
import com.thecookiezen.business.containers.control.Cluster._
import com.thecookiezen.business.containers.control.Deployment.DeployJob
import com.thecookiezen.business.containers.control.Host.ListContainers
import com.thecookiezen.integration.docker.DockerHost
import scala.concurrent.Future
import scala.concurrent.duration._
/**
 * FSM actor managing a cluster of container hosts. Each registered host gets a
 * child [[DockerHost]] actor which is added as a round-robin routee; deployment
 * jobs are distributed among the routees.
 *
 * @param name          human-readable cluster name, used for logging
 * @param maxContainers soft limit on container count (currently unused)
 */
class Cluster(name: String, maxContainers: Int = 50) extends FSM[ClusterState, Data] {

  import context.dispatcher

  implicit val timeout: util.Timeout = Timeout(5 seconds)
  implicit val materializer = ActorMaterializer()

  val http = Http(context.system)

  // FIX: akka's Router is immutable -- addRoutee/removeRoutee return an updated
  // copy, so the field must be a var that is reassigned; the original discarded
  // the result and never actually registered any routee.
  var router = Router(RoundRobinRoutingLogic())

  startWith(Stopped, Uninitialized)

  when(Stopped) {
    // FIX: a bare `StartCluster` pattern is a stable-identifier match against the
    // companion object and never fires for `StartCluster()` instances; accept both.
    case Event(StartCluster() | StartCluster, Uninitialized) =>
      goto(Empty) using HostsList(Seq.empty)
  }

  when(Empty) {
    case Event(host: AddNewHost, HostsList(_)) =>
      goto(NewHostInitialization) using HostsList(hosts = Seq(
        HostIdentity(apiVersion = host.apiVersion, daemonUrl = host.daemonUrl)))
  }

  when(NewHostInitialization) {
    case Event(NewHostInitialized(), _) => goto(Active)
  }

  when(Active) {
    case Event(host: AddNewHost, h @ HostsList(hosts)) =>
      goto(NewHostInitialization) using h.copy(hosts = hosts :+ HostIdentity(
        apiVersion = host.apiVersion, daemonUrl = host.daemonUrl))
    // FIX: match DeployJob() instances as well as the companion object (the
    // original stable-identifier pattern only matched the companion object).
    case Event(deployment @ (DeployJob() | DeployJob), HostsList(hosts)) if hosts.nonEmpty =>
      router.route(deployment, self)
      stay
  }

  onTransition {
    case Stopped -> Empty =>
      log.info("Cluster {} started...", name)
    case _ -> NewHostInitialization =>
      log.info("Cluster {} creating new actor", name)
      // Pick the first registered host that does not yet have a child actor.
      val newHost = nextStateData match {
        case HostsList(hosts) => hosts.find(host => context.child(host.id).isEmpty)
        case _ => None
      }
      createNewHostAndRegisterAsRoute(newHost)
      self ! NewHostInitialized()
    case NewHostInitialization -> Active =>
      log.info("Cluster {} get new host, hosts: {}", name, nextStateData.asInstanceOf[HostsList].hosts)
  }

  /** Spawns a child actor for the host (if any), watches it, and registers it as a routee. */
  private def createNewHostAndRegisterAsRoute(newHost: Option[HostIdentity]): Unit = {
    newHost match {
      case Some(host) =>
        val child = context.actorOf(getProperties(host, (req: HttpRequest) => http.singleRequest(req)), host.id)
        context.watch(child)
        router = router.addRoutee(ActorRefRoutee(child))
      case None =>
        // FIX: the original match was non-exhaustive and threw a MatchError here.
        log.warning("Cluster {}: no uninitialized host found to register", name)
    }
  }

  /** Props for a [[DockerHost]] actor talking to the given docker daemon. */
  private def getProperties(host: HostIdentity, http: HttpRequest => Future[HttpResponse]): Props = {
    Props(classOf[DockerHost], host.apiVersion, host.daemonUrl, http, dispatcher, materializer)
  }

  whenUnhandled {
    case Event(Terminated(child), h @ HostsList(hosts)) =>
      log.info("Cluster {}: child {} was terminated", name, child.path.name)
      // FIX: reassign -- removeRoutee returns a new Router.
      router = router.removeRoutee(child)
      stay using HostsList(hosts.filterNot(host => host.id == child.path.name))
    case Event(SizeOfCluster(), HostsList(hosts)) => stay replying hosts.size
    case Event(label @ ListContainers(_), _) => stay replying Future.sequence(context.children.map(child => child ? label.copy()))
    case Event(ListHosts(), HostsList(hosts)) => stay replying hosts.map(_.id)
    case Event(GetHost(id), _) => stay replying context.child(id).map(actor => actor.path.name).getOrElse("There is no actor with specified id.")
    case Event(e, s) =>
      log.warning("received unhandled request {} in state {}/{}", e, stateName, s)
      stay
  }

  initialize()
}
object Cluster {

  /** FSM states of the [[Cluster]] actor. */
  sealed trait ClusterState
  case object Empty extends ClusterState
  case object Stopped extends ClusterState
  case object Active extends ClusterState
  case object NewHostInitialization extends ClusterState

  /** FSM state data carried between transitions. */
  sealed trait Data
  case object Uninitialized extends Data
  // List of hosts currently registered with the cluster.
  case class HostsList(hosts: Seq[HostIdentity]) extends Data

  // Protocol messages understood by the cluster actor.
  final case class StartCluster()
  final case class SizeOfCluster()
  final case class ListHosts()
  final case class GetHost(id: String)
  final case class AddNewHost(apiVersion: String, daemonUrl: String, created: LocalDateTime = LocalDateTime.now())
  // Identity of a registered host; `id` doubles as the child actor's name.
  final case class HostIdentity(id: String = UUID.randomUUID().toString, apiVersion: String, daemonUrl: String)
  // Internal signal: a newly added host has been spawned and registered.
  final case class NewHostInitialized()
}
/** Messages related to container deployments. */
object Deployment {
  /** Request to deploy a job on one of the cluster's hosts. */
  final case class DeployJob()
}
| nikom1337/ClusterActor | src/main/scala/com/thecookiezen/business/containers/control/Cluster.scala | Scala | apache-2.0 | 4,680 |
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Outworkers Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.thrift.suites
import com.websudos.phantom.dsl._
import com.websudos.phantom.finagle._
import com.websudos.phantom.tables.ThriftDatabase
import com.websudos.util.testing._
import org.scalatest.FlatSpec
import org.scalatest.time.SpanSugar._
/**
 * Integration tests for a Cassandra `map` column holding Thrift values.
 *
 * Verifies that single-entry (`put`) and multi-entry (`putAll`) updates are
 * applied correctly, each exercised through both Scala futures (`future()`/`one`)
 * and Twitter futures (`execute()`/`get`).
 */
class ThriftMapColumnTest extends FlatSpec with ThriftTestSuite {

  // Create the table (blocking, up to 5s) before any test runs.
  override def beforeAll(): Unit = {
    super.beforeAll()
    ThriftDatabase.thriftColumnTable.create.ifNotExists().future().block(5.seconds)
  }

  it should "put an item to a thrift map column" in {
    val id = gen[UUID]
    val sample = gen[ThriftTest]
    val sample2 = gen[ThriftTest]
    val map = Map(gen[String] -> sample)
    val toAdd = gen[String] -> sample2
    // After the update the column should hold the initial entry plus the added one.
    val expected = map + toAdd
    val insert = ThriftDatabase.thriftColumnTable.insert
      .value(_.id, id)
      .value(_.name, sample.name)
      .value(_.ref, sample)
      .value(_.thriftSet, Set(sample))
      .value(_.thriftList, List(sample))
      .value(_.thriftMap, map)
      .future()
    // Insert, apply the `put` update, then read the map column back.
    val operation = for {
      insertDone <- insert
      update <- ThriftDatabase.thriftColumnTable.update.where(_.id eqs id).modify(_.thriftMap put toAdd).future()
      select <- ThriftDatabase.thriftColumnTable.select(_.thriftMap).where(_.id eqs id).one
    } yield {
      select
    }
    operation.successful {
      items => {
        items shouldBe defined
        items.value shouldBe expected
      }
    }
  }

  // Same scenario as above, driven through the Twitter-future API.
  it should "put an item to a thrift map column with Twitter Futures" in {
    val id = gen[UUID]
    val sample = gen[ThriftTest]
    val sample2 = gen[ThriftTest]
    val map = Map(gen[String] -> sample)
    val toAdd = gen[String] -> sample2
    val expected = map + toAdd
    val insert = ThriftDatabase.thriftColumnTable.insert
      .value(_.id, id)
      .value(_.name, sample.name)
      .value(_.ref, sample)
      .value(_.thriftSet, Set(sample))
      .value(_.thriftList, List(sample))
      .value(_.thriftMap, map)
      .execute()
    val operation = for {
      insertDone <- insert
      update <- ThriftDatabase.thriftColumnTable.update.where(_.id eqs id).modify(_.thriftMap put toAdd).execute()
      select <- ThriftDatabase.thriftColumnTable.select(_.thriftMap).where(_.id eqs id).get
    } yield select
    operation.successful {
      items => {
        items shouldBe defined
        items.value shouldBe expected
      }
    }
  }

  it should "put several items to a thrift map column" in {
    val id = gen[UUID]
    val sample = gen[ThriftTest]
    val sample2 = gen[ThriftTest]
    val sample3 = gen[ThriftTest]
    val map = Map(gen[String] -> sample)
    // `putAll` should merge both new entries into the existing map.
    val toAdd = Map(gen[String] -> sample2, gen[String] -> sample3)
    val expected = map ++ toAdd
    val insert = ThriftDatabase.thriftColumnTable.insert
      .value(_.id, id)
      .value(_.name, sample.name)
      .value(_.ref, sample)
      .value(_.thriftSet, Set(sample))
      .value(_.thriftList, List(sample))
      .value(_.thriftMap, map)
      .future()
    val operation = for {
      insertDone <- insert
      update <- ThriftDatabase.thriftColumnTable.update.where(_.id eqs id).modify(_.thriftMap putAll toAdd).future()
      select <- ThriftDatabase.thriftColumnTable.select(_.thriftMap).where(_.id eqs id).one
    } yield {
      select
    }
    operation.successful {
      items => {
        items shouldBe defined
        items.value shouldBe expected
      }
    }
  }

  // Same `putAll` scenario, driven through the Twitter-future API.
  it should "put several items to a thrift map column with Twitter Futures" in {
    val id = gen[UUID]
    val sample = gen[ThriftTest]
    val sample2 = gen[ThriftTest]
    val sample3 = gen[ThriftTest]
    val map = Map(gen[String] -> sample)
    val toAdd = Map(gen[String] -> sample2, gen[String] -> sample3)
    val expected = map ++ toAdd
    val insert = ThriftDatabase.thriftColumnTable.insert
      .value(_.id, id)
      .value(_.name, sample.name)
      .value(_.ref, sample)
      .value(_.thriftSet, Set(sample))
      .value(_.thriftList, List(sample))
      .value(_.thriftMap, map)
      .execute()
    val operation = for {
      insertDone <- insert
      update <- ThriftDatabase.thriftColumnTable.update.where(_.id eqs id).modify(_.thriftMap putAll toAdd).execute()
      select <- ThriftDatabase.thriftColumnTable.select(_.thriftMap).where(_.id eqs id).get
    } yield select
    operation.successful {
      items => {
        items shouldBe defined
        items.value shouldBe expected
      }
    }
  }
}
| levinson/phantom | phantom-thrift/src/test/scala/com/websudos/phantom/thrift/suites/ThriftMapColumnTest.scala | Scala | bsd-2-clause | 5,970 |
package org.scalameter
package collections
import collection._
import Key._
/**
 * Regression benchmarks comparing the cost of core Seq operations
 * (apply, update, append, prepend, sorted) across Array, ArrayBuffer,
 * Vector, Range and List.
 *
 * NOTE: loops are deliberately written with `var`/`while` so the benchmark
 * measures the collection operation itself rather than closure/iterator
 * overhead; do not "modernize" them.
 */
class SeqBenchmarks extends PerformanceTest.Regression with Collections {

  // Results are persisted between runs so regressions can be detected.
  def persistor = new persistence.SerializationPersistor()

  /* tests */

  performance of "Seq" in {

    measure method "apply" config (
      exec.benchRuns -> 36,
      exec.independentSamples -> 9,
      reports.regression.significance -> 1e-13
    ) in {
      val from = 100000
      val to = 1000000
      val by = 200000

      // Written from each benchmark so the JIT cannot dead-code-eliminate the loop.
      var sideeffect = 0

      using(arrays(from, to, by)) curve("Array") in { xs =>
        var i = 0
        var sum = 0
        val len = xs.length
        val until = len * 3
        while (i < until) {
          sum += xs.apply(i % len)
          i += 1
        }
        sideeffect = sum
      }

      using(arraybuffers(from, to, by)) curve("ArrayBuffer") in { xs =>
        var i = 0
        var sum = 0
        val len = xs.length
        val until = len * 3
        while (i < until) {
          sum += xs.apply(i % len)
          i += 1
        }
        sideeffect = sum
      }

      using(vectors(from, to, by)) curve("Vector") in { xs =>
        var i = 0
        var sum = 0
        val len = xs.length
        val until = len * 3
        while (i < until) {
          sum += xs.apply(i % len)
          i += 1
        }
        sideeffect = sum
      }

      using(ranges(from, to, by)) curve("Range") in { xs =>
        var i = 0
        var sum = 0
        val len = xs.length
        val until = len * 3
        while (i < until) {
          sum += xs.apply(i % len)
          i += 1
        }
        sideeffect = sum
      }
    }

    // Only the mutable sequences support in-place update.
    measure method "update" config (
      exec.benchRuns -> 36,
      exec.independentSamples -> 9,
      reports.regression.significance -> 1e-13
    ) in {
      val from = 100000
      val to = 1000000
      val by = 200000

      var sideeffect = 0

      using(arrays(from, to, by)) curve("Array") in { xs =>
        var i = 0
        var sum = 0
        val len = xs.length
        val until = len * 3
        while (i < until) {
          xs.update(i % len, i)
          i += 1
        }
        sideeffect = sum
      }

      using(arraybuffers(from, to, by)) curve("ArrayBuffer") in { xs =>
        var i = 0
        var sum = 0
        val len = xs.length
        val until = len * 3
        while (i < until) {
          xs.update(i % len, i)
          i += 1
        }
        sideeffect = sum
      }
    }

    // Builds a Vector of the given size via repeated `:+`.
    measure method "append" config (
      exec.benchRuns -> 36,
      exec.independentSamples -> 9,
      reports.regression.significance -> 1e-13
    ) in {
      val from = 50000
      val to = 500000
      val by = 100000

      using(sizes(from, to, by)) curve("Vector") config (
        exec.benchRuns -> 32,
        exec.independentSamples -> 4,
        exec.outliers.suspectPercent -> 66,
        exec.outliers.covMultiplier -> 1.4,
        exec.noise.magnitude -> 1.0
      ) in { len =>
        var i = 0
        var vector = Vector.empty[Int]
        while (i < len) {
          vector = vector :+ i
          i += 1
        }
      }
    }

    // Builds a Vector/List of the given size via repeated `+:` / `::`.
    measure method "prepend" config (
      exec.minWarmupRuns -> 20,
      exec.benchRuns -> 36,
      exec.independentSamples -> 9,
      reports.regression.significance -> 1e-13
    ) in {
      val from = 50000
      val to = 1000000
      val by = 200000

      using(sizes(from, to, by)) curve("Vector") config (
        exec.benchRuns -> 32,
        exec.independentSamples -> 4,
        exec.outliers.suspectPercent -> 66,
        exec.outliers.covMultiplier -> 1.4,
        exec.noise.magnitude -> 1.0
      ) in { len =>
        var i = 0
        var vector = Vector.empty[Int]
        while (i < len) {
          vector = i +: vector
          i += 1
        }
      }

      using(sizes(from, to, by)) curve("List") config (
        exec.independentSamples -> 6,
        exec.outliers.suspectPercent -> 60,
        exec.outliers.covMultiplier -> 1.4,
        exec.noise.magnitude -> 1.0
      ) in { len =>
        var i = 0
        var list = List.empty[Int]
        while (i < len) {
          list = i :: list
          i += 1
        }
      }
    }

    measure method "sorted" config (
      exec.minWarmupRuns -> 20,
      exec.benchRuns -> 36,
      exec.independentSamples -> 9,
      reports.regression.significance -> 1e-13
    ) in {
      val from = 400000
      val to = 1000000
      val by = 200000

      using(arrays(from, to, by)) curve("Array") in {
        _.sorted
      }

      using(vectors(from, to, by)) curve("Vector") in {
        _.sorted
      }

      using(lists(from, to, by)) curve("List") config (
        exec.benchRuns -> 32,
        exec.independentSamples -> 4,
        exec.outliers.suspectPercent -> 50,
        exec.outliers.covMultiplier -> 1.6,
        exec.noise.magnitude -> 1.0
      ) in {
        _.sorted
      }
    }
  }
}
| lossyrob/scalpel | src/test/scala/org/scalameter/collections/SeqBenchmarks.scala | Scala | bsd-3-clause | 4,933 |
package sbthadoop
import java.io.File
import sbt.PathFinder
object PathFinderUtils {
  /**
   * Collects all paths under `dir` using sbt's recursive `***` selector.
   *
   * NOTE(review): presumably this returns `dir` itself plus all of its
   * descendants (files and directories) -- confirm against the sbt 0.13
   * `PathFinder` documentation.
   */
  def getAllPaths(dir: File): Seq[File] = {
    PathFinder(dir).***.get
  }
}
| Tapad/sbt-hadoop-oss | src/main/scala-sbt-0.13/sbthadoop/PathFinderUtils.scala | Scala | bsd-3-clause | 166 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.scalatest.GivenWhenThen
import org.apache.spark.sql.catalyst.expressions.{DynamicPruningExpression, Expression}
import org.apache.spark.sql.catalyst.expressions.CodegenObjectFactoryMode._
import org.apache.spark.sql.catalyst.plans.ExistenceJoin
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.adaptive._
import org.apache.spark.sql.execution.exchange.{BroadcastExchangeLike, ReusedExchangeExec}
import org.apache.spark.sql.execution.joins.BroadcastHashJoinExec
import org.apache.spark.sql.execution.streaming.{MemoryStream, StreamingQueryWrapper}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
/**
* Test suite for the filtering ratio policy used to trigger dynamic partition pruning (DPP).
*/
abstract class DynamicPartitionPruningSuiteBase
extends QueryTest
with SharedSparkSession
with GivenWhenThen
with AdaptiveSparkPlanHelper {
// Data source format used for every test table; subclasses may override.
val tableFormat: String = "parquet"
import testImplicits._
// Creates and analyzes the test tables shared by all DPP tests:
// partitioned facts (fact_sk, fact_stats), an unpartitioned fact (fact_np),
// dimensions (dim_store, dim_stats), a partitioned code table (code_stats)
// and a generated product table.
override def beforeAll(): Unit = {
  super.beforeAll()

  // Fact rows: (date_id, store_id, product_id, units_sold).
  val factData = Seq[(Int, Int, Int, Int)](
    (1000, 1, 1, 10),
    (1010, 2, 1, 10),
    (1020, 2, 1, 10),
    (1030, 3, 2, 10),
    (1040, 3, 2, 50),
    (1050, 3, 2, 50),
    (1060, 3, 2, 50),
    (1070, 4, 2, 10),
    (1080, 4, 3, 20),
    (1090, 4, 3, 10),
    (1100, 4, 3, 10),
    (1110, 5, 3, 10),
    (1120, 6, 4, 10),
    (1130, 7, 4, 50),
    (1140, 8, 4, 50),
    (1150, 9, 1, 20),
    (1160, 10, 1, 20),
    (1170, 11, 1, 30),
    (1180, 12, 2, 20),
    (1190, 13, 2, 20),
    (1200, 14, 3, 40),
    (1200, 15, 3, 70),
    (1210, 16, 4, 10),
    (1220, 17, 4, 20),
    (1230, 18, 4, 20),
    (1240, 19, 5, 40),
    (1250, 20, 5, 40),
    (1260, 21, 5, 40),
    (1270, 22, 5, 50),
    (1280, 23, 1, 50),
    (1290, 24, 1, 50),
    (1300, 25, 1, 50)
  )

  // Dimension rows: (store_id, state_province, country).
  val storeData = Seq[(Int, String, String)](
    (1, "North-Holland", "NL"),
    (2, "South-Holland", "NL"),
    (3, "Bavaria", "DE"),
    (4, "California", "US"),
    (5, "Texas", "US"),
    (6, "Texas", "US")
  )

  // (store_id, code) pairs for the partitioned code_stats table.
  val storeCode = Seq[(Int, Int)](
    (1, 10),
    (2, 20),
    (3, 30),
    (4, 40),
    (5, 50),
    (6, 60)
  )

  // Generated product table: product_id, store_id (= id % 10), code (= id + 1).
  spark.range(1000)
    .select($"id" as "product_id", ($"id" % 10) as "store_id", ($"id" + 1) as "code")
    .write
    .format(tableFormat)
    .mode("overwrite")
    .saveAsTable("product")

  // Unpartitioned fact table (DPP should not apply here).
  factData.toDF("date_id", "store_id", "product_id", "units_sold")
    .write
    .format(tableFormat)
    .saveAsTable("fact_np")

  // Fact table partitioned by the join key store_id.
  factData.toDF("date_id", "store_id", "product_id", "units_sold")
    .write
    .partitionBy("store_id")
    .format(tableFormat)
    .saveAsTable("fact_sk")

  // Same data, used by the statistics-based tests.
  factData.toDF("date_id", "store_id", "product_id", "units_sold")
    .write
    .partitionBy("store_id")
    .format(tableFormat)
    .saveAsTable("fact_stats")

  storeData.toDF("store_id", "state_province", "country")
    .write
    .format(tableFormat)
    .saveAsTable("dim_store")

  storeData.toDF("store_id", "state_province", "country")
    .write
    .format(tableFormat)
    .saveAsTable("dim_stats")

  storeCode.toDF("store_id", "code")
    .write
    .partitionBy("store_id")
    .format(tableFormat)
    .saveAsTable("code_stats")

  // Column statistics are needed by the filtering-ratio estimation.
  sql("ANALYZE TABLE fact_stats COMPUTE STATISTICS FOR COLUMNS store_id")
  sql("ANALYZE TABLE dim_stats COMPUTE STATISTICS FOR COLUMNS store_id")
  sql("ANALYZE TABLE code_stats COMPUTE STATISTICS FOR COLUMNS store_id")
}
// Drops the test tables created in beforeAll and restores the AQE-related
// configuration before handing off to the parent suite's cleanup.
override def afterAll(): Unit = {
  try {
    Seq("fact_np", "fact_sk", "product", "dim_store", "fact_stats", "dim_stats")
      .foreach(table => sql(s"DROP TABLE IF EXISTS $table"))
  } finally {
    spark.sessionState.conf.unsetConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED)
    spark.sessionState.conf.unsetConf(SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY)
    super.afterAll()
  }
}
/**
 * Check if the query plan has a partition pruning filter inserted as
 * a subquery duplicate or as a custom broadcast exchange.
 *
 * Executes `df`, then inspects the dynamic pruning expressions pushed into
 * the file scans: asserts the presence/absence of a plain subquery filter
 * (`withSubquery`) and of a broadcast-reusing filter (`withBroadcast`), and
 * checks that every subquery broadcast actually reuses an exchange from the
 * main plan.
 */
def checkPartitionPruningPredicate(
    df: DataFrame,
    withSubquery: Boolean,
    withBroadcast: Boolean): Unit = {
  df.collect()

  val plan = df.queryExecution.executedPlan
  val dpExprs = collectDynamicPruningExpressions(plan)
  // A plain subquery duplicate (no broadcast reuse).
  val hasSubquery = dpExprs.exists {
    case InSubqueryExec(_, _: SubqueryExec, _, _) => true
    case _ => false
  }
  // Filters backed by a broadcast exchange.
  val subqueryBroadcast = dpExprs.collect {
    case InSubqueryExec(_, b: SubqueryBroadcastExec, _, _) => b
  }

  val hasFilter = if (withSubquery) "Should" else "Shouldn't"
  assert(hasSubquery == withSubquery,
    s"$hasFilter trigger DPP with a subquery duplicate:\\n${df.queryExecution}")
  val hasBroadcast = if (withBroadcast) "Should" else "Shouldn't"
  assert(subqueryBroadcast.nonEmpty == withBroadcast,
    s"$hasBroadcast trigger DPP with a reused broadcast exchange:\\n${df.queryExecution}")

  // Each broadcast subquery must wrap a reused exchange (directly, via a query
  // stage, or via a broadcast exchange that is reused elsewhere in the plan).
  subqueryBroadcast.foreach { s =>
    s.child match {
      case _: ReusedExchangeExec => // reuse check ok.
      case BroadcastQueryStageExec(_, _: ReusedExchangeExec, _) => // reuse check ok.
      case b: BroadcastExchangeLike =>
        val hasReuse = plan.find {
          case ReusedExchangeExec(_, e) => e eq b
          case _ => false
        }.isDefined
        assert(hasReuse, s"$s\\nshould have been reused in\\n$plan")
      case _ =>
        fail(s"Invalid child node found in\\n$s")
    }
  }

  // Subqueries must be adaptive iff the main query is adaptive.
  val isMainQueryAdaptive = plan.isInstanceOf[AdaptiveSparkPlanExec]
  subqueriesAll(plan).filterNot(subqueryBroadcast.contains).foreach { s =>
    val subquery = s match {
      case r: ReusedSubqueryExec => r.child
      case o => o
    }
    assert(subquery.find(_.isInstanceOf[AdaptiveSparkPlanExec]).isDefined == isMainQueryAdaptive)
  }
}
/**
 * Executes `df` and asserts that its plan contains exactly `n` distinct
 * broadcast exchange subqueries, identified by their broadcast key index.
 */
def checkDistinctSubqueries(df: DataFrame, n: Int): Unit = {
  df.collect()

  val plan = df.queryExecution.executedPlan
  val broadcastIndices = collectDynamicPruningExpressions(plan).collect {
    case InSubqueryExec(_, broadcast: SubqueryBroadcastExec, _, _) => broadcast.index
  }
  assert(broadcastIndices.toSet.size == n)
}
/**
 * Collect the children of all correctly pushed down dynamic pruning expressions in a spark plan.
 *
 * Only partition filters on [[FileSourceScanExec]] nodes count as "pushed
 * down"; dynamic pruning expressions left anywhere else are ignored here
 * (see [[checkUnpushedFilters]]).
 */
private def collectDynamicPruningExpressions(plan: SparkPlan): Seq[Expression] = {
  flatMap(plan) {
    case s: FileSourceScanExec => s.partitionFilters.collect {
      case d: DynamicPruningExpression => d.child
    }
    case _ => Nil
  }
}
/**
 * Check if the plan contains unpushed dynamic pruning filters.
 *
 * Returns true when some [[FilterExec]] still carries a
 * [[DynamicPruningExpression]] in its condition, i.e. the filter was not
 * pushed down to a scan as a partition filter.
 */
def checkUnpushedFilters(df: DataFrame): Boolean = {
  find(df.queryExecution.executedPlan) {
    case FilterExec(condition, _) =>
      splitConjunctivePredicates(condition).exists {
        case _: DynamicPruningExpression => true
        case _ => false
      }
    case _ => false
  }.isDefined
}
/**
 * Test the result of a simple join on mock-up tables.
 *
 * With broadcast-reuse disabled, DPP should fall back to inserting a
 * duplicated subquery filter (no broadcast exchange).
 */
test("simple inner join triggers DPP with mock-up tables",
  DisableAdaptiveExecution("DPP in AQE must reuse broadcast")) {
  withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
    SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
    SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
    val df = sql(
      """
        |SELECT f.date_id, f.store_id FROM fact_sk f
        |JOIN dim_store s ON f.store_id = s.store_id AND s.country = 'NL'
      """.stripMargin)

    // Expect a subquery duplicate but no broadcast reuse.
    checkPartitionPruningPredicate(df, true, false)

    checkAnswer(df, Row(1000, 1) :: Row(1010, 2) :: Row(1020, 2) :: Nil)
  }
}
/**
 * Test DPP is triggered by a self-join on a partitioned table.
 *
 * Pruning a table with a filter derived from the same table is useless,
 * so no DPP filter should be inserted.
 */
test("self-join on a partitioned table should not trigger DPP") {
  withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
    SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
    SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
    withTable("fact") {
      sql(
        s"""
          |CREATE TABLE fact (
          |  col1 varchar(14), col2 bigint, col3 bigint, col4 decimal(18,8), partCol1 varchar(1)
          |) USING $tableFormat PARTITIONED BY (partCol1)
        """.stripMargin)

      // Self-join through a windowed subquery on the same partition column.
      val df = sql(
        """
          |SELECT b.col1 FROM fact a
          |JOIN
          |(SELECT * FROM (
          |  SELECT *, Lag(col4) OVER (PARTITION BY partCol1, col1 ORDER BY col2) prev_col4
          |  FROM (SELECT partCol1, col1, col2, col3, col4 FROM fact) subquery) subquery2
          |  WHERE col3 = 0 AND col4 = prev_col4
          |) b
          |ON a.partCol1 = b.partCol1
        """.stripMargin)

      checkPartitionPruningPredicate(df, false, false)
    }
  }
}
/**
 * Check the static scan metrics with and without DPP.
 *
 * Static metrics (staticFilesNum/staticFilesSize) should only appear when
 * DPP kicks in, reflecting the pre-pruning totals, while the regular
 * metrics reflect what was actually read after pruning.
 */
test("static scan metrics",
  DisableAdaptiveExecution("DPP in AQE must reuse broadcast")) {
  withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
    SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
    SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
    withTable("fact", "dim") {
      val numPartitions = 10

      spark.range(10)
        .map { x => Tuple3(x, x + 1, 0) }
        .toDF("did", "d1", "d2")
        .write
        .format(tableFormat)
        .mode("overwrite")
        .saveAsTable("dim")

      spark.range(100)
        .map { x => Tuple2(x, x % numPartitions) }
        .toDF("f1", "fid")
        .write.partitionBy("fid")
        .format(tableFormat)
        .mode("overwrite")
        .saveAsTable("fact")

      // Finds the scan of the fact table (identified by its "fid" column).
      def getFactScan(plan: SparkPlan): SparkPlan = {
        val scanOption =
          find(plan) {
            case s: FileSourceScanExec =>
              s.output.exists(_.find(_.argString(maxFields = 100).contains("fid")).isDefined)
            case _ => false
          }
        assert(scanOption.isDefined)
        scanOption.get
      }

      // No dynamic partition pruning, so no static metrics
      // All files in fact table are scanned
      val df1 = sql("SELECT sum(f1) FROM fact")
      df1.collect()
      val scan1 = getFactScan(df1.queryExecution.executedPlan)
      assert(!scan1.metrics.contains("staticFilesNum"))
      assert(!scan1.metrics.contains("staticFilesSize"))
      val allFilesNum = scan1.metrics("numFiles").value
      val allFilesSize = scan1.metrics("filesSize").value
      assert(scan1.metrics("numPartitions").value === numPartitions)
      assert(scan1.metrics("pruningTime").value === -1)

      // No dynamic partition pruning, so no static metrics
      // Only files from fid = 5 partition are scanned
      val df2 = sql("SELECT sum(f1) FROM fact WHERE fid = 5")
      df2.collect()
      val scan2 = getFactScan(df2.queryExecution.executedPlan)
      assert(!scan2.metrics.contains("staticFilesNum"))
      assert(!scan2.metrics.contains("staticFilesSize"))
      val partFilesNum = scan2.metrics("numFiles").value
      val partFilesSize = scan2.metrics("filesSize").value
      assert(0 < partFilesNum && partFilesNum < allFilesNum)
      assert(0 < partFilesSize && partFilesSize < allFilesSize)
      assert(scan2.metrics("numPartitions").value === 1)
      assert(scan2.metrics("pruningTime").value === -1)

      // Dynamic partition pruning is used
      // Static metrics are as-if reading the whole fact table
      // "Regular" metrics are as-if reading only the "fid = 5" partition
      val df3 = sql("SELECT sum(f1) FROM fact, dim WHERE fid = did AND d1 = 6")
      df3.collect()
      val scan3 = getFactScan(df3.queryExecution.executedPlan)
      assert(scan3.metrics("staticFilesNum").value == allFilesNum)
      assert(scan3.metrics("staticFilesSize").value == allFilesSize)
      assert(scan3.metrics("numFiles").value == partFilesNum)
      assert(scan3.metrics("filesSize").value == partFilesSize)
      assert(scan3.metrics("numPartitions").value === 1)
      assert(scan3.metrics("pruningTime").value !== -1)
    }
  }
}
// Regression test: rewriting a NOT IN anti-join must not let DPP introduce an
// existential join. The executed plan is scanned for a BroadcastHashJoinExec
// whose join type is ExistenceJoin; none may be present.
test("DPP should not be rewritten as an existential join") {
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
SQLConf.DYNAMIC_PARTITION_PRUNING_FALLBACK_FILTER_RATIO.key -> "1.5",
SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
val df = sql(
s"""
|SELECT * FROM product p WHERE p.store_id NOT IN
| (SELECT f.store_id FROM fact_sk f JOIN dim_store d ON f.store_id = d.store_id
| WHERE d.state_province = 'NL'
| )
""".stripMargin)
// An ExistenceJoin inside a BroadcastHashJoinExec would indicate the
// unwanted rewrite occurred.
val found = df.queryExecution.executedPlan.find {
case BroadcastHashJoinExec(_, _, p: ExistenceJoin, _, _, _, _, _) => true
case _ => false
}
assert(found.isEmpty)
}
}
/**
* (1) DPP should be disabled when the large (fact) table isn't partitioned by the join key
* (2) DPP should be triggered only for certain join types
* (3) DPP should trigger only when we have attributes on both sides of the join condition
*/
test("DPP triggers only for certain types of query",
DisableAdaptiveExecution("DPP in AQE must reuse broadcast")) {
// NOTE(review): checkPartitionPruningPredicate(df, flag1, flag2) — the flags
// appear to mean (subquery-based DPP filter, broadcast-reused DPP filter)
// based on usage throughout this suite; confirm against the helper definition.
withSQLConf(
SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
SQLConf.DYNAMIC_PARTITION_PRUNING_PRUNING_SIDE_EXTRA_FILTER_RATIO.key -> "1") {
// Feature flag off: no pruning predicate expected at all.
Given("dynamic partition pruning disabled")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "false") {
val df = sql(
"""
|SELECT * FROM fact_sk f
|LEFT SEMI JOIN dim_store s
|ON f.store_id = s.store_id AND s.country = 'NL'
""".stripMargin)
checkPartitionPruningPredicate(df, false, false)
}
// fact_np joins on date_id, which is not its partition column.
Given("not a partition column")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true") {
val df = sql(
"""
|SELECT * FROM fact_np f
|JOIN dim_store s
|ON f.date_id = s.store_id WHERE s.country = 'NL'
""".stripMargin)
checkPartitionPruningPredicate(df, false, false)
}
Given("no predicate on the dimension table")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true") {
val df = sql(
"""
|SELECT * FROM fact_sk f
|JOIN dim_store s
|ON f.store_id = s.store_id
""".stripMargin)
checkPartitionPruningPredicate(df, false, false)
}
Given("left-semi join with partition column on the left side")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
val df = sql(
"""
|SELECT * FROM fact_sk f
|LEFT SEMI JOIN dim_store s
|ON f.store_id = s.store_id AND s.country = 'NL'
""".stripMargin)
checkPartitionPruningPredicate(df, true, false)
}
Given("left-semi join with partition column on the right side")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
val df = sql(
"""
|SELECT * FROM dim_store s
|LEFT SEMI JOIN fact_sk f
|ON f.store_id = s.store_id AND s.country = 'NL'
""".stripMargin)
checkPartitionPruningPredicate(df, true, false)
}
// Left outer join with filter only on the fact side: pruning would not help,
// so no DPP predicate is expected.
Given("left outer with partition column on the left side")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true") {
val df = sql(
"""
|SELECT * FROM fact_sk f
|LEFT OUTER JOIN dim_store s
|ON f.store_id = s.store_id WHERE f.units_sold = 10
""".stripMargin)
checkPartitionPruningPredicate(df, false, false)
}
Given("right outer join with partition column on the left side")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
val df = sql(
"""
|SELECT * FROM fact_sk f RIGHT OUTER JOIN dim_store s
|ON f.store_id = s.store_id WHERE s.country = 'NL'
""".stripMargin)
checkPartitionPruningPredicate(df, true, false)
}
}
}
/**
* The filtering policy has a fallback when the stats are unavailable
*/
test("filtering ratio policy fallback",
DisableAdaptiveExecution("DPP in AQE must reuse broadcast")) {
withSQLConf(
SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
Given("no stats and selective predicate")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
SQLConf.DYNAMIC_PARTITION_PRUNING_USE_STATS.key -> "true") {
val df = sql(
"""
|SELECT f.date_id, f.product_id, f.units_sold, f.store_id FROM fact_sk f
|JOIN dim_store s
|ON f.store_id = s.store_id WHERE s.country LIKE '%C_%'
""".stripMargin)
checkPartitionPruningPredicate(df, true, false)
}
Given("no stats and selective predicate with the size of dim too large")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
SQLConf.DYNAMIC_PARTITION_PRUNING_USE_STATS.key -> "true") {
// NOTE(review): fact_aux is created via saveAsTable but never dropped here
// (no withTable wrapper) — confirm suite-level cleanup removes it, otherwise
// it leaks into other tests.
sql(
"""
|SELECT f.date_id, f.product_id, f.units_sold, f.store_id
|FROM fact_sk f WHERE store_id < 5
""".stripMargin)
.write
.partitionBy("store_id")
.saveAsTable("fact_aux")
val df = sql(
"""
|SELECT f.date_id, f.product_id, f.units_sold, f.store_id
|FROM fact_aux f JOIN dim_store s
|ON f.store_id = s.store_id WHERE s.country = 'US'
""".stripMargin)
// dim side too large relative to the fallback ratio: no pruning expected.
checkPartitionPruningPredicate(df, false, false)
checkAnswer(df,
Row(1070, 2, 10, 4) ::
Row(1080, 3, 20, 4) ::
Row(1090, 3, 10, 4) ::
Row(1100, 3, 10, 4) :: Nil
)
}
Given("no stats and selective predicate with the size of dim small")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
SQLConf.DYNAMIC_PARTITION_PRUNING_USE_STATS.key -> "true") {
val df = sql(
"""
|SELECT f.date_id, f.product_id, f.units_sold, f.store_id FROM fact_sk f
|JOIN dim_store s
|ON f.store_id = s.store_id WHERE s.country = 'NL'
""".stripMargin)
checkPartitionPruningPredicate(df, true, false)
checkAnswer(df,
Row(1010, 1, 10, 2) ::
Row(1020, 1, 10, 2) ::
Row(1000, 1, 10, 1) :: Nil
)
}
}
}
/**
* The filtering ratio policy performs best when it uses cardinality estimates
*/
test("filtering ratio policy with stats when the broadcast pruning is disabled",
DisableAdaptiveExecution("DPP in AQE must reuse broadcast")) {
withSQLConf(
SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
// With stats disabled, the heuristic still prunes on a selective predicate.
Given("disabling the use of stats in the DPP heuristic")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
SQLConf.DYNAMIC_PARTITION_PRUNING_USE_STATS.key -> "false") {
val df = sql(
"""
|SELECT f.date_id, f.product_id, f.units_sold, f.store_id FROM fact_stats f
|JOIN dim_stats s
|ON f.store_id = s.store_id WHERE s.country = 'DE'
""".stripMargin)
checkPartitionPruningPredicate(df, true, false)
}
// A non-selective dim predicate (store_id > 0 plus a subquery) should make
// the stats-based heuristic skip pruning.
Given("filtering ratio with stats disables pruning")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
SQLConf.DYNAMIC_PARTITION_PRUNING_USE_STATS.key -> "true") {
val df = sql(
"""
|SELECT f.date_id, f.product_id, f.units_sold, f.store_id FROM fact_stats f
|JOIN dim_stats s
|ON (f.store_id = s.store_id) WHERE s.store_id > 0 AND s.store_id IN
|(SELECT p.store_id FROM product p)
""".stripMargin)
checkPartitionPruningPredicate(df, false, false)
}
Given("filtering ratio with stats enables pruning")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
SQLConf.DYNAMIC_PARTITION_PRUNING_USE_STATS.key -> "true") {
val df = sql(
"""
|SELECT f.date_id, f.product_id, f.units_sold, f.store_id FROM fact_stats f
|JOIN dim_stats s
|ON f.store_id = s.store_id WHERE s.country = 'DE'
""".stripMargin)
checkPartitionPruningPredicate(df, true, false)
checkAnswer(df,
Row(1030, 2, 10, 3) ::
Row(1040, 2, 50, 3) ::
Row(1050, 2, 50, 3) ::
Row(1060, 2, 50, 3) :: Nil
)
}
// Pruning still works through an expression on the fact-side key.
Given("join condition more complex than fact.attr = dim.attr")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
SQLConf.DYNAMIC_PARTITION_PRUNING_USE_STATS.key -> "true") {
val df = sql(
"""
|SELECT f.date_id, f.product_id, f.units_sold, f.store_id
|FROM fact_stats f JOIN dim_stats s
|ON f.store_id + 1 = s.store_id WHERE s.country = 'DE'
""".stripMargin)
checkPartitionPruningPredicate(df, true, false)
checkAnswer(df,
Row(1010, 1, 10, 2) ::
Row(1020, 1, 10, 2) :: Nil
)
}
}
}
// Non-deterministic expressions (RAND()) on the probe side must disable DPP:
// both scenarios below expect no pruning predicate in the plan.
test("partition pruning in broadcast hash joins with non-deterministic probe part") {
Given("alias with simple join condition, and non-deterministic query")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
val df = sql(
"""
|SELECT f.date_id, f.pid, f.sid FROM
|(SELECT date_id, product_id AS pid, store_id AS sid
| FROM fact_stats WHERE RAND() > 0.5) AS f
|JOIN dim_stats s
|ON f.sid = s.store_id WHERE s.country = 'DE'
""".stripMargin)
checkPartitionPruningPredicate(df, false, false)
}
Given("alias over multiple sub-queries with simple join condition")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
val df = sql(
"""
|SELECT f.date_id, f.pid, f.sid FROM
|(SELECT date_id, pid_d AS pid, sid_d AS sid FROM
| (SELECT date_id, product_id AS pid_d, store_id AS sid_d FROM fact_stats
| WHERE RAND() > 0.5) fs
| JOIN dim_store ds ON fs.sid_d = ds.store_id) f
|JOIN dim_stats s
|ON f.sid = s.store_id WHERE s.country = 'DE'
""".stripMargin)
checkPartitionPruningPredicate(df, false, false)
}
}
// DPP must see through aliases and nested sub-queries: each scenario expects a
// broadcast-reused pruning filter (second flag true) and the same result rows.
test("partition pruning in broadcast hash joins with aliases") {
Given("alias with simple join condition, using attribute names only")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
val df = sql(
"""
|SELECT f.date_id, f.pid, f.sid FROM
|(select date_id, product_id as pid, store_id as sid from fact_stats) as f
|JOIN dim_stats s
|ON f.sid = s.store_id WHERE s.country = 'DE'
""".stripMargin)
checkPartitionPruningPredicate(df, false, true)
checkAnswer(df,
Row(1030, 2, 3) ::
Row(1040, 2, 3) ::
Row(1050, 2, 3) ::
Row(1060, 2, 3) :: Nil
)
}
Given("alias with expr as join condition")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
val df = sql(
"""
|SELECT f.date_id, f.pid, f.sid FROM
|(SELECT date_id, product_id AS pid, store_id AS sid FROM fact_stats) AS f
|JOIN dim_stats s
|ON f.sid + 1 = s.store_id + 1 WHERE s.country = 'DE'
""".stripMargin)
checkPartitionPruningPredicate(df, false, true)
checkAnswer(df,
Row(1030, 2, 3) ::
Row(1040, 2, 3) ::
Row(1050, 2, 3) ::
Row(1060, 2, 3) :: Nil
)
}
Given("alias over multiple sub-queries with simple join condition")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
val df = sql(
"""
|SELECT f.date_id, f.pid, f.sid FROM
|(SELECT date_id, pid_d AS pid, sid_d AS sid FROM
| (select date_id, product_id AS pid_d, store_id AS sid_d FROM fact_stats) fs
| JOIN dim_store ds ON fs.sid_d = ds.store_id) f
|JOIN dim_stats s
|ON f.sid = s.store_id WHERE s.country = 'DE'
""".stripMargin)
checkPartitionPruningPredicate(df, false, true)
checkAnswer(df,
Row(1030, 2, 3) ::
Row(1040, 2, 3) ::
Row(1050, 2, 3) ::
Row(1060, 2, 3) :: Nil
)
}
// NOTE(review): this Given text duplicates the previous scenario's, but the
// query below nests one level deeper — consider a distinct description.
Given("alias over multiple sub-queries with simple join condition")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
val df = sql(
"""
|SELECT f.date_id, f.pid_d as pid, f.sid_d as sid FROM
| (SELECT date_id, pid_dd AS pid_d, sid_dd AS sid_d FROM
| (
| (select date_id, product_id AS pid_dd, store_id AS sid_dd FROM fact_stats) fss
| JOIN dim_store ds ON fss.sid_dd = ds.store_id
| ) fs
| JOIN dim_store ds ON fs.sid_dd = ds.store_id
| ) f
|JOIN dim_stats s
|ON f.sid_d = s.store_id WHERE s.country = 'DE'
""".stripMargin)
checkPartitionPruningPredicate(df, false, true)
checkAnswer(df,
Row(1030, 2, 3) ::
Row(1040, 2, 3) ::
Row(1050, 2, 3) ::
Row(1060, 2, 3) :: Nil
)
}
}
// Exercises the same fact/dim join under different combinations of
// reuse-broadcast-only, exchange reuse, and broadcast-join thresholds; the
// expected answer is identical in every scenario, only the plan shape differs.
test("partition pruning in broadcast hash joins",
DisableAdaptiveExecution("DPP in AQE must reuse broadcast")) {
// NOTE(review): the Given text says "disable broadcast pruning" but
// DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY is set to "true" here —
// the description looks inconsistent with the config; confirm intent.
Given("disable broadcast pruning and disable subquery duplication")
withSQLConf(
SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true",
SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
val df = sql(
"""
|SELECT f.date_id, f.product_id, f.units_sold, f.store_id FROM fact_stats f
|JOIN dim_stats s
|ON f.store_id = s.store_id WHERE s.country = 'DE'
""".stripMargin)
checkPartitionPruningPredicate(df, false, false)
checkAnswer(df,
Row(1030, 2, 10, 3) ::
Row(1040, 2, 50, 3) ::
Row(1050, 2, 50, 3) ::
Row(1060, 2, 50, 3) :: Nil
)
}
Given("disable reuse broadcast results and enable subquery duplication")
withSQLConf(
SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
SQLConf.DYNAMIC_PARTITION_PRUNING_USE_STATS.key -> "true",
SQLConf.DYNAMIC_PARTITION_PRUNING_FALLBACK_FILTER_RATIO.key -> "0.5",
SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
val df = sql(
"""
|SELECT f.date_id, f.product_id, f.units_sold, f.store_id FROM fact_stats f
|JOIN dim_stats s
|ON f.store_id = s.store_id WHERE s.country = 'DE'
""".stripMargin)
checkPartitionPruningPredicate(df, true, false)
checkAnswer(df,
Row(1030, 2, 10, 3) ::
Row(1040, 2, 50, 3) ::
Row(1050, 2, 50, 3) ::
Row(1060, 2, 50, 3) :: Nil
)
}
Given("enable reuse broadcast results and disable query duplication")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
val df = sql(
"""
|SELECT f.date_id, f.product_id, f.units_sold, f.store_id FROM fact_stats f
|JOIN dim_stats s
|ON f.store_id = s.store_id WHERE s.country = 'DE'
""".stripMargin)
checkPartitionPruningPredicate(df, false, true)
checkAnswer(df,
Row(1030, 2, 10, 3) ::
Row(1040, 2, 50, 3) ::
Row(1050, 2, 50, 3) ::
Row(1060, 2, 50, 3) :: Nil
)
}
// AUTO_BROADCASTJOIN_THRESHOLD = -1 disables broadcast hash joins entirely.
Given("disable broadcast hash join and disable query duplication")
withSQLConf(
SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
val df = sql(
"""
|SELECT f.date_id, f.product_id, f.units_sold, f.store_id FROM fact_stats f
|JOIN dim_stats s
|ON f.store_id = s.store_id WHERE s.country = 'DE'
""".stripMargin)
checkPartitionPruningPredicate(df, false, false)
checkAnswer(df,
Row(1030, 2, 10, 3) ::
Row(1040, 2, 50, 3) ::
Row(1050, 2, 50, 3) ::
Row(1060, 2, 50, 3) :: Nil
)
}
Given("disable broadcast hash join and enable query duplication")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.DYNAMIC_PARTITION_PRUNING_USE_STATS.key -> "true") {
val df = sql(
"""
|SELECT f.date_id, f.product_id, f.units_sold, f.store_id FROM fact_stats f
|JOIN dim_stats s
|ON f.store_id = s.store_id WHERE s.country = 'DE'
""".stripMargin)
checkPartitionPruningPredicate(df, true, false)
checkAnswer(df,
Row(1030, 2, 10, 3) ::
Row(1040, 2, 50, 3) ::
Row(1050, 2, 50, 3) ::
Row(1060, 2, 50, 3) :: Nil
)
}
}
// Builds a fact table partitioned by columns of several key types (int, byte,
// short, string) and a small unpartitioned dim table, then verifies that a
// single-column join key of each type produces the same (pruned) result row.
test("broadcast a single key in a HashedRelation") {
  withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
    withTable("fact", "dim") {
      val factRows = spark.range(100).select(
        $"id",
        ($"id" + 1).cast("int").as("one"),
        ($"id" + 2).cast("byte").as("two"),
        ($"id" + 3).cast("short").as("three"),
        (($"id" * 20) % 100).as("mod"),
        ($"id" + 1).cast("string").as("str"))
      factRows
        .write.partitionBy("one", "two", "three", "str")
        .format(tableFormat).mode("overwrite").saveAsTable("fact")
      val dimRows = spark.range(10).select(
        $"id",
        ($"id" + 1).cast("int").as("one"),
        ($"id" + 2).cast("byte").as("two"),
        ($"id" + 3).cast("short").as("three"),
        ($"id" * 10).as("prod"),
        ($"id" + 1).cast("string").as("str"))
      dimRows.write.format(tableFormat).mode("overwrite").saveAsTable("dim")
      // Every variant below selects the same logical row.
      val expected = Seq(Row(9, 10, 11, "10"))
      // single numeric (int) join key broadcast
      val intKeyDf = sql(
"""
|SELECT f.id, f.one, f.two, f.str FROM fact f
|JOIN dim d
|ON (f.one = d.one)
|WHERE d.prod > 80
""".stripMargin)
      checkAnswer(intKeyDf, expected)
      // single byte join key
      val byteKeyDf = sql(
"""
|SELECT f.id, f.one, f.two, f.str FROM fact f
|JOIN dim d
|ON (f.two = d.two)
|WHERE d.prod > 80
""".stripMargin)
      checkAnswer(byteKeyDf, expected)
      // single string join key
      val strKeyDf = sql(
"""
|SELECT f.id, f.one, f.two, f.str FROM fact f
|JOIN dim d
|ON (f.str = d.str)
|WHERE d.prod > 80
""".stripMargin)
      checkAnswer(strKeyDf, expected)
    }
  }
}
// Multi-column join keys over numeric partition columns: the compound key
// (one, two, three) should still prune and return the single expected row.
test("broadcast multiple keys in a LongHashedRelation") {
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
withTable("fact", "dim") {
spark.range(100).select(
$"id",
($"id" + 1).cast("int").as("one"),
($"id" + 2).cast("byte").as("two"),
($"id" + 3).cast("short").as("three"),
(($"id" * 20) % 100).as("mod"),
($"id" % 10).cast("string").as("str"))
.write.partitionBy("one", "two", "three")
.format(tableFormat).mode("overwrite").saveAsTable("fact")
spark.range(10).select(
$"id",
($"id" + 1).cast("int").as("one"),
($"id" + 2).cast("byte").as("two"),
($"id" + 3).cast("short").as("three"),
($"id" * 10).as("prod"))
.write.format(tableFormat).mode("overwrite").saveAsTable("dim")
// broadcast multiple keys
val dfLong = sql(
"""
|SELECT f.id, f.one, f.two, f.str FROM fact f
|JOIN dim d
|ON (f.one = d.one and f.two = d.two and f.three = d.three)
|WHERE d.prod > 80
""".stripMargin)
checkAnswer(dfLong, Row(9, 10, 11, "9") :: Nil)
}
}
}
// Same multi-key scenario as above but with string-typed key columns, which
// exercises the non-long (unsafe) hashed-relation code path.
test("broadcast multiple keys in an UnsafeHashedRelation") {
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
withTable("fact", "dim") {
spark.range(100).select(
$"id",
($"id" + 1).cast("string").as("one"),
($"id" + 2).cast("string").as("two"),
($"id" + 3).cast("string").as("three"),
(($"id" * 20) % 100).as("mod"),
($"id" % 10).cast("string").as("str"))
.write.partitionBy("one", "two", "three")
.format(tableFormat).mode("overwrite").saveAsTable("fact")
spark.range(10).select(
$"id",
($"id" + 1).cast("string").as("one"),
($"id" + 2).cast("string").as("two"),
($"id" + 3).cast("string").as("three"),
($"id" * 10).as("prod"))
.write.format(tableFormat).mode("overwrite").saveAsTable("dim")
// broadcast multiple keys
val df = sql(
"""
|SELECT f.id, f.one, f.two, f.str FROM fact f
|JOIN dim d
|ON (f.one = d.one and f.two = d.two and f.three = d.three)
|WHERE d.prod > 80
""".stripMargin)
checkAnswer(df, Row(9, "10", "11", "9") :: Nil)
}
}
}
// Both tables are partitioned on the same three key columns; the three join
// predicates should each produce a DISTINCT pruning subquery (not be merged),
// hence checkDistinctSubqueries(df, 3).
test("different broadcast subqueries with identical children") {
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
withTable("fact", "dim") {
spark.range(100).select(
$"id",
($"id" + 1).cast("string").as("one"),
($"id" + 2).cast("string").as("two"),
($"id" + 3).cast("string").as("three"),
(($"id" * 20) % 100).as("mod"),
($"id" % 10).cast("string").as("str"))
.write.partitionBy("one", "two", "three")
.format(tableFormat).mode("overwrite").saveAsTable("fact")
spark.range(10).select(
$"id",
($"id" + 1).cast("string").as("one"),
($"id" + 2).cast("string").as("two"),
($"id" + 3).cast("string").as("three"),
($"id" * 10).as("prod"))
.write.partitionBy("one", "two", "three", "prod")
.format(tableFormat).mode("overwrite").saveAsTable("dim")
// we are expecting three filters on different keys to be pushed down
val df = sql(
"""
|SELECT f.id, f.one, f.two, f.str FROM fact f
|JOIN dim d
|ON (f.one = d.one and f.two = d.two and f.three = d.three)
|WHERE d.prod > 80
""".stripMargin)
checkDistinctSubqueries(df, 3)
checkAnswer(df, Row(9, "10", "11", "9") :: Nil)
}
}
}
// Streaming build side: dynamic partition pruning must NOT be planned when the
// filtering (build) side of the join is a stream, because its values are not
// known at planning time. The executed streaming plan is inspected for any
// DynamicPruningExpression in the fact scan's partition filters.
test("no partition pruning when the build side is a stream") {
withTable("fact") {
val input = MemoryStream[Int]
val stream = input.toDF.select($"value" as "one", ($"value" * 3) as "code")
spark.range(100).select(
$"id",
($"id" + 1).as("one"),
($"id" + 2).as("two"),
($"id" + 3).as("three"))
.write.partitionBy("one")
.format(tableFormat).mode("overwrite").saveAsTable("fact")
val table = sql("SELECT * from fact f")
// join a partitioned table with a stream
val joined = table.join(stream, Seq("one")).where("code > 40")
val query = joined.writeStream.format("memory").queryName("test").start()
input.addData(1, 10, 20, 40, 50)
try {
query.processAllAvailable()
} finally {
// always stop the streaming query, even if processing fails
query.stop()
}
// search dynamic pruning predicates on the executed plan
val plan = query.asInstanceOf[StreamingQueryWrapper].streamingQuery.lastExecution.executedPlan
val ret = plan.find {
// a DynamicPruningExpression among the partition filters would mean DPP fired
case s: FileSourceScanExec =>
s.partitionFilters.exists(_.isInstanceOf[DynamicPruningExpression])
case _ => false
}
// idiomatic emptiness check instead of `ret.isDefined == false`
assert(ret.isEmpty, "dynamic partition pruning must not be planned against a streaming build side")
}
}
// Two joins use the same key pair in opposite orders; key reordering must keep
// (A, B) stable so the broadcast exchange can be reused (see comment below).
test("avoid reordering broadcast join keys to match input hash partitioning",
DisableAdaptiveExecution("DPP in AQE must reuse broadcast")) {
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
withTable("large", "dimTwo", "dimThree") {
spark.range(100).select(
$"id",
($"id" + 1).as("A"),
($"id" + 2).as("B"))
.write.partitionBy("A")
.format(tableFormat).mode("overwrite").saveAsTable("large")
spark.range(10).select(
$"id",
($"id" + 1).as("C"),
($"id" + 2).as("D"))
.write.format(tableFormat).mode("overwrite").saveAsTable("dimTwo")
spark.range(10).select(
$"id",
($"id" + 1).as("E"),
($"id" + 2).as("F"),
($"id" + 3).as("G"))
.write.format(tableFormat).mode("overwrite").saveAsTable("dimThree")
val fact = sql("SELECT * from large")
val dim = sql("SELECT * from dimTwo")
val prod = sql("SELECT * from dimThree")
// The query below first joins table fact with table dim on keys (A, B), and then joins
// table fact with table prod on keys (B, A). The join key reordering in EnsureRequirements
// ensured that the order of the keys stays the same (A, B) in both joins. The keys in a
// broadcast shuffle should not be reordered in order to trigger broadcast reuse.
val df = fact.join(dim,
fact.col("A") === dim.col("C") && fact.col("B") === dim.col("D"), "LEFT")
.join(broadcast(prod),
fact.col("B") === prod.col("F") && fact.col("A") === prod.col("E"))
.where(prod.col("G") > 5)
checkPartitionPruningPredicate(df, false, true)
}
}
}
/**
* This test is a small reproduction of the Query-23 of the TPCDS benchmark.
* The query employs an aggregation on the result of a join between a store table and a
* date dimension table which is further joined with item, date, and store tables using
* a disjoint filter. The outcome of this query is a sequence of nested joins that have
* duplicated partitioning keys, also used to uniquely identify the dynamic pruning filters.
*/
test("dynamic partition pruning ambiguity issue across nested joins") {
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
withTable("store", "date", "item") {
// store: 500 rows partitioned by ss_sold_date_sk (20 distinct values)
spark.range(500)
.select((($"id" + 30) % 50).as("ss_item_sk"),
($"id" % 20).as("ss_sold_date_sk"), ($"id" * 3).as("price"))
.write.partitionBy("ss_sold_date_sk")
.format("parquet").mode("overwrite").saveAsTable("store")
spark.range(20)
.select($"id".as("d_date_sk"), ($"id").as("d_year"))
.write.format("parquet").mode("overwrite").saveAsTable("date")
spark.range(20)
.select(($"id" + 30).as("i_item_sk"))
.write.format("parquet").mode("overwrite").saveAsTable("item")
// The CTE and the outer query both join on ss_sold_date_sk = d_date_sk,
// creating the duplicated pruning-filter keys described above; the test
// passes if the answer is correct despite that ambiguity.
val df = sql(
"""
|WITH aux AS
|(SELECT i_item_sk as frequent_item_sk FROM store, item, date
|WHERE ss_sold_date_sk = d_date_sk
|AND ss_item_sk = i_item_sk
|AND d_year IN (2, 4, 6, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19)
|GROUP BY i_item_sk HAVING count(*) > 0)
|SELECT sum(sales) a
| FROM (SELECT price sales FROM item, date, aux, store
| WHERE d_year IN (1, 3, 5, 7)
| AND ss_sold_date_sk = d_date_sk
| AND ss_item_sk = i_item_sk
| AND i_item_sk = frequent_item_sk) x
""".stripMargin)
checkAnswer(df, Row(28080) :: Nil)
}
}
}
// Both join levels reference the same dim columns (B, BB), producing clashing
// expression ids; any DPP filter that cannot be pushed down because of that
// must be cleaned up — checkUnpushedFilters must report none left behind.
test("cleanup any DPP filter that isn't pushed down due to expression id clashes") {
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
withTable("fact", "dim") {
spark.range(20).select($"id".as("A"), $"id".as("AA"))
.write.partitionBy("A").format(tableFormat).mode("overwrite").saveAsTable("fact")
spark.range(10).select($"id".as("B"), $"id".as("BB"))
.write.format(tableFormat).mode("overwrite").saveAsTable("dim")
val df = sql(
"""
|SELECT A, AA FROM
| (SELECT A, AA from fact
| JOIN dim ON (A = B AND AA = BB) WHERE BB > 1)
| JOIN dim ON (AA = BB AND A = B)
|WHERE BB < 5
""".stripMargin)
assert(!checkUnpushedFilters(df))
}
}
}
// RAND() on the probe side makes the subquery non-deterministic, so the DPP
// filter cannot be pushed into the scan; checkUnpushedFilters must confirm the
// leftover filter was cleaned up rather than stranded in the plan.
test("cleanup any DPP filter that isn't pushed down due to non-determinism") {
  withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
    val nonDeterministicJoin = sql(
"""
|SELECT f.date_id, f.pid, f.sid FROM
|(SELECT date_id, product_id AS pid, store_id AS sid
| FROM fact_stats WHERE RAND() > 0.5) AS f
|JOIN dim_stats s
|ON f.sid = s.store_id WHERE s.country = 'DE'
""".stripMargin)
    assert(!checkUnpushedFilters(nonDeterministicJoin))
  }
}
// The filtering side's join key (d.y + d.z) references multiple dim columns;
// DPP should still produce a broadcast-reused pruning filter for the fact scan.
test("join key with multiple references on the filtering plan",
DisableAdaptiveExecution("DPP in AQE must reuse broadcast")) {
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
// when enable AQE, the reusedExchange is inserted when executed.
withTable("fact", "dim") {
spark.range(100).select(
$"id",
($"id" + 1).cast("string").as("a"),
($"id" + 2).cast("string").as("b"))
.write.partitionBy("a", "b")
.format(tableFormat).mode("overwrite").saveAsTable("fact")
spark.range(10).select(
$"id",
($"id" + 1).cast("string").as("x"),
($"id" + 2).cast("string").as("y"),
($"id" + 2).cast("string").as("z"),
($"id" + 2).cast("string").as("w"))
.write
.format(tableFormat).mode("overwrite").saveAsTable("dim")
val df = sql(
"""
|SELECT f.id, f.a, f.b FROM fact f
|JOIN dim d
|ON f.b + f.a = d.y + d.z
|WHERE d.x = (SELECT avg(p.w) FROM dim p)
""".stripMargin)
checkPartitionPruningPredicate(df, false, true)
}
}
}
// The uncorrelated scalar subquery repeats the outer query's pruned join, so
// the same SubqueryBroadcastExec appears twice; under AQE the second occurrence
// must be replaced by a ReusedSubqueryExec instead of a duplicate broadcast.
test("Make sure dynamic pruning works on uncorrelated queries") {
  withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
    val df = sql(
"""
|SELECT d.store_id,
| SUM(f.units_sold),
| (SELECT SUM(f.units_sold)
| FROM fact_stats f JOIN dim_stats d ON d.store_id = f.store_id
| WHERE d.country = 'US') AS total_prod
|FROM fact_stats f JOIN dim_stats d ON d.store_id = f.store_id
|WHERE d.country = 'US'
|GROUP BY 1
""".stripMargin)
    checkAnswer(df, Seq(Row(4, 50, 70), Row(5, 10, 70), Row(6, 10, 70)))
    val executedPlan = df.queryExecution.executedPlan
    // Count matches directly instead of mapping each to 1 and summing.
    val broadcastSubqueries =
      collectWithSubqueries(executedPlan) { case b: SubqueryBroadcastExec => b }
    if (conf.getConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED)) {
      val reusedBroadcastSubqueries =
        collectWithSubqueries(executedPlan) {
          case r @ ReusedSubqueryExec(_: SubqueryBroadcastExec) => r
        }
      assert(broadcastSubqueries.size == 1)
      assert(reusedBroadcastSubqueries.size == 1)
    } else {
      assert(broadcastSubqueries.size == 2)
    }
  }
}
// SPARK-32509: an unused dynamic-pruning filter must not change a subtree's
// canonical form; the self-join over the same view must still yield exactly
// one ReusedExchangeExec in the executed plan.
test("SPARK-32509: Unused Dynamic Pruning filter shouldn't affect " +
"canonicalization and exchange reuse",
DisableAdaptiveExecution("DPP in AQE must reuse broadcast")) {
// NOTE(review): the two nested withSQLConf calls could be merged into one;
// behavior is identical either way.
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
val df = sql(
""" WITH view1 as (
| SELECT f.store_id FROM fact_stats f WHERE f.units_sold = 70
| )
|
| SELECT * FROM view1 v1 join view1 v2 WHERE v1.store_id = v2.store_id
""".stripMargin)
checkPartitionPruningPredicate(df, false, false)
val reuseExchangeNodes = df.queryExecution.executedPlan.collect {
case se: ReusedExchangeExec => se
}
assert(reuseExchangeNodes.size == 1, "Expected plan to contain 1 ReusedExchangeExec " +
s"nodes. Found ${reuseExchangeNodes.size}")
checkAnswer(df, Row(15, 15) :: Nil)
}
}
}
// With reuse-broadcast-only enabled, a pruning filter is planned only when the
// pruned side is the probe side of a broadcast join (so the broadcast can be
// reused); on the build side no pruning is expected.
test("Plan broadcast pruning only when the broadcast can be reused") {
Given("dynamic pruning filter on the build side")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
val df = sql(
"""
|SELECT f.date_id, f.store_id, f.product_id, f.units_sold FROM fact_np f
|JOIN code_stats s
|ON f.store_id = s.store_id WHERE f.date_id <= 1030
""".stripMargin)
checkPartitionPruningPredicate(df, false, false)
checkAnswer(df,
Row(1000, 1, 1, 10) ::
Row(1010, 2, 1, 10) ::
Row(1020, 2, 1, 10) ::
Row(1030, 3, 2, 10) :: Nil
)
}
Given("dynamic pruning filter on the probe side")
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
// the BROADCAST(f) hint forces fact_np onto the broadcast side
val df = sql(
"""
|SELECT /*+ BROADCAST(f)*/
|f.date_id, f.store_id, f.product_id, f.units_sold FROM fact_np f
|JOIN code_stats s
|ON f.store_id = s.store_id WHERE f.date_id <= 1030
""".stripMargin)
checkPartitionPruningPredicate(df, false, true)
checkAnswer(df,
Row(1000, 1, 1, 10) ::
Row(1010, 2, 1, 10) ::
Row(1020, 2, 1, 10) ::
Row(1030, 3, 2, 10) :: Nil
)
}
}
// SPARK-32659: pruning on a non-atomic join key (struct/array wrapper around
// the partition column) must return correct data under both codegen modes,
// whether or not DPP is enabled.
test("SPARK-32659: Fix the data issue when pruning DPP on non-atomic type") {
  for {
    mode <- Seq(NO_CODEGEN, CODEGEN_ONLY)
    pruning <- Seq(true, false)
  } {
    withSQLConf(
      SQLConf.CODEGEN_FACTORY_MODE.key -> mode.toString,
      SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> s"$pruning") {
      for (dataType <- Seq("struct", "array")) {
        val df = sql(
s"""
|SELECT f.date_id, f.product_id, f.units_sold, f.store_id FROM fact_stats f
|JOIN dim_stats s
|ON $dataType(f.store_id) = $dataType(s.store_id) WHERE s.country = 'DE'
""".stripMargin)
        // A broadcast-reused pruning filter is expected exactly when DPP is on.
        checkPartitionPruningPredicate(df, false, pruning)
        checkAnswer(df, Seq(
          Row(1030, 2, 10, 3),
          Row(1040, 2, 50, 3),
          Row(1050, 2, 50, 3),
          Row(1060, 2, 50, 3)))
      }
    }
  }
}
// SPARK-32817: a selective predicate that matches nothing ('XYZ') produces an
// empty broadcast side; DPP must not throw and the result must be empty.
// EliminateUnnecessaryJoin is excluded so the join isn't optimized away first.
test("SPARK-32817: DPP throws error when the broadcast side is empty") {
withSQLConf(
SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true",
SQLConf.ADAPTIVE_OPTIMIZER_EXCLUDED_RULES.key -> EliminateUnnecessaryJoin.ruleName) {
val df = sql(
"""
|SELECT * FROM fact_sk f
|JOIN dim_store s
|ON f.store_id = s.store_id WHERE s.country = 'XYZ'
""".stripMargin)
checkPartitionPruningPredicate(df, false, true)
checkAnswer(df, Nil)
}
}
// SPARK-34436: a LIKE ANY predicate on the dimension table must still be
// considered selective enough for DPP, yielding a broadcast-reused pruning
// filter and the expected result rows.
test("SPARK-34436: DPP support LIKE ANY/ALL expression") {
  withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true") {
    val likeAnyDf = sql(
"""
|SELECT date_id, product_id FROM fact_sk f
|JOIN dim_store s
|ON f.store_id = s.store_id WHERE s.country LIKE ANY ('%D%E%', '%A%B%')
""".stripMargin)
    checkPartitionPruningPredicate(likeAnyDf, false, true)
    checkAnswer(likeAnyDf, Seq(
      Row(1030, 2),
      Row(1040, 2),
      Row(1050, 2),
      Row(1060, 2)))
  }
}
// SPARK-34595: an RLIKE predicate on the dimension table must still allow DPP.
// Note '[DE|US]' is a regex CHARACTER CLASS (any single char among D,E,|,U,S),
// not alternation — it deliberately matches several countries here.
test("SPARK-34595: DPP support RLIKE expression") {
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true") {
val df = sql(
"""
|SELECT date_id, product_id FROM fact_sk f
|JOIN dim_store s
|ON f.store_id = s.store_id WHERE s.country RLIKE '[DE|US]'
""".stripMargin)
checkPartitionPruningPredicate(df, false, true)
checkAnswer(df,
Row(1030, 2) ::
Row(1040, 2) ::
Row(1050, 2) ::
Row(1060, 2) ::
Row(1070, 2) ::
Row(1080, 3) ::
Row(1090, 3) ::
Row(1100, 3) ::
Row(1110, 3) ::
Row(1120, 4) :: Nil
)
}
}
// SPARK-32855: when the filtering side cannot be the broadcast side of the
// join (here a LEFT JOIN from dim to fact), DPP depends on (a) reuseBroadcastOnly
// being disabled and (b) the filtering side being broadcastable by size.
test("SPARK-32855: Filtering side can not broadcast by join type",
DisableAdaptiveExecution("DPP in AQE must reuse broadcast")) {
withSQLConf(
SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
SQLConf.DYNAMIC_PARTITION_PRUNING_USE_STATS.key -> "false",
SQLConf.DYNAMIC_PARTITION_PRUNING_PRUNING_SIDE_EXTRA_FILTER_RATIO.key -> "1") {
val sqlStr =
"""
|SELECT s.store_id,f. product_id FROM dim_store s
|LEFT JOIN fact_sk f
|ON f.store_id = s.store_id WHERE s.country = 'NL'
""".stripMargin
// DPP will only apply if disable reuseBroadcastOnly
Seq(true, false).foreach { reuseBroadcastOnly =>
withSQLConf(
SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> s"$reuseBroadcastOnly") {
val df = sql(sqlStr)
checkPartitionPruningPredicate(df, !reuseBroadcastOnly, false)
}
}
// DPP will only apply if left side can broadcast by size
// (threshold 1 byte → too small to broadcast → no pruning expected)
Seq(1L, 100000L).foreach { threshold =>
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> s"$threshold") {
val df = sql(sqlStr)
checkPartitionPruningPredicate(df, threshold > 10L, false)
}
}
}
}
}
// Runs the entire DynamicPartitionPruningSuiteBase with adaptive query
// execution (AQE) disabled.
class DynamicPartitionPruningSuiteAEOff extends DynamicPartitionPruningSuiteBase
with DisableAdaptiveExecutionSuite
// Runs the whole DPP suite with adaptive query execution enabled.
class DynamicPartitionPruningSuiteAEOn extends DynamicPartitionPruningSuiteBase
  with EnableAdaptiveExecutionSuite
| BryanCutler/spark | sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala | Scala | apache-2.0 | 52,290 |
/*
* Copyright (c) 2014-2016
* nonblocking.at gmbh [http://www.nonblocking.at]
*
* This file is part of Cliwix.
*
* Cliwix is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package at.nonblocking.cliwix.core.expression
import at.nonblocking.cliwix.core.Reporting
import at.nonblocking.cliwix.core.command.GetByDBIdCommand
import at.nonblocking.cliwix.core.handler.DispatchHandler
import at.nonblocking.cliwix.core.util.GroupUtil
import at.nonblocking.cliwix.model.{GroupMember, LiferayEntityWithUniquePathIdentifier, LiferayEntity}
import com.typesafe.scalalogging.slf4j.LazyLogging
import scala.beans.BeanProperty
/**
 * Generates textual expressions that reference a Liferay entity by its
 * natural identifier rather than by its raw database ID.
 */
trait ExpressionGenerator {

  /**
   * Builds an expression for the given property of the entity identified by
   * `dbId`; returns `None` when the entity (or its owner group) cannot be
   * resolved.
   */
  def createExpression(dbId: Long, propertyName: String, entityClass: Class[_ <: LiferayEntity]): Option[String]
}
/**
 * Default [[ExpressionGenerator]]: resolves the entity behind a DB ID and
 * renders an expression referencing it by its natural identifier (unique
 * path where available, `identifiedBy()` otherwise), qualified by its owner
 * group for [[GroupMember]] entities. Emits a report warning and returns
 * `None` when either the entity or its owner group cannot be resolved.
 */
class ExpressionGeneratorImpl extends ExpressionGenerator with ExpressionUtils with LazyLogging with Reporting {

  // Injected as bean properties (setter injection).
  @BeanProperty
  var handler: DispatchHandler = _

  @BeanProperty
  var groupUtil: GroupUtil = _

  override def createExpression(dbId: Long, propertyName: String, entityClass: Class[_ <: LiferayEntity]) = {
    val entity = this.handler.execute(GetByDBIdCommand(dbId, entityClass)).result

    if (entity == null) {
      report.addWarning(s"Unable to replace '$dbId' by an expression, because an ${entityClass.getSimpleName} with this DB ID doesn't exist.")
      None
    } else {
      // Prefer the unique path identifier when the entity exposes one.
      val naturalIdentifier = entity match {
        case pathItem: LiferayEntityWithUniquePathIdentifier => pathItem.getPath
        case e => e.identifiedBy()
      }

      entity match {
        case groupMember: GroupMember =>
          assert(groupMember.getOwnerGroupId != null, s"ownerGroupId != null for entity: $entity")
          val ownerGroupId = groupMember.getOwnerGroupId

          if (ownerGroupId == 0) {
            Some(EXPRESSION_START_DELIMITER + entityClass.getSimpleName + "(" + EXPRESSION_GROUP_ID_ZERO + ",'" + naturalIdentifier + "')." + propertyName + EXPRESSION_END_DELIMITER)
          } else {
            // Pattern match instead of isDefined/.get: identical behavior,
            // no partial Option access. Field access (groupUtil) is used for
            // consistency with `this.handler` above.
            this.groupUtil.getLiferayEntityForGroupId(ownerGroupId) match {
              case Some((clazz, ownerDbId)) =>
                // Cast needed to fix the wildcard class parameter for the command.
                val ownerEntityClass = clazz.asInstanceOf[Class[LiferayEntity]]
                val ownerEntity = this.handler.execute(new GetByDBIdCommand[LiferayEntity](ownerDbId, ownerEntityClass)).result
                val ownerEntityExpression = ownerEntity.getClass.getSimpleName + "('" + ownerEntity.identifiedBy() + "')"
                Some(EXPRESSION_START_DELIMITER + entityClass.getSimpleName + "(" + ownerEntityExpression + ",'" + naturalIdentifier + "')." + propertyName + EXPRESSION_END_DELIMITER)
              case None =>
                report.addWarning(s"Unable to replace DB ID '$dbId' by an expression, since an owner group with id $ownerGroupId doesn't exist.")
                None
            }
          }
        case _ =>
          Some(EXPRESSION_START_DELIMITER + entityClass.getSimpleName + "('" + naturalIdentifier + "')." + propertyName + EXPRESSION_END_DELIMITER)
      }
    }
  }
}
| nonblocking/cliwix | cliwix-core/src/main/scala/at/nonblocking/cliwix/core/expression/ExpressionGenerator.scala | Scala | agpl-3.0 | 3,697 |
package com.wavesplatform.lang.v1.task
object imports extends TaskMTFunctions with TaskMTInstances
| wavesplatform/Waves | lang/shared/src/main/scala/com/wavesplatform/lang/v1/task/imports.scala | Scala | mit | 100 |
package controllers
import java.util.UUID
import com.google.inject.AbstractModule
import com.mohiva.play.silhouette.api.{ Environment, LoginInfo }
import com.mohiva.play.silhouette.impl.authenticators.CookieAuthenticator
import com.mohiva.play.silhouette.test._
import models.User
import net.codingwell.scalaguice.ScalaModule
import org.specs2.mock.Mockito
import org.specs2.specification.Scope
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.concurrent.Execution.Implicits._
import play.api.test.{ FakeRequest, PlaySpecification, WithApplication }
import utils.auth.DefaultEnv
/**
* Test case for the [[controllers.ApplicationController]] class.
*/
class ApplicationControllerSpec extends PlaySpecification with Mockito {
  // NOTE(review): examples run sequentially — presumably because each spins up
  // its own fake application/environment; confirm before parallelizing.
  sequential

  "The `index` action" should {
    "redirect to login page if user is unauthorized" in new Context {
      new WithApplication(application) {
        // An authenticator for a login that is not in the fake environment
        // must be treated as unauthorized.
        val Some(redirectResult) = route(app, FakeRequest(routes.ApplicationController.index())
          .withAuthenticator[DefaultEnv](LoginInfo("invalid", "invalid"))
        )

        status(redirectResult) must be equalTo SEE_OTHER

        val redirectURL = redirectLocation(redirectResult).getOrElse("")
        redirectURL must contain(routes.SignInController.view().toString)

        // Following the redirect should serve the sign-in page.
        val Some(unauthorizedResult) = route(app, FakeRequest(GET, redirectURL))

        status(unauthorizedResult) must be equalTo OK
        contentType(unauthorizedResult) must beSome("text/html")
        contentAsString(unauthorizedResult) must contain("Silhouette - Sign In")
      }
    }

    "return 200 if user is authorized" in new Context {
      new WithApplication(application) {
        // Uses the identity registered in the fake environment below.
        val Some(result) = route(app, FakeRequest(routes.ApplicationController.index())
          .withAuthenticator[DefaultEnv](identity.loginInfo)
        )

        status(result) must beEqualTo(OK)
      }
    }
  }

  /**
   * The context shared by each example: a fake identity, a fake Silhouette
   * environment containing it, and a Guice application wired to that
   * environment.
   */
  trait Context extends Scope {

    /**
     * A fake Guice module that overrides the Silhouette environment binding.
     */
    class FakeModule extends AbstractModule with ScalaModule {
      def configure() = {
        bind[Environment[DefaultEnv]].toInstance(env)
      }
    }

    /**
     * An identity.
     */
    val identity = User(
      userID = UUID.randomUUID(),
      loginInfo = LoginInfo("facebook", "user@facebook.com"),
      firstName = None,
      lastName = None,
      fullName = None,
      email = None,
      avatarURL = None,
      activated = true
    )

    /**
     * A Silhouette fake environment containing exactly the identity above.
     */
    implicit val env: Environment[DefaultEnv] = new FakeEnvironment[DefaultEnv](Seq(identity.loginInfo -> identity))

    /**
     * The application under test, built with the fake module applied last.
     */
    lazy val application = new GuiceApplicationBuilder()
      .overrides(new FakeModule)
      .build()
  }
}
| GITBSB/Blackjack-silh | test/controllers/ApplicationControllerSpec.scala | Scala | apache-2.0 | 2,802 |
/*
# Copyright 2016 Georges Lipka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package com.glipka.easyReactJS.react
import scala.scalajs.js
import scala.scalajs.js._
import org.scalajs.dom.html
import js.{ UndefOr, Any, Function => JFn }
import js.annotation.{ JSBracketAccess, JSName }
import js.{ Any => jAny }
// https://github.com/DefinitelyTyped/DefinitelyTyped/blob/master/react/react.d.ts
/**
 * Scala.js facade for React's `FormEvent` synthetic event (mirrors the
 * react.d.ts definition linked above). Adds no members of its own beyond
 * those inherited from [[SyntheticEvent]].
 */
@js.native
abstract class FormEvent extends SyntheticEvent with js.Any {
}
| glipka/Easy-React-With-ScalaJS | src/main/scala/com/glipka/easyReactJS/react/FormEvent.scala | Scala | apache-2.0 | 981 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import SharedHelpers.thisLineNumber
import Matchers._
import exceptions.TestFailedException
import org.scalactic.Prettifier
/**
 * Verifies that the structural `defined` matcher works in logical-or
 * expressions (`or`) against objects exposing `isDefined` as a 0-arg
 * method, a parameterless method, and a val — including the exact failure
 * messages raised when both operands fail.
 */
class ShouldBeDefinedStructuralLogicalOrSpec extends FunSpec {

  private val prettifier = Prettifier.default

  val fileName: String = "ShouldBeDefinedStructuralLogicalOrSpec.scala"

  // Helpers delegating to FailureMessages so the expected strings in the
  // assertions below match ScalaTest's own message rendering exactly.
  def wasEqualTo(left: Any, right: Any): String =
    FailureMessages.wasEqualTo(prettifier, left, right)

  def wasNotEqualTo(left: Any, right: Any): String =
    FailureMessages.wasNotEqualTo(prettifier, left, right)

  def equaled(left: Any, right: Any): String =
    FailureMessages.equaled(prettifier, left, right)

  def didNotEqual(left: Any, right: Any): String =
    FailureMessages.didNotEqual(prettifier, left, right)

  def wasNotDefined(left: Any): String =
    FailureMessages.wasNotDefined(prettifier, left)

  def wasDefined(left: Any): String =
    FailureMessages.wasDefined(prettifier, left)

  describe("Defined matcher") {

    // Structural target: isDefined declared as an empty-paren method.
    describe("when work with arbitrary object with isDefined() method") {

      class MyDefinition(value: Boolean) {
        def isDefined(): Boolean = value
        override def toString = "definition"
      }

      val objTrue = new MyDefinition(true)
      val objFalse = new MyDefinition(false)

      it("should do nothing for when both passed") {
        objTrue should (be (defined) or equal (objTrue))
        objTrue should (equal (objTrue) or be (defined))
        objTrue should (be (defined) or be (objTrue))
        objTrue should (be (objTrue) or be (defined))
      }

      it("should do nothing when first check failed") {
        objFalse should (be (defined) or equal (objFalse))
        objTrue should (equal (objFalse) or be (defined))
        objFalse should (be (defined) or be (objFalse))
        objTrue should (be (objFalse) or be (defined))
      }

      it("should do nothing when second check failed") {
        objTrue should (be (defined) or equal (objFalse))
        objFalse should (equal (objFalse) or be (defined))
        objTrue should (be (defined) or be (objFalse))
        objFalse should (be (objFalse) or be (defined))
      }

      it("should throw correct TFE when both check failed") {
        val caught1 = intercept[TestFailedException] {
          objFalse should (be (defined) or equal (objTrue))
        }
        assert(caught1.message === Some(wasNotDefined(objFalse) + ", and " + didNotEqual(objFalse, objTrue)))
        assert(caught1.failedCodeFileName === Some(fileName))
        assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))

        val caught2 = intercept[TestFailedException] {
          objFalse should (equal (objTrue) or be (defined))
        }
        assert(caught2.message === Some(didNotEqual(objFalse, objTrue) + ", and " + wasNotDefined(objFalse)))
        assert(caught2.failedCodeFileName === Some(fileName))
        assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))

        val caught3 = intercept[TestFailedException] {
          objFalse should (be (defined) or be (objTrue))
        }
        assert(caught3.message === Some(wasNotDefined(objFalse) + ", and " + wasNotEqualTo(objFalse, objTrue)))
        assert(caught3.failedCodeFileName === Some(fileName))
        assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))

        val caught4 = intercept[TestFailedException] {
          objFalse should (be (objTrue) or be (defined))
        }
        assert(caught4.message === Some(wasNotEqualTo(objFalse, objTrue) + ", and " + wasNotDefined(objFalse)))
        assert(caught4.failedCodeFileName === Some(fileName))
        assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
      }
    }

    // Structural target: isDefined declared as a parameterless method.
    describe("when work with arbitrary object with isDefined method") {

      class MyDefinition(value: Boolean) {
        def isDefined: Boolean = value
        override def toString = "definition"
      }

      val objTrue = new MyDefinition(true)
      val objFalse = new MyDefinition(false)

      it("should do nothing for when both passed") {
        objTrue should (be (defined) or equal (objTrue))
        objTrue should (equal (objTrue) or be (defined))
        objTrue should (be (defined) or be (objTrue))
        objTrue should (be (objTrue) or be (defined))
      }

      it("should do nothing when first check failed") {
        objFalse should (be (defined) or equal (objFalse))
        objTrue should (equal (objFalse) or be (defined))
        objFalse should (be (defined) or be (objFalse))
        objTrue should (be (objFalse) or be (defined))
      }

      it("should do nothing when second check failed") {
        objTrue should (be (defined) or equal (objFalse))
        objFalse should (equal (objFalse) or be (defined))
        objTrue should (be (defined) or be (objFalse))
        objFalse should (be (objFalse) or be (defined))
      }

      it("should throw correct TFE when both check failed") {
        val caught1 = intercept[TestFailedException] {
          objFalse should (be (defined) or equal (objTrue))
        }
        assert(caught1.message === Some(wasNotDefined(objFalse) + ", and " + didNotEqual(objFalse, objTrue)))
        assert(caught1.failedCodeFileName === Some(fileName))
        assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))

        val caught2 = intercept[TestFailedException] {
          objFalse should (equal (objTrue) or be (defined))
        }
        assert(caught2.message === Some(didNotEqual(objFalse, objTrue) + ", and " + wasNotDefined(objFalse)))
        assert(caught2.failedCodeFileName === Some(fileName))
        assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))

        val caught3 = intercept[TestFailedException] {
          objFalse should (be (defined) or be (objTrue))
        }
        assert(caught3.message === Some(wasNotDefined(objFalse) + ", and " + wasNotEqualTo(objFalse, objTrue)))
        assert(caught3.failedCodeFileName === Some(fileName))
        assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))

        val caught4 = intercept[TestFailedException] {
          objFalse should (be (objTrue) or be (defined))
        }
        assert(caught4.message === Some(wasNotEqualTo(objFalse, objTrue) + ", and " + wasNotDefined(objFalse)))
        assert(caught4.failedCodeFileName === Some(fileName))
        assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
      }
    }

    // Structural target: isDefined declared as a val.
    describe("when work with arbitrary object with isDefined val") {

      class MyDefinition(value: Boolean) {
        val isDefined: Boolean = value
        override def toString = "definition"
      }

      val objTrue = new MyDefinition(true)
      val objFalse = new MyDefinition(false)

      it("should do nothing for when both passed") {
        objTrue should (be (defined) or equal (objTrue))
        objTrue should (equal (objTrue) or be (defined))
        objTrue should (be (defined) or be (objTrue))
        objTrue should (be (objTrue) or be (defined))
      }

      it("should do nothing when first check failed") {
        objFalse should (be (defined) or equal (objFalse))
        objTrue should (equal (objFalse) or be (defined))
        objFalse should (be (defined) or be (objFalse))
        objTrue should (be (objFalse) or be (defined))
      }

      it("should do nothing when second check failed") {
        objTrue should (be (defined) or equal (objFalse))
        objFalse should (equal (objFalse) or be (defined))
        objTrue should (be (defined) or be (objFalse))
        objFalse should (be (objFalse) or be (defined))
      }

      it("should throw correct TFE when both check failed") {
        val caught1 = intercept[TestFailedException] {
          objFalse should (be (defined) or equal (objTrue))
        }
        assert(caught1.message === Some(wasNotDefined(objFalse) + ", and " + didNotEqual(objFalse, objTrue)))
        assert(caught1.failedCodeFileName === Some(fileName))
        assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))

        val caught2 = intercept[TestFailedException] {
          objFalse should (equal (objTrue) or be (defined))
        }
        assert(caught2.message === Some(didNotEqual(objFalse, objTrue) + ", and " + wasNotDefined(objFalse)))
        assert(caught2.failedCodeFileName === Some(fileName))
        assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))

        val caught3 = intercept[TestFailedException] {
          objFalse should (be (defined) or be (objTrue))
        }
        assert(caught3.message === Some(wasNotDefined(objFalse) + ", and " + wasNotEqualTo(objFalse, objTrue)))
        assert(caught3.failedCodeFileName === Some(fileName))
        assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))

        val caught4 = intercept[TestFailedException] {
          objFalse should (be (objTrue) or be (defined))
        }
        assert(caught4.message === Some(wasNotEqualTo(objFalse, objTrue) + ", and " + wasNotDefined(objFalse)))
        assert(caught4.failedCodeFileName === Some(fileName))
        assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
      }
    }
  }
}
| dotty-staging/scalatest | scalatest-test/src/test/scala/org/scalatest/ShouldBeDefinedStructuralLogicalOrSpec.scala | Scala | apache-2.0 | 9,971 |
/* Compile with
dotc implicits2.scala -Xprint:front -Xprint-types -verbose
and verify that the inserted wrapString comes from Predef. You should see
val x: <root>.scala.collection.immutable.WrappedString =
<
<scala.Predef.wrapString:
((s: java.lang.String)scala.collection.immutable.WrappedString)
>
(<"abc":java.lang.String("abc")>):scala.collection.immutable.WrappedString
>
*/
object implicits2 {
  // The String literal is adapted to WrappedString via the implicit
  // Predef.wrapString conversion; the comment above explains how to verify
  // that the inserted conversion indeed comes from Predef.
  val x: scala.collection.immutable.WrappedString = "abc"
}
| yusuke2255/dotty | tests/pos/implicits2.scala | Scala | bsd-3-clause | 523 |
/*
* Copyright (C) 2014 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.cassandra.lucene
import com.stratio.cassandra.lucene.IndexOptions._
import com.stratio.cassandra.lucene.partitioning.{PartitionerOnNone, PartitionerOnToken}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
/** Tests for [[IndexOptions]].
*
* @author Andres de la Pena `adelapena@stratio.com`
*/
@RunWith(classOf[JUnitRunner])
class IndexOptionsTest extends BaseScalaTest {

  // Refresh seconds option tests

  test("parse refresh seconds option with default") {
    parseRefresh(Map()) shouldBe DEFAULT_REFRESH_SECONDS
  }

  test("parse refresh seconds option with integer") {
    parseRefresh(Map(REFRESH_SECONDS_OPTION -> "1")) shouldBe 1
  }

  test("parse refresh seconds option with decimal") {
    parseRefresh(Map(REFRESH_SECONDS_OPTION -> "0.1")) shouldBe 0.1
  }

  test("parse refresh seconds option with failing non numeric value") {
    intercept[IndexException] {
      parseRefresh(Map(REFRESH_SECONDS_OPTION -> "a"))
    }.getMessage shouldBe s"'$REFRESH_SECONDS_OPTION' must be a strictly positive decimal, found: a"
  }

  test("parse refresh seconds option with failing zero value") {
    intercept[IndexException] {
      parseRefresh(Map(REFRESH_SECONDS_OPTION -> "0"))
    }.getMessage shouldBe s"'$REFRESH_SECONDS_OPTION' must be strictly positive, found: 0.0"
  }

  test("parse refresh seconds option with failing negative value") {
    intercept[IndexException] {
      parseRefresh(Map(REFRESH_SECONDS_OPTION -> "-1"))
    }.getMessage shouldBe s"'$REFRESH_SECONDS_OPTION' must be strictly positive, found: -1.0"
  }

  // RAM buffer MB option tests

  test("parse RAM buffer MB option with default") {
    parseRamBufferMB(Map()) shouldBe DEFAULT_RAM_BUFFER_MB
  }

  test("parse RAM buffer MB option with integer") {
    parseRamBufferMB(Map(RAM_BUFFER_MB_OPTION -> "1")) shouldBe 1
  }

  test("parse RAM buffer MB option with failing decimal") {
    intercept[IndexException] {
      parseRamBufferMB(Map(RAM_BUFFER_MB_OPTION -> "0.1"))
    }.getMessage shouldBe s"'$RAM_BUFFER_MB_OPTION' must be a strictly positive integer, found: 0.1"
  }

  test("parse RAM buffer MB option with failing non numeric value") {
    intercept[IndexException] {
      parseRamBufferMB(Map(RAM_BUFFER_MB_OPTION -> "a"))
    }.getMessage shouldBe s"'$RAM_BUFFER_MB_OPTION' must be a strictly positive integer, found: a"
  }

  test("parse RAM buffer MB option with failing zero value") {
    intercept[IndexException] {
      parseRamBufferMB(Map(RAM_BUFFER_MB_OPTION -> "0"))
    }.getMessage shouldBe s"'$RAM_BUFFER_MB_OPTION' must be strictly positive, found: 0"
  }

  test("parse RAM buffer MB option with failing negative value") {
    intercept[IndexException] {
      parseRamBufferMB(Map(RAM_BUFFER_MB_OPTION -> "-1"))
    }.getMessage shouldBe s"'$RAM_BUFFER_MB_OPTION' must be strictly positive, found: -1"
  }

  // Max merge MB option tests

  test("parse max merge MB option with default") {
    parseMaxMergeMB(Map()) shouldBe DEFAULT_MAX_MERGE_MB
  }

  test("parse max merge MB option with integer") {
    parseMaxMergeMB(Map(MAX_MERGE_MB_OPTION -> "1")) shouldBe 1
  }

  test("parse max merge MB option with failing decimal") {
    intercept[IndexException] {
      parseMaxMergeMB(Map(MAX_MERGE_MB_OPTION -> "0.1"))
    }.getMessage shouldBe s"'$MAX_MERGE_MB_OPTION' must be a strictly positive integer, found: 0.1"
  }

  test("parse max merge MB option with failing non numeric value") {
    intercept[IndexException] {
      parseMaxMergeMB(Map(MAX_MERGE_MB_OPTION -> "a"))
    }.getMessage shouldBe s"'$MAX_MERGE_MB_OPTION' must be a strictly positive integer, found: a"
  }

  test("parse max merge MB option with failing zero value") {
    intercept[IndexException] {
      parseMaxMergeMB(Map(MAX_MERGE_MB_OPTION -> "0"))
    }.getMessage shouldBe s"'$MAX_MERGE_MB_OPTION' must be strictly positive, found: 0"
  }

  test("parse max merge MB option with failing negative value") {
    intercept[IndexException] {
      parseMaxMergeMB(Map(MAX_MERGE_MB_OPTION -> "-1"))
    }.getMessage shouldBe s"'$MAX_MERGE_MB_OPTION' must be strictly positive, found: -1"
  }

  // Max cached MB option tests

  test("parse max cached MB option with default") {
    parseMaxCachedMB(Map()) shouldBe DEFAULT_MAX_CACHED_MB
  }

  test("parse max cached MB option with integer") {
    parseMaxCachedMB(Map(MAX_CACHED_MB_OPTION -> "1")) shouldBe 1
  }

  test("parse max cached MB option with failing decimal") {
    intercept[IndexException] {
      parseMaxCachedMB(Map(MAX_CACHED_MB_OPTION -> "0.1"))
    }.getMessage shouldBe s"'$MAX_CACHED_MB_OPTION' must be a strictly positive integer, found: 0.1"
  }

  test("parse max cached MB option with failing non numeric value") {
    intercept[IndexException] {
      parseMaxCachedMB(Map(MAX_CACHED_MB_OPTION -> "a"))
    }.getMessage shouldBe s"'$MAX_CACHED_MB_OPTION' must be a strictly positive integer, found: a"
  }

  test("parse max cached MB option with failing zero value") {
    intercept[IndexException] {
      parseMaxCachedMB(Map(MAX_CACHED_MB_OPTION -> "0"))
    }.getMessage shouldBe s"'$MAX_CACHED_MB_OPTION' must be strictly positive, found: 0"
  }

  test("parse max cached MB option with failing negative value") {
    intercept[IndexException] {
      parseMaxCachedMB(Map(MAX_CACHED_MB_OPTION -> "-1"))
    }.getMessage shouldBe s"'$MAX_CACHED_MB_OPTION' must be strictly positive, found: -1"
  }

  // Indexing threads option tests (this option accepts any integer: zero and
  // negative values are passed through unchanged)

  test("parse indexing threads option with default") {
    parseIndexingThreads(Map()) shouldBe DEFAULT_INDEXING_THREADS
  }

  test("parse indexing threads option with integer") {
    parseIndexingThreads(Map(INDEXING_THREADS_OPTION -> "1")) shouldBe 1
  }

  test("parse indexing threads option with failing decimal") {
    intercept[IndexException] {
      parseIndexingThreads(Map(INDEXING_THREADS_OPTION -> "0.1"))
    }.getMessage shouldBe s"'$INDEXING_THREADS_OPTION' must be an integer, found: 0.1"
  }

  test("parse indexing threads option with failing non numeric value") {
    intercept[IndexException] {
      parseIndexingThreads(Map(INDEXING_THREADS_OPTION -> "a"))
    }.getMessage shouldBe s"'$INDEXING_THREADS_OPTION' must be an integer, found: a"
  }

  test("parse indexing threads option with zero value") {
    // Fixed: previously duplicated the negative-value case ("-1"); the
    // zero-value test should actually exercise "0".
    parseIndexingThreads(Map(INDEXING_THREADS_OPTION -> "0")) shouldBe 0
  }

  test("parse indexing threads option with negative value") {
    parseIndexingThreads(Map(INDEXING_THREADS_OPTION -> "-1")) shouldBe -1
  }

  // Indexing queues size option tests

  test("parse indexing queues size option with default") {
    parseIndexingQueuesSize(Map()) shouldBe DEFAULT_INDEXING_QUEUES_SIZE
  }

  test("parse indexing queues size option with integer") {
    parseIndexingQueuesSize(Map(INDEXING_QUEUES_SIZE_OPTION -> "1")) shouldBe 1
  }

  test("parse indexing queues size option with failing decimal") {
    intercept[IndexException] {
      parseIndexingQueuesSize(Map(INDEXING_QUEUES_SIZE_OPTION -> "0.1"))
    }.getMessage shouldBe
      s"'$INDEXING_QUEUES_SIZE_OPTION' must be a strictly positive integer, found: 0.1"
  }

  test("parse indexing queues size option with failing non numeric value") {
    intercept[IndexException] {
      parseIndexingQueuesSize(Map(INDEXING_QUEUES_SIZE_OPTION -> "a"))
    }.getMessage shouldBe
      s"'$INDEXING_QUEUES_SIZE_OPTION' must be a strictly positive integer, found: a"
  }

  test("parse indexing queues size option with failing zero value") {
    intercept[IndexException] {
      parseIndexingQueuesSize(Map(INDEXING_QUEUES_SIZE_OPTION -> "0"))
    }.getMessage shouldBe s"'$INDEXING_QUEUES_SIZE_OPTION' must be strictly positive, found: 0"
  }

  test("parse indexing queues size option with failing negative value") {
    intercept[IndexException] {
      parseIndexingQueuesSize(Map(INDEXING_QUEUES_SIZE_OPTION -> "-1"))
    }.getMessage shouldBe s"'$INDEXING_QUEUES_SIZE_OPTION' must be strictly positive, found: -1"
  }

  // Excluded data centers size option tests

  test("parse excluded data centers option with default") {
    parseExcludedDataCenters(Map()) shouldBe DEFAULT_EXCLUDED_DATA_CENTERS
  }

  test("parse excluded data centers option with empty list") {
    parseExcludedDataCenters(Map(EXCLUDED_DATA_CENTERS_OPTION -> "")) shouldBe List()
  }

  test("parse excluded data centers option with singleton list") {
    parseExcludedDataCenters(Map(EXCLUDED_DATA_CENTERS_OPTION -> "dc1")) shouldBe List("dc1")
  }

  test("parse excluded data centers option with multiple list") {
    val options = Map(EXCLUDED_DATA_CENTERS_OPTION -> " dc1,dc2 ")
    parseExcludedDataCenters(options) shouldBe List("dc1", "dc2")
  }

  test("parse excluded data centers option with multiple list and spaces") {
    val options = Map(EXCLUDED_DATA_CENTERS_OPTION -> " dc1 , dc2 ")
    parseExcludedDataCenters(options) shouldBe List("dc1", "dc2")
  }

  // Partitioner option tests

  test("parse partitioner option with default") {
    parsePartitioner(Map(), null) shouldBe DEFAULT_PARTITIONER
  }

  test("parse partitioner with none partitioner") {
    val json = "{type:\"none\"}"
    parsePartitioner(Map(PARTITIONER_OPTION -> json), null) shouldBe PartitionerOnNone()
  }

  test("parse partitioner with token partitioner") {
    val json = "{type:\"token\", partitions: 10}"
    parsePartitioner(Map(PARTITIONER_OPTION -> json), null) shouldBe PartitionerOnToken(10)
  }
}
| adelapena/cassandra-lucene-index | plugin/src/test/scala/com/stratio/cassandra/lucene/IndexOptionsTest.scala | Scala | apache-2.0 | 10,070 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v3
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600.v3.retriever.AboutThisReturnBoxRetriever
/**
 * CT600 (version 3) box B65 — "Notice of disclosable avoidance schemes".
 * Optional boolean input; validation requires a value to be present.
 */
case class B65(value: Option[Boolean]) extends CtBoxIdentifier("Notice of disclosable avoidance schemes") with CtOptionalBoolean with Input with ValidatableBox[AboutThisReturnBoxRetriever] {

  // The box is mandatory: validation fails when `value` is None.
  override def validate(boxRetriever: AboutThisReturnBoxRetriever): Set[CtValidation] = validateAsMandatory(this)
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v3/B65.scala | Scala | apache-2.0 | 1,043 |
package redmine4s.api.resource
import org.scalatest.{DiagrammedAssertions, FlatSpec}
// Placeholder spec for the news resource: no test cases implemented yet.
class NewsResourceSpec extends FlatSpec with DiagrammedAssertions {
}
| tomingtoming/redmine4s | src/test/scala/redmine4s/api/resource/NewsResourceSpec.scala | Scala | apache-2.0 | 157 |
package com.xored.scalajs.react.comp
import com.xored.scalajs.react._
/**
 * Minimal scalajs-react component: no props, no state, renders a static
 * "Hello World!" heading via the `@scalax` XML-literal macro.
 */
object HelloWorld extends TypedReactSpec {

  case class Props()
  case class State()

  def getInitialState(self: This) = State()

  @scalax
  def render(self: This) = {
    <h1>Hello World!</h1>
  }
}
| Aste88/scala-js-react | scalajs-react-tests/src/test/scala/com/xored/scalajs/react/comp/HelloWorld.scala | Scala | apache-2.0 | 274 |
package keycloak
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import keycloak.OIDCScenarioBuilder._
import org.keycloak.performance.TestConfig
/**
 * Gatling simulation exercising the OIDC login-and-logout scenario with the
 * default injection profile, asserting global failure-count and mean
 * response-time bounds from [[TestConfig]].
 */
class OIDCLoginAndLogoutSimulation extends CommonSimulation {

  override def printSpecificTestParameters {
    println(" refreshTokenCount: " + TestConfig.refreshTokenCount)
    println(" badLoginAttempts: " + TestConfig.badLoginAttempts)
  }

  val usersScenario = scenario("Logging-in Users").exec(loginAndLogoutScenario.chainBuilder)

  setUp(usersScenario.inject(defaultInjectionProfile).protocols(httpDefault))
    .assertions(
      // "+ 1" because lessThan is strict: allows up to maxFailedRequests.
      global.failedRequests.count.lessThan(TestConfig.maxFailedRequests + 1),
      global.responseTime.mean.lessThan(TestConfig.maxMeanReponseTime)
    )
}
| keycloak/keycloak | testsuite/performance/tests/src/test/scala/keycloak/OIDCLoginAndLogoutSimulation.scala | Scala | apache-2.0 | 759 |
package controllers
import play.api._
import play.api.mvc._
import play.api.data._
import play.api.data.Forms._
import models._
import views._
/**
* Manage projects related operations.
*/
/**
 * Manage projects related operations. All actions are guarded by the
 * `Secured` helpers: `IsAuthenticated` supplies the caller's username,
 * `IsMemberOf(project)` additionally checks project membership.
 */
class Projects extends Controller with Secured {

  /**
   * Display the dashboard: projects and todo tasks involving the
   * authenticated user, or 403 if the user record cannot be found.
   */
  def index = IsAuthenticated { username => _ =>
    User.findByEmail(username).map { user =>
      Ok(
        html.dashboard(
          Project.findInvolving(username),
          Task.findTodoInvolving(username),
          user
        )
      )
    }.getOrElse(Forbidden)
  }

  // -- Projects

  /**
   * Add a project. Expects a non-empty "group" form field naming the folder;
   * the creator becomes the first member.
   */
  def add = IsAuthenticated { username => implicit request =>
    Form("group" -> nonEmptyText).bindFromRequest.fold(
      errors => BadRequest,
      folder => Ok(
        views.html.projects.item(
          Project.create(
            Project(None, folder, "New project"),
            Seq(username)
          )
        )
      )
    )
  }

  /**
   * Delete a project.
   */
  def delete(project: Long) = IsMemberOf(project) { username => _ =>
    Project.delete(project)
    Ok
  }

  /**
   * Rename a project. Expects a non-empty "name" form field; echoes the new
   * name back in the response body.
   */
  def rename(project: Long) = IsMemberOf(project) { _ => implicit request =>
    Form("name" -> nonEmptyText).bindFromRequest.fold(
      errors => BadRequest,
      newName => {
        Project.rename(project, newName)
        Ok(newName)
      }
    )
  }

  // -- Project groups

  /**
   * Add a new project group.
   */
  def addGroup = IsAuthenticated { _ => _ =>
    Ok(html.projects.group("New group"))
  }

  /**
   * Delete a project group (removes every project in the folder).
   */
  def deleteGroup(folder: String) = IsAuthenticated { _ => _ =>
    Project.deleteInFolder(folder)
    Ok
  }

  /**
   * Rename a project group.
   */
  def renameGroup(folder: String) = IsAuthenticated { _ => implicit request =>
    Form("name" -> nonEmptyText).bindFromRequest.fold(
      errors => BadRequest,
      newName => { Project.renameFolder(folder, newName); Ok(newName) }
    )
  }

  // -- Members

  /**
   * Add a project member. Expects a non-empty "user" form field.
   */
  def addUser(project: Long) = IsMemberOf(project) { _ => implicit request =>
    Form("user" -> nonEmptyText).bindFromRequest.fold(
      errors => BadRequest,
      user => { Project.addMember(project, user); Ok }
    )
  }

  /**
   * Remove a project member. Expects a non-empty "user" form field.
   */
  def removeUser(project: Long) = IsMemberOf(project) { _ => implicit request =>
    Form("user" -> nonEmptyText).bindFromRequest.fold(
      errors => BadRequest,
      user => { Project.removeMember(project, user); Ok }
    )
  }
}
| scoverage/scoverage-maven-samples | playframework/singlemodule/zentasks/zentasks-scala-2.10/app/controllers/Projects.scala | Scala | apache-2.0 | 2,556 |
package org.scalatra
package scalate
import java.io.PrintWriter
import javax.servlet.http.{ HttpServletRequest, HttpServletResponse }
import org.fusesource.scalate.{ Binding, RenderContext }
import org.scalatra.i18n.{ I18nSupport, Messages }
/**
 * Mixes i18n message support into Scalate rendering: binds an implicit
 * `messages` value into the template engine and exposes the request's
 * [[Messages]] instance as a render-context attribute.
 */
trait ScalateI18nSupport extends ScalateSupport with I18nSupport {

  /*
   * Binding done here seems to work all the time.
   *
   * If it were placed in createRenderContext, it wouldn't work for "view" templates
   * on first access. However, on subsequent accesses, it worked fine.
   */
  before() {
    templateEngine.bindings ::= Binding("messages", classOf[Messages].getName, true, isImplicit = true)
  }

  /**
   * Added "messages" into the template context so it can be accessed like:
   * #{messages("hello")}
   */
  override protected def createRenderContext(out: PrintWriter)(implicit request: HttpServletRequest, response: HttpServletResponse): RenderContext = {
    val context = super.createRenderContext(out)
    context.attributes("messages") = messages(request)
    context
  }
}
| lightvector/scalatra | scalate/src/main/scala/org/scalatra/scalate/ScalateI18nSupport.scala | Scala | bsd-2-clause | 1,048 |
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
package org.deeplearning4j.scalnet.layers.core
import org.nd4j.linalg.lossfunctions.LossFunctions.LossFunction
/**
 * Marker for output layers in DL4J neural networks and computational graphs.
 * (Doc previously said "Trait"; this is a case class.)
 *
 * @param isOutput     whether this layer is the network's output layer
 * @param lossFunction the loss function applied at the output
 * @author David Kale
 */
case class Output(isOutput: Boolean, lossFunction: LossFunction)
| deeplearning4j/deeplearning4j | scalnet/src/main/scala/org/deeplearning4j/scalnet/layers/core/Output.scala | Scala | apache-2.0 | 1,067 |
package com.novus.salat.examples
import salat._
import salat.annotations._
import salat.dao._
import com.mongodb.casbah.Imports._
import org.json4s._
import org.bson.types._
/* https://github.com/salat/salat/wiki/CustomContext */
import com.novus.salat.examples.globals._
/**
 * Case class with a JValue field.
 * https://groups.google.com/forum/#!topic/scala-salat/BM3GpdjlFRE
 */
case class Something(_id: ObjectId = new ObjectId, name: String, jval: JString)

object Something {
  /**
   * Builds a [[Something]] from a single-element list, wrapping the element
   * in a [[JString]]. Any other list shape fails fast via sys.error, exactly
   * as the previous `size != 1` guard did — but the pattern match avoids the
   * separate size check plus `jval(0)` index access.
   */
  def apply(_id: ObjectId, name: String, jval: List[String]): Something =
    jval match {
      case single :: Nil => new Something(_id, name, JString(single))
      case _             => sys.error(s"Unsupported JValue: $jval")
    }
}
// DAO for Something documents in test.somethings.
// NOTE(review): @Salat is normally placed on sealed traits/abstract classes to
// enable polymorphic (de)serialization — confirm it is intentional on a DAO object.
@Salat
object SomethingDAO extends SalatDAO[Something, ObjectId](collection = MongoClient()("test")("somethings"))
// 2D coordinate pair; used instead of Tuple2 because Salat doesn't support tuples.
case class Location(x: Double, y: Double)
case class Venue(@Key("_id") id: Int,
                 // location: Tuple2[Double, Double], // Salat doesn't support Tuples
                 location: Location,
                 name: String)
// DAO for Venue documents in ec.venue, keyed by the Int id.
object VenueDAO extends SalatDAO[Venue, Int](collection = MongoClient()("ec")("venue"))
/** Exploring limitations */
case class EitherHolder(either: Either[String, Int])
/** Map of lists...not supported in 1.9.x */
case class NestedCollHolder(lists: Map[String, List[String]] = Map.empty)
/** Not supported. You should just make that maybeList a plain old List[String] with default value Nil. */
case class OptionalColl(_id: ObjectId = new ObjectId, maybeList: Option[List[String]])
// Holders used to probe numeric (de)serialization behavior.
case class DoublesHolder(values: List[Double])
case class OptionalDoublesHolder(values: List[Option[Double]])
// AnyVal wrapper used to probe value-class serialization.
class IntHolder(val i: Int) extends AnyVal
case class IntHolderHolder(i: IntHolder)
case class Weird(profile: String, indexingMappings: List[String])
@Salat
object MaybeIntDAO extends SalatDAO[MaybeIntHolder, ObjectId](collection = MongoClient()("test")("numbers"))
case class MaybeIntHolder(_id: ObjectId, i: Option[Int], n: Int, data: Map[String, Int])
// Issue #154 Reproduction attempt
@Salat abstract class AbsClass(name: String)
case object ExampleClass extends AbsClass("Example")
@Salat abstract class TestExample(val abs: AbsClass, val maybeNum: Option[Int])
case class ConcreteTestExample(num: Int) extends TestExample(maybeNum = Option(num), abs = ExampleClass)
case class ContainerClass(example: TestExample, debug: String)
// Sealed-trait-style enum modeled with case objects (used by the
// caseObjectOverride example below).
@Salat trait Color
case object Red extends Color
case object Yellow extends Color
case object Blue extends Color
case class Pallet(paint: List[Color])
/**
* All the examples.
* TODO Split these out into distict classes.
*/
object SalatExamples {
import org.slf4j._
val log = LoggerFactory.getLogger(this.getClass)
import scala.util.control.NonFatal
/**
 * Runs `fn`, logging rather than propagating any non-fatal exception.
 * Fatal errors (OutOfMemoryError, etc.) still propagate, since Try only
 * captures NonFatal throwables — matching the original catch clause.
 */
def tryAndLogErrors(fn: () => Unit) =
  scala.util.Try(fn()) match {
    case scala.util.Failure(err) => log.error("An error occurred:", err)
    case _                       => ()
  }
/**
 * Entry point: runs each example in sequence, logging (not propagating)
 * failures so later examples still execute.
 * NOTE(review): abstractClasses() and caseObjectOverride() are defined below
 * but never invoked here — confirm whether they should be part of the run.
 */
def main(args: Array[String]) {
  println("Running all examples...\\n\\n\\n")
  tryAndLogErrors(casbahInsert)
  tryAndLogErrors(daoExample)
  tryAndLogErrors(floatNumberQueryExample)
  tryAndLogErrors(nestedCollections)
  tryAndLogErrors(optionalCollection)
  tryAndLogErrors(listOfDoubles)
  tryAndLogErrors(listOfOptionalDoubles)
  tryAndLogErrors(listOfNulls)
  tryAndLogErrors(anyValHolder)
  tryAndLogErrors(weird)
  tryAndLogErrors(badData)
  tryAndLogErrors(eitherHolder)
  println("\\n\\n...Done")
}
/**
 * Inserts a document containing a json4s JString directly via Casbah
 * (no Salat involved), demonstrating how Casbah serializes the case class.
 */
def casbahInsert() {
  import com.mongodb.casbah.Imports._
  val coll = MongoClient()("test")("somethings")
  val value = JString("foo")
  val doc = MongoDBObject("jstring" -> value)
  coll.insert(doc)
  // This works because Casbah converts the case class as follows (see ScalaProductSerializer)
  import scala.collection.JavaConversions._
  val list: java.util.List[Any] = value.productIterator.toList
  println(s"Just inserted the following document: $list")
}
/**
 * Round-trips a Something through SalatDAO (insert, then findOneById),
 * and demonstrates that JSON serialization of the JString field fails.
 */
def daoExample() {
  println("==== SalatDAO EXAMPLE ====")
  val sin = Something(name = "abc", jval = JString("123"))
  println(s"New Something instance: $sin")
  // Unsupported JSON transformation for class='org.json4s.JsonAST $JString', value='JString(123)'
  // println(grater[Something].toPrettyJSON(sin))
  println(s"Saving Something instance to $SomethingDAO...")
  val id = SomethingDAO.insert(sin)
  println(s"...saved with _id = $id")
  println("Finding Something instance by ID...")
  val sout = id.flatMap(SomethingDAO.findOneById)
  println(s"Got Something from database: $sout")
  // Same error as above
  val json = sout.map(grater[Something].toPrettyJSON)
  println(json.getOrElse("<Not Found>"))
}
/**
 * Saves a Venue and queries it back by its nested Double coordinates,
 * showing floating-point equality matching in a Mongo query.
 */
def floatNumberQueryExample() {
  val venue = Venue(1, Location(1.0, 1.0), "NYC")
  VenueDAO.save(venue)
  println(s"Saved: $venue")
  val found = VenueDAO.findOne(MongoDBObject("location.x" -> 1.0, "location.y" -> 1.0))
  println(s"Found: $found")
}
/**
 * JSON round-trip of a Map[String, List[String]] — probing the nested
 * collection limitation called out on NestedCollHolder above.
 */
def nestedCollections() {
  val lists = NestedCollHolder(lists = Map(
    "foo" -> List("a","b","c"),
    "bar" -> List("d","e","f")))
  val json = grater[NestedCollHolder].toPrettyJSON(lists)
  println(json)
  val fromJson = grater[NestedCollHolder].fromJSON(json)
  println(fromJson)
}
/**
 * JSON round-trip of Option[List[String]]: the None case works, the
 * Some(list) case is expected to demonstrate the unsupported-type failure.
 */
def optionalCollection() {
  // this actually works...
  val obj = OptionalColl(maybeList = None)
  val json = grater[OptionalColl].toPrettyJSON(obj)
  println(json)
  val fromJson = grater[OptionalColl].fromJSON(json)
  println(fromJson)
  // This...not so much
  // https://github.com/salat/salat/wiki/SupportedTypes
  val obj2 = OptionalColl(maybeList = Some(List("a","b","c")))
  println(obj2)
  val json2 = grater[OptionalColl].toPrettyJSON(obj2)
  println(json2)
  val fromJson2 = grater[OptionalColl].fromJSON(json2)
  println(fromJson2)
}
/**
 * Deserializes JSON integers into a List[Double] field, showing how the
 * grater widens whole-number JSON values.
 */
def listOfDoubles() {
  val json = """{"values":[1,2,3]}"""
  println(grater[DoublesHolder].fromJSON(json))
}
/**
 * Deserializes JSON integers into a List[Option[Double]] field and then
 * inspects the first element.
 */
def listOfOptionalDoubles() {
  val json = """{"values":[1,2,3]}"""
  val holder = grater[OptionalDoublesHolder].fromJSON(json)
  println(holder)
  val head: Option[Double] = holder.values.head
  println("First element = " + head)
}
/**
 * Deserializes JSON nulls into a List[Double] field, probing how the
 * grater handles nulls in a non-Option list.
 */
def listOfNulls() {
  val jsonWithNulls = """{"values":[null, null, null]}"""
  val fromJson = grater[DoublesHolder].fromJSON(jsonWithNulls)
  println(fromJson)
}
/**
 * Serializes a case class containing a value-class (AnyVal) field,
 * probing Salat's value-class support.
 */
def anyValHolder() {
  val intHolder = new IntHolder(1)
  val model = IntHolderHolder(intHolder)
  val json = grater[IntHolderHolder].toCompactJSON(model)
  println(json)
}
/**
 * Attempt to reproduce issue #19.
 * Serializes a Weird instance and prints the resulting JSON.
 * Bug fix: the original printed `w` and left `json` unused; the point of
 * the example is the serialized output, so print `json`.
 */
def weird() {
  println("**** Issue #19 reproduction attempt")
  val w = Weird("profile1", List("a","b","c"))
  val json = grater[Weird].toCompactJSON(w)
  println(json)
}
/**
 * Reproduction of Salat Issue #148.
 * Bad data in the db survives past de-serialization
 * and doesn't throw until you attempt to operate on it
 * (it's "booby-trapped").
 *
 * Flow: insert doubles where MaybeIntHolder declares Ints, read the
 * document back through the DAO, then touch each field to show which
 * accesses silently narrow and which blow up. Always cleans the
 * collection in the finally block.
 */
def badData() = {
  import com.mongodb.casbah.Imports._
  val coll = MongoClient()("test")("numbers")
  try {
    // Insert some bad data...MaybeIntHolder shouldn't have doubles in the db...
    println("Saving a MaybeIntHolder having value of 2.01...")
    val doc = MongoDBObject("i" -> 2.01, "n" -> 5.01, "data" -> MongoDBObject("x" -> 1.01))
    val wr = coll.insert(doc)
    println(s"$wr")
    val cursor = MaybeIntDAO.find(MongoDBObject())
    if (cursor.hasNext) {
      val holder = cursor.next
      println(s"${holder}")
      println("Accessing field 'i' of object (which is an Option[Int]):")
      println(s"i: Option[Int] = ${holder.i}")
      println("Accessing field 'n' of object (which is an Int):")
      println(s"n: Int = ${holder.n}")
      println("Accessing field 'data' of object (which is Map[String, Int])")
      println(s"data: Map[String, Int] = ${holder.data}")
      println("Accessing field 'n' of object (which is an Int):")
      println(s"i: Option[Int] = ${holder.n}")
      println("...Now for some math...")
      // Suprise! Salat will narrow the double value that we stuffed into Int type locations
      // Prints out Result = 6
      println("""Attempting holder.n + 1""")
      println(s"holder.n + 1 = ${holder.n + 1}")
      // Prints out "Result = 2"
      println("""Attempting holder.data("x") + 1""")
      println(s"""holder.data("x") + 1 = ${holder.data("x") + 1}""")
      // The following line throws a ClassCastException
      // because i holds a List[Double](???) instead of an Int
      // (due to shenanigans with the Mongo collection, above)
      println("Attempting holder.i + 1")
      val output = holder.i.map(_ + 1)
      println(s"Result: $output")
      // Note that this error could also occur with mongo
      // lists that contain mixed data types, when the
      // case class declares a list of a specific type.
      // For example List[Int] but the mongo document
      // contains ["a",2,"c",3, ObjectId]
    } else {
      println("nothing found???")
    }
  } catch {
    // Intentionally broad: this demo logs then rethrows so the failure is visible.
    case e: Throwable =>
      println(s"oops...an error occurred. Details to follow: $e")
      throw e
  } finally {
    println("Clearing test collection 'numbers'")
    val result = coll.remove(MongoDBObject())
    println(s"$result")
  }
}
/**
 * Shows that an Either field serializes to a DBObject but fails when
 * converting to JSON (execution is expected to stop at toCompactJSON).
 */
def eitherHolder() = {
  val obj = EitherHolder(Left("data"))
  val dbo = grater[EitherHolder].asDBObject(obj)
  println(s"serialized $obj to $dbo for storage in mongo")
  val json = grater[EitherHolder].toCompactJSON(obj)
  // We don't get this far. Exception attempting to convert Either to JSON
  println(s"serialized $obj to $json")
  val fromJSON = grater[EitherHolder].fromJSON(json)
  println(s"deserialized from json: $fromJSON")
}
/**
 * Round-trips a concrete subclass of an @Salat abstract class, both
 * directly and as a field of a container class.
 * Bug fix: the "back to scala" message interpolated $dbo (the DBObject)
 * and left the freshly deserialized `obj` unused; print `obj` instead.
 */
def abstractClasses() {
  val ex = ConcreteTestExample(2)
  println(s"scala data: $ex")
  val dbo = grater[TestExample].asDBObject(ex)
  println(s"asDBObject (using TestExample spec): $dbo")
  val obj = grater[ConcreteTestExample].asObject(dbo)
  println(s"back to scala (using ConcreteTestExample spec): $obj")
  println("Now do the same, but with ConreteTestExample as the field of an object...")
  val container = ContainerClass(debug = "test", example = ConcreteTestExample(2))
  println(s"$container")
  val conDbo = grater[ContainerClass].asDBObject(container)
  println(s"asDBObject (using ContainerClass spec): $conDbo")
  println("...attempt to deserialize this...")
  val conObj = grater[ContainerClass].asObject(conDbo)
  println(s"success: $conObj")
}
/**
 * Registers string overrides for the Color case objects on the implicit
 * context, then serializes a Pallet to show the override names in JSON.
 * Mutates the shared `ctx`, so its effects persist for later examples.
 */
def caseObjectOverride() = {
  ctx.registerCaseObjectOverride[Color, Red.type]("red")
  ctx.registerCaseObjectOverride[Color, Yellow.type]("yellow")
  ctx.registerCaseObjectOverride[Color, Blue.type]("blue")
  val pallet = Pallet(List(Red, Yellow, Blue))
  val json = grater[Pallet].toPrettyJSON(pallet)
  println(s"colors: $json")
  // TIL: toJSONArray doesn't support case object overrides
  println(s"\\ntoPrettyJSONArray:")
  println(grater[Color].toPrettyJSONArray(List(Red, Yellow, Blue)))
}
}
| noahlz/salat-examples | src/main/scala/com/novus/salat/examples/SalatExamples.scala | Scala | mit | 11,218 |
package com.ecfront.common
import java.util.TimeZone
import com.fasterxml.jackson.core.JsonParser
import com.fasterxml.jackson.databind.node.{ArrayNode, ObjectNode}
import com.fasterxml.jackson.databind.{DeserializationFeature, JsonNode, ObjectMapper}
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.fasterxml.jackson.module.scala.experimental.ScalaObjectMapper
import com.typesafe.scalalogging.slf4j.LazyLogging
/**
 * Scala JSON helper.<br/>
 * Wraps <i>jackson-module-scala</i>'s ObjectMapper with convenience
 * serialization and deserialization methods.
 */
object JsonHelper extends LazyLogging {
private val mapper = new ObjectMapper() with ScalaObjectMapper
mapper.registerModule(DefaultScalaModule)
mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
mapper.configure(JsonParser.Feature.ALLOW_COMMENTS, true)
mapper.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true)
/** Sets the time zone used by the shared mapper for date (de)serialization. */
def setTimeZone(tz: TimeZone): Unit = {
  mapper.setTimeZone(tz)
}
/**
 * Serializes an object to a JSON string.
 *
 * @param obj the object to serialize
 * @return the JSON string
 */
def toJsonString(obj: Any): String = {
  mapper.writeValueAsString(obj)
}
/**
 * Converts an object to a Jackson JSON tree.
 * Strings are parsed as JSON; anything else is converted value-to-tree.
 *
 * @param obj the object (or JSON string) to convert
 * @return the JSON tree
 */
def toJson(obj: Any): JsonNode = {
  obj match {
    case o: String => mapper.readTree(o)
    case _ => mapper.valueToTree(obj)
  }
}
/**
 * Converts a JSON tree, JSON string, or arbitrary object to an instance of `clazz`.
 * Plain strings targeting primitive wrapper classes are converted directly
 * (e.g. "42" -> Int) instead of going through Jackson.
 * Logs and rethrows any parse failure.
 *
 * @param obj   JSON tree, JSON string, or object to convert
 * @param clazz the target class
 * @return the deserialized instance
 */
def toObject[E](obj: Any, clazz: Class[E]): E = {
  try {
    obj match {
      case o: String =>
        // For primitive targets, parse the raw string directly.
        clazz match {
          case c if c == classOf[String] =>
            o.asInstanceOf[E]
          case c if c == classOf[Int] =>
            o.toInt.asInstanceOf[E]
          case c if c == classOf[Long] =>
            o.toLong.asInstanceOf[E]
          case c if c == classOf[Double] =>
            o.toDouble.asInstanceOf[E]
          case c if c == classOf[Float] =>
            o.toFloat.asInstanceOf[E]
          case c if c == classOf[Boolean] =>
            o.toBoolean.asInstanceOf[E]
          case c if c == classOf[Byte] =>
            o.toByte.asInstanceOf[E]
          case c if c == classOf[Short] =>
            o.toShort.asInstanceOf[E]
          case c if c == classOf[Void] =>
            null.asInstanceOf[E]
          case _ =>
            mapper.readValue[E](o, clazz)
        }
      case o: JsonNode => mapper.readValue(o.toString, clazz)
      // Fallback: serialize then re-parse, effectively a structural copy/convert.
      case _ => mapper.readValue(mapper.writeValueAsString(obj), clazz)
    }
  } catch {
    case e: Throwable =>
      logger.error(s"Parsing to [${clazz.getName}] error , source is :${obj.toString}")
      throw e
  }
}
/**
 * Converts a JSON tree, JSON string, or arbitrary object to a generic type
 * captured via Manifest (supports parameterized targets like List[Foo]).
 * Logs and rethrows any parse failure.
 */
def toObject[E](obj: Any)(implicit m: Manifest[E]): E = {
  try {
    obj match {
      case o: String => mapper.readValue[E](o)
      case o: JsonNode => mapper.readValue[E](o.toString)
      case _ => mapper.readValue[E](mapper.writeValueAsString(obj))
    }
  } catch {
    case e: Throwable =>
      logger.error(s"Parsing to [${m.toString()}] error , source is :${obj.toString}")
      throw e
  }
}
/** Creates an empty mutable JSON object node backed by the shared mapper. */
def createObjectNode(): ObjectNode = {
  mapper.createObjectNode()
}
/** Creates an empty mutable JSON array node backed by the shared mapper. */
def createArrayNode(): ArrayNode = {
  mapper.createArrayNode()
}
/** Exposes the shared, pre-configured ObjectMapper (mutations affect all callers). */
def getMapper: ObjectMapper = {
  mapper
}
}
| gudaoxuri/ez-common | src/main/scala/com/ecfront/common/JsonHelper.scala | Scala | apache-2.0 | 3,429 |
/*
* Copyright 2017 Radek Gruchalski
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gruchalski.utils
import scala.util.Try
/**
 * A [[scala.util.Try]] wrapper providing a version independent [[scala.util.Either]]
 * conversion (usable on Scala versions predating `Try.toEither`).
 *
 * @param underlyingTry the wrapped [[scala.util.Try]]
 * @tparam T Try type
 * @since 1.4.0
 */
case class TryCompatible[+T](underlyingTry: Try[T]) {
  /** Right on success, Left carrying the failure's throwable otherwise. */
  def toVersionCompatibleEither: Either[Throwable, T] = underlyingTry match {
    case scala.util.Success(value) => Right(value)
    case scala.util.Failure(error) => Left(error)
  }
}
| radekg/kafka-cluster-tools | src/main/scala/com/gruchalski/utils/TryCompatible.scala | Scala | apache-2.0 | 1,056 |
/*
* Copyright 2014 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.anormdb
import com.twitter.app.App
import com.twitter.zipkin.storage.SpanStore
import com.twitter.zipkin.storage.anormdb.{AnormSpanStore, SpanStoreDB}
/**
 * Mixin for twitter-server Apps that wires up an AnormDB-backed SpanStore.
 * Exposes two flags: the JDBC URL (defaults to in-memory SQLite) and whether
 * to create the schema on startup.
 */
trait AnormDBSpanStoreFactory { self: App =>
  val anormDB = flag("zipkin.storage.anormdb.db", "sqlite::memory:", "JDBC location URL for the AnormDB")
  val anormInstall = flag("zipkin.storage.anormdb.install", true, "Create the tables")
  /** Builds the store; installs the schema first when the install flag is set. */
  def newAnormSpanStore(): SpanStore = {
    val db = SpanStoreDB(anormDB())
    val conn = if (anormInstall()) Some(db.install()) else None
    new AnormSpanStore(db, conn)
  }
}
| chang2394/zipkin | zipkin-anormdb/src/main/scala/com/twitter/zipkin/anormdb/AnormDBSpanStoreFactory.scala | Scala | apache-2.0 | 1,196 |
package io.github.shogowada.scala.jsonrpc.serializers
/**
 * Serialization contract whose default bodies exist only so macros can be
 * generated on subclasses; calling them directly always throws.
 */
trait JSONSerializer {
  // Shared failure path for both default implementations; message is unchanged.
  private def macroOnly: Nothing =
    throw new UnsupportedOperationException("This default implementation is here only to allow macros to be defined on child classes.")

  /** Serializes `value` to JSON; real behavior is supplied by macro expansion. */
  def serialize[T](value: T): Option[String] = macroOnly

  /** Deserializes `json` to a value; real behavior is supplied by macro expansion. */
  def deserialize[T](json: String): Option[T] = macroOnly
}
| shogowada/scala-json-rpc | json-serializer/shared/src/main/scala/io/github/shogowada/scala/jsonrpc/serializers/JSONSerializer.scala | Scala | mit | 460 |
package views.html.blog
import lila.api.Context
import lila.app.templating.Environment._
import lila.app.ui.ScalatagsTemplate._
import lila.blog.MiniPost
import lila.common.paginator.Paginator
import controllers.routes
object index {
/**
 * Renders the blog index page: the newest post in full (first page only),
 * followed by an infinite-scrolling grid of post cards.
 */
def apply(
    pager: Paginator[io.prismic.Document]
)(implicit ctx: Context, prismic: lila.blog.BlogApi.Context) = {
  // Only page 1 promotes its first result to the full-width "latest" slot.
  val primaryPost = (pager.currentPage == 1).??(pager.currentPageResults.headOption)
  views.html.base.layout(
    title = "Blog",
    moreCss = cssTag("blog"),
    csp = bits.csp,
    moreJs = infiniteScrollTag
  )(
    main(cls := "page-menu")(
      bits.menu(none, "lichess".some),
      div(cls := "blog index page-menu__content page-small box")(
        div(cls := "box__top")(
          h1("Lichess Official Blog"),
          a(cls := "atom", st.title := "Atom RSS feed", href := routes.Blog.atom, dataIcon := "")
        ),
        primaryPost map { post =>
          frag(
            latestPost(post),
            h2("Previous blog posts")
          )
        },
        div(cls := "blog-cards list infinite-scroll")(
          // Skip the card for the post already shown in full above.
          pager.currentPageResults flatMap MiniPost.fromDocument("blog", "wide") map { post =>
            primaryPost.fold(true)(_.id != post.id) option bits.postCard(post, "paginated".some, h3)
          },
          pagerNext(pager, np => routes.Blog.index(np).url)
        )
      )
    )
  )
}
/** Renders the archive page listing post cards for a single year. */
def byYear(year: Int, posts: List[MiniPost])(implicit ctx: Context) =
  views.html.base.layout(
    title = s"Lichess blog posts from $year",
    moreCss = cssTag("blog"),
    csp = bits.csp
  )(
    main(cls := "page-menu")(
      bits.menu(year.some, none),
      div(cls := "page-menu__content box")(
        div(cls := "box__top")(h1(s"Lichess blog posts from $year")),
        st.section(
          div(cls := "blog-cards")(posts map { bits.postCard(_) })
        )
      )
    )
  )
/**
 * Renders the newest post inline on page 1: title, metadata, main image,
 * an extracted body excerpt, and a "continue reading" link.
 */
private def latestPost(
    doc: io.prismic.Document
)(implicit ctx: Context, prismic: lila.blog.BlogApi.Context) =
  st.article(
    doc.getText("blog.title").map { title =>
      h2(a(href := routes.Blog.show(doc.id, doc.slug, prismic.maybeRef))(title))
    },
    bits.metas(doc),
    div(cls := "parts")(
      doc.getImage("blog.image", "main").map { img =>
        div(cls := "illustration")(
          a(href := routes.Blog.show(doc.id, doc.slug, ref = prismic.maybeRef))(st.img(src := img.url))
        )
      },
      div(cls := "body")(
        doc.getStructuredText("blog.body").map { body =>
          raw(lila.blog.BlogApi.extract(body))
        },
        p(cls := "more")(
          a(
            cls := "button",
            href := routes.Blog.show(doc.id, doc.slug, ref = prismic.maybeRef),
            dataIcon := ""
          )(
            " Continue reading this post"
          )
        )
      )
    )
  )
}
| luanlv/lila | app/views/blog/index.scala | Scala | mit | 2,975 |
/*
* Copyright (c) 2021 Couchbase, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.couchbase.spark.query
import com.couchbase.client.core.error.DmlFailureException
import com.couchbase.client.scala.codec.JsonDeserializer.Passthrough
import com.couchbase.client.scala.json.JsonObject
import com.couchbase.client.scala.query.{QueryScanConsistency, QueryOptions => CouchbaseQueryOptions}
import com.couchbase.spark.DefaultConstants
import com.couchbase.spark.config.{CouchbaseConfig, CouchbaseConnection}
import org.apache.spark.api.java.function.ForeachPartitionFunction
import org.apache.spark.internal.Logging
import org.apache.spark.sql.connector.catalog.{Table, TableProvider}
import org.apache.spark.sql.connector.expressions.Transform
import org.apache.spark.sql.sources.{BaseRelation, CreatableRelationProvider, DataSourceRegister}
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.sql.{DataFrame, Encoders, SQLContext, SaveMode, SparkSession}
import scala.collection.JavaConverters._
import java.util
import scala.concurrent.duration.Duration
class QueryTableProvider extends TableProvider with Logging with DataSourceRegister with CreatableRelationProvider {
/** DataSource alias usable in spark.read.format(...). */
override def shortName(): String = "couchbase.query"
// Lazily resolved so the provider can be instantiated before a session exists.
private lazy val sparkSession = SparkSession.active
private lazy val conf = CouchbaseConfig(sparkSession.sparkContext.getConf)
/**
 * InferSchema is always called if the user does not pass in an explicit schema.
 * Samples up to `inferLimit` documents with a N1QL query (bucket-level or
 * scope/collection-level) and lets Spark's JSON reader derive the schema.
 *
 * @param options the options provided from the user.
 * @return the inferred schema, if possible (null when invoked from a write path).
 */
override def inferSchema(options: CaseInsensitiveStringMap): StructType = {
  if (isWrite) {
    logDebug("Not inferring schema because called from the DataFrameWriter")
    return null
  }
  val idFieldName = Option(options.get(QueryOptions.IdFieldName)).getOrElse(DefaultConstants.DefaultIdFieldName)
  val whereClause = Option(options.get(QueryOptions.Filter)).map(p => s" WHERE $p").getOrElse("")
  val bucketName = conf.implicitBucketNameOr(options.get(QueryOptions.Bucket))
  val inferLimit = Option(options.get(QueryOptions.InferLimit)).getOrElse(DefaultConstants.DefaultInferLimit)
  val scanConsistency = Option(options.get(QueryOptions.ScanConsistency))
    .getOrElse(DefaultConstants.DefaultQueryScanConsistency)
  val opts = CouchbaseQueryOptions()
  // Only the two documented consistency modes are accepted; anything else fails fast.
  scanConsistency match {
    case QueryOptions.NotBoundedScanConsistency => opts.scanConsistency(QueryScanConsistency.NotBounded)
    case QueryOptions.RequestPlusScanConsistency => opts.scanConsistency(QueryScanConsistency.RequestPlus())
    case v => throw new IllegalArgumentException("Unknown scanConsistency of " + v)
  }
  val scopeName = conf.implicitScopeNameOr(options.get(QueryOptions.Scope)).getOrElse(DefaultConstants.DefaultScopeName)
  val collectionName = conf.implicitCollectionName(options.get(QueryOptions.Collection)).getOrElse(DefaultConstants.DefaultCollectionName)
  // Default scope/collection => query the bucket directly; otherwise query the scope.
  val result = if (scopeName.equals(DefaultConstants.DefaultScopeName) && collectionName.equals(DefaultConstants.DefaultCollectionName)) {
    val statement = s"SELECT META().id as $idFieldName, `$bucketName`.* FROM `$bucketName`$whereClause LIMIT $inferLimit"
    logDebug(s"Inferring schema from bucket $bucketName with query '$statement'")
    CouchbaseConnection().cluster(conf).query(statement, opts)
  } else {
    val statement = s"SELECT META().id as $idFieldName, `$collectionName`.* FROM `$collectionName`$whereClause LIMIT $inferLimit"
    logDebug(s"Inferring schema from bucket/scope/collection $bucketName/$scopeName/$collectionName with query '$statement'")
    CouchbaseConnection().cluster(conf).bucket(bucketName).scope(scopeName).query(statement, opts)
  }
  val rows = result.flatMap(result => result.rowsAs[String](Passthrough.StringConvert)).get
  val ds = sparkSession.sqlContext.createDataset(rows)(Encoders.STRING)
  val schema = sparkSession.sqlContext.read.json(ds).schema
  logDebug(s"Inferred schema is $schema")
  schema
}
/**
 * This is a hack because even from the DataFrameWriter the infer schema is called - even though
 * we accept any schema.
 *
 * So check the stack where we are coming from and it allows to bail out early since we don't care
 * about the schema on a write op at all.
 *
 * @return true if we are in a write op, this is a hack.
 */
def isWrite: Boolean =
  Thread.currentThread().getStackTrace.exists(_.getClassName.contains("DataFrameWriter"))
/** Materializes read-path options (bucket/scope/collection, filter, consistency...) from properties. */
def readConfig(properties: util.Map[String, String]): QueryReadConfig = {
  QueryReadConfig(
    conf.implicitBucketNameOr(properties.get(QueryOptions.Bucket)),
    conf.implicitScopeNameOr(properties.get(QueryOptions.Scope)),
    conf.implicitCollectionName(properties.get(QueryOptions.Collection)),
    Option(properties.get(QueryOptions.IdFieldName)).getOrElse(DefaultConstants.DefaultIdFieldName),
    Option(properties.get(QueryOptions.Filter)),
    Option(properties.get(QueryOptions.ScanConsistency)).getOrElse(DefaultConstants.DefaultQueryScanConsistency),
    Option(properties.get(QueryOptions.Timeout)),
    // Aggregate push-down is on unless explicitly disabled.
    Option(properties.get(QueryOptions.PushDownAggregate)).getOrElse("true").toBoolean
  )
}
/** Materializes write-path options (bucket/scope/collection, id field, timeout) from properties. */
def writeConfig(properties: util.Map[String, String]): QueryWriteConfig = {
  QueryWriteConfig(
    conf.implicitBucketNameOr(properties.get(QueryOptions.Bucket)),
    conf.implicitScopeNameOr(properties.get(QueryOptions.Scope)),
    conf.implicitCollectionName(properties.get(QueryOptions.Collection)),
    Option(properties.get(QueryOptions.IdFieldName)).getOrElse(DefaultConstants.DefaultIdFieldName),
    Option(properties.get(QueryOptions.Timeout))
  )
}
/**
 * Returns the "Table", either with an inferred schema or a user provide schema.
 *
 * @param schema the schema, either inferred or provided by the user.
 * @param partitioning partitioning information.
 * @param properties the properties for customization
 * @return the table instance which performs the actual work inside it.
 */
override def getTable(schema: StructType, partitioning: Array[Transform], properties: util.Map[String, String]): Table =
  new QueryTable(schema, partitioning, properties, readConfig(properties))
/**
 * We allow a user passing in a custom schema.
 */
override def supportsExternalMetadata(): Boolean = true
/**
 * V1 write path: serializes the DataFrame rows to JSON and writes each
 * partition through a RelationPartitionWriter, then returns a minimal
 * BaseRelation echoing the input schema.
 */
override def createRelation(ctx: SQLContext, mode: SaveMode, properties: Map[String, String], data: DataFrame): BaseRelation = {
  val writeConfig = this.writeConfig(properties.asJava)
  val couchbaseConfig = CouchbaseConfig(ctx.sparkContext.getConf)
  data.toJSON.foreachPartition(new RelationPartitionWriter(writeConfig, couchbaseConfig, mode))
  new BaseRelation {
    override def sqlContext: SQLContext = ctx
    override def schema: StructType = data.schema
  }
}
}
/**
 * Writes one partition of JSON rows into Couchbase by building a single
 * INSERT/UPSERT N1QL statement covering all rows in the partition.
 *
 * SECURITY NOTE(review): document ids and JSON bodies are interpolated
 * directly into the N1QL statement string; an id containing a quote would
 * break (or alter) the statement. Consider parameterized N1QL or KV writes.
 */
class RelationPartitionWriter(writeConfig: QueryWriteConfig, couchbaseConfig: CouchbaseConfig, mode: SaveMode)
  extends ForeachPartitionFunction[String]
  with Logging {
  /** Builds and executes the batched statement for the rows in `t`. */
  override def call(t: util.Iterator[String]): Unit = {
    val scopeName = writeConfig.scope.getOrElse(DefaultConstants.DefaultScopeName)
    val collectionName = writeConfig.collection.getOrElse(DefaultConstants.DefaultCollectionName)
    // Pull the id field out of each row; the remainder becomes the stored value.
    val values = t.asScala.map(encoded => {
      val decoded = JsonObject.fromJson(encoded)
      val id = decoded.str(writeConfig.idFieldName)
      decoded.remove(writeConfig.idFieldName)
      s"VALUES ('$id', ${decoded.toString})"
    }).mkString(", ")
    // SaveMode maps onto the N1QL verb; Append has no equivalent here.
    val prefix = mode match {
      case SaveMode.ErrorIfExists | SaveMode.Ignore => "INSERT"
      case SaveMode.Overwrite => "UPSERT"
      case SaveMode.Append => throw new IllegalArgumentException("SaveMode.Append is not support with couchbase.query " +
        "DataFrame on write. Please use ErrorIfExists, Ignore or Overwrite instead.")
    }
    val statement = if (scopeName.equals(DefaultConstants.DefaultScopeName) &&
      collectionName.equals(DefaultConstants.DefaultCollectionName)) {
      s"$prefix INTO `${writeConfig.bucket}` (KEY, VALUE) $values"
    } else {
      s"$prefix INTO `$collectionName` (KEY, VALUE) $values"
    }
    logDebug("Building and running N1QL query " + statement)
    val opts = buildOptions()
    try {
      val result = if (scopeName.equals(DefaultConstants.DefaultScopeName) && collectionName.equals(DefaultConstants.DefaultCollectionName)) {
        CouchbaseConnection().cluster(couchbaseConfig).query(statement, opts).get
      } else {
        CouchbaseConnection().cluster(couchbaseConfig).bucket(writeConfig.bucket).scope(scopeName).query(statement, opts).get
      }
      logDebug("Completed query in: " + result.metaData.metrics.get)
    } catch {
      // Ignore mode tolerates DML failures (e.g. duplicate keys); others rethrow.
      case e: DmlFailureException =>
        if (mode == SaveMode.Ignore) {
          logDebug("Failed to run query, but ignoring because of SaveMode.Ignore: ", e)
        } else {
          throw e
        }
    }
  }
  /** Query options: metrics enabled, plus the configured timeout when present. */
  def buildOptions(): CouchbaseQueryOptions = {
    var opts = CouchbaseQueryOptions().metrics(true)
    writeConfig.timeout.foreach(t => opts = opts.timeout(Duration(t)))
    opts
  }
}
| couchbaselabs/couchbase-spark-connector | src/main/scala/com/couchbase/spark/query/QueryTableProvider.scala | Scala | apache-2.0 | 9,786 |
/******************************************************************************
* Copyright © 2016 Maxim Karpov *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
******************************************************************************/
package ru.makkarpov.scalingua.plural
/**
 * Represents plural suffixes that will be understood by macros. If tree typechecks to either `Suffix.S` or
 * `Suffix.ES`, it's considered as plural suffix. No instances of `Suffix.*` exists.
 */
object Suffix {
  // Marker types only — never instantiated; they exist for the macro to detect.
  sealed trait Generic extends Suffix
  sealed trait S extends Suffix
  sealed trait ES extends Suffix
  // Value-class enabling the `"singular" &> "plural"` syntax; erased at macro expansion.
  case class GenericSuffixExtension(s: String) extends AnyVal {
    def &>(plur: String): Suffix.Generic =
      throw new IllegalArgumentException("&> should not remain after macro expansion")
  }
  // Runtime bodies throw on purpose: these calls must be rewritten away by the macro.
  def s: S = throw new IllegalArgumentException(".s or .es should not remain after macro expansion")
  def es: ES = throw new IllegalArgumentException(".s or .es should not remain after macro expansion")
}
sealed trait Suffix | makkarpov/scalingua | scalingua/shared/src/main/scala/ru/makkarpov/scalingua/plural/Suffix.scala | Scala | apache-2.0 | 1,991 |
// NOTE(review): per its repo path this appears to be a compiler regression test
// (tests/pos/t247.scala); the definitions are intentionally minimal and exist
// only to typecheck — do not "clean them up".
class Order[t](less:(t,t) => Boolean,equal:(t,t) => Boolean) {}
trait Map[A, B] extends scala.collection.Map[A, B] {
  val factory:MapFactory[A]
  // Stubbed bodies (null) — only signatures matter for the typecheck.
  def -(key1: A, key2: A, keys: A*): Map[A, B] = null
  def -(key: A): Map[A, B] = null
}
// Factory abstraction exercised by the typecheck below; bodies are stubs.
abstract class MapFactory[A] {
  def Empty[B]:Map[A,B];
}
class TreeMapFactory[KEY](newOrder:Order[KEY]) extends MapFactory[KEY] {
  val order = newOrder;
  def Empty[V] = new TreeMap[KEY,V](new TreeMapFactory[KEY](order));
}
// Minimal Tree/TreeMap pair checking super[Tree].size resolution with a
// diamond of Tree + DefaultMap + the local Map trait; bodies are stubs.
class Tree[KEY,Entry](order:Order[KEY]) {
  def size =0;
}
class TreeMap[KEY,VALUE](_factory:TreeMapFactory[KEY]) extends Tree[KEY,Tuple2[KEY,VALUE]](_factory.order) with scala.collection.DefaultMap[KEY, VALUE] with Map[KEY, VALUE] {
  val factory = _factory
  val order = _factory.order;
  def this(newOrder:Order[KEY]) = this(new TreeMapFactory[KEY](newOrder));
  def get(key:KEY) = null;
  def iterator:Iterator[Tuple2[KEY,VALUE]] = null;
  // Qualified super call is the point of this test.
  override def size = super[Tree].size
}
| som-snytt/dotty | tests/pos/t247.scala | Scala | apache-2.0 | 948 |
/* *\\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\\* */
package squants.radio
import squants._
import squants.energy.Watts
import squants.space.{ Meters, SquaredRadians }
/**
* @author garyKeorkunian
* @since 0.1
*
* @param value Double
*/
final class SpectralIntensity private (val value: Double, val unit: SpectralIntensityUnit)
    extends Quantity[SpectralIntensity] {

  def dimension = SpectralIntensity

  // SpectralIntensity (W/sr/m) * Length (m) => RadiantIntensity (W/sr)
  def *(that: Length): RadiantIntensity = WattsPerSteradian(toWattsPerSteradianPerMeter * that.toMeters)
  // SpectralIntensity (W/sr/m) / RadiantIntensity (W/sr) => Length (m)
  def /(that: RadiantIntensity): Length = Meters(toWattsPerSteradianPerMeter / that.toWattsPerSteradian)

  // Numeric value expressed in the primary unit.
  def toWattsPerSteradianPerMeter = to(WattsPerSteradianPerMeter)
}
object SpectralIntensity extends Dimension[SpectralIntensity] {
  // Library-internal constructor; users go through the unit objects.
  private[radio] def apply[A](n: A, unit: SpectralIntensityUnit)(implicit num: Numeric[A]) = new SpectralIntensity(num.toDouble(n), unit)
  // Exposes the inherited string parser as this dimension's apply (eta-expanded).
  def apply = parse _
  def name = "SpectralIntensity"
  def primaryUnit = WattsPerSteradianPerMeter
  def siUnit = WattsPerSteradianPerMeter
  def units = Set(WattsPerSteradianPerMeter)
}
// A unit of measure for SpectralIntensity; applying a number yields a quantity in that unit.
trait SpectralIntensityUnit extends UnitOfMeasure[SpectralIntensity] {
  def apply[A](n: A)(implicit num: Numeric[A]) = SpectralIntensity(n, this)
}
// Primary/SI unit: watts per steradian per meter, symbol built from the component units.
object WattsPerSteradianPerMeter extends SpectralIntensityUnit with PrimaryUnit with SiUnit {
  val symbol = Watts.symbol + "/" + SquaredRadians.symbol + "/" + Meters.symbol
}
object SpectralIntensityConversions {
  lazy val wattPerSteradianPerMeter = WattsPerSteradianPerMeter(1)

  // DSL: `3.wattsPerSteradianPerMeter` for any Numeric source type.
  implicit class SpectralIntensityConversions[A](n: A)(implicit num: Numeric[A]) {
    def wattsPerSteradianPerMeter = WattsPerSteradianPerMeter(n)
  }

  // Numeric instance so collections of SpectralIntensity can be summed, etc.
  implicit object SpectralIntensityNumeric extends AbstractQuantityNumeric[SpectralIntensity](SpectralIntensity.primaryUnit)
}
| rmihael/squants | shared/src/main/scala/squants/radio/SpectralIntensity.scala | Scala | apache-2.0 | 2,277 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.cache
import java.time.Instant
import javax.inject.Inject
import akka.stream.Materializer
import play.api._
import play.api.http.HeaderNames.{ ETAG, EXPIRES, IF_NONE_MATCH }
import play.api.libs.Codecs
import play.api.libs.streams.Accumulator
import play.api.mvc.Results.NotModified
import play.api.mvc._
import scala.concurrent.Future
import scala.concurrent.duration._
/**
 * A helper to add caching to an Action.
 */
class Cached @Inject() (cache: AsyncCacheApi)(implicit materializer: Materializer) {

  /**
   * Cache an action.
   *
   * @param key Compute a key from the request header
   * @param caching Compute a cache duration from the resource header
   */
  def apply(
    key: RequestHeader => String,
    caching: PartialFunction[ResponseHeader, Duration]): CachedBuilder = {
    new CachedBuilder(cache, key, caching)
  }

  /**
   * Cache an action.
   *
   * @param key Compute a key from the request header
   */
  def apply(key: RequestHeader => String): CachedBuilder = {
    // duration = 0 is the "cache forever" marker (see CachedBuilder).
    apply(key, duration = 0)
  }

  /**
   * Cache an action.
   *
   * @param key Cache key
   */
  def apply(key: String): CachedBuilder = {
    apply(_ => key, duration = 0)
  }

  /**
   * Cache an action.
   *
   * @param key Cache key
   * @param duration Cache duration (in seconds)
   */
  def apply(key: RequestHeader => String, duration: Int): CachedBuilder = {
    // Total function over all response headers: every response is cached for `duration`.
    new CachedBuilder(cache, key, { case (_: ResponseHeader) => Duration(duration, SECONDS) })
  }

  /**
   * A cached instance caching nothing
   * Useful for composition
   */
  def empty(key: RequestHeader => String): CachedBuilder =
    new CachedBuilder(cache, key, PartialFunction.empty)

  /**
   * Caches everything, forever
   */
  def everything(key: RequestHeader => String): CachedBuilder =
    empty(key).default(0)

  /**
   * Caches everything for the specified seconds
   */
  def everything(key: RequestHeader => String, duration: Int): CachedBuilder =
    empty(key).default(duration)

  /**
   * Caches the specified status, for the specified number of seconds
   */
  def status(key: RequestHeader => String, status: Int, duration: Int): CachedBuilder =
    empty(key).includeStatus(status, Duration(duration, SECONDS))

  /**
   * Caches the specified status forever
   */
  def status(key: RequestHeader => String, status: Int): CachedBuilder =
    empty(key).includeStatus(status)
}
/**
 * Builds an action with caching behavior. Typically created with one of the methods in the `Cached`
 * class. Uses both server and client caches:
 *
 *  - Adds an `Expires` header to the response, so clients can cache response content ;
 *  - Adds an `Etag` header to the response, so clients can cache response content and ask the server for freshness ;
 *  - Cache the result on the server, so the underlying action is not computed at each call.
 *
 * @param cache The cache used for caching results
 * @param key Compute a key from the request header
 * @param caching A callback to get the number of seconds to cache results for
 */
final class CachedBuilder(
    cache: AsyncCacheApi,
    key: RequestHeader => String,
    caching: PartialFunction[ResponseHeader, Duration])(implicit materializer: Materializer) {

  /**
   * Compose the cache with an action
   */
  def apply(action: EssentialAction): EssentialAction = build(action)

  /**
   * Compose the cache with an action.
   *
   * Resolution order per request: first honor the client's `If-None-Match`
   * (answer 304 when its ETag is current), then serve the server-side cached
   * result, and only then run the underlying action and cache its outcome.
   */
  def build(action: EssentialAction): EssentialAction = EssentialAction { request =>
    implicit val ec = materializer.executionContext
    val resultKey = key(request)
    // The ETag for a resource is stored under a derived key.
    val etagKey = s"$resultKey-etag"

    // Extracts the quoted entity-tag values from an If-None-Match header,
    // tolerating the weak-validator prefix (W/).
    def parseEtag(etag: String) = {
      val Etag = """(?:W/)?("[^"]*")""".r
      Etag.findAllMatchIn(etag).map(m => m.group(1)).toList
    }

    // Check if the client has a version as new as ours
    Accumulator.flatten(Future.successful(request.headers.get(IF_NONE_MATCH)).flatMap {
      case Some(requestEtag) =>
        cache.get[String](etagKey).map {
          case Some(etag) if requestEtag == "*" || parseEtag(requestEtag).contains(etag) => Some(Accumulator.done(NotModified))
          case _ => None
        }
      case None => Future.successful(None)
    }.flatMap {
      case Some(result) =>
        // The client has the most recent version
        Future.successful(result)
      case None =>
        // Otherwise try to serve the resource from the cache, if it has not yet expired
        cache.get[SerializableResult](resultKey).map { result =>
          result collect {
            case sr: SerializableResult => Accumulator.done(sr.result)
          }
        }.map {
          case Some(cachedResource) => cachedResource
          case None =>
            // The resource was not in the cache, so we have to run the underlying action
            val accumulatorResult = action(request)
            // Add cache information to the response, so clients can cache its content
            accumulatorResult.map(handleResult(_, etagKey, resultKey))
        }
    })
  }

  /**
   * Eternity is one year long. Duration zero means eternity.
   */
  private val cachingWithEternity = caching.andThen { duration =>
    // A zero-length duration is the "cache forever" marker; map it to one year.
    // (Previously written as `duration.neg().equals(duration)`, which is only
    // true for a zero duration but obscured the intent.)
    // FIXME: Surely Duration.Inf is a better marker for eternity than 0?
    if (duration == Duration.Zero) {
      Duration(60 * 60 * 24 * 365, SECONDS)
    } else {
      duration
    }
  }

  /**
   * Adds `Expires`/`ETag` headers to a freshly-computed result and stores both
   * the ETag and the full result in the server-side cache. Results whose status
   * is not covered by the `caching` partial function are returned untouched and
   * not cached.
   */
  private def handleResult(result: Result, etagKey: String, resultKey: String): Result = {
    cachingWithEternity.andThen { duration =>
      // Format expiration date according to http standard
      val expirationDate = http.dateFormat.format(Instant.ofEpochMilli(System.currentTimeMillis() + duration.toMillis))
      // Generate a fresh ETAG for it
      // Use quoted sha1 hash of expiration date as ETAG
      val etag = s""""${Codecs.sha1(expirationDate)}""""

      val resultWithHeaders = result.withHeaders(ETAG -> etag, EXPIRES -> expirationDate)

      // Cache the new ETAG of the resource
      cache.set(etagKey, etag, duration)
      // Cache the new Result of the resource
      cache.set(resultKey, new SerializableResult(resultWithHeaders), duration)

      resultWithHeaders
    }.applyOrElse(result.header, (_: ResponseHeader) => result)
  }

  /**
   * Whether this cache should cache the specified response if the status code match
   * This method will cache the result forever
   */
  def includeStatus(status: Int): CachedBuilder = includeStatus(status, Duration.Zero)

  /**
   * Whether this cache should cache the specified response if the status code match
   * This method will cache the result for duration seconds
   *
   * @param status the status code to check
   * @param duration the number of seconds to cache the result for
   */
  def includeStatus(status: Int, duration: Int): CachedBuilder = includeStatus(status, Duration(duration, SECONDS))

  /**
   * Whether this cache should cache the specified response if the status code match
   * This method will cache the result for duration seconds
   *
   * @param status the status code to check
   * @param duration how long should we cache the result for
   */
  def includeStatus(status: Int, duration: Duration): CachedBuilder = compose {
    case e if e.status == status => {
      duration
    }
  }

  /**
   * The returned cache will store all responses whatever they may contain
   * @param duration how long we should store responses
   */
  def default(duration: Duration): CachedBuilder = compose(PartialFunction((_: ResponseHeader) => duration))

  /**
   * The returned cache will store all responses whatever they may contain
   * @param duration the number of seconds we should store responses
   */
  def default(duration: Int): CachedBuilder = default(Duration(duration, SECONDS))

  /**
   * Compose the cache with new caching function
   * @param alternative a closure getting the reponseheader and returning the duration
   * we should cache for
   */
  def compose(alternative: PartialFunction[ResponseHeader, Duration]): CachedBuilder = new CachedBuilder(
    cache = cache,
    key = key,
    caching = caching.orElse(alternative)
  )
}
| wsargent/playframework | framework/src/play-cache/src/main/scala/play/api/cache/Cached.scala | Scala | apache-2.0 | 8,215 |
package net.revenj.cache
import java.time.OffsetDateTime
import monix.eval.Task
import monix.reactive.Observable
import monix.reactive.subjects.PublishSubject
import net.revenj.extensibility.SystemState
import net.revenj.patterns.DataChangeNotification.{NotifyWith, Operation}
import net.revenj.patterns._
import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
/**
 * In-memory cache of a full data source that is eagerly refreshed from change
 * notifications. Keeps a URI-keyed map plus a secondary lookup keyed by
 * `extractKey`, and bumps an integer version on every change.
 */
class EagerDataCache[T <: Identifiable, PK](
  val name: String,
  repository: Repository[T] with SearchableRepository[T],
  dataChanges: DataChangeNotification,
  systemState: SystemState,
  extractKey: T => PK,
  initialValues: scala.collection.Seq[T] = Nil
) extends DataSourceCache[T] with AutoCloseable {

  // Primary store: URI -> instance. TrieMap gives lock-free reads.
  protected val cache = new TrieMap[String, T]()
  // Secondary index rebuilt wholesale on every change (see `change`).
  private var lookup: Map[PK, T] = Map.empty
  private var currentVersion = 0
  private val versionChangeSubject = PublishSubject[Int]()
  private var lastChange = OffsetDateTime.now()

  // Stream of version numbers; one element per successful change.
  def changes: Observable[Int] = versionChangeSubject.map(identity)

  def currentLookup: Map[PK, T] = lookup

  // When the notification subsystem (re)starts, resync the whole cache.
  systemState.change
    .filter(it => it.id == "notification" && it.detail == "started")
    .doOnNext(_ => Task.fromFuture(invalidateAll()))
    .subscribe()(monix.execution.Scheduler.Implicits.global)

  if (initialValues.nonEmpty) {
    set(initialValues)
  } else {
    invalidateAll()
  }

  // Apply change notifications for this data source as they arrive.
  private val subscription = dataChanges.notifications
    .filter(_.name == name)
    .map { n =>
      // Version snapshot taken before the async work so stale updates can be detected.
      val version = currentVersion
      n.operation match {
        case Operation.Insert | Operation.Update =>
          n match {
            case nw: NotifyWith[scala.collection.Seq[T]@unchecked] =>
              // Payload already attached to the notification: apply directly.
              if (nw.info != null && nw.info.nonEmpty) {
                change(nw.info, Nil, version, force = true)
              }
            case _ =>
              //TODO: use context from ctor
              implicit val global = scala.concurrent.ExecutionContext.Implicits.global
              repository.find(n.uris).foreach { items => change(items, Nil, version, force = false) }
          }
        case Operation.Change | Operation.Delete =>
          change(Nil, n.uris, version, n.isInstanceOf[NotifyWith[_]])
      }
    }.subscribe()(monix.execution.Scheduler.Implicits.global)

  def set(instances: scala.collection.Seq[T]): Unit = change(instances, Nil, currentVersion, force = true)

  def remove(uris: scala.collection.Seq[String]): Unit = change(Nil, uris, currentVersion, force = true)

  // Core mutation. If the cache moved on since `oldVersion` (and the change is
  // not forced) the update is considered stale and a full resync is triggered.
  private def change(newInstances: scala.collection.Seq[T], oldUris: scala.collection.Seq[String], oldVersion: Int, force: Boolean): Unit = {
    if (newInstances != null && oldUris != null && (newInstances.nonEmpty || oldUris.nonEmpty)) {
      val shouldInvalidateAll = if (force || oldVersion == currentVersion) {
        // URIs to delete = reported old URIs minus those being (re)inserted.
        val diff = oldUris.diff(newInstances.map(_.URI))
        synchronized {
          val isInvalid = currentVersion != oldVersion
          lastChange = OffsetDateTime.now()
          val newVersion = currentVersion + 1
          newInstances.foreach(f => cache.put(f.URI, f))
          diff.foreach(cache.remove)
          // Rebuild the secondary index from scratch after mutating the store.
          lookup = cache.values.map { it => extractKey(it) -> it }.toMap
          currentVersion = newVersion
          isInvalid
        }
      } else {
        true
      }
      if (shouldInvalidateAll) {
        invalidateAll()
      } else {
        versionChangeSubject.synchronized {
          versionChangeSubject.onNext(currentVersion)
        }
      }
    }
  }

  def get(uri: String): Option[T] = if (uri != null) cache.get(uri) else None

  // Lazily materialized snapshot of all values, re-built only when the version moved.
  private var itemsVersion = -1
  private var cachedItems: scala.collection.IndexedSeq[T] = IndexedSeq.empty

  def items: scala.collection.IndexedSeq[T] = {
    if (currentVersion != itemsVersion) {
      synchronized {
        // Double-checked under the lock: another thread may have refreshed already.
        val version = currentVersion
        if (version != itemsVersion) {
          cachedItems = cache.valuesIterator.toIndexedSeq
          itemsVersion = version
        }
      }
    }
    cachedItems
  }

  def version: Int = currentVersion

  def changedOn: OffsetDateTime = lastChange

  override def invalidate(uris: scala.collection.Seq[String]): Future[Unit] = {
    if (uris != null && uris.nonEmpty) {
      //TODO: use context from ctor
      implicit val global = scala.concurrent.ExecutionContext.Implicits.global
      val version = currentVersion
      repository.find(uris).map { found =>
        change(found, uris, version, force = false)
      }
    } else Future.failed(new RuntimeException("invalid uris provided"))
  }

  override def invalidateAll(): Future[Unit] = {
    //TODO: use context from ctor
    implicit val global = scala.concurrent.ExecutionContext.Implicits.global
    val version = currentVersion
    // Reload everything; current keys are passed as "old" so vanished rows get dropped.
    repository.search().map { found =>
      change(found, cache.keys.toIndexedSeq, version, force = false)
    }
  }

  override def find(uri: String): Future[Option[T]] = {
    Future.successful(get(uri))
  }

  override def find(uris: scala.collection.Seq[String]): Future[scala.collection.IndexedSeq[T]] = {
    if (uris != null) {
      Future.successful(uris.flatMap(get).toIndexedSeq)
    } else {
      Future.failed(new RuntimeException("invalid uris provided"))
    }
  }

  // Search/count/exists are answered from the in-memory snapshot, not the repository.
  override def search(specification: Option[Specification[T]], limit: Option[Int], offset: Option[Int]): Future[scala.collection.IndexedSeq[T]] = {
    val filtered = if (specification.isDefined) items.filter(specification.get) else items
    val skipped = if (offset.isDefined) filtered.drop(offset.get) else filtered
    Future.successful(if (limit.isDefined) skipped.take(limit.get) else skipped)
  }

  override def count(specification: Option[Specification[T]]): Future[Long] = {
    Future.successful(if (specification.isDefined) items.count(specification.get).toLong else items.size.toLong)
  }

  override def exists(specification: Option[Specification[T]]): Future[Boolean] = {
    Future.successful(if (specification.isDefined) items.exists(specification.get) else items.nonEmpty)
  }

  def close(): Unit = {
    subscription.cancel()
  }
}
| ngs-doo/revenj | scala/revenj-core/src/main/scala/net/revenj/cache/EagerDataCache.scala | Scala | bsd-3-clause | 6,026 |
package ddc
import ddc.Date.{IsLeapYear}
/** An immutable calendar date with Gregorian validity checks. Year 0 is considered invalid. */
class Date(y: Int, m: Int = 1, d: Int = 1) {
  val year: Int = y
  val month: Int = m
  val day: Int = d

  /** Upper bound on the day number for this month, or 0 when the month itself is invalid. */
  private def monthLimit: Int = month match {
    case 2                                => 28 + Date.IsLeapYearInt(year)
    case 4 | 6 | 9 | 11                   => 30
    case 1 | 3 | 5 | 7 | 8 | 10 | 12     => 31
    case _                                => 0
  }

  /** Day must be positive, below 32, and fit its month. */
  def IsValidDay: Boolean = day > 0 && day < 32 && IsValidByMonth

  /** Month must be 1..12 and the day must not exceed its length (February honours leap years). */
  def IsValidByMonth: Boolean = {
    val limit = monthLimit
    limit > 0 && day <= limit
  }

  /** Year 0 does not exist in this calendar. */
  def IsValidYear: Boolean = year != 0

  /** A date is valid when day, month and year checks all pass. */
  def IsValid: Boolean = IsValidDay && IsValidByMonth && IsValidYear
}

object Date {
  /** Gregorian rule: divisible by 4, except century years not divisible by 400. */
  def IsLeapYear(year: Int): Boolean =
    year % 4 == 0 && (year % 100 != 0 || year % 400 == 0)

  /** 1 for a leap year, 0 otherwise — handy as a day-count adjustment for February. */
  def IsLeapYearInt(year: Int): Int =
    if (IsLeapYear(year)) 1 else 0
}
| 0unit/DiaDoCuringaScala | src/main/scala/ddc/Date.scala | Scala | mit | 795 |
package com.arcusys.valamis.slide.service.export
import java.io.{File, InputStream}
import com.arcusys.valamis.content.model.{Answer, Category, PlainText, Question}
import com.arcusys.valamis.content.service.CategoryService
import com.arcusys.valamis.slide.model._
import com.arcusys.valamis.slide.service.{SlideElementService, SlideService, SlideSetService}
import com.arcusys.valamis.slide.storage.SlideElementPropertyRepository
import com.arcusys.valamis.util.FileSystemUtil
import com.arcusys.valamis.util.export.ImportProcessor
import com.arcusys.valamis.util.serialization.DateTimeSerializer
import org.joda.time.DateTime
import org.json4s.jackson.JsonMethods._
import org.json4s.{DefaultFormats, Formats}
/** Imports a slide set (slides, media files, questions) from a zipped export stream into a course scope. */
trait SlideSetImporter {
  def importItems(stream: InputStream, scopeId: Int): Unit
}
abstract class SlideSetImporterImpl
extends SlideSetExportUtils
with ImportProcessor[ExportFormat]
with SlideSetImporter {
implicit val jsonFormats: Formats = DefaultFormats + DateTimeSerializer
def slideSetService: SlideSetService
def slideService: SlideService
def slideElementService: SlideElementService
def slideElementPropertyRepository: SlideElementPropertyRepository
def categoryService: CategoryService
  // Recreates the imported slide graph under `createdSlideSet`, starting from the
  // root slide and walking left/top links. Fills `slidesMapper` (old id -> new id)
  // so later passes can rewrite references.
  private def addSlides(slides: Seq[Slide],
                        oldSlideSet: SlideSet,
                        createdSlideSet: SlideSet,
                        slideSetVersion: Option[String],
                        slidesMapper: scala.collection.mutable.Map[Long, Long],
                        localPath: String): Unit = {
    val firstSlide = slideService.getRootSlide(slides)
    firstSlide foreach { slide =>
      // Root keeps its original (likely empty) link ids.
      addSlide(slide, slide.leftSlideId, slide.topSlideId)
      slides.find(_.leftSlideId.contains(slide.id)).foreach(addSlidesHelper)
      slides.find(_.topSlideId.contains(slide.id)).foreach(addSlidesHelper)
    }
    // Persists one slide (id reset, re-parented) and records the id mapping.
    def addSlide(prevSlideModel: Slide, leftSlideId: Option[Long], topSlideId: Option[Long]) = {
      val createdSlide = slideService.create(
        prevSlideModel.copy(
          id = 0L,
          slideSetId = createdSlideSet.id,
          leftSlideId = leftSlideId,
          topSlideId = topSlideId)
      )
      slidesMapper += (prevSlideModel.id -> createdSlide.id)
      updateBgImage(prevSlideModel, slideSetVersion, createdSlide, localPath)
    }
    // Depth-first walk over left/top successors; link ids are translated via the mapper.
    def addSlidesHelper(slide: Slide): Unit = {
      addSlide(
        slide,
        slide.leftSlideId.flatMap(oldLeftSlideId => slidesMapper.get(oldLeftSlideId)),
        slide.topSlideId.flatMap(oldTopSlideId => slidesMapper.get(oldTopSlideId))
      )
      slides.find(_.leftSlideId.contains(slide.id)).foreach(addSlidesHelper)
      slides.find(_.topSlideId.contains(slide.id)).foreach(addSlidesHelper)
    }
  }
  // Re-uploads the slide's background image (if any) from the extracted archive
  // and points the new slide at the uploaded copy, preserving the display mode
  // suffix (the token after the space in the stored bgImage value).
  private def updateBgImage(prevSlideModel: Slide,
                            slideSetVersion: Option[String],
                            createdSlide: Slide,
                            localPath: String) = {
    prevSlideModel.bgImage.flatMap { image =>
      // "/delegate/" URLs carry the real file name in a `file=` query parameter.
      val bg = if (image.contains("/delegate/")) {
        image.replaceFirst(""".+file=""", "").replaceAll("""(&date=\d+)?"?\)?(\s+.+)?""", "")
      }
      else {
        image
      }
      getFromPath(bg, SlideSetHelper.filePathPrefix(prevSlideModel))
    }.foreach {
      case (folderName, rawFileName) =>
        val fileName = rawFileName.split(" ").head
        val displayMode = SlideSetHelper.getDisplayMode(prevSlideModel.bgImage.get)
        val url = addImageToFileService(
          createdSlide,
          fileName,
          localPath + File.separator + getPath(folderName, fileName, slideSetVersion)
        ) + ' ' + displayMode
        slideService.updateBgImage(createdSlide.id, Some(url))
    }
  }
  // Recreates one slide element on `newSlideId`, rewriting its content to point
  // at the imported copies: media files are re-uploaded, question/plaintext ids
  // are translated through the `questions`/`plaintexts` mappings
  // (original entity, [answers,] new id).
  private def addSlideElements(element: SlideElement,
                               questions: Seq[(Question, Seq[Answer], Option[Long])],
                               plaintexts: Seq[(PlainText, Option[Long])],
                               newSlideId: Long,
                               slideSetVersion: Option[String],
                               courseId: Long,
                               localPath: String,
                               data: String): Unit = {
    // Old exports could mark plain texts as questions; detect and re-type them.
    val planTextFromQuestions = plaintexts.filter {
      _._1.id match {
        case Some(id) if id.toString == element.content => true
        case _ => false
      }
    }
    val slideElement = if (planTextFromQuestions.nonEmpty && (element.slideEntityType == SlideEntityType.Question))
      element.copy(slideEntityType = SlideEntityType.PlainText)
    else element
    slideElement.slideEntityType match {
      case SlideEntityType.Image | SlideEntityType.Pdf | SlideEntityType.Video | SlideEntityType.Webgl | SlideEntityType.Audio =>
        // Media: extract file name/folder from either a delegate URL or a plain path.
        val (slideContent, folder) =
          if (slideElement.content.contains("/delegate/")) {
            val content = slideElement.content.replaceFirst(""".+file=""", "").replaceAll("""(&date=\d+)?"?\)?(\s+.+)?""", "")
            val folderId = slideElement.content.replaceFirst(""".+folderId=""", "").replaceAll("""(&file=.+)?"?\)?(\s+.+)?""", "")
            if (folderId.isEmpty)
              (content, SlideSetHelper.filePathPrefix(slideElement))
            else
              (content, folderId + "/")
          }
          else {
            (slideElement.content, SlideSetHelper.filePathPrefix(slideElement))
          }
        val createdSlideElement = createSlideElement(slideElement, slideContent, newSlideId, data)
        getFromPath(slideContent, folder).foreach { case (folderName: String, rawFileName: String) =>
          val fileName = rawFileName.split(" ").head
          val realFilePath = localPath + File.separator + getPath(folderName, fileName, slideSetVersion)
          // Some archives store the file with an explicit extension suffix; try both.
          lazy val ext = slideElement.content.replaceFirst(""".+ext=""", "").replaceAll("""\"\)""", "")
          val path = Seq(realFilePath, realFilePath + "." + ext).find(new File(_).exists)
          if (path.isDefined) {
            val url = addImageToFileService(
              createdSlideElement,
              fileName,
              path.get
            )
            val content =
              if (slideElement.slideEntityType == SlideEntityType.Pdf)
                // PDFs keep their original URL shape but with the new element id spliced in.
                slideElement.content.replaceFirst("(.+(slide|quiz)Data)\\d+(/.+)", "$1" + createdSlideElement.id + "$3")
              else
                url
            slideElementService.update(
              SlideElement(
                createdSlideElement.id,
                slideElement.zIndex,
                content,
                slideElement.slideEntityType,
                newSlideId,
                slideElement.correctLinkedSlideId,
                slideElement.incorrectLinkedSlideId,
                slideElement.notifyCorrectAnswer,
                slideElement.properties)
            )
          }
        }
      case SlideEntityType.Question =>
        // Content is the old question id; replace with the newly created question's id.
        createSlideElement(
          slideElement,
          questions.find(_._1.id == Some(slideElement.content.toLong))
            .flatMap(_._3)
            .map(_.toString).getOrElse(""),
          newSlideId,
          data)
      case SlideEntityType.PlainText =>
        createSlideElement(
          slideElement,
          plaintexts.find(_._1.id == Some(slideElement.content.toLong))
            .flatMap(_._2)
            .map(_.toString).getOrElse(""),
          newSlideId,
          data)
      case SlideEntityType.RandomQuestion =>
        // Content is a comma-separated list of prefixed ids; rewrite each one.
        val newContent = slideElement.content
          .split(",")
          .map {
            case e if e.startsWith(SlideConstants.PlainTextIdPrefix) =>
              plaintexts
                .find(_._1.id == Some(e.replace(SlideConstants.PlainTextIdPrefix, "").toLong))
                .flatMap(_._2)
                .map(SlideConstants.PlainTextIdPrefix + _.toString)
            case e if e.startsWith(SlideConstants.QuestionIdPrefix) =>
              questions
                .find(_._1.id == Some(e.replace(SlideConstants.QuestionIdPrefix, "").toLong))
                .flatMap(_._3)
                .map(SlideConstants.QuestionIdPrefix + _.toString)
            case e => throw new IllegalStateException("No object in random question with required id: " + e)
          }
        createSlideElement(
          slideElement,
          newContent.flatten.mkString(","),
          newSlideId,
          data)
      case _ =>
        // Other element types carry their content verbatim.
        createSlideElement(
          slideElement,
          slideElement.content,
          newSlideId,
          data)
    }
  }
  // Persists a copy of the element on the target slide. For legacy packages whose
  // elements carry no `properties`, position/size are recovered from the raw
  // export JSON (`data`) and stored as device-1 properties.
  private def createSlideElement(slideElement: SlideElement,
                                 content: String,
                                 slideId: Long,
                                 data: String): SlideElement = {
    val newSlideElement = slideElementService.create(slideElement.copy(content = content, slideId = slideId))
    //if in old packages elements without properties
    if (slideElement.properties.isEmpty) {
      val oldElements = for {
        slide <- parse(data).\("slideSet").\("slides").children
        slideElement <- slide.\("slideElements").extract[List[SlideOldElementModel]]
      } yield slideElement
      oldElements
        .filter(_.id == slideElement.id)
        .foreach(el =>
          slideElementPropertyRepository.createFromOldValues(
            deviceId = 1,
            newSlideElement.id,
            el.top,
            el.left,
            el.width,
            el.height)
        )
    }
    newSlideElement
  }
  // Main import pass over the parsed export. Expects exactly one ExportFormat,
  // creates the content categories/questions/plaintexts, the slide set, its
  // slides (with id remapping), and finally the slide elements with rewritten
  // cross-slide links.
  override protected def importItems(items: List[ExportFormat],
                                     courseId: Long,
                                     tempDirectory: File,
                                     userId: Long,
                                     data: String): Unit = {
    require(items.length == 1)
    val item = items.head
    val slideSet = item.slideSet.toModel
    val version = item.version
    // Format 2.1 separates questions/plaintexts/categories; older formats encode
    // plain texts as questions of type 8 or 9 and have no categories.
    val (questions, plaintexts, categories) = version match {
      case Some("2.1") => (item.questions.map(QuestionExternalFormat.importQuestion),
        item.plaintexts.map(QuestionExternalFormat.importPlainText),
        item.categories.map(QuestionExternalFormat.importCategory))
      case _ =>
        val planText = item.questions.filter(q => (q.tpe == 8) || (q.tpe == 9))
          .map(QuestionExternalFormat.importPlainTextLast)
        val newQuestions = item.questions.filter(q => (q.tpe != 8) && (q.tpe != 9))
          .map(QuestionExternalFormat.importQuestionLast)
        (newQuestions, planText, Seq[Category]())
    }
    // Recreate content under a fresh root category named after the slide set.
    val (questionMap, plainTextMap) = if (questions.size + plaintexts.size > 0) {
      val rootCategory = categoryService.create(
        Category(None,
          s"Export for ${item.slideSet.title}",
          s"Export for ${item.slideSet.title} from ${DateTime.now}",
          None,
          courseId
        )
      )
      // old category id -> new category id
      val categoryMap = categories.map { cat =>
        val oldCatId = cat.id
        val newCatId = categoryService.create(cat.copy(categoryId = rootCategory.id)).id
        (oldCatId -> newCatId)
      }
      val qMap = questions.map { qPair =>
        val newQuestion = questionService.createWithNewCategory(qPair._1,
          qPair._2,
          categoryMap.find(_._1 == qPair._1.categoryId).flatMap(_._2)
        )
        (qPair._1, qPair._2, newQuestion.id)
      }
      val pMap = plaintexts.map { pt =>
        val newPT = plainTextService.create(pt.copy(categoryId = categoryMap.find(_._1 == pt.categoryId).flatMap(_._2)))
        (pt -> newPT.id)
      }
      categoryService.moveToCourse(rootCategory.id.get, courseId, true)
      (qMap, pMap)
    } else {
      (Seq(), Seq())
    }
    // Top-down navigation is inferred from the presence of any vertical slide link.
    val topDownNavigation = item.slideSet.slides.exists(_.topSlideId.isDefined)
    val createdSlideSet = slideSetService.create(
      SlideSet(
        title = slideSet.title,
        description = slideSet.description,
        courseId = courseId,
        logo = slideSet.logo,
        isTemplate = slideSet.isTemplate,
        isSelectedContinuity = slideSet.isSelectedContinuity,
        duration = slideSet.duration,
        scoreLimit = slideSet.scoreLimit,
        playerTitle = slideSet.playerTitle,
        topDownNavigation = topDownNavigation,
        activityId = slideSetService.createNewActivityId(courseId),
        requiredReview = slideSet.requiredReview),
      Seq()
    )
    slideSet.logo.map { logoString =>
      // 2.1 archives keep assets under "resources", legacy ones under "images".
      val folderPrefix = version match {
        case Some(v) => "resources"
        case _ => "images"
      }
      val path =
        tempDirectory.getPath +
          File.separator +
          folderPrefix +
          File.separator +
          SlideSetHelper.filePathPrefix(slideSet, version) +
          File.separator +
          logoString
      addImageToFileService(createdSlideSet, logoString, path)
    }
    val slideMapper = scala.collection.mutable.Map[Long, Long]()
    addSlides(item.slideSet.slides, slideSet, createdSlideSet, version, slideMapper, tempDirectory.getPath)
    // Second pass: elements, with linked-slide ids translated to the new ids.
    slideMapper.foreach { case (oldSlideId, newSlideId) =>
      for {
        slide <- item.slideSet.slides.filter(_.id == oldSlideId)
        slideElement <- slide.slideElements
      } {
        val correctLinkedSlideId = slideElement.correctLinkedSlideId.flatMap(oldId => slideMapper.get(oldId))
        val incorrectLinkedSlideId = slideElement.incorrectLinkedSlideId.flatMap(oldId => slideMapper.get(oldId))
        addSlideElements(slideElement.copy(correctLinkedSlideId = correctLinkedSlideId, incorrectLinkedSlideId = incorrectLinkedSlideId),
          questionMap,
          plainTextMap,
          newSlideId,
          version,
          courseId,
          tempDirectory.getPath,
          data)
      }
    }
  }
  // Entry point: spools the uploaded zip to a temp file, then delegates to the file-based import.
  override def importItems(stream: InputStream, scopeId: Int): Unit =
    importItems(FileSystemUtil.streamToTempFile(stream, "Import", ".zip"), scopeId)
  // Converts the wire-format slide set into the domain model; requiredReview
  // defaults to false for exports that predate the field.
  implicit class SlideModelExtension(val slideSetExport: SlideSetExportModel) {
    def toModel: SlideSet = {
      SlideSet(
        id = slideSetExport.id,
        title = slideSetExport.title,
        description = slideSetExport.description,
        courseId = slideSetExport.courseId,
        logo = slideSetExport.logo,
        isTemplate = slideSetExport.isTemplate,
        isSelectedContinuity = slideSetExport.isSelectedContinuity,
        themeId = slideSetExport.themeId,
        duration = slideSetExport.duration,
        scoreLimit = slideSetExport.scoreLimit,
        playerTitle = slideSetExport.playerTitle,
        topDownNavigation = slideSetExport.topDownNavigation,
        activityId = slideSetExport.activityId,
        status = slideSetExport.status,
        version = slideSetExport.version,
        modifiedDate = slideSetExport.modifiedDate,
        oneAnswerAttempt = slideSetExport.oneAnswerAttempt,
        requiredReview = slideSetExport.requiredReview.getOrElse(false)
      )
    }
  }
} | arcusys/Valamis | valamis-slide/src/main/scala/com/arcusys/valamis/slide/service/export/SlideSetImporterImpl.scala | Scala | gpl-3.0 | 15,084 |
package bulu.actor.query
import akka.actor.Actor
import akka.actor.ActorLogging
import bulu.core.MeasureType
import scala.collection.mutable.ArrayBuffer
import bulu.core.BitKey
import bulu.util.CacheFilterBegin
import bulu.util.ConfigHelper
import akka.actor.Props
import bulu.util.QueryReply
import akka.routing.RoundRobinRouter
import bulu.util.CacheCell
// Actor that replays a cached cuboid (`aggs`: parallel key list + per-measure
// value columns) through a filter/aggregator pipeline when a query arrives.
class ParallelCache(cube: String, workerIndex: Int, dispatcherIndex: Int,
                    aggs: (List[BitKey], Map[(String, MeasureType.MeasureType), ArrayBuffer[BigDecimal]])) extends Actor with ActorLogging {
  def receive: Receive = {
    case CacheFilterBegin(queryReply) =>
      val keyList = aggs._1
      val valueList = aggs._2
      // Aggregator collects filtered cells; it knows the total cell count up front.
      val cellAggregator = context.actorOf(Props(new CellAggregator(queryReply.query, workerIndex, dispatcherIndex, queryReply.reply, keyList.size))) //, name = "cellAggregator")
      // Filter stage is fanned out round-robin over 8 routees.
      val cellFilter = context.actorOf(Props(new CellFilter(cube, cellAggregator, queryReply.cuboidMask, queryReply.filterAndMatch)).
        withRouter(RoundRobinRouter(nrOfInstances = 8))) //, name = "cellFilter")
      val fields = ConfigHelper.getMeasureFields(queryReply.query.name)
      // Stream cells one by one: key i paired with the i-th value of each requested measure.
      for (i <- 0 until keyList.size) {
        val key = keyList(i)
        val values = for ((id, value) <- valueList if fields.contains(id._1)) yield (id, value(i))
        cellFilter ! CacheCell(key, values.toMap)
      }
  }
} | hwzhao/bulu | src/main/scala/bulu/actor/query/ParallelCache.scala | Scala | apache-2.0 | 1,421 |
/**
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.pso.kafka2avro.config
/** The configuration of the Kafka2Avro application.
*
* @constructor Create a config object
* @param broker The Kafka broker address in the form IP:port
* @param destBucket The destination Bucket in GCS, without the gs:// prefix
* @param destPath The destination path (directories) in the GCS bucket (e.g. a/b/c)
* @param kafkaTopic Messages will be pulled from this Kafka topic
* @param numDemoObjects Number of objects that will be written to Kafka for demo purposes
*/
// See the scaladoc above for field meanings; all fields are required at parse time.
case class Kafka2AvroConfig(
  broker: String,          // Kafka broker address, "IP:port"
  destBucket: String,      // GCS bucket name, no "gs://" prefix
  destPath: String,        // path inside the bucket, e.g. "a/b/c"
  kafkaTopic: String,      // topic to consume from
  numDemoObjects: Int      // number of demo objects written to Kafka
)
| CloudVLab/professional-services | examples/dataflow-scala-kafka2avro/src/main/scala/com/google/cloud/pso/kafka2avro/config/Kafka2AvroConfig.scala | Scala | apache-2.0 | 1,285 |
package net.mentalarray.doozie.Internal
import scala.concurrent._
import scala.concurrent.duration.Duration
import scala.util._
trait FortuneTeller[T] {

  // Execution context shared by all futures spawned from this trait.
  implicit lazy val ec = ThreadPool.context

  /** Runs `fn` asynchronously on the trait's execution context.
    * (`U` is unused but kept so existing call sites that supply it still compile.)
    */
  protected def async[U](fn: => T): Future[T] = Future(fn)

  /** Blocks until the futures complete and collects their results.
    *
    * Bug fix: the previous implementation built a `Failure` on error and then
    * discarded it, so failures were silently swallowed and a partial `Success`
    * was returned. Now the first failure is propagated as `Failure` (and any
    * remaining futures are not awaited).
    *
    * Note: results are accumulated by prepending, so — as before — they come
    * back in reverse traversal order.
    */
  protected def awaitAllFutures[A](futures: Traversable[Future[A]]): Try[Traversable[A]] = {
    futures.foldLeft(Try(List.empty[A])) { (acc, f) =>
      acc.flatMap { collected =>
        Await.ready(f, Duration.Inf)
        // Safe: Await.ready guarantees the future is completed here.
        f.value.get.map(_ :: collected)
      }
    }
  }
}
| antagonist112358/tomahawk | workflow-engine/src/net/mentalarray/doozie/Internal/FortuneTeller.scala | Scala | apache-2.0 | 758 |
package mesosphere.marathon
package core.launchqueue.impl
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Keep, Source}
import mesosphere.AkkaUnitTest
import mesosphere.marathon.core.launcher.{InstanceOp, OfferMatchResult}
import mesosphere.marathon.core.launchqueue.impl.OfferMatchStatistics.LaunchFinished
import mesosphere.marathon.state.{AppDefinition, PathId, Timestamp}
import mesosphere.marathon.test.MarathonTestHelper
import mesosphere.mesos.NoOfferMatchReason
import org.apache.mesos.{Protos => Mesos}
import org.scalatest.Inside
import org.scalatest.concurrent.Eventually
/**
 * Exercises the statistics sinks of [[OfferMatchStatistics]]: offer match
 * updates are folded into per-runSpec statistics and a per-runSpec/per-agent
 * "last no-match" map, and a LaunchFinished event resets a runSpec's entries.
 */
class OfferMatchStatisticsActorTest extends AkkaUnitTest with Eventually with Inside {
  import OfferMatchStatistics.{OfferMatchUpdate, MatchResult}

  // Run streams on the calling-thread dispatcher so stream processing happens
  // deterministically on the test thread.
  override def materializerSettings =
    super.materializerSettings.withDispatcher(akka.testkit.CallingThreadDispatcher.Id)

  "OfferMatchStatisticsActor" should {
    "Collect and aggregate OfferMatchResults" in {
      Given("Statistics actor with empty statistics")
      val f = new Fixture

      // NOTE(review): six updates are sent despite the "5 events" wording in
      // the step description below — confirm which is intended.
      When("The sinks receive 5 events regarding 3 different apps")
      val (runSpecStatisticsFold, noMatchStatisticsFold) = Source(List[OfferMatchUpdate](
        f.matchedA,
        f.matchedB,
        f.noMatchA,
        f.noMatchB,
        f.noMatchBSecond,
        f.matchedC
      )).runWith(sinks)
      val (runSpecStatistics, noMatchStatistics) =
        (runSpecStatisticsFold.finalResult.futureValue, noMatchStatisticsFold.finalResult.futureValue)
      runSpecStatistics should have size 3
      noMatchStatistics should have size 2

      Then("The actor aggregates the data correctly for app A")
      val statisticsA = runSpecStatistics(f.runSpecA.id)
      statisticsA.lastMatch should be(Some(f.matchedA.matchResult))
      statisticsA.lastNoMatch should be(Some(f.noMatchA.matchResult))

      Then("The actor aggregates the data correctly for app B")
      // B received two no-matches; only the most recent one must be retained.
      val statisticsB = runSpecStatistics(f.runSpecB.id)
      statisticsB.lastMatch should be(Some(f.matchedB.matchResult))
      statisticsB.lastNoMatch should be(Some(f.noMatchBSecond.matchResult))

      Then("The actor aggregates the data correctly for app C")
      // C only ever matched, so no no-match entry may exist.
      val statisticsC = runSpecStatistics(f.runSpecC.id)
      statisticsC.lastMatch should be(Some(f.matchedC.matchResult))
      statisticsC.lastNoMatch should be(empty)

      And("Stores the last NoMatches per runSpec/per agent for app A and B")
      val lastNoMatchesA = noMatchStatistics(f.runSpecA.id)
      lastNoMatchesA should have size 1
      lastNoMatchesA.values.head should be(f.noMatchA.matchResult)
      val lastNoMatchesB = noMatchStatistics(f.runSpecB.id)
      lastNoMatchesB should have size 1
      lastNoMatchesB.values.head should be(f.noMatchBSecond.matchResult)
    }

    "If the launch attempt is finished, the statistics will be reset" in {
      Given("Statistics actor with some statistics for app A and C")
      val f = new Fixture
      // A queue source lets the test interleave updates with result reads.
      val (input, (runSpecStatisticsFold, noMatchStatisticsFold)) = Source.queue[OfferMatchUpdate](16, OverflowStrategy.fail)
        .toMat(sinks)(Keep.both)
        .run
      input.offer(f.matchedA)
      input.offer(f.noMatchA)
      input.offer(f.matchedC)
      runSpecStatisticsFold.readCurrentResult().futureValue should have size 2
      noMatchStatisticsFold.readCurrentResult().futureValue should have size 1
      inside(runSpecStatisticsFold.readCurrentResult().futureValue) {
        case result =>
          result.get(f.runSpecA.id) should be(defined)
          result.get(f.runSpecC.id) should be(defined)
      }

      When("The launch attempt for app A finishes")
      input.offer(LaunchFinished(f.runSpecA.id))

      Then("The statistics for app A are removed")
      // Only A is reset; C's entry must survive.
      runSpecStatisticsFold.readCurrentResult().futureValue should have size 1
      noMatchStatisticsFold.readCurrentResult().futureValue should have size 0
      inside(runSpecStatisticsFold.readCurrentResult().futureValue) {
        case result =>
          result.get(f.runSpecA.id) should be(empty)
          result.get(f.runSpecC.id) should be(defined)
      }
    }

    "Statistics can be queried" in {
      Given("Statistics actor with some statistics for app A and C")
      val f = new Fixture
      val (runSpecStatisticsFold, noMatchStatisticsFold) = Source(List[OfferMatchUpdate](
        f.matchedA,
        f.noMatchA,
        f.noMatchA,
        f.matchedC)).runWith(sinks)
      val (runSpecStatistics, noMatchStatistics) =
        (runSpecStatisticsFold.finalResult.futureValue, noMatchStatisticsFold.finalResult.futureValue)
      runSpecStatistics should have size 2
      noMatchStatistics should have size 1
      runSpecStatistics.get(f.runSpecA.id) should be(defined)
      runSpecStatistics.get(f.runSpecC.id) should be(defined)
      val infoA = runSpecStatistics(f.runSpecA.id)
      infoA.lastMatch should be(Some(f.matchedA.matchResult))
      infoA.lastNoMatch should be(Some(f.noMatchA.matchResult))
      // Two identical no-matches for A fold into a reject-reason count of 2.
      infoA.rejectSummary should be(Map(NoOfferMatchReason.InsufficientCpus -> 2))
    }
  }

  // Fans each update out to both statistics sinks, materializing both folds.
  def sinks = Flow[OfferMatchUpdate]
    .alsoToMat(OfferMatchStatistics.runSpecStatisticsSink)(Keep.right)
    .toMat(OfferMatchStatistics.noMatchStatisticsSink)(Keep.both)

  // Shared test data: three run specs, canned match/no-match results.
  class Fixture {
    val runSpecA = AppDefinition(PathId("/a"))
    val runSpecB = AppDefinition(PathId("/b"))
    val runSpecC = AppDefinition(PathId("/c"))
    // Builds a basic offer attributed to the given agent id.
    def offerFrom(agent: String, cpus: Double = 4) = MarathonTestHelper.makeBasicOffer(cpus = cpus).setSlaveId(Mesos.SlaveID.newBuilder().setValue(agent)).build()
    val instanceOp = mock[InstanceOp]
    import mesosphere.mesos.NoOfferMatchReason._
    // First listed reason (InsufficientCpus) is the one counted in summaries.
    val reasonA = Seq(InsufficientCpus, InsufficientPorts, InsufficientMemory)
    val reasonB = Seq(InsufficientCpus, InsufficientPorts, InsufficientMemory, UnfulfilledConstraint)
    val noMatchA = MatchResult(OfferMatchResult.NoMatch(runSpecA, offerFrom("agent1"), reasonA, Timestamp.now()))
    val matchedA = MatchResult(OfferMatchResult.Match(runSpecA, offerFrom("agent1"), instanceOp, Timestamp.now()))
    val noMatchB = MatchResult(OfferMatchResult.NoMatch(runSpecB, offerFrom("agent2", cpus = 0.1), reasonB, Timestamp.now()))
    val noMatchBSecond = MatchResult(OfferMatchResult.NoMatch(runSpecB, offerFrom("agent2", cpus = 0.2), reasonB, Timestamp.now()))
    val matchedB = MatchResult(OfferMatchResult.Match(runSpecB, offerFrom("agent2"), instanceOp, Timestamp.now()))
    val matchedC = MatchResult(OfferMatchResult.Match(runSpecC, offerFrom("agent3"), instanceOp, Timestamp.now()))
  }
}
| gsantovena/marathon | src/test/scala/mesosphere/marathon/core/launchqueue/impl/OfferMatchStatisticsTest.scala | Scala | apache-2.0 | 6,567 |
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.maritime_data.first_version_doesnt_work
/**
* Created by nkatz on 4/26/17.
*/
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.MongoClient
import com.mongodb.casbah.commons.MongoDBObject
import logic.Examples.Example
/**
*
* THIS IS THE OLD VERSION THAT WORKS WITH THE DataPreProcessing CODE.
* THERE IS AN ERROR IN SUBTRACTING 1 FROM TIMES, YOU DON'T HAVE TO DO THAT.
*
*
*
*
*/
object MaritimeDataToMongo {

  /**
   * Entry point: reads the raw maritime collections and writes joined
   * examples into the "Maritime-Aegean-All_HLEs-Joined" database.
   */
  def main(args: Array[String]) = {
    storeMaritimeData_Whole()
    //val mc = MongoClient()
    //mc("Maritime-Aegean-All_HLEs-Joined")("examples").find().foreach(println)
  }

  /**
   * Streams every joined example produced by [[getMaritimeData_Whole]] into
   * the "examples" collection, one document per time point.
   */
  def storeMaritimeData_Whole() = {
    val mc = MongoClient()
    val exmpls = getMaritimeData_Whole("Maritime-Aegean-whole", chunkSize = 1, mc)
    val newCollection = mc("Maritime-Aegean-All_HLEs-Joined")("examples")
    var i = 0 // progress counter, printed for console feedback only
    for (x <- exmpls) {
      println(i)
      val e = x
      val entry = MongoDBObject("time" -> e.time) ++ ("annotation" -> e.annotation) ++ ("narrative" -> e.narrative)
      newCollection.insert(entry)
      i += 1
    }
  }

  /* Try to get all data at once, for all HLEs */
  def getMaritimeData_Whole(readFromDB: String, chunkSize: Int, mc: MongoClient) = {

    // Converts a cursor of HLE documents into "holdsAt" annotation atoms for
    // the given LLE time. Initiation points are stamped with lleTime + 1.
    // NOTE(review): this +1 shift is the suspect behavior flagged by the
    // file-header comment ("ERROR IN SUBTRACTING 1 FROM TIMES") — confirm
    // before reusing this version.
    def getHLEs(cursor: Iterator[DBObject], lleTime: Int, initiationPoint: Boolean) = {
      cursor.foldLeft(List[String]()) { (atoms, hleDbObject) =>
        val hle = hleDbObject.asInstanceOf[BasicDBObject].get("hle").toString
        val atom = hle match {
          // Binary HLEs over a vessel and an area.
          case "highSpeedIn" | "withinArea" =>
            val vessel = hleDbObject.asInstanceOf[BasicDBObject].get("vessel").toString
            val area = hleDbObject.asInstanceOf[BasicDBObject].get("area").toString
            if (!initiationPoint) s"""holdsAt($hle("$vessel","$area"),"$lleTime")""" else s"""holdsAt($hle("$vessel","$area"),"${lleTime + 1}")"""
          // Unary HLEs over a single vessel.
          case "loitering" | "lowSpeed" | "sailing" | "stopped" =>
            val vessel = hleDbObject.asInstanceOf[BasicDBObject].get("vessel").toString
            if (!initiationPoint) s"""holdsAt($hle("$vessel"),"$lleTime")""" else s"""holdsAt($hle("$vessel"),"${lleTime + 1}")"""
          // HLE over a pair of vessels.
          case "rendezVouz" =>
            val v1 = hleDbObject.asInstanceOf[BasicDBObject].get("vessel1").toString
            val v2 = hleDbObject.asInstanceOf[BasicDBObject].get("vessel2").toString
            if (!initiationPoint) s"""holdsAt($hle("$v1","$v2"),"$lleTime")""" else s"""holdsAt($hle("$v1","$v2"),"${lleTime + 1}")"""
          case _ => throw new RuntimeException(s"HLE name: $hle not found")
        }
        atoms :+ atom
      }
    }
    //val mc = MongoClient()
    val lleCollection = mc(readFromDB)("lles")
    val portsCollection = mc(readFromDB)("not-close-to-ports")
    val speedLimitsCollection = mc(readFromDB)("speed-limits")
    // One collection per HLE type; all are queried for every time window.
    val hleCollections = List(mc(readFromDB)("high_speed"), mc(readFromDB)("within-area"),
      mc(readFromDB)("loitering"), mc(readFromDB)("low-speed"), mc(readFromDB)("sailing"), mc(readFromDB)("stopped"))
    /*
    val hleCollection = HLE match {
      case "highSpeedIn" => mc(dbName)("high_speed")
      case "withinArea" => mc(dbName)("within-area")
      case "loitering" => mc(dbName)("loitering")
      case "lowSpeed" => mc(dbName)("low-speed")
      case "sailing" => mc(dbName)("sailing")
      case "stopped" => mc(dbName)("stopped")
      case _ => throw new RuntimeException(s"Don't know this LLE: $HLE")
    }
    */
    // Index on time so the sorted scan below is efficient.
    lleCollection.createIndex(MongoDBObject("time" -> 1))
    val grouped = lleCollection.find().sort(MongoDBObject("time" -> 1)).grouped(chunkSize)
    val chunked = grouped.map { docs =>
      val (narrative, annotation) = docs.foldLeft(List[String](), List[String]()){ (accum, dbObject) =>
        val (_narrative, _annotation) = (accum._1, accum._2)
        val areas = dbObject.asInstanceOf[BasicDBObject].get("areas").asInstanceOf[BasicDBList].toList.map(x => x.toString)
        val vessels = dbObject.asInstanceOf[BasicDBObject].get("vessels").asInstanceOf[BasicDBList].toList.map(x => x.toString)
        val lles = dbObject.asInstanceOf[BasicDBObject].get("atoms").asInstanceOf[BasicDBList].toList.map(x => x.toString)
        val currentTime = dbObject.asInstanceOf[BasicDBObject].get("time").toString
        // Narrative atom: vessels that are not close to any port at this time.
        val vesselQueries = vessels.map(v => MongoDBObject("vessel" -> v) ++ ("time" -> currentTime))
        val vs = vesselQueries flatMap (q => portsCollection.find(q))
        val portsAtoms = vs.map{ x =>
          val vessel = x.asInstanceOf[BasicDBObject].get("vessel").toString
          s"""notCloseToPorts("$vessel","$currentTime")"""
        }
        // Narrative atom: speed limits for the areas seen in this document.
        val areaQueries = areas.map(a => MongoDBObject("area" -> a))
        val as = areaQueries flatMap (q => speedLimitsCollection.find(q))
        val speedLimitAtoms = as.map{ x =>
          val area = x.asInstanceOf[BasicDBObject].get("area").toString
          val speed = x.asInstanceOf[BasicDBObject].get("limit").toString
          s"""speedLimit("$area","$speed")"""
        }
        // query1: HLE intervals covering currentTime; query2: intervals that
        // start exactly at the next tick (initiation points).
        val query1 = ("start_time" $lte currentTime.toInt) ++ ("end_time" $gte currentTime.toInt)
        val query2 = "start_time" $eq currentTime.toInt + 1
        val hledocs1 = hleCollections.map(c => c.find(query1))
        val hledocs2 = hleCollections.map(c => c.find(query2))
        val initiationPoints = hledocs2.flatMap(x => getHLEs(x, currentTime.toInt, initiationPoint = true))
        val medianPoints = hledocs1.flatMap(x => getHLEs(x, currentTime.toInt, initiationPoint = false))
        (_narrative ++ lles ++ portsAtoms ++ speedLimitAtoms, (_annotation ++ initiationPoints ++ medianPoints).distinct)
      }
      // The chunk's example is stamped with the first document's time.
      val mergedExmplTime = docs.head.asInstanceOf[BasicDBObject].get("time").toString
      val _merged = new Example(annot = annotation, nar = narrative, _time = mergedExmplTime)
      //new Exmpl(_id = _merged.time, exampleWithInertia = _merged)
      _merged
    }
    chunked
  }
}
| nkatzz/OLED | src/main/scala/experiments/datautils/maritime_data/first_version_doesnt_work/MaritimeDataToMongo.scala | Scala | gpl-3.0 | 6,711 |
package benchmarks
package cec
package cec2005
import zio._
import zio.test._
import zio.random.Random
import zio.prelude.NonEmptyList
object Generators {

  /**
   * Generates four vectors of coordinates — one per CEC 2005 problem
   * dimension (2, 10, 30 and 50) — with each coordinate drawn uniformly
   * from [min, max].
   */
  def genCECSized(min: Double, max: Double): Gen[Random, (NonEmptyList[Double], NonEmptyList[Double], NonEmptyList[Double], NonEmptyList[Double])] = {
    // Generator for a non-empty list of `n` doubles within the bounds.
    def genWithSize(n: Int) =
      Gen.listOfN(n)(Gen.double(min, max)).flatMap {
        case head :: tail => Gen.fromEffect(UIO.succeed(NonEmptyList.fromIterable(head, tail)))
        case Nil          => Gen.fromEffect(UIO.die(new IllegalArgumentException("invalid bounds")))
      }

    for {
      dim2  <- genWithSize(2)
      dim10 <- genWithSize(10)
      dim30 <- genWithSize(30)
      dim50 <- genWithSize(50)
    } yield (dim2, dim10, dim30, dim50)
  }
}
| cirg-up/benchmarks | src/test/scala/benchmarks/cec/cec2005/Generators.scala | Scala | apache-2.0 | 762 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnet.spark.example
import org.apache.mxnet.spark.MXNet
import org.apache.mxnet.{Symbol, NDArray, Context, Shape}
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkContext, SparkConf}
import org.kohsuke.args4j.{Option, CmdLineParser}
import org.slf4j.{LoggerFactory, Logger}
import scala.collection.mutable.ArrayBuffer
import scala.collection.JavaConverters._
// Marker class; exists only to anchor the slf4j logger's class name.
class ClassificationExample

/**
 * Spark driver that trains an MXNet classifier (MLP or LeNet) on MNIST-style
 * data, saves the model, and writes "label,prediction" pairs for a
 * validation set.
 */
object ClassificationExample {
  private val logger: Logger = LoggerFactory.getLogger(classOf[ClassificationExample])

  def main(args: Array[String]): Unit = {
    val cmdLine = new CommandLine
    val parser: CmdLineParser = new CmdLineParser(cmdLine)
    try {
      parser.parseArgument(args.toList.asJava)
      cmdLine.checkArguments()
      val conf = new SparkConf().setAppName("MXNet")
      val sc = new SparkContext(conf)

      // Network architecture and input shape both depend on the chosen model:
      // MLP consumes flat 784-vectors, LeNet consumes 1x28x28 images.
      val network = if (cmdLine.model == "mlp") getMlp else getLenet
      val dimension = if (cmdLine.model == "mlp") Shape(784) else Shape(1, 28, 28)
      // Device selection: GPUs win over CPUs; default is a single CPU.
      val devs =
        if (cmdLine.gpus != null) cmdLine.gpus.split(',').map(id => Context.gpu(id.trim.toInt))
        else if (cmdLine.cpus != null) cmdLine.cpus.split(',').map(id => Context.cpu(id.trim.toInt))
        else Array(Context.cpu(0))
      val mxnet = new MXNet()
        .setBatchSize(128)
        .setLabelName("softmax_label")
        .setContext(devs)
        .setDimension(dimension)
        .setNetwork(network)
        .setNumEpoch(cmdLine.numEpoch)
        .setNumServer(cmdLine.numServer)
        .setNumWorker(cmdLine.numWorker)
        .setExecutorJars(cmdLine.jars)
        .setJava(cmdLine.java)
      val trainData = parseRawData(sc, cmdLine.input)
      val start = System.currentTimeMillis
      val model = mxnet.fit(trainData)
      val timeCost = System.currentTimeMillis - start
      logger.info("Training cost {} milli seconds", timeCost)
      model.save(sc, cmdLine.output + "/model")
      logger.info("Now do validation")
      val valData = parseRawData(sc, cmdLine.inputVal)
      // Broadcast the trained model once so every partition can predict.
      val brModel = sc.broadcast(model)
      val res = valData.mapPartitions { data =>
        // get real labels
        import org.apache.spark.mllib.linalg.Vector
        val points = ArrayBuffer.empty[Vector]
        val y = ArrayBuffer.empty[Float]
        while (data.hasNext) {
          val evalData = data.next()
          y += evalData.label.toFloat
          points += evalData.features
        }
        // get predicted labels
        val probArrays = brModel.value.predict(points.toIterator)
        require(probArrays.length == 1)
        val prob = probArrays(0)
        // argmax over the class-probability channel gives the predicted label.
        val py = NDArray.argmax_channel(prob.get)
        require(y.length == py.size, s"${y.length} mismatch ${py.size}")
        // Emit "label,prediction" pairs; accuracy is computed offline from
        // this output rather than in the job itself.
        val res = Iterator((y.toArray zip py.toArray).map {
          case (y1, py1) => y1 + "," + py1 }.mkString("\\n"))
        // Free native NDArray memory before the partition iterator is consumed.
        py.dispose()
        prob.get.dispose()
        res
      }
      res.saveAsTextFile(cmdLine.output + "/data")
      sc.stop()
    } catch {
      case e: Throwable =>
        logger.error(e.getMessage, e)
        sys.exit(-1)
    }
  }

  // Parses lines of the form "<label> <f1,f2,...>" into LabeledPoints.
  private def parseRawData(sc: SparkContext, path: String): RDD[LabeledPoint] = {
    val raw = sc.textFile(path)
    raw.map { s =>
      val parts = s.split(' ')
      val label = java.lang.Double.parseDouble(parts(0))
      val features = Vectors.dense(parts(1).trim().split(',').map(java.lang.Double.parseDouble))
      LabeledPoint(label, features)
    }
  }

  // args4j-populated command-line options.
  private class CommandLine {
    @Option(name = "--input", usage = "Input training file.")
    val input: String = null
    @Option(name = "--input-val", usage = "Input validation file.")
    val inputVal: String = null
    @Option(name = "--output", usage = "Output inferred result.")
    val output: String = null
    @Option(name = "--jars", usage = "Jars for running MXNet on other nodes.")
    val jars: String = null
    @Option(name = "--num-server", usage = "PS server number")
    val numServer: Int = 1
    @Option(name = "--num-worker", usage = "PS worker number")
    val numWorker: Int = 1
    @Option(name = "--num-epoch", usage = "Number of epochs")
    val numEpoch: Int = 10
    @Option(name = "--java", usage = "Java bin")
    val java: String = "java"
    @Option(name = "--model", usage = "Model definition")
    val model: String = "mlp"
    @Option(name = "--gpus", usage = "the gpus will be used, e.g. '0,1,2,3'")
    val gpus: String = null
    @Option(name = "--cpus", usage = "the cpus will be used, e.g. '0,1,2,3'")
    val cpus: String = null

    // NOTE(review): --input-val and --output are dereferenced in main but not
    // validated here; missing values surface later as NPE-like failures.
    def checkArguments(): Unit = {
      require(input != null, "Undefined input path")
      require(numServer > 0, s"Invalid number of servers: $numServer")
      require(numWorker > 0, s"Invalid number of workers: $numWorker")
    }
  }

  // Three-layer perceptron with a 10-way softmax output.
  def getMlp: Symbol = {
    val data = Symbol.Variable("data")
    val fc1 = Symbol.FullyConnected(name = "fc1")()(Map("data" -> data, "num_hidden" -> 128))
    val act1 = Symbol.Activation(name = "relu1")()(Map("data" -> fc1, "act_type" -> "relu"))
    val fc2 = Symbol.FullyConnected(name = "fc2")()(Map("data" -> act1, "num_hidden" -> 64))
    val act2 = Symbol.Activation(name = "relu2")()(Map("data" -> fc2, "act_type" -> "relu"))
    val fc3 = Symbol.FullyConnected(name = "fc3")()(Map("data" -> act2, "num_hidden" -> 10))
    val mlp = Symbol.SoftmaxOutput(name = "softmax")()(Map("data" -> fc3))
    mlp
  }

  // LeCun, Yann, Leon Bottou, Yoshua Bengio, and Patrick
  // Haffner. "Gradient-based learning applied to document recognition."
  // Proceedings of the IEEE (1998)
  def getLenet: Symbol = {
    val data = Symbol.Variable("data")
    // first conv
    val conv1 = Symbol.Convolution()()(
      Map("data" -> data, "kernel" -> "(5, 5)", "num_filter" -> 20))
    val tanh1 = Symbol.Activation()()(Map("data" -> conv1, "act_type" -> "tanh"))
    val pool1 = Symbol.Pooling()()(Map("data" -> tanh1, "pool_type" -> "max",
      "kernel" -> "(2, 2)", "stride" -> "(2, 2)"))
    // second conv
    val conv2 = Symbol.Convolution()()(
      Map("data" -> pool1, "kernel" -> "(5, 5)", "num_filter" -> 50))
    val tanh2 = Symbol.Activation()()(Map("data" -> conv2, "act_type" -> "tanh"))
    val pool2 = Symbol.Pooling()()(Map("data" -> tanh2, "pool_type" -> "max",
      "kernel" -> "(2, 2)", "stride" -> "(2, 2)"))
    // first fullc
    val flatten = Symbol.Flatten()()(Map("data" -> pool2))
    val fc1 = Symbol.FullyConnected()()(Map("data" -> flatten, "num_hidden" -> 500))
    val tanh3 = Symbol.Activation()()(Map("data" -> fc1, "act_type" -> "tanh"))
    // second fullc
    val fc2 = Symbol.FullyConnected()()(Map("data" -> tanh3, "num_hidden" -> 10))
    // loss
    val lenet = Symbol.SoftmaxOutput(name = "softmax")()(Map("data" -> fc2))
    lenet
  }
}
| CodingCat/mxnet | scala-package/spark/src/main/scala/org/apache/mxnet/spark/example/ClassificationExample.scala | Scala | apache-2.0 | 7,713 |
package org.neo4j.ace
import scala.language.implicitConversions
/**
 * Thin wrapper around `Either` adding for-comprehension-friendly
 * combinators. `E` is the error type, `A` the success type.
 */
case class MayErr[+E, +A](e: Either[E, A]) {

  /** Applies `f` to the success value; an existing error passes through. */
  def flatMap[B, EE >: E](f: A => MayErr[EE, B]): MayErr[EE, B] =
    e match {
      case Right(value) => f(value)
      case Left(err)    => MayErr(Left(err))
    }

  /** Transforms the success value, leaving an error untouched. */
  def map[B](f: A => B): MayErr[E, B] =
    e match {
      case Right(value) => MayErr(Right(f(value)))
      case Left(err)    => MayErr(Left(err))
    }

  /**
   * Keeps the success value only if it satisfies `p`; otherwise yields
   * `error`. Note: an existing error is also replaced by `error` (this
   * mirrors the original right-projection semantics).
   */
  def filter[EE >: E](p: A => Boolean, error: EE): MayErr[EE, A] =
    e match {
      case Right(value) if p(value) => MayErr(Right(value))
      case _                        => MayErr(Left(error))
    }

  /** Prints the error to stdout (if any) and converts to an `Option`. */
  def toOptionLoggingError(): Option[A] =
    e match {
      case Right(value) => Some(value)
      case Left(err) =>
        println(err.toString)
        None
    }

  /** Returns the success value or throws a `RuntimeException` on error. */
  def get: A =
    e match {
      case Right(value) => value
      case Left(err)    => throw new RuntimeException(err.toString)
    }
}
object MayErr {
  // Lets a plain `Either` be used where a `MayErr` is expected
  // (requires scala.language.implicitConversions, imported above).
  implicit def eitherToError[E, EE >: E, A, AA >: A](e: Either[E, A]): MayErr[EE, AA] = MayErr[E, A](e)
  // Lets a `MayErr` be used where a plain `Either` is expected.
  implicit def errorToEither[E, EE >: E, A, AA >: A](e: MayErr[E, A]): Either[EE, AA] = e.e
} | fynnfeldpausch/ace | src/main/scala/org/neo4j/ace/Utils.scala | Scala | mit | 817 |
package org.openmole.site
/*
* Copyright (C) 11/05/17 // mathieu.leclaire@openmole.org
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import scalatags.Text.all._
/**
 * Site-wide constants: DOM element ids, animation trigger names, and every
 * external URL referenced by the OpenMOLE website.
 */
package object shared {

  // DOM element ids used by the site's client-side code.
  lazy val searchDiv = "search-div"
  lazy val searchImg = "search-img"
  lazy val blogposts = "blog-posts"
  lazy val newsPosts = "news-posts"
  lazy val shortTraining = "short-training"
  lazy val longTraining = "long-training"

  // Trigger/animation element ids for the method-explanation widgets.
  object profile {
    val button = "profileTrigger"
    val animation = "startProfileAnim"
  }

  object pse {
    val button = "pseTrigger"
    val animation = "startPseAnim"
  }

  object sensitivity {
    val button = "sensitivityTrigger"
    val animation = "startSensitivityAnim"
  }

  object rTask {
    // R version documented for the RTask container.
    lazy val rVersion = "4.0.2"
  }

  // Builds an HTML anchor id by stripping every non-alphanumeric character.
  def anchor(title: String) = title.filter(c ⇒ c.isLetterOrDigit)

  // All external URLs used across the site, grouped by purpose.
  object link {

    // OpenMOLE
    lazy val demo = "http://demo.openmole.org"
    lazy val next = "https://next.openmole.org/"
    lazy val allOpenMOLE = "http://www.openmole.org/all/"

    // Contributions
    lazy val issue = "https://github.com/openmole/openmole/issues"
    lazy val pullRequests = "https://github.com/openmole/openmole/pulls"
    lazy val howToPR = "https://help.github.com/articles/about-pull-requests/"

    // Community
    // NOTE(review): `contact` is an email address, not a URL, yet it is
    // included in the `links` sequence below — confirm intended.
    lazy val contact = "contact@openmole.org"
    lazy val forum = "http://ask.openmole.org"
    lazy val chat = "https://chat.openmole.org/channel/general"
    lazy val blog = "https://blog.openmole.org"
    lazy val openMOLEWiki = "https://github.com/openmole/openmole/wiki"
    lazy val twitter = "https://twitter.com/OpenMOLE"
    lazy val shortTrainings = "https://iscpif.fr/events/formationsjedi/"
    lazy val longTrainings = "http://cnrsformation.cnrs.fr"
    lazy val exmodelo = "https://exmodelo.org"
    lazy val dockerHub = "https://hub.docker.com/repository/docker/openmole/openmole"

    // Resources
    lazy val scala = "http://www.scala-lang.org/"
    lazy val scalaBook = "http://www.scala-lang.org/node/959"
    lazy val scalaDoc = "http://www.scala-lang.org/api/current/index.html"
    lazy val scalatex = "http://www.lihaoyi.com/Scalatex/"
    lazy val sbt = "http://www.scala-sbt.org/"
    lazy val intelliJ = "https://www.jetbrains.com/idea/"
    lazy val git = "https://git-scm.com/"
    lazy val gitlfs = "https://git-lfs.github.com/"
    lazy val npm = "https://www.npmjs.com/get-npm"
    lazy val osgi = "https://www.osgi.org/"
    lazy val care = "https://github.com/proot-me/proot-static-build/releases/download/v5.1.1/care_2.2.2_x86_64_rc2--no-seccomp"
    lazy val CAREsite = "https://proot-me.github.io/"
    lazy val CAREmailing = "https://groups.google.com/forum/?fromgroups#!forum/reproducible"
    lazy val egi = "http://www.egi.eu/"
    lazy val singularity = "https://sylabs.io/"
    lazy val rcran = "https://cran.r-project.org/"

    // Models
    lazy val simpluDemo = "https://simplu.openmole.org"
    lazy val netlogoAnts = "http://ccl.northwestern.edu/netlogo/models/Ants"

    // Additional info
    lazy val branchingModel = "http://nvie.com/posts/a-successful-git-branching-model/"
    lazy val batchProcessing = "https://en.wikipedia.org/wiki/Batch_processing"
    lazy val batchSystem = "http://en.wikipedia.org/wiki/Portable_Batch_System"
    lazy val gridEngine = "https://en.wikipedia.org/wiki/Oracle_Grid_Engine"
    lazy val slurm = "https://en.wikipedia.org/wiki/Simple_Linux_Utility_for_Resource_Management"
    lazy val condor = "https://en.wikipedia.org/wiki/HTCondor"
    lazy val oar = "http://oar.imag.fr/dokuwiki/doku.php"
    lazy val ssh = "https://en.wikipedia.org/wiki/Secure_Shell"
    lazy val sshPublicKey = "https://git-scm.com/book/en/v2/Git-on-the-Server-Generating-Your-SSH-Public-Key"
    lazy val geodivercity = "http://geodivercity.parisgeo.cnrs.fr/blog/"
    lazy val ercSpringer = "http://www.springer.com/fr/book/9783319464954"
    lazy val ggplot2 = "http://ggplot2.tidyverse.org/reference/"
    lazy val sobol = "https://en.wikipedia.org/wiki/Sobol_sequence"
    lazy val lhs = "https://en.wikipedia.org/wiki/Latin_hypercube_sampling"
    lazy val jce = "http://www.oracle.com/technetwork/java/javase/downloads/index.html"
    lazy val prootIssue106 = "https://github.com/proot-me/PRoot/issues/106"
    lazy val xvfb = "https://www.x.org/releases/X11R7.7/doc/man/man1/Xvfb.1.xhtml"
    lazy val prootStatic = "https://github.com/proot-me/proot-static-build/tree/master/static"
    lazy val multiobjectiveOptimization = "http://en.wikipedia.org/wiki/Multiobjective_optimization"
    lazy val paretoEfficency = "http://en.wikipedia.org/wiki/Pareto_efficiency"
    lazy val noveltySearch = "http://eplex.cs.ucf.edu/noveltysearch/userspage/"
    lazy val javaString = "https://docs.oracle.com/javase/7/docs/api/java/lang/String.html"
    lazy val javaFile = "https://docs.oracle.com/javase/8/docs/api/java/io/File.html"

    // Published papers referenced by the site.
    object paper {
      lazy val jassCP = "http://jasss.soc.surrey.ac.uk/18/1/12.html"
      lazy val fgcs2013 = "http://www.sciencedirect.com/science/article/pii/S0167739X13001027"
      lazy val fgcs2013preprint = "https://hal-paris1.archives-ouvertes.fr/hal-00840744/"
      lazy val hpcs2010 = "http://ieeexplore.ieee.org/document/5547155/"
      lazy val beyondCorroboration = "http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0138212"
      lazy val halfBillionOA = "https://hal.archives-ouvertes.fr/hal-01118918"
      lazy val halfBillionEditor = "http://journals.sagepub.com/doi/abs/10.1068/b130064p"
      lazy val jass2015 = "http://jasss.soc.surrey.ac.uk/18/4/9.html"
      lazy val mdpi2015 = "http://www.mdpi.com/2079-8954/3/4/348"
      lazy val frontier2017 = "http://journal.frontiersin.org/article/10.3389/fninf.2017.00021/full#"
      //lazy val urbanDynamics = "https://hal.archives-ouvertes.fr/view/index/docid/1583528"// TODO erroneous hal id ?
      //lazy val urbanDynamicsBib = "https://hal.archives-ouvertes.fr/hal-01583528v1/bibtex"
      lazy val epb2018 = "http://journals.sagepub.com/doi/abs/10.1177/2399808318774335"
      lazy val epb2018arxiv = "https://arxiv.org/pdf/1804.09416.pdf"
      lazy val pone2018 = "https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0203516"
      lazy val jasss2019 = "http://jasss.soc.surrey.ac.uk/22/4/10.html"
      lazy val rcr2020 = "https://www.sciencedirect.com/science/article/pii/S0921344919304446"
    }

    // Partner institutions.
    object partner {
      lazy val iscpif = "http://iscpif.fr"
      lazy val parisgeo = "http://www.parisgeo.cnrs.fr/"
      lazy val biomedia = "https://biomedia.doc.ic.ac.uk/"
      lazy val idf = "https://www.iledefrance.fr/"
      lazy val paris = "https://www.paris.fr/"
      lazy val ign = "http://www.ign.fr/"
      lazy val ideesrouen = "http://umr-idees.fr/"
    }

    // Source repositories.
    object repo {
      lazy val openmole = "https://github.com/openmole/openmole"
      lazy val market = "https://github.com/openmole/openmole-market"
      lazy val gridscale = "https://github.com/openmole/gridscale"
      lazy val scaladget = "https://github.com/openmole/scaladget"
      lazy val scalawui = "https://github.com/openmole/scalaWUI"
      lazy val mgo = "https://github.com/openmole/mgo"
      lazy val mgobench = "https://github.com/openmole/mgo-benchmark"
      lazy val simplu = "https://github.com/IGNF/simplu3D"
      lazy val myOpenmolePlugin = "https://github.com/openmole/myopenmoleplugin"
      lazy val gamaPlugin = "https://github.com/openmole/gama-plugin"
      lazy val openMOLEDockerBuild = "https://github.com/openmole/docker-build.git"
    }
  }

  // Wraps raw HTML content in a centered scalatags <div>.
  def rawFrag(content: String) = {
    val builder = new scalatags.text.Builder()
    raw(content).applyTo(builder)
    div(textAlign := "center")(builder.children.head)
  }

  import link._

  // Flat list of every link the site checker validates.
  val links = Seq(
    partner.iscpif,
    partner.parisgeo,
    partner.biomedia,
    partner.idf,
    partner.paris,
    partner.ign,
    partner.ideesrouen,
    paper.jassCP,
    paper.fgcs2013,
    paper.fgcs2013preprint,
    paper.hpcs2010,
    paper.beyondCorroboration,
    paper.halfBillionOA,
    paper.halfBillionEditor,
    paper.jass2015,
    paper.mdpi2015,
    paper.frontier2017,
    //paper.urbanDynamics,
    //paper.urbanDynamicsBib,
    repo.openmole,
    repo.market,
    repo.gridscale,
    repo.scaladget,
    repo.scalawui,
    repo.mgo,
    repo.simplu,
    repo.myOpenmolePlugin,
    repo.gamaPlugin,
    repo.openMOLEDockerBuild,
    demo,
    twitter,
    contact,
    blog,
    chat,
    simpluDemo,
    forum,
    shortTrainings,
    longTrainings,
    egi,
    batchProcessing,
    batchSystem,
    gridEngine,
    slurm,
    condor,
    oar,
    ssh,
    geodivercity,
    ercSpringer,
    git,
    gitlfs,
    sbt,
    scala,
    scalaBook,
    scalaDoc,
    intelliJ,
    scalatex,
    netlogoAnts,
    branchingModel,
    issue,
    pullRequests,
    next,
    CAREsite,
    CAREmailing,
    ggplot2,
    sobol,
    lhs,
    jce,
    allOpenMOLE,
    care,
    prootIssue106,
    xvfb,
    prootStatic,
    multiobjectiveOptimization,
    paretoEfficency,
    openMOLEWiki,
    noveltySearch,
    javaString,
    javaFile
  )
}
| openmole/openmole | openmole/bin/org.openmole.site/shared/src/main/scala/org/openmole/site/package.scala | Scala | agpl-3.0 | 9,723 |
package lila.round
import chess.format.{ Forsyth, Uci }
import chess.{ Centis, MoveMetrics, MoveOrDrop, Status }
import actorApi.round.{ DrawNo, ForecastPlay, HumanPlay, TakebackNo, TooManyPlies }
import lila.game.actorApi.MoveGameEvent
import lila.common.Bus
import lila.game.{ Game, Pov, Progress, UciMemo }
import lila.game.Game.PlayerId
import cats.data.Validated
final private class Player(
fishnetPlayer: lila.fishnet.FishnetPlayer,
finisher: Finisher,
scheduleExpiration: ScheduleExpiration,
uciMemo: UciMemo
)(implicit ec: scala.concurrent.ExecutionContext) {
sealed private trait MoveResult
private case object Flagged extends MoveResult
private case class MoveApplied(progress: Progress, move: MoveOrDrop) extends MoveResult
private[round] def human(play: HumanPlay, round: RoundAsyncActor)(
pov: Pov
)(implicit proxy: GameProxy): Fu[Events] =
play match {
case HumanPlay(_, uci, blur, lag, _) =>
pov match {
case Pov(game, _) if game.turns > Game.maxPlies =>
round ! TooManyPlies
fuccess(Nil)
case Pov(game, color) if game playableBy color =>
applyUci(game, uci, blur, lag)
.leftMap(e => s"$pov $e")
.fold(errs => fufail(ClientError(errs)), fuccess)
.flatMap {
case Flagged => finisher.outOfTime(game)
case MoveApplied(progress, moveOrDrop) =>
proxy.save(progress) >>
postHumanOrBotPlay(round, pov, progress, moveOrDrop)
}
case Pov(game, _) if game.finished => fufail(ClientError(s"$pov game is finished"))
case Pov(game, _) if game.aborted => fufail(ClientError(s"$pov game is aborted"))
case Pov(game, color) if !game.turnOf(color) => fufail(ClientError(s"$pov not your turn"))
case _ => fufail(ClientError(s"$pov move refused for some reason"))
}
}
private[round] def bot(uci: Uci, round: RoundAsyncActor)(pov: Pov)(implicit proxy: GameProxy): Fu[Events] =
pov match {
case Pov(game, _) if game.turns > Game.maxPlies =>
round ! TooManyPlies
fuccess(Nil)
case Pov(game, color) if game playableBy color =>
applyUci(game, uci, blur = false, botLag)
.fold(errs => fufail(ClientError(errs)), fuccess)
.flatMap {
case Flagged => finisher.outOfTime(game)
case MoveApplied(progress, moveOrDrop) =>
proxy.save(progress) >> postHumanOrBotPlay(round, pov, progress, moveOrDrop)
}
case Pov(game, _) if game.finished => fufail(GameIsFinishedError(pov))
case Pov(game, _) if game.aborted => fufail(ClientError(s"$pov game is aborted"))
case Pov(game, color) if !game.turnOf(color) => fufail(ClientError(s"$pov not your turn"))
case _ => fufail(ClientError(s"$pov move refused for some reason"))
}
  /** Side effects shared by human and bot moves, run after the progress has
    * been saved. Returns the events accumulated in `progress` (plus the
    * finishing events when the move ended the game).
    */
  private def postHumanOrBotPlay(
      round: RoundAsyncActor,
      pov: Pov,
      progress: Progress,
      moveOrDrop: MoveOrDrop
  )(implicit proxy: GameProxy): Fu[Events] = {
    // Record the move for AI games so the full UCI history can be replayed.
    if (pov.game.hasAi) uciMemo.add(pov.game, moveOrDrop)
    notifyMove(moveOrDrop, progress.game)
    if (progress.game.finished) moveFinish(progress.game) dmap { progress.events ::: _ }
    else {
      // It's now the AI's turn: ask fishnet for the reply move.
      if (progress.game.playableByAi) requestFishnet(progress.game, round)
      // Playing a move while an offer is pending counts as declining it.
      if (pov.opponent.isOfferingDraw) round ! DrawNo(PlayerId(pov.player.id))
      if (pov.player.isProposingTakeback) round ! TakebackNo(PlayerId(pov.player.id))
      // Forecasts only apply to regular moves (Left), never drops (Right).
      if (progress.game.forecastable) moveOrDrop.left.toOption.foreach { move =>
        round ! ForecastPlay(move)
      }
      scheduleExpiration(progress.game)
      fuccess(progress.events)
    }
  }
  /** Applies a move computed by fishnet (the server-side AI).
    *
    * The `game.playedTurns == ply` check rejects replies computed for an
    * outdated position (e.g. after a takeback), failing with FishnetError.
    */
  private[round] def fishnet(game: Game, ply: Int, uci: Uci)(implicit proxy: GameProxy): Fu[Events] =
    if (game.playable && game.player.isAi && game.playedTurns == ply) {
      applyUci(game, uci, blur = false, metrics = fishnetLag)
        .fold(errs => fufail(ClientError(errs)), fuccess)
        .flatMap {
          case Flagged => finisher.outOfTime(game)
          case MoveApplied(progress, moveOrDrop) =>
            // `>>-` sequences fire-and-forget side effects after the save;
            // only the final `>>` awaits another future.
            proxy.save(progress) >>-
              uciMemo.add(progress.game, moveOrDrop) >>-
              lila.mon.fishnet.move(~game.aiLevel).increment().unit >>-
              notifyMove(moveOrDrop, progress.game) >> {
                if (progress.game.finished) moveFinish(progress.game) dmap { progress.events ::: _ }
                else
                  fuccess(progress.events)
              }
        }
    } else
      fufail(
        FishnetError(
          s"Not AI turn move: $uci id: ${game.id} playable: ${game.playable} player: ${game.player}"
        )
      )
private[round] def requestFishnet(game: Game, round: RoundAsyncActor): Funit =
game.playableByAi ?? {
if (game.turns <= fishnetPlayer.maxPlies) fishnetPlayer(game)
else fuccess(round ! actorApi.round.ResignAi)
}
  // Fixed client-lag compensation applied to server-driven moves, since no
  // real client lag measurements exist for fishnet or bot API moves.
  private val fishnetLag = MoveMetrics(clientLag = Centis(5).some)
  private val botLag = MoveMetrics(clientLag = Centis(10).some)
  /** Validates and applies a UCI move or drop against the chess game.
    *
    * Returns `Invalid` with the engine's error message when the move is
    * illegal; otherwise `Flagged` when the mover's clock had already expired
    * (no grace period), or `MoveApplied` with the updated game state.
    */
  private def applyUci(
      game: Game,
      uci: Uci,
      blur: Boolean,
      metrics: MoveMetrics
  ): Validated[String, MoveResult] =
    (uci match {
      case Uci.Move(orig, dest, prom) =>
        game.chess(orig, dest, prom, metrics) map { case (ncg, move) =>
          ncg -> (Left(move): MoveOrDrop)
        }
      case Uci.Drop(role, pos) =>
        game.chess.drop(role, pos, metrics) map { case (ncg, drop) =>
          ncg -> (Right(drop): MoveOrDrop)
        }
    }).map {
      // Check the clock of the side that just moved, with no grace allowance.
      case (ncg, _) if ncg.clock.exists(_.outOfTime(game.turnColor, withGrace = false)) => Flagged
      case (newChessGame, moveOrDrop) =>
        MoveApplied(
          game.update(newChessGame, moveOrDrop, blur),
          moveOrDrop
        )
    }
  /** Publishes the applied move on the event bus: always a per-game
    * MoveGameEvent; plus correspondence and simul events when applicable.
    */
  private def notifyMove(moveOrDrop: MoveOrDrop, game: Game): Unit = {
    import lila.hub.actorApi.round.{ CorresMoveEvent, MoveEvent, SimulMoveEvent }
    val color = moveOrDrop.fold(_.color, _.color)
    val moveEvent = MoveEvent(
      gameId = game.id,
      fen = Forsyth exportBoard game.board,
      // Moves are serialized as origin/destination keys, drops as full UCI.
      move = moveOrDrop.fold(_.toUci.keys, _.toUci.uci)
    )
    // I checked and the bus doesn't do much if there's no subscriber for a classifier,
    // so we should be good here.
    // also used for targeted TvBroadcast subscription
    Bus.publish(MoveGameEvent(game, moveEvent.fen, moveEvent.move), MoveGameEvent makeChan game.id)
    // publish correspondence moves
    if (game.isCorrespondence && game.nonAi)
      Bus.publish(
        CorresMoveEvent(
          move = moveEvent,
          playerUserId = game.player(color).userId,
          mobilePushable = game.mobilePushable,
          alarmable = game.alarmable,
          unlimited = game.isUnlimited
        ),
        "moveEventCorres"
      )
    // publish simul moves, addressed to the opponent of the mover
    for {
      simulId <- game.simulId
      opponentUserId <- game.player(!color).userId
    } Bus.publish(
      SimulMoveEvent(move = moveEvent, simulId = simulId, opponentUserId = opponentUserId),
      "moveEventSimul"
    )
  }
  /** Produces the finishing events when the last move ended the game
    * (mate, variant-specific end, stalemate or draw); no events otherwise.
    */
  private def moveFinish(game: Game)(implicit proxy: GameProxy): Fu[Events] =
    game.status match {
      case Status.Mate => finisher.other(game, _.Mate, game.situation.winner)
      case Status.VariantEnd => finisher.other(game, _.VariantEnd, game.situation.winner)
      case status @ (Status.Stalemate | Status.Draw) => finisher.other(game, _ => status, None)
      case _ => fuccess(Nil)
    }
}
| luanlv/lila | modules/round/src/main/Player.scala | Scala | mit | 7,816 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.catalog
import java.net.URI
import java.util.Locale
import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable
import scala.util.{Failure, Success, Try}
import com.google.common.cache.{Cache, CacheBuilder}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst._
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.analysis.FunctionRegistry.FunctionBuilder
import org.apache.spark.sql.catalyst.expressions.{Expression, ExpressionInfo}
import org.apache.spark.sql.catalyst.parser.{CatalystSqlParser, ParserInterface}
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, SubqueryAlias, View}
import org.apache.spark.sql.catalyst.util.StringUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{StructField, StructType}
object SessionCatalog {
  // Name of the database used when a query does not specify one.
  val DEFAULT_DATABASE = "default"
}
/**
* An internal catalog that is used by a Spark Session. This internal catalog serves as a
* proxy to the underlying metastore (e.g. Hive Metastore) and it also manages temporary
* tables and functions of the Spark Session that it belongs to.
*
* This class must be thread-safe.
*/
class SessionCatalog(
val externalCatalog: ExternalCatalog,
globalTempViewManager: GlobalTempViewManager,
functionRegistry: FunctionRegistry,
conf: SQLConf,
hadoopConf: Configuration,
parser: ParserInterface,
functionResourceLoader: FunctionResourceLoader) extends Logging {
import SessionCatalog._
import CatalogTypes.TablePartitionSpec
// For testing only.
  // Test-only convenience constructor: supplies default collaborators
  // (fresh global temp view manager, Hadoop conf, SQL parser, and a
  // no-op function resource loader).
  def this(
      externalCatalog: ExternalCatalog,
      functionRegistry: FunctionRegistry,
      conf: SQLConf) {
    this(
      externalCatalog,
      new GlobalTempViewManager("global_temp"),
      functionRegistry,
      conf,
      new Configuration(),
      CatalystSqlParser,
      DummyFunctionResourceLoader)
  }
// For testing only.
  // Test-only convenience constructor: case-sensitive analysis with a
  // simple in-memory function registry.
  def this(externalCatalog: ExternalCatalog) {
    this(
      externalCatalog,
      new SimpleFunctionRegistry,
      new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true))
  }
  /** List of temporary tables, mapping from table name to their logical plan. */
  // Keys are stored case-normalized via formatTableName; access is guarded by `this`.
  @GuardedBy("this")
  protected val tempTables = new mutable.HashMap[String, LogicalPlan]
// Note: we track current database here because certain operations do not explicitly
// specify the database (e.g. DROP TABLE my_table). In these cases we must first
// check whether the temporary table or function exists, then, if not, operate on
// the corresponding item in the current database.
  // Always stored in formatted (case-normalized) form; starts at the default database.
  @GuardedBy("this")
  protected var currentDb: String = formatDatabaseName(DEFAULT_DATABASE)
/**
* Checks if the given name conforms the Hive standard ("[a-zA-z_0-9]+"),
* i.e. if this name only contains characters, numbers, and _.
*
* This method is intended to have the same behavior of
* org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName.
*/
private def validateName(name: String): Unit = {
val validNameFormat = "([\\\\w_]+)".r
if (!validNameFormat.pattern.matcher(name).matches()) {
throw new AnalysisException(s"`$name` is not a valid name for tables/databases. " +
"Valid names only contain alphabet characters, numbers and _.")
}
}
/**
* Format table name, taking into account case sensitivity.
*/
protected[this] def formatTableName(name: String): String = {
if (conf.caseSensitiveAnalysis) name else name.toLowerCase(Locale.ROOT)
}
/**
* Format database name, taking into account case sensitivity.
*/
protected[this] def formatDatabaseName(name: String): String = {
if (conf.caseSensitiveAnalysis) name else name.toLowerCase(Locale.ROOT)
}
/**
* A cache of qualified table names to table relation plans.
*/
val tableRelationCache: Cache[QualifiedTableName, LogicalPlan] = {
val cacheSize = conf.tableRelationCacheSize
CacheBuilder.newBuilder().maximumSize(cacheSize).build[QualifiedTableName, LogicalPlan]()
}
/**
* This method is used to make the given path qualified before we
* store this path in the underlying external catalog. So, when a path
* does not contain a scheme, this path will not be changed after the default
* FileSystem is changed.
*/
private def makeQualifiedPath(path: URI): URI = {
val hadoopPath = new Path(path)
val fs = hadoopPath.getFileSystem(hadoopConf)
fs.makeQualified(hadoopPath).toUri
}
private def requireDbExists(db: String): Unit = {
if (!databaseExists(db)) {
throw new NoSuchDatabaseException(db)
}
}
private def requireTableExists(name: TableIdentifier): Unit = {
if (!tableExists(name)) {
val db = name.database.getOrElse(currentDb)
throw new NoSuchTableException(db = db, table = name.table)
}
}
private def requireTableNotExists(name: TableIdentifier): Unit = {
if (tableExists(name)) {
val db = name.database.getOrElse(currentDb)
throw new TableAlreadyExistsException(db = db, table = name.table)
}
}
private def checkDuplication(fields: Seq[StructField]): Unit = {
val columnNames = if (conf.caseSensitiveAnalysis) {
fields.map(_.name)
} else {
fields.map(_.name.toLowerCase)
}
if (columnNames.distinct.length != columnNames.length) {
val duplicateColumns = columnNames.groupBy(identity).collect {
case (x, ys) if ys.length > 1 => x
}
throw new AnalysisException(s"Found duplicate column(s): ${duplicateColumns.mkString(", ")}")
}
}
// ----------------------------------------------------------------------------
// Databases
// ----------------------------------------------------------------------------
// All methods in this category interact directly with the underlying catalog.
// ----------------------------------------------------------------------------
  /** Creates a database in the external catalog.
    * The global-temp-view database name is reserved, and the location is
    * qualified before being persisted.
    */
  def createDatabase(dbDefinition: CatalogDatabase, ignoreIfExists: Boolean): Unit = {
    val dbName = formatDatabaseName(dbDefinition.name)
    // The global temp view database name is reserved by the system.
    if (dbName == globalTempViewManager.database) {
      throw new AnalysisException(
        s"${globalTempViewManager.database} is a system preserved database, " +
          "you cannot create a database with this name.")
    }
    validateName(dbName)
    // Qualify the location now so later default-FS changes don't move it.
    val qualifiedPath = makeQualifiedPath(dbDefinition.locationUri)
    externalCatalog.createDatabase(
      dbDefinition.copy(name = dbName, locationUri = qualifiedPath),
      ignoreIfExists)
  }
  /** Drops a database; the default database can never be dropped. */
  def dropDatabase(db: String, ignoreIfNotExists: Boolean, cascade: Boolean): Unit = {
    val dbName = formatDatabaseName(db)
    if (dbName == DEFAULT_DATABASE) {
      throw new AnalysisException(s"Can not drop default database")
    }
    externalCatalog.dropDatabase(dbName, ignoreIfNotExists, cascade)
  }
def alterDatabase(dbDefinition: CatalogDatabase): Unit = {
val dbName = formatDatabaseName(dbDefinition.name)
requireDbExists(dbName)
externalCatalog.alterDatabase(dbDefinition.copy(name = dbName))
}
def getDatabaseMetadata(db: String): CatalogDatabase = {
val dbName = formatDatabaseName(db)
requireDbExists(dbName)
externalCatalog.getDatabase(dbName)
}
def databaseExists(db: String): Boolean = {
val dbName = formatDatabaseName(db)
externalCatalog.databaseExists(dbName)
}
def listDatabases(): Seq[String] = {
externalCatalog.listDatabases()
}
def listDatabases(pattern: String): Seq[String] = {
externalCatalog.listDatabases(pattern)
}
def getCurrentDatabase: String = synchronized { currentDb }
  /** Switches the session's current database.
    * The global-temp-view database is reserved and cannot be made current.
    */
  def setCurrentDatabase(db: String): Unit = {
    val dbName = formatDatabaseName(db)
    if (dbName == globalTempViewManager.database) {
      throw new AnalysisException(
        s"${globalTempViewManager.database} is a system preserved database, " +
          "you cannot use it as current database. To access global temporary views, you should " +
          "use qualified name with the GLOBAL_TEMP_DATABASE, e.g. SELECT * FROM " +
          s"${globalTempViewManager.database}.viewName.")
    }
    requireDbExists(dbName)
    synchronized { currentDb = dbName }
  }
/**
* Get the path for creating a non-default database when database location is not provided
* by users.
*/
def getDefaultDBPath(db: String): URI = {
val database = formatDatabaseName(db)
new Path(new Path(conf.warehousePath), database + ".db").toUri
}
// ----------------------------------------------------------------------------
// Tables
// ----------------------------------------------------------------------------
// There are two kinds of tables, temporary tables and metastore tables.
// Temporary tables are isolated across sessions and do not belong to any
// particular database. Metastore tables can be used across multiple
// sessions as their metadata is persisted in the underlying catalog.
// ----------------------------------------------------------------------------
// ----------------------------------------------------
// | Methods that interact with metastore tables only |
// ----------------------------------------------------
/**
* Create a metastore table in the database specified in `tableDefinition`.
* If no such database is specified, create it in the current database.
*/
  /** Creates a metastore table in the database named in `tableDefinition`
    * (current database when unspecified). A relative storage location is
    * qualified before being persisted.
    */
  def createTable(tableDefinition: CatalogTable, ignoreIfExists: Boolean): Unit = {
    val db = formatDatabaseName(tableDefinition.identifier.database.getOrElse(getCurrentDatabase))
    val table = formatTableName(tableDefinition.identifier.table)
    validateName(table)
    val newTableDefinition = if (tableDefinition.storage.locationUri.isDefined
      && !tableDefinition.storage.locationUri.get.isAbsolute) {
      // make the location of the table qualified.
      val qualifiedTableLocation =
        makeQualifiedPath(tableDefinition.storage.locationUri.get)
      tableDefinition.copy(
        storage = tableDefinition.storage.copy(locationUri = Some(qualifiedTableLocation)),
        identifier = TableIdentifier(table, Some(db)))
    } else {
      tableDefinition.copy(identifier = TableIdentifier(table, Some(db)))
    }
    requireDbExists(db)
    externalCatalog.createTable(newTableDefinition, ignoreIfExists)
  }
/**
* Alter the metadata of an existing metastore table identified by `tableDefinition`.
*
* If no database is specified in `tableDefinition`, assume the table is in the
* current database.
*
* Note: If the underlying implementation does not support altering a certain field,
* this becomes a no-op.
*/
def alterTable(tableDefinition: CatalogTable): Unit = {
val db = formatDatabaseName(tableDefinition.identifier.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableDefinition.identifier.table)
val tableIdentifier = TableIdentifier(table, Some(db))
val newTableDefinition = tableDefinition.copy(identifier = tableIdentifier)
requireDbExists(db)
requireTableExists(tableIdentifier)
externalCatalog.alterTable(newTableDefinition)
}
/**
* Alter the schema of a table identified by the provided table identifier. The new schema
* should still contain the existing bucket columns and partition columns used by the table. This
* method will also update any Spark SQL-related parameters stored as Hive table properties (such
* as the schema itself).
*
* @param identifier TableIdentifier
* @param newSchema Updated schema to be used for the table (must contain existing partition and
* bucket columns, and partition columns need to be at the end)
*/
  /** Replaces a table's schema. The new schema must be a superset of the old
    * one (dropping columns is not supported) and free of duplicate names.
    */
  def alterTableSchema(
      identifier: TableIdentifier,
      newSchema: StructType): Unit = {
    val db = formatDatabaseName(identifier.database.getOrElse(getCurrentDatabase))
    val table = formatTableName(identifier.table)
    val tableIdentifier = TableIdentifier(table, Some(db))
    requireDbExists(db)
    requireTableExists(tableIdentifier)
    checkDuplication(newSchema)
    val catalogTable = externalCatalog.getTable(db, table)
    val oldSchema = catalogTable.schema
    // not supporting dropping columns yet
    val nonExistentColumnNames = oldSchema.map(_.name).filterNot(columnNameResolved(newSchema, _))
    if (nonExistentColumnNames.nonEmpty) {
      throw new AnalysisException(
        s"""
           |Some existing schema fields (${nonExistentColumnNames.mkString("[", ",", "]")}) are
           |not present in the new schema. We don't support dropping columns yet.
         """.stripMargin)
    }
    // assuming the newSchema has all partition columns at the end as required
    externalCatalog.alterTableSchema(db, table, newSchema)
  }
private def columnNameResolved(schema: StructType, colName: String): Boolean = {
schema.fields.map(_.name).exists(conf.resolver(_, colName))
}
/**
* Return whether a table/view with the specified name exists. If no database is specified, check
* with current database.
*/
def tableExists(name: TableIdentifier): Boolean = synchronized {
val db = formatDatabaseName(name.database.getOrElse(currentDb))
val table = formatTableName(name.table)
externalCatalog.tableExists(db, table)
}
/**
* Retrieve the metadata of an existing permanent table/view. If no database is specified,
* assume the table/view is in the current database. If the specified table/view is not found
* in the database then a [[NoSuchTableException]] is thrown.
*/
def getTableMetadata(name: TableIdentifier): CatalogTable = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
val table = formatTableName(name.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Some(db)))
externalCatalog.getTable(db, table)
}
/**
* Retrieve the metadata of an existing metastore table.
* If no database is specified, assume the table is in the current database.
* If the specified table is not found in the database then return None if it doesn't exist.
*/
def getTableMetadataOption(name: TableIdentifier): Option[CatalogTable] = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
val table = formatTableName(name.table)
requireDbExists(db)
externalCatalog.getTableOption(db, table)
}
/**
* Load files stored in given path into an existing metastore table.
* If no database is specified, assume the table is in the current database.
* If the specified table is not found in the database then a [[NoSuchTableException]] is thrown.
*/
def loadTable(
name: TableIdentifier,
loadPath: String,
isOverwrite: Boolean,
isSrcLocal: Boolean): Unit = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
val table = formatTableName(name.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Some(db)))
externalCatalog.loadTable(db, table, loadPath, isOverwrite, isSrcLocal)
}
/**
* Load files stored in given path into the partition of an existing metastore table.
* If no database is specified, assume the table is in the current database.
* If the specified table is not found in the database then a [[NoSuchTableException]] is thrown.
*/
  /** Loads files stored at `loadPath` into one partition of an existing
    * metastore table. The partition spec must have non-empty values.
    */
  def loadPartition(
      name: TableIdentifier,
      loadPath: String,
      spec: TablePartitionSpec,
      isOverwrite: Boolean,
      inheritTableSpecs: Boolean,
      isSrcLocal: Boolean): Unit = {
    val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
    val table = formatTableName(name.table)
    requireDbExists(db)
    requireTableExists(TableIdentifier(table, Some(db)))
    requireNonEmptyValueInPartitionSpec(Seq(spec))
    externalCatalog.loadPartition(
      db, table, loadPath, spec, isOverwrite, inheritTableSpecs, isSrcLocal)
  }
def defaultTablePath(tableIdent: TableIdentifier): URI = {
val dbName = formatDatabaseName(tableIdent.database.getOrElse(getCurrentDatabase))
val dbLocation = getDatabaseMetadata(dbName).locationUri
new Path(new Path(dbLocation), formatTableName(tableIdent.table)).toUri
}
// ----------------------------------------------
// | Methods that interact with temp views only |
// ----------------------------------------------
/**
* Create a local temporary view.
*/
def createTempView(
name: String,
tableDefinition: LogicalPlan,
overrideIfExists: Boolean): Unit = synchronized {
val table = formatTableName(name)
if (tempTables.contains(table) && !overrideIfExists) {
throw new TempTableAlreadyExistsException(name)
}
tempTables.put(table, tableDefinition)
}
/**
* Create a global temporary view.
*/
def createGlobalTempView(
name: String,
viewDefinition: LogicalPlan,
overrideIfExists: Boolean): Unit = {
globalTempViewManager.create(formatTableName(name), viewDefinition, overrideIfExists)
}
/**
* Alter the definition of a local/global temp view matching the given name, returns true if a
* temp view is matched and altered, false otherwise.
*/
  /** Replaces the definition of a matching local/global temp view.
    * Returns true if a view was found and updated, false otherwise.
    */
  def alterTempViewDefinition(
      name: TableIdentifier,
      viewDefinition: LogicalPlan): Boolean = synchronized {
    val viewName = formatTableName(name.table)
    if (name.database.isEmpty) {
      // Unqualified name: only a local temp view can match.
      if (tempTables.contains(viewName)) {
        createTempView(viewName, viewDefinition, overrideIfExists = true)
        true
      } else {
        false
      }
    } else if (formatDatabaseName(name.database.get) == globalTempViewManager.database) {
      // Qualified with the reserved global-temp database: try a global view.
      globalTempViewManager.update(viewName, viewDefinition)
    } else {
      // Any other database never holds temp views.
      false
    }
  }
/**
* Return a local temporary view exactly as it was stored.
*/
  // Lookup uses the case-normalized name, matching how createTempView stores keys.
  def getTempView(name: String): Option[LogicalPlan] = synchronized {
    tempTables.get(formatTableName(name))
  }
/**
* Return a global temporary view exactly as it was stored.
*/
  // Lookup uses the case-normalized name, matching how views are registered.
  def getGlobalTempView(name: String): Option[LogicalPlan] = {
    globalTempViewManager.get(formatTableName(name))
  }
/**
* Drop a local temporary view.
*
* Returns true if this view is dropped successfully, false otherwise.
*/
def dropTempView(name: String): Boolean = synchronized {
tempTables.remove(formatTableName(name)).isDefined
}
/**
* Drop a global temporary view.
*
* Returns true if this view is dropped successfully, false otherwise.
*/
  // Removal uses the case-normalized name; the manager reports whether it existed.
  def dropGlobalTempView(name: String): Boolean = {
    globalTempViewManager.remove(formatTableName(name))
  }
// -------------------------------------------------------------
// | Methods that interact with temporary and metastore tables |
// -------------------------------------------------------------
/**
* Retrieve the metadata of an existing temporary view or permanent table/view.
*
* If a database is specified in `name`, this will return the metadata of table/view in that
* database.
* If no database is specified, this will first attempt to get the metadata of a temporary view
* with the same name, then, if that does not exist, return the metadata of table/view in the
* current database.
*/
  def getTempViewOrPermanentTableMetadata(name: TableIdentifier): CatalogTable = synchronized {
    val table = formatTableName(name.table)
    if (name.database.isEmpty) {
      // Unqualified: prefer a local temp view, synthesizing VIEW metadata
      // from its plan output; otherwise fall back to the metastore.
      getTempView(table).map { plan =>
        CatalogTable(
          identifier = TableIdentifier(table),
          tableType = CatalogTableType.VIEW,
          storage = CatalogStorageFormat.empty,
          schema = plan.output.toStructType)
      }.getOrElse(getTableMetadata(name))
    } else if (formatDatabaseName(name.database.get) == globalTempViewManager.database) {
      // Reserved global-temp database: only global views can match; a miss
      // is an error rather than a metastore fallback.
      globalTempViewManager.get(table).map { plan =>
        CatalogTable(
          identifier = TableIdentifier(table, Some(globalTempViewManager.database)),
          tableType = CatalogTableType.VIEW,
          storage = CatalogStorageFormat.empty,
          schema = plan.output.toStructType)
      }.getOrElse(throw new NoSuchTableException(globalTempViewManager.database, table))
    } else {
      // Any other explicit database: straight metastore lookup.
      getTableMetadata(name)
    }
  }
/**
* Rename a table.
*
* If a database is specified in `oldName`, this will rename the table in that database.
* If no database is specified, this will first attempt to rename a temporary table with
* the same name, then, if that does not exist, rename the table in the current database.
*
* This assumes the database specified in `newName` matches the one in `oldName`.
*/
  def renameTable(oldName: TableIdentifier, newName: TableIdentifier): Unit = synchronized {
    val db = formatDatabaseName(oldName.database.getOrElse(currentDb))
    // Cross-database renames are not supported.
    newName.database.map(formatDatabaseName).foreach { newDb =>
      if (db != newDb) {
        throw new AnalysisException(
          s"RENAME TABLE source and destination databases do not match: '$db' != '$newDb'")
      }
    }
    val oldTableName = formatTableName(oldName.table)
    val newTableName = formatTableName(newName.table)
    if (db == globalTempViewManager.database) {
      // Reserved global-temp database: rename the global view in place.
      globalTempViewManager.rename(oldTableName, newTableName)
    } else {
      requireDbExists(db)
      if (oldName.database.isDefined || !tempTables.contains(oldTableName)) {
        // Metastore rename: source must exist, destination must not.
        requireTableExists(TableIdentifier(oldTableName, Some(db)))
        requireTableNotExists(TableIdentifier(newTableName, Some(db)))
        validateName(newTableName)
        externalCatalog.renameTable(db, oldTableName, newTableName)
      } else {
        // Local temp view rename: destination may not carry a database.
        if (newName.database.isDefined) {
          throw new AnalysisException(
            s"RENAME TEMPORARY TABLE from '$oldName' to '$newName': cannot specify database " +
              s"name '${newName.database.get}' in the destination table")
        }
        if (tempTables.contains(newTableName)) {
          throw new AnalysisException(s"RENAME TEMPORARY TABLE from '$oldName' to '$newName': " +
            "destination table already exists")
        }
        val table = tempTables(oldTableName)
        tempTables.remove(oldTableName)
        tempTables.put(newTableName, table)
      }
    }
  }
/**
* Drop a table.
*
* If a database is specified in `name`, this will drop the table from that database.
* If no database is specified, this will first attempt to drop a temporary table with
* the same name, then, if that does not exist, drop the table from the current database.
*/
  def dropTable(
      name: TableIdentifier,
      ignoreIfNotExists: Boolean,
      purge: Boolean): Unit = synchronized {
    val db = formatDatabaseName(name.database.getOrElse(currentDb))
    val table = formatTableName(name.table)
    if (db == globalTempViewManager.database) {
      // Reserved global-temp database: drop the global view; a miss only
      // matters when ignoreIfNotExists is false.
      val viewExists = globalTempViewManager.remove(table)
      if (!viewExists && !ignoreIfNotExists) {
        throw new NoSuchTableException(globalTempViewManager.database, table)
      }
    } else {
      if (name.database.isDefined || !tempTables.contains(table)) {
        requireDbExists(db)
        // The existence check is done here, so the external catalog is always
        // called with ignoreIfNotExists = true; a missing table throws
        // NoSuchTableException unless the caller asked to ignore it.
        if (tableExists(TableIdentifier(table, Option(db)))) {
          externalCatalog.dropTable(db, table, ignoreIfNotExists = true, purge = purge)
        } else if (!ignoreIfNotExists) {
          throw new NoSuchTableException(db = db, table = table)
        }
      } else {
        // Unqualified name matching a local temp view: drop the view.
        tempTables.remove(table)
      }
    }
  }
/**
* Return a [[LogicalPlan]] that represents the given table or view.
*
* If a database is specified in `name`, this will return the table/view from that database.
* If no database is specified, this will first attempt to return a temporary table/view with
* the same name, then, if that does not exist, return the table/view from the current database.
*
* Note that, the global temp view database is also valid here, this will return the global temp
* view matching the given name.
*
* If the relation is a view, we generate a [[View]] operator from the view description, and
* wrap the logical plan in a [[SubqueryAlias]] which will track the name of the view.
*
* @param name The name of the table/view that we look up.
*/
  def lookupRelation(name: TableIdentifier): LogicalPlan = {
    synchronized {
      val db = formatDatabaseName(name.database.getOrElse(currentDb))
      val table = formatTableName(name.table)
      if (db == globalTempViewManager.database) {
        // Reserved global-temp database: resolve to the global view or fail.
        globalTempViewManager.get(table).map { viewDef =>
          SubqueryAlias(table, viewDef)
        }.getOrElse(throw new NoSuchTableException(db, table))
      } else if (name.database.isDefined || !tempTables.contains(table)) {
        // Metastore lookup: either a persisted view or a catalog relation.
        val metadata = externalCatalog.getTable(db, table)
        if (metadata.tableType == CatalogTableType.VIEW) {
          val viewText = metadata.viewText.getOrElse(sys.error("Invalid view without text."))
          // The relation is a view, so we wrap the relation by:
          // 1. Add a [[View]] operator over the relation to keep track of the view desc;
          // 2. Wrap the logical plan in a [[SubqueryAlias]] which tracks the name of the view.
          val child = View(
            desc = metadata,
            output = metadata.schema.toAttributes,
            child = parser.parsePlan(viewText))
          SubqueryAlias(table, child)
        } else {
          val tableRelation = CatalogRelation(
            metadata,
            // we assume all the columns are nullable.
            metadata.dataSchema.asNullable.toAttributes,
            metadata.partitionSchema.asNullable.toAttributes)
          SubqueryAlias(table, tableRelation)
        }
      } else {
        // Unqualified name matching a local temp view wins over the metastore.
        SubqueryAlias(table, tempTables(table))
      }
    }
  }
/**
* Return whether a table with the specified name is a temporary table.
*
* Note: The temporary table cache is checked only when database is not
* explicitly specified.
*/
def isTemporaryTable(name: TableIdentifier): Boolean = synchronized {
val table = formatTableName(name.table)
if (name.database.isEmpty) {
tempTables.contains(table)
} else if (formatDatabaseName(name.database.get) == globalTempViewManager.database) {
globalTempViewManager.get(table).isDefined
} else {
false
}
}
/**
* List all tables in the specified database, including local temporary tables.
*
* Note that, if the specified database is global temporary view database, we will list global
* temporary views.
*/
def listTables(db: String): Seq[TableIdentifier] = listTables(db, "*")
/**
* List all matching tables in the specified database, including local temporary tables.
*
* Note that, if the specified database is global temporary view database, we will list global
* temporary views.
*/
def listTables(db: String, pattern: String): Seq[TableIdentifier] = {
val dbName = formatDatabaseName(db)
val dbTables = if (dbName == globalTempViewManager.database) {
globalTempViewManager.listViewNames(pattern).map { name =>
TableIdentifier(name, Some(globalTempViewManager.database))
}
} else {
requireDbExists(dbName)
externalCatalog.listTables(dbName, pattern).map { name =>
TableIdentifier(name, Some(dbName))
}
}
val localTempViews = synchronized {
StringUtils.filterPattern(tempTables.keys.toSeq, pattern).map { name =>
TableIdentifier(name)
}
}
dbTables ++ localTempViews
}
/**
* Refresh the cache entry for a metastore table, if any.
*/
  def refreshTable(name: TableIdentifier): Unit = synchronized {
    val dbName = formatDatabaseName(name.database.getOrElse(currentDb))
    val tableName = formatTableName(name.table)
    // Go through temporary tables and invalidate them.
    // If the database is defined, this may be a global temporary view.
    // If the database is not defined, there is a good chance this is a temp table.
    if (name.database.isEmpty) {
      tempTables.get(tableName).foreach(_.refresh())
    } else if (dbName == globalTempViewManager.database) {
      globalTempViewManager.get(tableName).foreach(_.refresh())
    }
    // Also invalidate the table relation cache so the next lookup re-resolves.
    val qualifiedTableName = QualifiedTableName(dbName, tableName)
    tableRelationCache.invalidate(qualifiedTableName)
  }
/**
* Drop all existing temporary tables.
* For testing only.
*/
  // Test-only helper: wipes every local temporary view under `this` lock.
  def clearTempTables(): Unit = synchronized {
    tempTables.clear()
  }
// ----------------------------------------------------------------------------
// Partitions
// ----------------------------------------------------------------------------
// All methods in this category interact directly with the underlying catalog.
// These methods are concerned with only metastore tables.
// ----------------------------------------------------------------------------
// TODO: We need to figure out how these methods interact with our data source
// tables. For such tables, we do not store values of partitioning columns in
// the metastore. For now, partition values of a data source table will be
// automatically discovered when we load the table.
/**
* Create partitions in an existing table, assuming it exists.
* If no database is specified, assume the table is in the current database.
*/
def createPartitions(
tableName: TableIdentifier,
parts: Seq[CatalogTablePartition],
ignoreIfExists: Boolean): Unit = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requireExactMatchedPartitionSpec(parts.map(_.spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(parts.map(_.spec))
externalCatalog.createPartitions(db, table, parts, ignoreIfExists)
}
/**
* Drop partitions from a table, assuming they exist.
* If no database is specified, assume the table is in the current database.
*/
def dropPartitions(
tableName: TableIdentifier,
specs: Seq[TablePartitionSpec],
ignoreIfNotExists: Boolean,
purge: Boolean,
retainData: Boolean): Unit = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requirePartialMatchedPartitionSpec(specs, getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(specs)
externalCatalog.dropPartitions(db, table, specs, ignoreIfNotExists, purge, retainData)
}
/**
* Override the specs of one or many existing table partitions, assuming they exist.
*
* This assumes index i of `specs` corresponds to index i of `newSpecs`.
* If no database is specified, assume the table is in the current database.
*/
def renamePartitions(
tableName: TableIdentifier,
specs: Seq[TablePartitionSpec],
newSpecs: Seq[TablePartitionSpec]): Unit = {
val tableMetadata = getTableMetadata(tableName)
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requireExactMatchedPartitionSpec(specs, tableMetadata)
requireExactMatchedPartitionSpec(newSpecs, tableMetadata)
requireNonEmptyValueInPartitionSpec(specs)
requireNonEmptyValueInPartitionSpec(newSpecs)
externalCatalog.renamePartitions(db, table, specs, newSpecs)
}
/**
* Alter one or many table partitions whose specs that match those specified in `parts`,
* assuming the partitions exist.
*
* If no database is specified, assume the table is in the current database.
*
* Note: If the underlying implementation does not support altering a certain field,
* this becomes a no-op.
*/
def alterPartitions(tableName: TableIdentifier, parts: Seq[CatalogTablePartition]): Unit = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requireExactMatchedPartitionSpec(parts.map(_.spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(parts.map(_.spec))
externalCatalog.alterPartitions(db, table, parts)
}
/**
* Retrieve the metadata of a table partition, assuming it exists.
* If no database is specified, assume the table is in the current database.
*/
def getPartition(tableName: TableIdentifier, spec: TablePartitionSpec): CatalogTablePartition = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requireExactMatchedPartitionSpec(Seq(spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(Seq(spec))
externalCatalog.getPartition(db, table, spec)
}
/**
* List the names of all partitions that belong to the specified table, assuming it exists.
*
* A partial partition spec may optionally be provided to filter the partitions returned.
* For instance, if there exist partitions (a='1', b='2'), (a='1', b='3') and (a='2', b='4'),
* then a partial spec of (a='1') will return the first two only.
*/
def listPartitionNames(
tableName: TableIdentifier,
partialSpec: Option[TablePartitionSpec] = None): Seq[String] = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
partialSpec.foreach { spec =>
requirePartialMatchedPartitionSpec(Seq(spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(Seq(spec))
}
externalCatalog.listPartitionNames(db, table, partialSpec)
}
/**
* List the metadata of all partitions that belong to the specified table, assuming it exists.
*
* A partial partition spec may optionally be provided to filter the partitions returned.
* For instance, if there exist partitions (a='1', b='2'), (a='1', b='3') and (a='2', b='4'),
* then a partial spec of (a='1') will return the first two only.
*/
def listPartitions(
tableName: TableIdentifier,
partialSpec: Option[TablePartitionSpec] = None): Seq[CatalogTablePartition] = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
partialSpec.foreach { spec =>
requirePartialMatchedPartitionSpec(Seq(spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(Seq(spec))
}
externalCatalog.listPartitions(db, table, partialSpec)
}
/**
* List the metadata of partitions that belong to the specified table, assuming it exists, that
* satisfy the given partition-pruning predicate expressions.
*/
def listPartitionsByFilter(
tableName: TableIdentifier,
predicates: Seq[Expression]): Seq[CatalogTablePartition] = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
externalCatalog.listPartitionsByFilter(db, table, predicates, conf.sessionLocalTimeZone)
}
/**
* Verify if the input partition spec has any empty value.
*/
private def requireNonEmptyValueInPartitionSpec(specs: Seq[TablePartitionSpec]): Unit = {
specs.foreach { s =>
if (s.values.exists(_.isEmpty)) {
val spec = s.map(p => p._1 + "=" + p._2).mkString("[", ", ", "]")
throw new AnalysisException(
s"Partition spec is invalid. The spec ($spec) contains an empty partition column value")
}
}
}
/**
* Verify if the input partition spec exactly matches the existing defined partition spec
* The columns must be the same but the orders could be different.
*/
private def requireExactMatchedPartitionSpec(
specs: Seq[TablePartitionSpec],
table: CatalogTable): Unit = {
val defined = table.partitionColumnNames.sorted
specs.foreach { s =>
if (s.keys.toSeq.sorted != defined) {
throw new AnalysisException(
s"Partition spec is invalid. The spec (${s.keys.mkString(", ")}) must match " +
s"the partition spec (${table.partitionColumnNames.mkString(", ")}) defined in " +
s"table '${table.identifier}'")
}
}
}
/**
* Verify if the input partition spec partially matches the existing defined partition spec
* That is, the columns of partition spec should be part of the defined partition spec.
*/
private def requirePartialMatchedPartitionSpec(
specs: Seq[TablePartitionSpec],
table: CatalogTable): Unit = {
val defined = table.partitionColumnNames
specs.foreach { s =>
if (!s.keys.forall(defined.contains)) {
throw new AnalysisException(
s"Partition spec is invalid. The spec (${s.keys.mkString(", ")}) must be contained " +
s"within the partition spec (${table.partitionColumnNames.mkString(", ")}) defined " +
s"in table '${table.identifier}'")
}
}
}
// ----------------------------------------------------------------------------
// Functions
// ----------------------------------------------------------------------------
// There are two kinds of functions, temporary functions and metastore
// functions (permanent UDFs). Temporary functions are isolated across
// sessions. Metastore functions can be used across multiple sessions as
// their metadata is persisted in the underlying catalog.
// ----------------------------------------------------------------------------
// -------------------------------------------------------
// | Methods that interact with metastore functions only |
// -------------------------------------------------------
/**
* Create a metastore function in the database specified in `funcDefinition`.
* If no such database is specified, create it in the current database.
*/
def createFunction(funcDefinition: CatalogFunction, ignoreIfExists: Boolean): Unit = {
val db = formatDatabaseName(funcDefinition.identifier.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
val identifier = FunctionIdentifier(funcDefinition.identifier.funcName, Some(db))
val newFuncDefinition = funcDefinition.copy(identifier = identifier)
if (!functionExists(identifier)) {
externalCatalog.createFunction(db, newFuncDefinition)
} else if (!ignoreIfExists) {
throw new FunctionAlreadyExistsException(db = db, func = identifier.toString)
}
}
/**
* Drop a metastore function.
* If no database is specified, assume the function is in the current database.
*/
  def dropFunction(name: FunctionIdentifier, ignoreIfNotExists: Boolean): Unit = {
    // Qualify the identifier with the current database when none was given.
    val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
    requireDbExists(db)
    val identifier = name.copy(database = Some(db))
    if (functionExists(identifier)) {
      // TODO: registry should just take in FunctionIdentifier for type safety
      if (functionRegistry.functionExists(identifier.unquotedString)) {
        // If we have loaded this function into the FunctionRegistry,
        // also drop it from there.
        // For a permanent function, because we loaded it to the FunctionRegistry
        // when it's first used, we also need to drop it from the FunctionRegistry.
        functionRegistry.dropFunction(identifier.unquotedString)
      }
      // Remove the persisted definition from the external catalog.
      externalCatalog.dropFunction(db, name.funcName)
    } else if (!ignoreIfNotExists) {
      throw new NoSuchFunctionException(db = db, func = identifier.toString)
    }
  }
/**
* Retrieve the metadata of a metastore function.
*
* If a database is specified in `name`, this will return the function in that database.
* If no database is specified, this will return the function in the current database.
*/
def getFunctionMetadata(name: FunctionIdentifier): CatalogFunction = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
externalCatalog.getFunction(db, name.funcName)
}
/**
* Check if the specified function exists.
*/
def functionExists(name: FunctionIdentifier): Boolean = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
functionRegistry.functionExists(name.unquotedString) ||
externalCatalog.functionExists(db, name.funcName)
}
// ----------------------------------------------------------------
// | Methods that interact with temporary and metastore functions |
// ----------------------------------------------------------------
/**
* Construct a [[FunctionBuilder]] based on the provided class that represents a function.
*
* This performs reflection to decide what type of [[Expression]] to return in the builder.
*/
  protected def makeFunctionBuilder(name: String, functionClassName: String): FunctionBuilder = {
    // TODO: at least support UDAFs here
    // Base implementation always fails; intended to be overridden by session
    // catalogs that can reflectively construct expressions from a class name.
    throw new UnsupportedOperationException("Use sqlContext.udf.register(...) instead.")
  }
/**
* Loads resources such as JARs and Files for a function. Every resource is represented
* by a tuple (resource type, resource uri).
*/
def loadFunctionResources(resources: Seq[FunctionResource]): Unit = {
resources.foreach(functionResourceLoader.loadResource)
}
/**
* Registers a temporary or permanent function into a session-specific [[FunctionRegistry]]
*/
def registerFunction(
funcDefinition: CatalogFunction,
ignoreIfExists: Boolean,
functionBuilder: Option[FunctionBuilder] = None): Unit = {
val func = funcDefinition.identifier
if (functionRegistry.functionExists(func.unquotedString) && !ignoreIfExists) {
throw new AnalysisException(s"Function $func already exists")
}
val info = new ExpressionInfo(funcDefinition.className, func.database.orNull, func.funcName)
val builder =
functionBuilder.getOrElse(makeFunctionBuilder(func.unquotedString, funcDefinition.className))
functionRegistry.registerFunction(func.unquotedString, info, builder)
}
/**
* Drop a temporary function.
*/
def dropTempFunction(name: String, ignoreIfNotExists: Boolean): Unit = {
if (!functionRegistry.dropFunction(name) && !ignoreIfNotExists) {
throw new NoSuchTempFunctionException(name)
}
}
/**
* Returns whether it is a temporary function. If not existed, returns false.
*/
def isTemporaryFunction(name: FunctionIdentifier): Boolean = {
// copied from HiveSessionCatalog
val hiveFunctions = Seq("histogram_numeric")
// A temporary function is a function that has been registered in functionRegistry
// without a database name, and is neither a built-in function nor a Hive function
name.database.isEmpty &&
functionRegistry.functionExists(name.funcName) &&
!FunctionRegistry.builtin.functionExists(name.funcName) &&
!hiveFunctions.contains(name.funcName.toLowerCase(Locale.ROOT))
}
  protected def failFunctionLookup(name: String): Nothing = {
    // Reported against the current database; callers pass the bare function name.
    throw new NoSuchFunctionException(db = currentDb, func = name)
  }
/**
* Look up the [[ExpressionInfo]] associated with the specified function, assuming it exists.
*/
  def lookupFunctionInfo(name: FunctionIdentifier): ExpressionInfo = synchronized {
    // TODO: just make function registry take in FunctionIdentifier instead of duplicating this
    // Qualify the name with the current database when it carries none.
    val database = name.database.orElse(Some(currentDb)).map(formatDatabaseName)
    val qualifiedName = name.copy(database = database)
    // Lookup order: (1) unqualified name in the registry (temp/built-in
    // functions), (2) qualified name in the registry (already-loaded permanent
    // functions), (3) the external catalog.
    functionRegistry.lookupFunction(name.funcName)
      .orElse(functionRegistry.lookupFunction(qualifiedName.unquotedString))
      .getOrElse {
        // `database` is always a Some here, so .get is safe.
        val db = qualifiedName.database.get
        requireDbExists(db)
        if (externalCatalog.functionExists(db, name.funcName)) {
          val metadata = externalCatalog.getFunction(db, name.funcName)
          new ExpressionInfo(
            metadata.className,
            qualifiedName.database.orNull,
            qualifiedName.identifier)
        } else {
          failFunctionLookup(name.funcName)
        }
      }
  }
/**
* Return an [[Expression]] that represents the specified function, assuming it exists.
*
* For a temporary function or a permanent function that has been loaded,
* this method will simply lookup the function through the
* FunctionRegistry and create an expression based on the builder.
*
* For a permanent function that has not been loaded, we will first fetch its metadata
* from the underlying external catalog. Then, we will load all resources associated
* with this function (i.e. jars and files). Finally, we create a function builder
* based on the function class and put the builder into the FunctionRegistry.
* The name of this function in the FunctionRegistry will be `databaseName.functionName`.
*/
  def lookupFunction(
      name: FunctionIdentifier,
      children: Seq[Expression]): Expression = synchronized {
    // Note: the implementation of this function is a little bit convoluted.
    // We probably shouldn't use a single FunctionRegistry to register all three kinds of functions
    // (built-in, temp, and external).
    if (name.database.isEmpty && functionRegistry.functionExists(name.funcName)) {
      // This function has been already loaded into the function registry.
      return functionRegistry.lookupFunction(name.funcName, children)
    }
    // If the name itself is not qualified, add the current database to it.
    val database = name.database.orElse(Some(currentDb)).map(formatDatabaseName)
    val qualifiedName = name.copy(database = database)
    if (functionRegistry.functionExists(qualifiedName.unquotedString)) {
      // This function has been already loaded into the function registry.
      // Unlike the above block, we find this function by using the qualified name.
      return functionRegistry.lookupFunction(qualifiedName.unquotedString, children)
    }
    // The function has not been loaded to the function registry, which means
    // that the function is a permanent function (if it actually has been registered
    // in the metastore). We need to first put the function in the FunctionRegistry.
    // TODO: why not just check whether the function exists first?
    val catalogFunction = try {
      externalCatalog.getFunction(currentDb, name.funcName)
    } catch {
      // NOTE(review): if NoSuchPermanentFunctionException is a subtype of
      // AnalysisException, the second case below is unreachable — confirm the
      // exception hierarchy before simplifying.
      case e: AnalysisException => failFunctionLookup(name.funcName)
      case e: NoSuchPermanentFunctionException => failFunctionLookup(name.funcName)
    }
    // Load the function's jars/files before it can be instantiated.
    loadFunctionResources(catalogFunction.resources)
    // Please note that qualifiedName is provided by the user. However,
    // catalogFunction.identifier.unquotedString is returned by the underlying
    // catalog. So, it is possible that qualifiedName is not exactly the same as
    // catalogFunction.identifier.unquotedString (difference is on case-sensitivity).
    // At here, we preserve the input from the user.
    registerFunction(catalogFunction.copy(identifier = qualifiedName), ignoreIfExists = false)
    // Now, we need to create the Expression.
    functionRegistry.lookupFunction(qualifiedName.unquotedString, children)
  }
/**
* List all functions in the specified database, including temporary functions. This
* returns the function identifier and the scope in which it was defined (system or user
* defined).
*/
  // "*" matches every function name.
  def listFunctions(db: String): Seq[(FunctionIdentifier, String)] = listFunctions(db, "*")
/**
* List all matching functions in the specified database, including temporary functions. This
* returns the function identifier and the scope in which it was defined (system or user
* defined).
*/
  def listFunctions(db: String, pattern: String): Seq[(FunctionIdentifier, String)] = {
    val dbName = formatDatabaseName(db)
    requireDbExists(dbName)
    // Persistent functions from the external catalog, qualified with dbName.
    val dbFunctions = externalCatalog.listFunctions(dbName, pattern).map { f =>
      FunctionIdentifier(f, Some(dbName)) }
    // Functions currently present in the session registry (temp + cached).
    val loadedFunctions =
      StringUtils.filterPattern(functionRegistry.listFunction(), pattern).map { f =>
        // In functionRegistry, function names are stored as an unquoted format.
        Try(parser.parseFunctionIdentifier(f)) match {
          case Success(e) => e
          case Failure(_) =>
            // The names of some built-in functions are not parsable by our parser, e.g., %
            FunctionIdentifier(f)
        }
      }
    val functions = dbFunctions ++ loadedFunctions
    // The session catalog caches some persistent functions in the FunctionRegistry
    // so there can be duplicates.
    // Built-ins are tagged SYSTEM, everything else USER.
    functions.map {
      case f if FunctionRegistry.functionSet.contains(f.funcName) => (f, "SYSTEM")
      case f => (f, "USER")
    }.distinct
  }
// -----------------
// | Other methods |
// -----------------
/**
* Drop all existing databases (except "default"), tables, partitions and functions,
* and set the current database to "default".
*
* This is mainly used for tests.
*/
  def reset(): Unit = synchronized {
    setCurrentDatabase(DEFAULT_DATABASE)
    externalCatalog.setCurrentDatabase(DEFAULT_DATABASE)
    // Drop every database except the default one (cascade removes its contents).
    listDatabases().filter(_ != DEFAULT_DATABASE).foreach { db =>
      dropDatabase(db, ignoreIfNotExists = false, cascade = true)
    }
    // The default database itself is kept, but its tables are dropped.
    listTables(DEFAULT_DATABASE).foreach { table =>
      dropTable(table, ignoreIfNotExists = false, purge = false)
    }
    listFunctions(DEFAULT_DATABASE).map(_._1).foreach { func =>
      // A database-qualified identifier means a persistent function; an
      // unqualified one is a temporary function.
      if (func.database.isDefined) {
        dropFunction(func, ignoreIfNotExists = false)
      } else {
        dropTempFunction(func.funcName, ignoreIfNotExists = false)
      }
    }
    clearTempTables()
    globalTempViewManager.clear()
    functionRegistry.clear()
    tableRelationCache.invalidateAll()
    // restore built-in functions
    FunctionRegistry.builtin.listFunction().foreach { f =>
      val expressionInfo = FunctionRegistry.builtin.lookupFunction(f)
      val functionBuilder = FunctionRegistry.builtin.lookupFunctionBuilder(f)
      require(expressionInfo.isDefined, s"built-in function '$f' is missing expression info")
      require(functionBuilder.isDefined, s"built-in function '$f' is missing function builder")
      functionRegistry.registerFunction(f, expressionInfo.get, functionBuilder.get)
    }
  }
/**
* Copy the current state of the catalog to another catalog.
*
* This function is synchronized on this [[SessionCatalog]] (the source) to make sure the copied
* state is consistent. The target [[SessionCatalog]] is not synchronized, and should not be
* because the target [[SessionCatalog]] should not be published at this point. The caller must
* synchronize on the target if this assumption does not hold.
*/
private[sql] def copyStateTo(target: SessionCatalog): Unit = synchronized {
target.currentDb = currentDb
// copy over temporary tables
tempTables.foreach(kv => target.tempTables.put(kv._1, kv._2))
}
}
| MLnick/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala | Scala | apache-2.0 | 52,636 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.database.test
import java.util.Base64
import java.util.concurrent.TimeoutException
import java.util.concurrent.atomic.AtomicInteger
import akka.http.scaladsl.model.ContentType
import akka.stream.scaladsl.Source
import akka.util.ByteString
import org.scalatest.Assertions
import spray.json.DefaultJsonProtocol._
import spray.json._
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.database._
import org.apache.openwhisk.core.database.memory.MemoryArtifactStore
import org.apache.openwhisk.core.entity.Attachments.Attached
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.core.entity.types.{AuthStore, EntityStore}
import scala.collection.mutable.ListBuffer
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.{Duration, DurationInt}
import scala.language.postfixOps
import scala.util.{Failure, Random, Success, Try}
/**
* WARNING: the put/get/del operations in this trait operate directly on the datastore,
* and in the presence of a cache, there will be inconsistencies if one mixes these
* operations with those that flow through the cache. To mitigate this, use unique asset
* names in tests, and defer all cleanup to the end of a test suite.
*/
trait DbUtils extends Assertions {
implicit val dbOpTimeout = 15 seconds
val instance = ControllerInstanceId("0")
val docsToDelete = ListBuffer[(ArtifactStore[_], DocInfo)]()
case class RetryOp() extends Throwable
val cnt = new AtomicInteger(0)
def transid() = TransactionId(cnt.incrementAndGet().toString)
// Call each few successfully 5 before the test continues to increase probability, that each node of the
// CouchDB/Cloudant cluster is updated.
val successfulViewCalls = 5
/**
* Retry an operation 'step()' awaiting its result up to 'timeout'.
* Attempt the operation up to 'count' times. The future from the
* step is not aborted --- TODO fix this.
*/
def retry[T](step: () => Future[T], timeout: Duration, count: Int = 100): Try[T] = {
val graceBeforeRetry = 50.milliseconds
val future = step()
if (count > 0) try {
val result = Await.result(future, timeout)
Success(result)
} catch {
case n: NoDocumentException =>
println("no document exception, retrying")
Thread.sleep(graceBeforeRetry.toMillis)
retry(step, timeout, count - 1)
case RetryOp() =>
println("condition not met, retrying")
Thread.sleep(graceBeforeRetry.toMillis)
retry(step, timeout, count - 1)
case t: TimeoutException =>
println("timed out, retrying")
Thread.sleep(graceBeforeRetry.toMillis)
retry(step, timeout, count - 1)
case t: Throwable =>
println(s"unexpected failure $t")
Failure(t)
} else Failure(new NoDocumentException("timed out"))
}
/**
* Wait on a view to update with documents added to namespace. This uses retry above,
* where the step performs a direct db query to retrieve the view and check the count
* matches the given value.
*/
def waitOnView[Au](db: ArtifactStore[Au], namespace: EntityName, count: Int, view: View)(
implicit context: ExecutionContext,
transid: TransactionId,
timeout: Duration): Unit =
waitOnViewImpl(db, List(namespace.asString), List(namespace.asString, WhiskEntityQueries.TOP), count, view)
/**
* Wait on a view to update with documents added to namespace. This uses retry above,
* where the step performs a direct db query to retrieve the view and check the count
* matches the given value.
*/
def waitOnView[Au](db: ArtifactStore[Au], path: EntityPath, count: Int, view: View)(
implicit context: ExecutionContext,
transid: TransactionId,
timeout: Duration): Unit =
waitOnViewImpl(db, List(path.asString), List(path.asString, WhiskEntityQueries.TOP), count, view)
/**
* Wait on a view to update with documents added(don't specify the namespace). This uses retry above,
* where the step performs a direct db query to retrieve the view and check the count
* matches the given value.
*/
def waitOnView[Au](db: ArtifactStore[Au], count: Int, view: View)(implicit context: ExecutionContext,
transid: TransactionId,
timeout: Duration): Unit =
waitOnViewImpl(db, List.empty, List.empty, count, view)
/**
* Wait on a view to update with documents added to namespace. This uses retry above,
* where the step performs a direct db query to retrieve the view and check the count
* matches the given value.
*/
private def waitOnViewImpl[Au](
db: ArtifactStore[Au],
startKey: List[String],
endKey: List[String],
count: Int,
view: View)(implicit context: ExecutionContext, transid: TransactionId, timeout: Duration): Unit = {
// Query the view at least `successfulViewCalls` times successfully, to handle inconsistency between several CouchDB-nodes.
(0 until successfulViewCalls).map { _ =>
val success = retry(() => {
db.query(view.name, startKey, endKey, 0, 0, false, true, false, StaleParameter.No) map { l =>
if (l.length != count) {
throw RetryOp()
} else true
}
}, timeout)
assert(success.isSuccess, "wait aborted")
}
}
/**
* Wait on a view specific to a collection to update with documents added to that collection in namespace.
* This uses retry above, where the step performs a collection-specific view query using the collection
* factory. The result count from the view is checked against the given value.
*/
def waitOnView(
db: EntityStore,
factory: WhiskEntityQueries[_],
namespace: EntityPath,
count: Int,
includeDocs: Boolean = false)(implicit context: ExecutionContext, transid: TransactionId, timeout: Duration) = {
// Query the view at least `successfulViewCalls` times successfully, to handle inconsistency between several CouchDB-nodes.
(0 until successfulViewCalls).map { _ =>
val success = retry(() => {
factory.listCollectionInNamespace(db, namespace, 0, 0, includeDocs) map { l =>
if (l.fold(_.length, _.length) < count) {
throw RetryOp()
} else true
}
}, timeout)
assert(success.isSuccess, "wait aborted")
}
}
/**
* Wait on view for the authentication table. This is like the other waitOnViews but
* specific to the WhiskAuth records.
*/
def waitOnView(db: AuthStore, authkey: BasicAuthenticationAuthKey, count: Int)(implicit context: ExecutionContext,
transid: TransactionId,
timeout: Duration) = {
// Query the view at least `successfulViewCalls` times successfully, to handle inconsistency between several CouchDB-nodes.
(0 until successfulViewCalls).map { _ =>
val success = retry(() => {
Identity.list(db, List(authkey.uuid.asString, authkey.key.asString)) map { l =>
if (l.length != count) {
throw RetryOp()
} else true
}
}, timeout)
assert(success.isSuccess, "wait aborted after: " + timeout + ": " + success)
}
}
/**
* Wait on view using the CouchDbRestClient. This is like the other waitOnViews.
*/
def waitOnView(db: CouchDbRestClient, designDocName: String, viewName: String, count: Int)(
implicit context: ExecutionContext,
timeout: Duration) = {
// Query the view at least `successfulViewCalls` times successfully, to handle inconsistency between several CouchDB-nodes.
(0 until successfulViewCalls).map { _ =>
val success = retry(
() => {
db.executeView(designDocName, viewName)().map {
case Right(doc) =>
val length = doc.fields("rows").convertTo[List[JsObject]].length
if (length != count) {
throw RetryOp()
} else true
case Left(_) =>
throw RetryOp()
}
},
timeout)
assert(success.isSuccess, "wait aborted after: " + timeout + ": " + success)
}
}
/**
* Puts document 'w' in datastore, and add it to gc queue to delete after the test completes.
*/
def put[A, Au >: A](db: ArtifactStore[Au], w: A, garbageCollect: Boolean = true)(
implicit transid: TransactionId,
timeout: Duration = 10 seconds): DocInfo = {
val docFuture = db.put(w)
val doc = Await.result(docFuture, timeout)
assert(doc != null)
if (garbageCollect) docsToDelete += ((db, doc))
doc
}
def putAndAttach[A <: DocumentRevisionProvider, Au >: A](
db: ArtifactStore[Au],
doc: A,
update: (A, Attached) => A,
contentType: ContentType,
docStream: Source[ByteString, _],
oldAttachment: Option[Attached],
garbageCollect: Boolean = true)(implicit transid: TransactionId, timeout: Duration = 10 seconds): DocInfo = {
val docFuture = db.putAndAttach[A](doc, update, contentType, docStream, oldAttachment)
val newDoc = Await.result(docFuture, timeout)._1
assert(newDoc != null)
if (garbageCollect) docsToDelete += ((db, newDoc))
newDoc
}
/**
* Gets document by id from datastore, and add it to gc queue to delete after the test completes.
*/
def get[A <: DocumentRevisionProvider, Au >: A](db: ArtifactStore[Au],
docid: DocId,
factory: DocumentFactory[A],
garbageCollect: Boolean = true)(implicit transid: TransactionId,
timeout: Duration = 10 seconds,
ma: Manifest[A]): A = {
val docFuture = factory.get(db, docid)
val doc = Await.result(docFuture, timeout)
assert(doc != null)
if (garbageCollect) docsToDelete += ((db, docid.asDocInfo))
doc
}
/**
* Deletes document by id from datastore.
*/
def del[A <: WhiskDocument, Au >: A](db: ArtifactStore[Au], docid: DocId, factory: DocumentFactory[A])(
implicit transid: TransactionId,
timeout: Duration = 10 seconds,
ma: Manifest[A]) = {
val docFuture = factory.get(db, docid)
val doc = Await.result(docFuture, timeout)
assert(doc != null)
Await.result(db.del(doc.docinfo), timeout)
}
/**
* Deletes document by id and revision from datastore.
*/
def delete(db: ArtifactStore[_], docinfo: DocInfo)(implicit transid: TransactionId,
timeout: Duration = 10 seconds) = {
Await.result(db.del(docinfo), timeout)
}
/**
* Puts a document 'entity' into the datastore, then do a get to retrieve it and confirm the identity.
*/
def putGetCheck[A <: DocumentRevisionProvider, Au >: A](db: ArtifactStore[Au],
entity: A,
factory: DocumentFactory[A],
gc: Boolean = true)(implicit transid: TransactionId,
timeout: Duration = 10 seconds,
ma: Manifest[A]): (DocInfo, A) = {
val doc = put(db, entity, gc)
assert(doc != null && doc.id.asString != null && doc.rev.asString != null)
val future = factory.get(db, doc.id, doc.rev)
val dbEntity = Await.result(future, timeout)
assert(dbEntity != null)
assert(dbEntity == entity)
(doc, dbEntity)
}
/**
* Deletes all documents added to gc queue.
*/
def cleanup()(implicit timeout: Duration = 10 seconds) = {
docsToDelete.map { e =>
Try {
Await.result(e._1.del(e._2)(TransactionId.testing), timeout)
Await.result(e._1.deleteAttachments(e._2)(TransactionId.testing), timeout)
}
}
docsToDelete.clear()
}
/**
* Generates a Base64 string for code which would not be inlined by the ArtifactStore
*/
def nonInlinedCode(db: ArtifactStore[_]): String = {
encodedRandomBytes(nonInlinedAttachmentSize(db))
}
/**
* Size in bytes for attachments which would always be inlined.
*/
def inlinedAttachmentSize(db: ArtifactStore[_]): Int = {
db match {
case inliner: AttachmentSupport[_] =>
inliner.maxInlineSize.toBytes.toInt - 1
case _ =>
throw new IllegalStateException(s"ArtifactStore does not support attachment inlining $db")
}
}
/**
* Size in bytes for attachments which would never be inlined.
*/
def nonInlinedAttachmentSize(db: ArtifactStore[_]): Int = {
db match {
case inliner: AttachmentSupport[_] =>
inliner.maxInlineSize.toBytes.toInt * 2
case _ =>
42
}
}
def assumeAttachmentInliningEnabled(db: ArtifactStore[_]): Unit = {
assume(inlinedAttachmentSize(db) > 0, "Attachment inlining is disabled")
}
protected def encodedRandomBytes(size: Int): String = Base64.getEncoder.encodeToString(randomBytes(size))
def isMemoryStore(store: ArtifactStore[_]): Boolean = store.isInstanceOf[MemoryArtifactStore[_]]
def isCouchStore(store: ArtifactStore[_]): Boolean = store.isInstanceOf[CouchDbRestStore[_]]
  /** Evicts `entity` from the document cache maintained by the given factory. */
  protected def removeFromCache[A <: DocumentRevisionProvider](entity: WhiskEntity, factory: DocumentFactory[A])(
    implicit ec: ExecutionContext): Unit = {
    factory.removeId(CacheKey(entity))
  }
protected def randomBytes(size: Int): Array[Byte] = {
val arr = new Array[Byte](size)
Random.nextBytes(arr)
arr
}
}
| starpit/openwhisk | tests/src/test/scala/org/apache/openwhisk/core/database/test/DbUtils.scala | Scala | apache-2.0 | 14,768 |
package org.openmole.gui.client.core.alert
/*
* Copyright (C) 30/12/16 // mathieu.leclaire@openmole.org
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import org.openmole.gui.client.core.files.TreeNodeTabs
import org.openmole.gui.client.core.panels.stackPanel
import rx._
import scalatags.JsDom.all._
import scaladget.tools._
import scalatags.JsDom.all.{ onclick, raw, span }
import org.openmole.gui.ext.client._
import org.openmole.gui.ext.client.Utils._
import org.scalajs.dom.raw.HTMLDivElement
import scaladget.bootstrapnative.bsn.btn_default
import scalatags.JsDom.{ TypedTag, tags }
import org.openmole.gui.ext.data._
/** Severity levels for banner messages. */
object BannerLevel {
  /** Informational message. */
  object Regular extends BannerLevel
  /** Error-level message (drives the red banner colour). */
  object Critical extends BannerLevel
}
/** Marker trait for the severity of a banner message. */
sealed trait BannerLevel
/** A message shown in the banner: the rendered div plus its severity. */
case class BannerMessage(messageDiv: TypedTag[HTMLDivElement], bannerLevel: BannerLevel)
/**
 * Banner shown at the top of the UI to surface messages to the user.
 *
 * At most the two most recent distinct messages are displayed; the banner
 * colour reflects the messages currently shown (red if any is Critical,
 * blue otherwise). `resizeTabs` is invoked when the banner expands so the
 * surrounding layout can adapt.
 */
class BannerAlert(resizeTabs: () ⇒ Unit) {
  implicit val ctx: Ctx.Owner = Ctx.Owner.safe()
  private val bannerMessages: Var[Seq[BannerMessage]] = Var(Seq())
  /** True whenever there is at least one message to display. */
  val isOpen = bannerMessages.map { bm ⇒ !bm.isEmpty }
  private val bannerDiv = tags.div(
    Rx {
      tags.div(omsheet.bannerAlert +++ (backgroundColor := color))(
        tags.div(omsheet.bannerAlertInner)(
          for {
            bm ← bannerMessages()
          } yield bm.messageDiv
        )
      )
    }, span(omsheet.closeBanner, onclick := { () ⇒ clear })(
      raw("×")
    )
  )(height := 70)
  lazy val banner = isOpen.expandDiv(bannerDiv, () ⇒ {
    resizeTabs()
  })
  /** Removes all messages, closing the banner. */
  def clear = bannerMessages() = Seq()
  // Keep only the two most recent distinct messages.
  private def registerMessage(bannerMessage: BannerMessage) =
    bannerMessages() = (bannerMessages.now :+ bannerMessage).distinct.takeRight(2)
  /** Registers a message with a "Details" button opening `details` in the stack panel. */
  def registerWithDetails(message: String, details: String, bannerLevel: BannerLevel = BannerLevel.Regular) =
    registerMessage(
      BannerMessage(
        tags.div(tags.span(message), tags.button(btn_default +++ (marginLeft := 10), "Details", onclick := { () ⇒
          stackPanel.content() = details
          stackPanel.dialog.show
        })),
        bannerLevel // fix: the level parameter was previously ignored (always BannerLevel.Critical)
      )
    )
  /** Registers a plain text message at the given severity. */
  def register(message: String, bannerLevel: BannerLevel = BannerLevel.Regular): Unit =
    registerMessage(BannerMessage(tags.div(tags.span(message)), bannerLevel))
  /** Registers an arbitrary pre-rendered div at the given severity. */
  def registerDiv(messageDiv: TypedTag[HTMLDivElement], level: BannerLevel = BannerLevel.Regular) =
    registerMessage(BannerMessage(messageDiv, level))
  /** Registers `message`, attaching `stack` as expandable details when present. */
  def registerWithStack(message: String, stack: Option[String], bannerLevel: BannerLevel = BannerLevel.Regular) =
    stack match {
      case Some(s) ⇒ registerWithDetails(message, s, bannerLevel)
      case None ⇒ register(message, bannerLevel)
    }
  // Red if any critical message is present, dark grey when empty, blue otherwise.
  private def color = {
    if (bannerMessages.now.exists(_.bannerLevel == BannerLevel.Critical)) omsheet.RED
    else if (bannerMessages.now.isEmpty) omsheet.DARK_GREY
    else omsheet.BLUE
  }
}
| openmole/openmole | openmole/gui/client/org.openmole.gui.client.core/src/main/scala/org/openmole/gui/client/core/alert/BannerAlert.scala | Scala | agpl-3.0 | 3,496 |
package org.vitrivr.adampro.query.ast.internal
import org.vitrivr.adampro.data.entity.Entity._
import org.vitrivr.adampro.query.ast.generic.{ExpressionDetails, QueryEvaluationOptions, QueryExpression}
import org.vitrivr.adampro.query.query.RankingQuery
import org.apache.spark.sql.DataFrame
import org.vitrivr.adampro.process.SharedComponentContext
import org.vitrivr.adampro.query.tracker.QueryTracker
import org.vitrivr.adampro.query.execution.parallel.{ParallelPathChooser, ParallelQueryHandler}
import scala.concurrent.duration.Duration
/**
* adamtwo
*
* Ivan Giangreco
* May 2016
*/
/**
 * Evaluates several query expressions in parallel under a fixed time budget
 * and returns the results (with a confidence value) collected when time is up.
 */
case class TimedScanExpression(private val exprs: Seq[QueryExpression], private val timelimit: Duration, id: Option[String] = None)(filterExpr: Option[QueryExpression] = None)(@transient implicit val ac: SharedComponentContext) extends QueryExpression(id) {
  // Confidence of the time-limited result; populated by run().
  var confidence : Option[Float] = None
  override val info = ExpressionDetails(None, Some("Timed Scan Expression"), id, confidence)
  _children ++= exprs ++ filterExpr.map(Seq(_)).getOrElse(Seq())
  /** Builds the parallel scan paths for `entityname`/`nnq` via the given path chooser. */
  def this(entityname: EntityName, nnq: RankingQuery, pathChooser: ParallelPathChooser, timelimit: Duration, id: Option[String])(filterExpr: Option[QueryExpression])(implicit ac: SharedComponentContext) = {
    this(pathChooser.getPaths(entityname, nnq), timelimit, id)(filterExpr)
  }
  /**
   * Runs the timed parallel scan, combining the incoming filter with this
   * expression's own filter expression first.
   *
   * @return the results obtained within the time limit, if any
   */
  override protected def run(options : Option[QueryEvaluationOptions], filter: Option[DataFrame] = None)(tracker : QueryTracker)(implicit ac: SharedComponentContext): Option[DataFrame] = {
    log.trace("perform time-limited evaluation")
    ac.sc.setJobGroup(id.getOrElse(""), "timed parallel query", interruptOnCancel = true)
    // NOTE(review): DataFrame.join is invoked without a join condition here;
    // depending on the Spark version that is a natural/cross join — confirm intended.
    val prefilter = if (filter.isDefined && filterExpr.isDefined) {
      Some(filter.get.join(filterExpr.get.execute(options)(tracker).get))
    } else if (filter.isDefined) {
      filter
    } else if (filterExpr.isDefined){
      filterExpr.get.execute(options)(tracker)
    } else {
      None
    }
    val res = ParallelQueryHandler.timedParallelQuery(exprs, timelimit, prefilter, options, id)(tracker)
    confidence = Some(res.confidence)
    res.results
  }
  // Equality is based on the scanned expressions and the time limit only;
  // filterExpr and id are deliberately excluded (consistent with hashCode).
  override def equals(that: Any): Boolean =
    that match {
      case that: TimedScanExpression => this.exprs.equals(that.exprs) && this.timelimit.equals(that.timelimit)
      case _ => false
    }
  override def hashCode(): Int = {
    val prime = 31
    var result = 1
    result = prime * result + exprs.hashCode
    result = prime * result + timelimit.hashCode
    result
  }
}
| dbisUnibas/ADAMpro | src/main/scala/org/vitrivr/adampro/query/ast/internal/TimedScanExpression.scala | Scala | mit | 2,565 |
package scribe.benchmark.tester
/** Benchmark driver that logs every message at INFO level via a log4s logger named "scala". */
class Log4SLoggingTester extends LoggingTester {
  override def run(messages: Iterator[String]): Unit = {
    val log = org.log4s.getLogger("scala")
    messages.foreach(m => log.info(m))
  }
}
package models
import java.time.LocalDateTime
import play.api.libs.json.Json
/** Base type for documents stored in MongoDB; `getId` yields the document key. */
sealed abstract class MongoModel {
  def getId: String
}
/** A tracked game with its per-shop price history; keyed by `name`. */
case class Game(name: String, image: String,
                pricesPerShop: Map[String, List[PriceEntry]], lastUpdate: LocalDateTime)
  extends MongoModel {
  override def getId: String = name
}
/** One observed price at a point in time. */
case class PriceEntry(date: LocalDateTime, price: Money)
/** A user identified by e-mail address, with a wish list of games; keyed by `email`. */
case class User(email: String, wishList: List[Wish])
  extends MongoModel {
  override def getId: String = email
}
/** A wished-for game and the price below which the user wants it. */
case class Wish(gameName: String, priceThreshold: Money)
/** An amount of money with a currency label (format not enforced here). */
case class Money(value: BigDecimal, currency: String)
/**
 * Play-JSON formats for the model types.
 *
 * Declared `lazy` so that formats depending on other formats (e.g. gameFormat
 * on priceEntryFormat and moneyFormat) are initialised on first use,
 * regardless of declaration order.
 */
object JSONHelpers {
  implicit lazy val moneyFormat = Json.format[Money]
  implicit lazy val wishFormat = Json.format[Wish]
  implicit lazy val userFormat = Json.format[User]
  implicit lazy val priceEntryFormat = Json.format[PriceEntry]
  implicit lazy val gameFormat = Json.format[Game]
}
| salceson/toik-games-price-comparator | app/models/Models.scala | Scala | mit | 913 |
package poly.algebra
import poly.algebra.factory._
import poly.algebra.specgroup._
/**
* @author Tongfei Chen
*/
/** Typeclass for types whose values have an extractable sign. */
trait SignOps[@sp(fdi) X] {
  /** Returns the sign of `x`, as a value of the same type. */
  def sgn(x: X): X
}
/** Implicit resolution helper for [[SignOps]] instances. */
object SignOps extends ImplicitGetter[SignOps]
| ctongfei/poly-algebra | src/main/scala/poly/algebra/SignOps.scala | Scala | mit | 216 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.scala.dsl;
import org.apache.camel.model.LoopDefinition
import org.apache.camel.scala.dsl.builder.RouteBuilder
/**
* Scala enrichment for Camel's LoopDefinition
*/
case class SLoopDefinition(override val target: LoopDefinition)(implicit val builder: RouteBuilder) extends SAbstractDefinition[LoopDefinition] {
  /** Returns a Scala wrapper around a copy of the underlying loop definition. */
  def copy() = wrap(target.copy())
}
| engagepoint/camel | components/camel-scala/src/main/scala/org/apache/camel/scala/dsl/SLoopDefinition.scala | Scala | apache-2.0 | 1,184 |
/*
* Copyright (c) 2015, Michael Lewis
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.waioeka.sbt
import sbt.OutputStrategy
/**
 * Cucumber
 * Companion object for the Cucumber class. Provides apply method.
 */
object Cucumber {
  /**
   * Creates a new Cucumber object.
   *
   * @param jvmParameters the JVM parameters.
   * @param cucumberParameters the Cucumber parameters.
   * @return a Cucumber runner configured with the given parameters.
   */
  def apply(
             jvmParameters: JvmParameters,
             cucumberParameters: CucumberParameters) : Cucumber = {
    new Cucumber(jvmParameters)(cucumberParameters)
  }
}
/**
 * Cucumber
 * This class is responsible for running Cucumber.
 *
 * @param jvmParameters the JVM parameters
 * @param options the Cucumber parameters.
 */
class Cucumber(
                jvmParameters: JvmParameters)(
                options: CucumberParameters
              ) {
  /** Standalone JVM that will run Cucumber. */
  val jvm : Jvm = Jvm(jvmParameters.classPath, jvmParameters.systemProperties)
  /**
   * Run Cucumber, within the JVM.
   *
   * @param output strategy that receives the forked process output
   * @return the exit code of the forked JVM
   */
  def run(output: OutputStrategy) : Int =
    jvm.run(jvmParameters.mainClass,options.toList)(output)
}
| rrramiro/cucumber | src/main/scala/com/waioeka/sbt/Cucumber.scala | Scala | bsd-2-clause | 2,470 |
package models
/** A track saved in the user's library, with the time it was added. */
case class SavedTrack(
  added_at: java.util.Date, // snake_case mirrors the JSON field name of the web API
  track: Track
)
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.distributed
import akka.actor._
import app.runutils.IOHandling.InputSource
import app.runutils.RunningOptions
import com.madhukaraphatak.sizeof.SizeEstimator
import logic.Examples.Example
import logic.{Clause, Theory}
import oled.distributed.Structures._
import org.slf4j.LoggerFactory
/**
* Created by nkatz on 2/15/17.
*/
/**
 * Coordinates a pool of learning Node actors, one per dataset partition.
 *
 * This actor arbitrates a single "specialization slot": only one node may
 * specialize (expand) its theory at a time; other requesters are queued.
 * When all nodes finish, it collects their theories, merges per-clause
 * statistics and reports the final theory (plus timing and message-cost
 * accounting) to its parent.
 */
class TopLevelActor[T <: InputSource](
    val dataOptions: List[(T, T => Iterator[Example])],
    val inputParams: RunningOptions,
    val targetConcept: TargetConcept) extends Actor {
  import context._
  // Number of nodes that have not yet finished processing their data.
  var actorsPoolSize = 0
  // Number of nodes that have not yet sent back their final theory.
  var nodesCounter = 0
  var startTime = 0L
  var endTime = 0L
  /* This function starts the learning Nodes. */
  // One Node actor per entry in dataOptions; each node is given the names of
  // all its siblings so the nodes can exchange messages directly.
  def getActorsPool() = {
    val NodeActorNames = (1 to dataOptions.length).toList map (i => s"Node-$i-${targetConcept.toString}")
    val nodeActorsPool = (NodeActorNames zip this.dataOptions) map { node =>
      // NOTE: "nodeDataDunction" is a typo for "nodeDataFunction" (the data iterator factory)
      val (nodeName, nodeOptions, nodeDataDunction) = (node._1, node._2._1, node._2._2)
      val otherActors = NodeActorNames.filter(_ != nodeName)
      context.actorOf(Props(new Node(otherActors, targetConcept, inputParams, nodeOptions, nodeDataDunction)), name = nodeName)
    }
    nodeActorsPool
  }
  val actorsPool: List[ActorRef] = getActorsPool()
  val actorNames: List[String] = actorsPool.map(x => x.path.name)
  // FIFO queue of nodes waiting for the (single) specialization slot.
  private var queuedExpandingNodes = scala.collection.mutable.Queue[QueuedExpandingNode]()
  var nodeHavingTheSlot = "" // that's only for logging
  def getOtherActorNames(actorName: String) = actorNames.filter(name => name != actorName)
  def getOtherActorRefs(a: String) = getOtherActorNames(a) map (actorName => context.actorSelection(s"${self.path}/$actorName"))
  private var finalTheories = List[Theory]() // these should all be the same
  /*
   * The logger for this class. Getting a logger this way instead of mixin-in the LazyLogging trait allows to
   * name the logger after a particular class instance, which is helpful for tracing messages
   * between different instances of the same class.
   * */
  private val logger = LoggerFactory.getLogger(self.path.name)
  // Estimated sizes (in bytes) of the messages sent by this actor, for cost reporting.
  private var messages = List[Long]()
  def updateMessages(m: AnyRef) = {
    val size = SizeEstimator.estimate(m)
    messages = messages :+ size
  }
  // Per-child message counts/sizes collected with the final theories.
  private var childrenMsgNums = List[Int]()
  private var childrenMsgSizes = List[Long]()
  def receive = {
    case "go" =>
      this.actorsPoolSize = actorsPool.length
      this.nodesCounter = actorsPool.length
      Thread.sleep(4000)
      this.startTime = System.nanoTime()
      actorsPool foreach (a => a ! "go")
    case "go-no-communication" =>
      this.actorsPoolSize = actorsPool.length
      this.nodesCounter = actorsPool.length
      Thread.sleep(4000)
      this.startTime = System.nanoTime()
      actorsPool foreach (a => a ! "go-no-communication")
      become(replyHandler)
    /*--------------------------------------------------------------------------------------*/
    // For debugging
    //context.actorOf(Props( new PingActor(this.actorNames) ), name = "Pinging-Actor") ! "go"
    /*--------------------------------------------------------------------------------------*/
    case request: SpecializationTicketRequest =>
      become(requestHandler)
      // re-send the message to self to be processed
      self ! request
  }
  def replyHandler: Receive = {
    // Upon receiving a reply, the flow continues by either sending a specialization
    // ticket to the next enqueued node, or (if the queue is empty), by freeing the specialization slot
    // and becoming a requestHandler to serve further expansion requests (see comment at the handleReply() method).
    case reply: ExpansionFinished =>
      handleReply()
    // When an expansion request while this actor is in a replyHandler state (and therefore another node is
    // specializing), the request is enqueued to be processed when the expansion slot opens
    case request: SpecializationTicketRequest =>
      this.queuedExpandingNodes.enqueue(new QueuedExpandingNode(request.senderName, request.otherNodeNames))
      logger.info(s"Node ${request.senderName} is enqueued for expansion. The queue now is ${this.queuedExpandingNodes.map(x => x.senderName).mkString(" ")}")
      context.actorSelection(s"${self.path}/${request.senderName}") ! new AnotherNodeIsSpecializing(this.nodeHavingTheSlot)
    // This message is received by an enqueued expansion node that eventually received
    // its specialization ticket, but all of its candidates have already been specialized
    // in the meantime. The aborting node has already switched to normal state to continue processing
    // so no message is required to be sent to it. The flow continues by either sending a specialization
    // ticket to the next enqueued node, or (if the queue is empty), by freeing the specialization slot
    // and becoming a requestHandler to serve further expansion requests (see comment at the handleReply() method).
    case abort: ExpansionAbortMsg =>
      logger.info(s"Node ${abort.abortingNodeName} aborted expansion (all candidates already specialized).")
      val others = getOtherActorRefs(abort.abortingNodeName)
      others foreach (_ ! new PrioritizedNodeAbortedExpansion(abort.abortingNodeName))
      handleReply()
    case msg: NodeDoneMessage =>
      acceptNewDoneMsg(msg)
    case msg: NodeTheoryMessage =>
      acceptNewLearntTheory(msg)
  }
  def requestHandler: Receive = {
    // When in a requestHandler state, the expansion slot (one expanding node at a time) is free,
    // so simply sent a SpecializationTicket upon receiving a request when at this state.
    case request: SpecializationTicketRequest =>
      logger.info(s"Received a specialization ticket request from ${request.senderName}")
      processNewRequest(request.senderName)
    // When a node supervised by this top-level actor, wrap things up
    case msg: NodeDoneMessage =>
      acceptNewDoneMsg(msg)
    // When all nodes supervised by this top-level actor are done, wrap things up
    case msg: NodeTheoryMessage =>
      acceptNewLearntTheory(msg)
  }
  // Grants the specialization slot to `requestingNodeName`, notifies the other
  // nodes that someone is specializing, and switches to replyHandler.
  def processNewRequest(requestingNodeName: String) = {
    val others = getOtherActorRefs(requestingNodeName)
    this.nodeHavingTheSlot = requestingNodeName // that's only for logging
    others foreach (_ ! new AnotherNodeIsSpecializing(requestingNodeName))
    become(replyHandler)
    context.actorSelection(s"${self.path}/$requestingNodeName") ! new SpecializationTicket(self.path.name)
    logger.info(s"Sent the ticket to $requestingNodeName")
  }
  // Either passes the slot to the next queued node or frees it.
  def handleReply() = {
    if (this.queuedExpandingNodes.nonEmpty) {
      val nextInQueue = this.queuedExpandingNodes.dequeue()
      logger.info(s"Sending specialization ticket to queued node ${nextInQueue.senderName}")
      processNewRequest(nextInQueue.senderName)
    } else {
      this.nodeHavingTheSlot = "none" // that's only for logging
      become(requestHandler)
    }
  }
  // Counts finished nodes; when all are done, asks every node for its theory.
  def acceptNewDoneMsg(msg: NodeDoneMessage) = {
    this.actorsPoolSize -= 1
    logger.info(s"Node ${msg.sender} is done. ${this.actorsPoolSize} nodes remaining")
    if (this.actorsPoolSize == 0) {
      logger.info("All processing nodes are done")
      val theoryRequest = new TheoryRequestMessage(self.path.name)
      this.actorsPool foreach (a => a ! theoryRequest)
    }
  }
  // Collects a node's final theory; when all have reported, stops the pool and
  // sends the merged final theory plus timing/message statistics to the parent.
  def acceptNewLearntTheory(msg: NodeTheoryMessage) = {
    this.nodesCounter -= 1
    logger.info(s"Node ${msg.sender} sent:\n${msg.theory.clauses.map(x => x.showWithStats + s"evaluated on ${x.seenExmplsNum} exmpls | refs: ${x.refinements.length}").mkString("\n")}")
    this.finalTheories = this.finalTheories :+ msg.theory
    this.childrenMsgNums = this.childrenMsgNums :+ msg.msgNum
    this.childrenMsgSizes = this.childrenMsgSizes :+ msg.msgSize
    if (this.nodesCounter == 0) {
      this.endTime = System.nanoTime()
      this.actorsPool.foreach(_ ! PoisonPill)
      val totalTime = (this.endTime - this.startTime) / 1000000000.0
      logger.info(s"Total training time: $totalTime sec")
      val totalMsgNum = childrenMsgNums.sum + messages.length
      val totalMsgSize = childrenMsgSizes.sum + messages.sum
      context.parent ! new FinalTheoryMessage(getFinalTheory(), totalTime.toString, totalMsgNum, totalMsgSize, targetConcept)
    }
  }
  /*
  def getFinalTheory() = {
    val uuids = this.finalTheories.head.clauses.map(_.uuid)
    val withAccumScores = uuids.foldLeft(List[Clause]()) { (accumed, newUUID) =>
      val copies = this.finalTheories.flatMap(theory => theory.clauses.filter(p => p.uuid == newUUID))
      if (copies.length != this.finalTheories.length) throw new RuntimeException("Produced non-identical theories")
      val (tps, fps, fns, exmpls) = copies.foldLeft(0, 0, 0, 0) { (x, y) =>
        (x._1 + y.tps, x._2 + y.fps, x._3 + y.fns, x._4 + y.seenExmplsNum)
      }
      copies.head.tps = tps
      copies.head.fps = fps
      copies.head.fns = fns
      copies.head.seenExmplsNum = exmpls
      accumed :+ copies.head
    }
    // Poor-man's pruning. Not online, not time-consuming (and a bit cheating) offline,
    // just a quick filtering to see how we're doing
    val filteredTheory = withAccumScores.filter(p =>
      p.seenExmplsNum > inputParams.minSeenExmpls && p.score > inputParams.postPruningThreshold)
    filteredTheory
  }
  */
  // Merges the per-node copies of each clause (matched by uuid) by summing
  // their tps/fps/fns/example counts, then keeps only sufficiently evaluated
  // clauses whose score clears the pruning threshold.
  def getFinalTheory() = {
    this.finalTheories.head.clauses.foldLeft(List[Clause]()){ (accum, clause) =>
      val clauseCopies = this.finalTheories.tail.flatMap(theory => theory.clauses.filter(c => c.uuid == clause.uuid))
      if (clauseCopies.length + 1 != this.finalTheories.length) {
        logger.info(s"\nCLAUSE\n${clause.tostring} (uuid: ${clause.uuid}) \nIS NOT FOUND IS SOME FINAL THEORY")
      }
      val sumCounts = clauseCopies.foldLeft(clause.tps, clause.fps, clause.fns, clause.seenExmplsNum) { (x, y) =>
        (x._1 + y.tps, x._2 + y.fps, x._3 + y.fns, x._4 + y.seenExmplsNum)
      }
      clause.tps = sumCounts._1
      clause.fps = sumCounts._2
      clause.fns = sumCounts._3
      clause.seenExmplsNum = sumCounts._4
      if (clause.seenExmplsNum > inputParams.minEvalOn && clause.score >= inputParams.pruneThreshold) {
        accum :+ clause
      } else {
        accum
      }
    }
  }
  /* Modify to handle data in intervals. */
  /*
  def getActorsPool() = {
    val taskNames = (1 to this.taskIntervals.length).map(i => s"dataset-$i").toList
    val NodeActorNames = taskNames map (db => s"Node-$db-${targetConcept.toString}")
    val globalsPool = taskNames.map(t => new Globals(inputParams.entryPath, t))
    val nodeActorsPool = (NodeActorNames, globalsPool, this.taskIntervals).zipped.toList map { x =>
      val (nodeName, global, nodeIntervals) = (x._1, x._2, x._3)
      val otherActors = NodeActorNames.filter(_ != nodeName)
      context.actorOf(Props(
        new Node(otherActors, masterDB, targetConcept, global, getDataFunction, inputParams, nodeIntervals)
      ), name = nodeName)
    }
    nodeActorsPool
  }
  */
}
| nkatzz/OLED | src/main/scala/oled/distributed/TopLevelActor.scala | Scala | gpl-3.0 | 11,696 |
package truerss.api
import com.github.fntz.omhs.QueryReader
import truerss.util.CommonImplicits
/** Pagination window: skip `offset` rows, return at most `limit` rows. */
case class QueryPage(offset: Int, limit: Int)
object QueryPage {
  import CommonImplicits._
  /** First value bound to `name` in the query parameters, if any. */
  def first(queries: Map[String, Iterable[String]], name: String): Option[String] =
    queries.get(name).flatMap(_.headOption)
  /** Offset parameter; 0 when absent or not a valid integer. */
  def offset(queries: Map[String, Iterable[String]]): Int =
    first(queries, "offset").fold(0)(_.toIntOr(0))
  /** Limit parameter; 100 when absent or not a valid integer. */
  def limit(queries: Map[String, Iterable[String]]): Int =
    first(queries, "limit").fold(100)(_.toIntOr(100))
  /** Reads a QueryPage from the query parameters (always succeeds, using defaults). */
  implicit val queryReader: QueryReader[QueryPage] =
    (queries: Map[String, Iterable[String]]) => Some(QueryPage(offset(queries), limit(queries)))
}
/** Filter for listing a source's feeds: unread-only flag plus a pagination window. */
case class SourceFeedsFilter(unreadOnly: Boolean, offset: Int, limit: Int)
object SourceFeedsFilter {
  import QueryPage._
  /**
   * Reads a SourceFeedsFilter from the query parameters.
   *
   * `unreadOnly` defaults to true when the parameter is absent. Note that a
   * value other than "true"/"false" makes String.toBoolean throw.
   *
   * Written as a SAM lambda for consistency with QueryPage.queryReader above.
   */
  implicit val sourceFeedsFilterReader: QueryReader[SourceFeedsFilter] =
    (queries: Map[String, Iterable[String]]) => {
      val unreadOnly = first(queries, "unreadOnly").forall(_.toBoolean)
      Some(SourceFeedsFilter(
        unreadOnly = unreadOnly,
        offset = offset(queries),
        limit = limit(queries)
      ))
    }
}
/**
*
* DeviceManager
* Ledger wallet
*
* Created by Pierre Pollastri on 15/01/16.
*
* The MIT License (MIT)
*
* Copyright (c) 2015 Ledger
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
package co.ledger.wallet.core.device
import java.util.UUID
import co.ledger.wallet.core.device.DeviceFactory.{DeviceDiscovered, DeviceLost, ScanRequest}
import co.ledger.wallet.core.utils.Preferences
import scala.concurrent.{ExecutionContext, Future, Promise}
/**
 * Registry of device factories (one per transport) and of connected devices.
 *
 * Also persists the last connected device's transport/info/uuid in
 * [[Preferences]] so it can be offered for reconnection on next start.
 */
trait DeviceManager[Context] {
  import DeviceManager._
  implicit val ec: ExecutionContext
  /** Transports whose factory reports itself compatible with this platform. */
  def compatibleConnectivityTypes: Future[Set[ConnectivityType]] = Future {
    _deviceManager filter {
      case (t, m) => m.isCompatible
    } map {
      case (t, m) => t
    } toSet
  }
  /** All factories compatible with this platform. */
  def allCompatibleFactories: Iterable[DeviceFactory] = {
    _deviceManager filter {
      case (t, m) => m.isCompatible
    } map {
      case (t, m) => m
    }
  }
  /** Factory for the given transport; throws NoSuchElementException if unknown. */
  def deviceFactory(connectivityType: ConnectivityType): DeviceFactory = {
    _deviceManager(connectivityType)
  }
  /** Starts a scan on every compatible transport, merged into a single request. */
  def requestScan(): ScanRequest = {
    val requests = allCompatibleFactories.map({(factory) =>
      factory.requestScan()
    })
    new CompoundScanRequest(requests)
  }
  /**
   * Assigns a fresh uuid to the device, registers it and persists it as the
   * last connected device.
   */
  def registerDevice(device: Device): Future[UUID] = Future {
    val uuid = UUID.randomUUID()
    device.uuid = uuid
    _registeredDevices(uuid) = device
    preferences.edit()
      .putString("last_device_type", connectivityTypeToString(device.connectivityType))
      .putString("last_device_info", device.info)
      .putString("last_device_uuid", uuid.toString)
      .commit()
    uuid
  }
  // NOTE(review): declared Unit but implemented as a fire-and-forget Future;
  // the removal happens asynchronously and the caller cannot observe completion.
  def unregisterDevice(uuid: UUID): Unit = Future {
    _registeredDevices.remove(uuid)
  }
  // Same fire-and-forget pattern as above.
  def unregisterDevice(device: Device): Unit = Future {
    // TODO: Rewrite
    _registeredDevices.retain((uuid, d) => d != device)
  }
  /** The registered device for `uuid`, or a failed Future if unknown. */
  def connectedDevice(uuid: UUID): Future[Device] = Future {
    _registeredDevices.getOrElse(uuid, throw new Exception("No such device"))
  }
  /** The device persisted as last connected, if still registered. */
  def lastConnectedDevice(): Future[Device] = {
    if (!preferences.contains("last_device_uuid"))
      Future.failed(new Exception("No last device"))
    else
      connectedDevice(UUID.fromString(preferences.string("last_device_uuid").orNull))
  }
  /** The factory and info string of the last connected device, from preferences. */
  def lastConnectedDeviceInfo(): Future[(DeviceFactory, String)] = Future {
    val deviceType = stringToConnectivityType(preferences.string("last_device_type").orNull)
    val deviceInfo = preferences.string("last_device_info").orNull
    (_deviceManager(deviceType), deviceInfo)
  }
  protected def preferences: Preferences
  def context: Context
  protected[this] val _registeredDevices = scala.collection.mutable.Map[UUID, Device]()
  import DeviceManager.ConnectivityTypes._
  // Implementations provide one factory per supported transport.
  protected val _deviceManager: Map[ConnectivityType, DeviceFactory]
  // (delayMillis, action): schedules `action` after the delay on the platform's scheduler.
  type DelayedFunctionHandler = (Long, () => Unit) => Unit
  protected def delayedFunctionHandler: DelayedFunctionHandler
  /** Fans a single ScanRequest out to the per-transport requests and merges their events. */
  private class CompoundScanRequest(requests: Iterable[ScanRequest]) extends ScanRequest {
    override def start(): Unit = {
      // propagate the configured duration to every child before starting
      requests foreach {
        _.duration = duration
      }
      super.start()
    }
    override def onStart(): Unit = {
      for (request <- requests) {
        request.start()
      }
    }
    override protected def runDelayed(delay: Long)(f: => Unit): Unit = delayedFunctionHandler(delay, f _)
    override def onStop(): Unit = {
      for (request <- requests) {
        request.stop()
      }
    }
    // Forward discovery/loss events from every child scan to this request's listeners.
    for (request <- requests) {
      request.onScanUpdate({
        case DeviceDiscovered(device) => notifyDeviceDiscovered(device)
        case DeviceLost(device) => notifyDeviceLost(device)
      })
    }
  }
  // Stable string form used as the preferences value for "last_device_type".
  private def connectivityTypeToString(t: ConnectivityType): String = {
    t match {
      case Usb => "usb"
      case Ble => "ble"
      case Tee => "tee"
      case Nfc => "nfc"
    }
  }
  // Inverse of connectivityTypeToString; throws MatchError on unknown input.
  private def stringToConnectivityType(t: String): ConnectivityType = {
    t match {
      case "usb" => Usb
      case "ble" => Ble
      case "tee" => Tee
      case "nfc" => Nfc
    }
  }
}
object DeviceManager {
  /** The transports over which a device can be reached. */
  object ConnectivityTypes extends Enumeration {
    type ConnectivityType = Value
    val Usb, Ble, Tee, Nfc = Value
  }
  type ConnectivityType = ConnectivityTypes.ConnectivityType
  /** The Android device cannot work with the requested transport. */
  case class AndroidDeviceNotCompatibleException(msg: String) extends Exception(msg)
  /** A permission required for the transport is missing. */
  case class MissingPermissionException(msg: String) extends Exception(msg)
  /** The system service backing the transport is disabled. */
  case class DisabledServiceException(msg: String) extends Exception(msg)
}
| LedgerHQ/ledger-wallet-ripple | src/main/scala/co/ledger/wallet/core/device/DeviceManager.scala | Scala | mit | 5,563 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dbis.pig.plan.rewriting
import dbis.pig.tools.logging.PigletLogging
import dbis.pig.op.{PigOperator, _}
import dbis.pig.plan.DataflowPlan
import dbis.pig.plan.rewriting.Rules.registerAllRules
import dbis.pig.plan.rewriting.rulesets.GeneralRuleset._
import dbis.pig.plan.rewriting.dsl.RewriterDSL
import dbis.pig.plan.rewriting.internals._
import org.kiama.rewriting.Rewriter._
import org.kiama.rewriting.Rewriter.{rewrite => kiamarewrite}
import org.kiama.rewriting.Strategy
import scala.collection.mutable
import scala.reflect.ClassTag
import dbis.setm.SETM.timing
/** Signals a failure during dataflow-plan rewriting. */
case class RewriterException(msg: String) extends Exception(msg)
/** Provides various methods for rewriting [[DataflowPlan]]s by wrapping functionality provided by
* Kiamas [[org.kiama.rewriting.Rewriter]] and [[org.kiama.rewriting.Strategy]] objects.
*
* This object keeps an internal [[Strategy]] object that holds all strategies added via [[addStrategy]]. Calling
* [[processPlan]] with a [[DataflowPlan]] will apply ```all``` those strategies to the plan until none applies
* anymore.
*
* ==Low-level methods==
*
* Methods that directly handle Strategies are provided:
*
* - [[addStrategy]] adds a single strategy to this object. Later calls to [[processPlan]] will then use this
* strategy in addition to all other ones added via this method.
*
* - [[addBinaryPigOperatorStrategy]] turns a function operating on two operators of specific subtypes of
* [[dbis.pig.op.PigOperator]] that are in a parent-child relationship into a strategy and adds it to this object.
*
* - [[buildBinaryPigOperatorStrategy]] is the same as [[addBinaryPigOperatorStrategy]] but doesn't add the
* strategy to this object.
*
* - [[buildRemovalStrategy]] builds a strategy to remove an operator from a [[DataflowPlan]]
*
* - [[processPlan]] applies a single strategy to a [[DataflowPlan]].
*
* ==Higher-level methods==
*
* For common operations on [[DataflowPlan]]s, some convenience methods to build and register a strategy are provided:
*
* - [[addStrategy]] for easily adding any method with a signature of `Any => Option[PigOperator]` as a strategy
*
* - [[addOperatorReplacementStrategy]] is similar to [[addStrategy]] but has some special behaviour that's useful
* when all the function passed to it does is replacing one operator with another one.
*
* - [[merge]] for merging two operators
*
* - [[reorder]] for reordering two operators
*
* ==DSL==
*
* A DSL for easily adding rewriting operations is provided as well, its documented in [[dbis.pig.plan.rewriting.dsl]].
*
* ==DataflowPlan helper methods==
*
* Some operations provided by [[DataflowPlan]] objects are not implemented there, but on this object. These include
*
* - [[insertAfter]] to insert an operator as a child operator of another
*
* - [[remove]] to remove an operator
*
* - [[replace]] to replace one operator with another
*
* - [[swap]] to swap the positions of two operators
*
* ==Helper methods for maintaining connectivity==
*
* After a rewriting operation, the `inputs` and `outputs` attribute of operators other than the rewritten ones
* might be changed (for example to accommodate new or deleted operators). To help maintaining these relationships,
* the methods [[fixMerge]], [[fixReordering]] and [[pullOpAcrossMultipleInputOp]] in several versions is provided.
* Their documentation include hints in which cases they apply.
*
* @todo Not all links in this documentation link to the correct methods, most notably links to overloaded ones.
*
*/
object Rewriter extends PigletLogging
  with StrategyBuilders
  with DFPSupport
  with WindowSupport
  with EmbedSupport
  with MaterializationSupport
  with Fixers
  with FastStrategyAdder
  with RewriterDSL {

  // The combined strategy; every strategy registered via addStrategy is ior'ed into it.
  private var ourStrategy = fail

  /** Resets [[ourStrategy]] to [[fail]]. */
  private def resetStrategy = ourStrategy = fail

  /** Add a [[org.kiama.rewriting.Strategy]] to this Rewriter.
   *
   * It will be added by [[org.kiama.rewriting.Rewriter.ior]]ing it with the already existing ones.
   * @param s The new strategy.
   */
  def addStrategy(s: Strategy): Unit = {
    ourStrategy = ior(ourStrategy, s)
  }

  /** Adds a function `f` as a [[org.kiama.rewriting.Strategy]] to this object.
   *
   * If you're only intending to replace a single PigOperator with another one, use
   * [[addOperatorReplacementStrategy]] instead.
   *
   * @param f
   */
  //noinspection ScalaDocMissingParameterDescription
  def addStrategy(f: Any => Option[PigOperator]): Unit = addStrategy(strategyf(t => f(t)))

  /** Rewrites a given sink node with several [[org.kiama.rewriting.Strategy]]s that were added via
   * [[dbis.pig.plan.rewriting.Rewriter.addStrategy]].
   *
   * @param op The sink node to rewrite.
   * @return The rewritten sink node.
   */
  def processPigOperator(op: PigOperator): Any = {
    processPigOperator(op, ourStrategy)
  }

  /** Process a sink with a specified strategy
   *
   * @param op The sink to process.
   * @param strategy The strategy to apply.
   * @return
   */
  private def processPigOperator(op: PigOperator, strategy: Strategy): Any = {
    // TODO: We apply foreachRecursively separately because it always succeeds,
    // so we'd otherwise run into an infinite loop
    logger.debug(s"apply rewriting to $op")
    val newop = kiamarewrite(strategy)(op)
    newop
  }

  /** Apply all rewriting rules of this Rewriter to a [[dbis.pig.plan.DataflowPlan]].
   *
   * @param plan The plan to process.
   * @return A rewritten [[dbis.pig.plan.DataflowPlan]]
   */
  def processPlan(plan: DataflowPlan): DataflowPlan = timing("rewriting plan") {
    evalExtraRuleCode(plan.extraRuleCode)
    val forewriter = buildTypedCaseWrapper(foreachRecursively)
    val fostrat = manybu(strategyf(t => forewriter(t)))
    val rewriter = ior(outermost(ourStrategy), fostrat)
    processPlan(plan, rewriter)
  }

  /** Apply a single strategy to a [[dbis.pig.plan.DataflowPlan]] and rebuild the plan from the
   * rewritten source nodes.
   *
   * @param plan The plan to process.
   * @param strategy The strategy to apply.
   * @return A rewritten [[dbis.pig.plan.DataflowPlan]]
   */
  def processPlan(plan: DataflowPlan, strategy: Strategy): DataflowPlan = {
    evalExtraRuleCode(plan.extraRuleCode)
    // This looks innocent, but this is where the rewriting happens.
    val newSources = plan.sourceNodes.flatMap(
      processPigOperator(_, strategy) match {
        case Nil => List.empty
        case op: PigOperator => List(op)
        case ops: Seq[PigOperator@unchecked] => ops
        // BUG FIX: the second literal previously lacked the `s` interpolator, so the
        // exception message printed the characters "$e" instead of the actual value.
        case e => throw new IllegalArgumentException("A rewriting operation returned something other than a " +
          s"PigOperator or Sequence of them, namely $e")
      }).filterNot(_.isInstanceOf[Empty]).toList
    var newPlanNodes = mutable.LinkedHashSet[PigOperator]() ++= newSources
    var nodesToProcess = newSources.toList
    // We can't modify nodesToProcess while iterating over it. Therefore we'll iterate over a copy of it as long as
    // it contains elements.
    while (nodesToProcess.nonEmpty) {
      val iter = nodesToProcess.iterator
      nodesToProcess = List[PigOperator]()
      for (source <- iter) {
        // newPlanNodes might already contain this PigOperator, but we encountered it again. Remove it to later add it
        // again, thereby "pushing" it to an earlier position in the new plans list of operators because a
        // LinkedHashSet iterates over the elements in the order of insertion, so PigOperators inserted later get
        // emitted first.
        // This is to make sure that that source is emitted before all other operators that need its data.
        newPlanNodes -= source
        // And remove its outputs as well to revisit them later on.
        newPlanNodes --= source.outputs.flatMap(_.consumer)
        newPlanNodes += source
        for (output <- source.outputs.flatMap(_.consumer)) {
          // We've found a new node - it needs to be included in the new plan, so add it to the new plans nodes.
          newPlanNodes += output
          // And we need to process its output nodes in the future.
          // If we already processed a nodes outputs, they'll be removed again and put at the head of the new plans list
          // of operators.
          nodesToProcess ++= output.outputs.flatMap(_.consumer)
        }
      }
    }
    val newPlan = new DataflowPlan(newPlanNodes.toList.filterNot(_.isInstanceOf[Empty]))
    newPlan.additionalJars ++= plan.additionalJars
    newPlan.udfAliases ++= plan.udfAliases
    newPlan.code = plan.code
    newPlan
  }

  // Install the default rule set as soon as this object is first referenced.
  registerAllRules
}
| ksattler/piglet | src/main/scala/dbis/pig/plan/rewriting/Rewriter.scala | Scala | apache-2.0 | 9,373 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s
import java.util.{Locale, UUID}
import io.fabric8.kubernetes.api.model.{LocalObjectReference, LocalObjectReferenceBuilder, Pod}
import org.apache.commons.lang3.StringUtils
import org.apache.spark.{SPARK_VERSION, SparkConf}
import org.apache.spark.deploy.k8s.Config._
import org.apache.spark.deploy.k8s.Constants._
import org.apache.spark.deploy.k8s.submit._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.ConfigEntry
import org.apache.spark.resource.ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID
import org.apache.spark.util.Utils
/**
* Structure containing metadata for Kubernetes logic to build Spark pods.
*/
private[spark] abstract class KubernetesConf(val sparkConf: SparkConf) {

// Prefix used when generating Kubernetes resource names for this application.
val resourceNamePrefix: String
// Pod labels, annotations, env vars and secret mounts; concrete subclasses derive
// them from role-specific (driver/executor) configuration prefixes.
def labels: Map[String, String]
def environment: Map[String, String]
def annotations: Map[String, String]
def secretEnvNamesToKeyRefs: Map[String, String]
def secretNamesToMountPaths: Map[String, String]
def volumes: Seq[KubernetesVolumeSpec]
def schedulerName: String
def appId: String
// Falls back to "spark" when spark.app.name is unset.
def appName: String = get("spark.app.name", "spark")
def namespace: String = get(KUBERNETES_NAMESPACE)
def imagePullPolicy: String = get(CONTAINER_IMAGE_PULL_POLICY)
// One LocalObjectReference per configured image-pull secret name.
def imagePullSecrets: Seq[LocalObjectReference] = {
sparkConf
.get(IMAGE_PULL_SECRETS)
.map { secret =>
new LocalObjectReferenceBuilder().withName(secret).build()
}
}
def workerDecommissioning: Boolean =
sparkConf.get(org.apache.spark.internal.config.DECOMMISSION_ENABLED)
def nodeSelector: Map[String, String] =
KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, KUBERNETES_NODE_SELECTOR_PREFIX)
// Thin delegations to the underlying SparkConf for convenient lookups.
def contains(config: ConfigEntry[_]): Boolean = sparkConf.contains(config)
def get[T](config: ConfigEntry[T]): T = sparkConf.get(config)
def get(conf: String): String = sparkConf.get(conf)
def get(conf: String, defaultValue: String): String = sparkConf.get(conf, defaultValue)
def getOption(key: String): Option[String] = sparkConf.getOption(key)
}
/** Driver-side view of the Kubernetes configuration: derives pod metadata for the
  * driver from `spark.kubernetes.driver.*` settings.
  */
private[spark] class KubernetesDriverConf(
    sparkConf: SparkConf,
    val appId: String,
    val mainAppResource: MainAppResource,
    val mainClass: String,
    val appArgs: Array[String],
    val proxyUser: Option[String])
  extends KubernetesConf(sparkConf) {

  // Node selector entries that apply to the driver pod only.
  def driverNodeSelector: Map[String, String] =
    KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, KUBERNETES_DRIVER_NODE_SELECTOR_PREFIX)

  override val resourceNamePrefix: String = {
    // An explicit prefix is honoured only in tests; normal runs derive it from the app name.
    val explicitPrefix = if (Utils.isTesting) get(KUBERNETES_DRIVER_POD_NAME_PREFIX) else None
    explicitPrefix.getOrElse(KubernetesConf.getResourceNamePrefix(appName))
  }

  override def labels: Map[String, String] = {
    // Labels that Spark itself manages; user-supplied labels must not collide with these.
    val reservedLabels = Map(
      SPARK_VERSION_LABEL -> SPARK_VERSION,
      SPARK_APP_ID_LABEL -> appId,
      SPARK_APP_NAME_LABEL -> KubernetesConf.getAppNameLabel(appName),
      SPARK_ROLE_LABEL -> SPARK_POD_DRIVER_ROLE)
    val customLabels = KubernetesUtils.parsePrefixedKeyValuePairs(
      sparkConf, KUBERNETES_DRIVER_LABEL_PREFIX)
    for (key <- reservedLabels.keys) {
      require(
        !customLabels.contains(key),
        s"Label with key $key is not allowed as it is reserved for Spark bookkeeping operations.")
    }
    // Reserved labels win on (impossible, per the check above) key overlap.
    customLabels ++ reservedLabels
  }

  override def environment: Map[String, String] =
    KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, KUBERNETES_DRIVER_ENV_PREFIX)

  override def annotations: Map[String, String] =
    KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, KUBERNETES_DRIVER_ANNOTATION_PREFIX)

  // Annotations applied to the driver service rather than the pod.
  def serviceAnnotations: Map[String, String] =
    KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf,
      KUBERNETES_DRIVER_SERVICE_ANNOTATION_PREFIX)

  override def secretNamesToMountPaths: Map[String, String] =
    KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, KUBERNETES_DRIVER_SECRETS_PREFIX)

  override def secretEnvNamesToKeyRefs: Map[String, String] =
    KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, KUBERNETES_DRIVER_SECRET_KEY_REF_PREFIX)

  override def volumes: Seq[KubernetesVolumeSpec] =
    KubernetesVolumeUtils.parseVolumesWithPrefix(sparkConf, KUBERNETES_DRIVER_VOLUMES_PREFIX)

  override def schedulerName: String = get(KUBERNETES_DRIVER_SCHEDULER_NAME).getOrElse("")
}
/** Executor-side view of the Kubernetes configuration: derives pod metadata for an
  * executor from `spark.kubernetes.executor.*` settings.
  */
private[spark] class KubernetesExecutorConf(
    sparkConf: SparkConf,
    val appId: String,
    val executorId: String,
    val driverPod: Option[Pod],
    val resourceProfileId: Int = DEFAULT_RESOURCE_PROFILE_ID)
  extends KubernetesConf(sparkConf) with Logging {

  // Pattern for matching an executorEnv key, which meets certain naming rules.
  // Hoisted to a field so it is compiled once rather than on every key check.
  private val executorEnvRegex = "[-._a-zA-Z][-._a-zA-Z0-9]*".r

  def executorNodeSelector: Map[String, String] =
    KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, KUBERNETES_EXECUTOR_NODE_SELECTOR_PREFIX)

  override val resourceNamePrefix: String = {
    get(KUBERNETES_EXECUTOR_POD_NAME_PREFIX).getOrElse(
      KubernetesConf.getResourceNamePrefix(appName))
  }

  override def labels: Map[String, String] = {
    // Labels Spark manages itself; user-supplied labels must not collide with these.
    val presetLabels = Map(
      SPARK_VERSION_LABEL -> SPARK_VERSION,
      SPARK_EXECUTOR_ID_LABEL -> executorId,
      SPARK_APP_ID_LABEL -> appId,
      SPARK_APP_NAME_LABEL -> KubernetesConf.getAppNameLabel(appName),
      SPARK_ROLE_LABEL -> SPARK_POD_EXECUTOR_ROLE,
      SPARK_RESOURCE_PROFILE_ID_LABEL -> resourceProfileId.toString)
    val executorCustomLabels = KubernetesUtils.parsePrefixedKeyValuePairs(
      sparkConf, KUBERNETES_EXECUTOR_LABEL_PREFIX)
    presetLabels.keys.foreach { key =>
      require(
        !executorCustomLabels.contains(key),
        s"Custom executor labels cannot contain $key as it is reserved for Spark.")
    }
    executorCustomLabels ++ presetLabels
  }

  // Only env vars with syntactically valid names are forwarded; invalid keys are
  // dropped with a warning (see checkExecutorEnvKey).
  override def environment: Map[String, String] = sparkConf.getExecutorEnv.filter(
    p => checkExecutorEnvKey(p._1)).toMap

  override def annotations: Map[String, String] = {
    KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, KUBERNETES_EXECUTOR_ANNOTATION_PREFIX)
  }

  override def secretNamesToMountPaths: Map[String, String] = {
    KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, KUBERNETES_EXECUTOR_SECRETS_PREFIX)
  }

  override def secretEnvNamesToKeyRefs: Map[String, String] = {
    KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, KUBERNETES_EXECUTOR_SECRET_KEY_REF_PREFIX)
  }

  override def volumes: Seq[KubernetesVolumeSpec] = {
    KubernetesVolumeUtils.parseVolumesWithPrefix(sparkConf, KUBERNETES_EXECUTOR_VOLUMES_PREFIX)
  }

  override def schedulerName: String = get(KUBERNETES_EXECUTOR_SCHEDULER_NAME).getOrElse("")

  /** Returns true when `key` is a valid environment-variable name; otherwise logs a
    * warning and returns false.
    */
  private def checkExecutorEnvKey(key: String): Boolean = {
    if (executorEnvRegex.pattern.matcher(key).matches()) {
      true
    } else {
      // Message fix: a space was missing between "digit." and "Regex", and the closing
      // parenthesis had no opening counterpart.
      logWarning(s"Invalid key: $key: " +
        "a valid environment variable name must consist of alphabetic characters, " +
        "digits, '_', '-', or '.', and must not start with a digit. " +
        s"(Regex used for validation is '$executorEnvRegex')")
      false
    }
  }
}
/** Factory and naming helpers shared by driver and executor configurations. */
private[spark] object KubernetesConf {

  /** Builds the driver configuration, validating executor volume settings eagerly. */
  def createDriverConf(
      sparkConf: SparkConf,
      appId: String,
      mainAppResource: MainAppResource,
      mainClass: String,
      appArgs: Array[String],
      proxyUser: Option[String]): KubernetesDriverConf = {
    // Parse executor volumes in order to verify configuration before the driver pod is created.
    KubernetesVolumeUtils.parseVolumesWithPrefix(sparkConf, KUBERNETES_EXECUTOR_VOLUMES_PREFIX)
    new KubernetesDriverConf(
      sparkConf.clone(), appId, mainAppResource, mainClass, appArgs, proxyUser)
  }

  /** Builds the configuration for a single executor pod. */
  def createExecutorConf(
      sparkConf: SparkConf,
      executorId: String,
      appId: String,
      driverPod: Option[Pod],
      resourceProfileId: Int = DEFAULT_RESOURCE_PROFILE_ID): KubernetesExecutorConf = {
    new KubernetesExecutorConf(sparkConf.clone(), appId, executorId, driverPod, resourceProfileId)
  }

  /** A fresh application id of the form "spark-<uuid-without-dashes>". */
  def getKubernetesAppId(): String =
    "spark-" + UUID.randomUUID().toString.replaceAll("-", "")

  /** Sanitized "<appName>-<uniqueId>" suitable for Kubernetes resource names. */
  def getResourceNamePrefix(appName: String): String = {
    val id = KubernetesUtils.uniqueID()
    val combined = s"$appName-$id"
    combined
      .trim
      .toLowerCase(Locale.ROOT)
      .replaceAll("[^a-z0-9\\\\-]", "-")
      .replaceAll("-+", "-")
  }

  def getAppNameLabel(appName: String): String = {
    // According to https://kubernetes.io/docs/concepts/overview/working-with-objects/labels,
    // must be 63 characters or less to follow the DNS label standard, so take the 63 characters
    // of the appName name as the label.
    val sanitized = appName
      .trim
      .toLowerCase(Locale.ROOT)
      .replaceAll("[^a-z0-9\\\\-]", "-")
      .replaceAll("-+", "-")
    StringUtils.abbreviate(sanitized, "", KUBERNETES_DNSNAME_MAX_LENGTH)
  }

  /**
   * Build a resources name based on the vendor device plugin naming
   * convention of: vendor-domain/resource. For example, an NVIDIA GPU is
   * advertised as nvidia.com/gpu.
   */
  def buildKubernetesResourceName(vendorDomain: String, resourceName: String): String = {
    vendorDomain + "/" + resourceName
  }
}
| shaneknapp/spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/KubernetesConf.scala | Scala | apache-2.0 | 10,047 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.rethink.source
import java.util
import com.datamountaineer.streamreactor.connect.rethink.config.{ReThinkConfigConstants, ReThinkSourceConfig}
import com.datamountaineer.streamreactor.connect.utils.JarManifest
import com.typesafe.scalalogging.slf4j.StrictLogging
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.source.SourceConnector
import org.apache.kafka.connect.util.ConnectorUtils
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
/**
* Created by andrew@datamountaineer.com on 22/09/16.
* stream-reactor
*/
/** Kafka Connect source connector for RethinkDB: splits the configured KCQL
  * statements across up to `maxTasks` worker tasks.
  */
class ReThinkSourceConnector extends SourceConnector with StrictLogging {
  private var configProps: util.Map[String, String] = _
  private val configDef = ReThinkSourceConfig.config
  private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)

  /**
   * States which SinkTask class to use
   **/
  override def taskClass(): Class[_ <: Task] = classOf[ReThinkSourceTask]

  /**
   * Set the configuration for each work and determine the split
   *
   * @param maxTasks The max number of task workers be can spawn
   * @return a List of configuration properties per worker
   **/
  override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] = {
    // `get` on the scala view returns an Option and never null, so the former
    // `raw != null` guard was redundant.
    val raw = configProps.asScala.get(ReThinkConfigConstants.KCQL)
    require(raw.isDefined, s"No ${ReThinkConfigConstants.KCQL} provided!")
    //sql1, sql2
    val kcqls = raw.get.split(";")
    val groups = ConnectorUtils.groupPartitions(kcqls.toList, maxTasks).asScala
    //split up the kcql statement based on the number of tasks.
    groups
      .filterNot(g => g.isEmpty)
      .map { g =>
        // Copy the connector config and overwrite KCQL with this task's share.
        // Using the copy constructor avoids the former java -> scala -> java
        // round-trip through putAll/toMap/asJava.
        val taskConfigs = new java.util.HashMap[String, String](configProps)
        taskConfigs.put(ReThinkConfigConstants.KCQL, g.mkString(";")) //overwrite
        taskConfigs: util.Map[String, String]
      }
  }

  /**
   * Start the sink and set to configuration
   *
   * @param props A map of properties for the connector and worker
   **/
  override def start(props: util.Map[String, String]): Unit = {
    configProps = props
  }

  override def stop(): Unit = {}

  override def config(): ConfigDef = configDef

  override def version(): String = manifest.version()
}
| CodeSmell/stream-reactor | kafka-connect-rethink/src/main/scala/com/datamountaineer/streamreactor/connect/rethink/source/ReThinkSourceConnector.scala | Scala | apache-2.0 | 3,000 |
package org.overviewproject.pdfocr
/** Exception hierarchy for pdfocr failures.
  *
  * Every error extends the sealed [[PdfOcrException]] base so callers can match on
  * failures exhaustively.
  */
package object exceptions {
  /** Base type for every error raised by pdfocr. */
  sealed abstract class PdfOcrException(message: String, cause: Exception) extends Exception(message, cause)

  /** The PDF could not be parsed. */
  class PdfInvalidException(cause: Exception)
    extends PdfOcrException("Error in PDF file", cause)

  /** The PDF is encrypted and cannot be read without a password. */
  class PdfEncryptedException(cause: Exception)
    extends PdfOcrException("PDF file is password-protected", cause)

  /** The `tesseract` binary was not found on the system. */
  class TesseractMissingException(cause: Exception)
    extends PdfOcrException("Could not find `tesseract` executable", cause)

  /** Tesseract lacks trained data files for the requested language. */
  class TesseractLanguageMissingException(val language: String)
    extends PdfOcrException(s"Missing Tesseract language data files for language `$language`", null)

  /** Tesseract exited with a non-zero status; `stderr` carries its error output. */
  class TesseractFailedException(val retval: Int, val stderr: String)
    extends PdfOcrException(s"Tesseract returned status code `$retval`. stderr: `$stderr`", null)
}
| overview/pdfocr | src/main/scala/org/overviewproject/pdfocr/exceptions/package.scala | Scala | agpl-3.0 | 863 |
package unof.cv.tools.paramsmenu
import java.util.regex.Pattern
import scala.scalajs.js
import scala.scalajs.js.Any.fromFunction1
import scala.scalajs.js.Any.fromString
import org.scalajs.jquery.JQuery
import org.scalajs.jquery.JQueryEventObject
import org.scalajs.jquery.jQuery
import unof.cv.tools.CallbackCenter
import unof.cv.tools.CvSetting
import SharedPannelFunctions._
/** Panel that lets the user paste file references and import them as parts
  * (when a category is selected) or as images (when a part is selected).
  */
object PannelImport extends LayerTypeInsensitvePannel with BasicPannel {

  def myPannel(settings: CvSetting): String = settings.imagesImportDiv

  def ifCategorySelected(callbacks: CallbackCenter, settings: CvSetting, cat: unof.cv.base.charLib.CMCategory): Unit = {
    show(settings)
    val importLayerButton = jQuery(settings.importLayersButton)
    importLayerButton.empty()
    importLayerButton.append("Import as parts")
  }

  def ifLayerSelected(callbacks: CallbackCenter, settings: CvSetting, image: unof.cv.base.charLib.CMLayer): Unit = {
    hide(settings)
  }

  def ifPartSelected(callbacks: CallbackCenter, settings: CvSetting, part: unof.cv.base.charLib.CMPart): Unit = {
    show(settings)
    val importLayerButton = jQuery(settings.importLayersButton)
    importLayerButton.empty()
    importLayerButton.append("Import as images")
  }

  def bind(callbacks: CallbackCenter, settings: CvSetting) {
    val layersInput = jQuery(settings.partMenuImportInput)
    jQuery(settings.importLayersButton).click(addManyFiles(callbacks, layersInput)_)
  }

  /** Parses the input field: either a list of double-quoted paths or a single bare one. */
  private def addManyFiles(callbacks: CallbackCenter, input: JQuery)(evt: JQueryEventObject) = {
    val inString = input.value().toString().trim()
    if (inString.contains("\"")) {
      val p = Pattern.compile("\"[^\"]+\"")
      val matcher = p.matcher(inString)
      // BUG FIX: the previous Iterator.continually(group()).takeWhile(_ => find())
      // advanced the matcher in the predicate AFTER taking an element, which dropped
      // the last match — and imported nothing at all when only one path was quoted.
      // Here each step first advances the matcher, then reads the current match.
      val sources = Iterator
        .continually(matcher.find())
        .takeWhile(identity)
        .map(_ => getSource(matcher.group()))
        .toList
      if (!sources.isEmpty)
        callbacks.onManyLayersImported(sources)
    } else {
      callbacks.onManyLayersImported(Seq(getSource(inString.trim())))
    }
  }
}
package scribe.format
import scribe.LogRecord
import scribe.output.LogOutput
/** Renders a [[LogRecord]] into the [[LogOutput]] that is ultimately written out. */
trait Formatter {
// Produces the formatted output for a single record of message type `M`.
def format[M](record: LogRecord[M]): LogOutput
}
/** Predefined formatters, from minimal ([[simple]]) to multi-line ([[advanced]]),
  * plus [[fromBlocks]] for composing a custom one out of [[FormatBlock]]s.
  */
object Formatter {
/**
* Only includes the log message and MDC
*/
lazy val simple: Formatter = formatter"$messages$mdc"
/**
* Only includes the log message and MDC, but the message is colored based on the logging level
*/
lazy val colored: Formatter = formatter"${levelColor(messages)}$mdc"
/**
* A classic logging style including the date, thread name (abbreviated), level, position, message, and MDC
*/
lazy val classic: Formatter = formatter"$date [$threadNameAbbreviated] $level $position - $messages$mdc"
/**
* Colored, but more compact output to show more on a single line
*/
lazy val compact: Formatter = formatter"$date ${string("[")}$levelColored${string("]")} ${green(position)} - $messages$mdc"
/**
* A rich log output format with coloring and lots of details.
*/
lazy val enhanced: Formatter = Formatter.fromBlocks(
dateCounter,
space,
openBracket,
threadNameAbbreviated,
closeBracket,
space,
openBracket,
levelColoredPaddedRight,
closeBracket,
space,
green(position),
string(" - "),
messages,
mdc
)
/**
* A multi-line formatter that includes expanded log information on the first line, and indented and auto-wrapping
* message and MDC on the following line(s).
*/
lazy val advanced: Formatter = Formatter.fromBlocks(
groupBySecond(
cyan(bold(dateFull)),
space,
italic(threadName),
space,
levelColored,
space,
green(position),
newLine
),
multiLine(messages),
mdcMultiLine
)
/**
* A strict format with a focus on consistent width.
*/
lazy val strict: Formatter = formatter"$date [$threadNameAbbreviated] $levelPaddedRight $positionAbbreviated - $messages$mdc"
/**
* The default formatter. This is used as a default when the formatter isn't explicitly specified. Defaults to
* [[advanced]]. Note this is a plain `var`: reassignment is a global, unsynchronized change.
*/
var default: Formatter = advanced
/** Composes a formatter that renders the given blocks in order. */
def fromBlocks(blocks: FormatBlock*): Formatter = new FormatBlocksFormatter(blocks.toList)
}
// Copyright 2014 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package au.com.cba.omnia.maestro.schema
package syntax
import au.com.cba.omnia.maestro.schema._
/** A day in YYYYcMMcDD format with some separating character.
Hard coded to only accept days in years 1800 through 2200. */
case class YYYYcMMcDD(sep: String) extends Syntax {

  val name = s"YYYYcMMcDD(\\'${sep}\\')"

  // 1.0 when `s` is exactly 10 chars, laid out as YYYY<sep>MM<sep>DD with all-digit
  // fields, and the resulting date passes the validish check; 0.0 otherwise.
  def likeness(s: String) =
    if (s.length != 10) 0.0
    else {
      val yearField = s.substring(0, 4)
      val monthField = s.substring(5, 7)
      val dayField = s.substring(8, 10)
      val shapeMatches =
        yearField.forall(_.isDigit) &&
        monthField.forall(_.isDigit) &&
        dayField.forall(_.isDigit) &&
        s(4).toString == sep &&
        s(7).toString == sep
      // Short-circuiting guarantees toInt is only reached when all fields are digits.
      if (shapeMatches &&
          tope.Day.validishYearMonthDay(yearField.toInt, monthField.toInt, dayField.toInt))
        1.0
      else 0.0
    }

  val parents: Set[Syntax] = Set(Any)
  val partitions: Set[Syntax] = Set()
}
/** A day in DDcMMcYYYY format with some separating character.
Hard coded to only accept days in years 1800 through 2200. */
case class DDcMMcYYYY(sep: String) extends Syntax {

  val name = s"DDcMMcYYYY(\\'${sep}\\')"

  // 1.0 when `s` is exactly 10 chars, laid out as DD<sep>MM<sep>YYYY with all-digit
  // fields, and the resulting date passes the validish check; 0.0 otherwise.
  def likeness(s: String) =
    if (s.length != 10) 0.0
    else {
      val dayField = s.substring(0, 2)
      val monthField = s.substring(3, 5)
      val yearField = s.substring(6, 10)
      val shapeMatches =
        yearField.forall(_.isDigit) &&
        monthField.forall(_.isDigit) &&
        dayField.forall(_.isDigit) &&
        s(2).toString == sep &&
        s(5).toString == sep
      // Short-circuiting guarantees toInt is only reached when all fields are digits.
      if (shapeMatches &&
          tope.Day.validishYearMonthDay(yearField.toInt, monthField.toInt, dayField.toInt))
        1.0
      else 0.0
    }

  val parents: Set[Syntax] = Set(Any)
  val partitions: Set[Syntax] = Set()
}
| toddmowen/maestro | maestro-schema/src/main/scala/au/com/cba/omnia/maestro/schema/syntax/Day.scala | Scala | apache-2.0 | 2,377 |
package actor.salt
import actor.ActorUtils
import akka.actor.{ActorRef, Actor, ActorLogging}
import akka.event.LoggingReceive
import com.qianmi.bugatti.actors.{SaltTimeOut, SaltJobOk, SaltCommand}
import actor.salt.RefreshFilesActor._
/**
* Created by mind on 8/4/14.
*/
/** Message protocol for [[RefreshFilesActor]]. */
object RefreshFilesActor {
// Starts (or advances) the refresh command sequence.
case object Run
// Sent to the requester when every command completed successfully.
case object Finish
// Sent when the refresh fails (salt timeout or stopped connection).
case object Error
}
/** Runs a fixed sequence of salt commands against the spirit identified by `spiritId`,
  * one at a time; replies `Finish` or `Error` to `realSender` and then stops itself.
  */
class RefreshFilesActor(spiritId: Int, realSender: ActorRef) extends Actor with ActorLogging {
// Commands executed in order, one per successful SaltJobOk round-trip.
val commands = Seq(
Seq("salt-run", "fileserver.update"),
Seq("salt", "*", "saltutil.sync_returners")
)
// Index of the next command to run.
var step = 0
override def receive = LoggingReceive {
case Run => {
if (step < commands.length) {
log.debug(s"Refresh files run ${commands(step)}")
ActorUtils.spirits ! RemoteSpirit(spiritId, SaltCommand(commands(step)))
step += 1
} else {
// All commands done: report success and shut down.
realSender ! Finish
context.stop(self)
}
}
case sr: SaltJobOk => {
// Previous command succeeded; move on to the next one.
self ! Run
}
case SaltTimeOut => {
realSender ! Error
context.stop(self)
}
case ConnectStoped => {
// NOTE(review): this branch replies to `sender` while the timeout branch replies
// to `realSender` — confirm the asymmetry is intentional.
sender ! Error
context.stop(self)
}
case x => log.debug(s"RefreshFilesActor receive unknown message ${x}")
}
}
| sdgdsffdsfff/bugatti | app/actor/salt/RefreshFilesActor.scala | Scala | bsd-2-clause | 1,230 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark.streaming.examples
import spark.util.IntParam
import spark.storage.StorageLevel
import spark.streaming._
import spark.streaming.util.RawTextHelper
/**
* Receives text from multiple rawNetworkStreams and counts how many '\\n' delimited
* lines have the word 'the' in them. This is useful for benchmarking purposes. This
* will only work with spark.streaming.util.RawTextSender running on all worker nodes
* and with Spark using Kryo serialization (set Java property "spark.serializer" to
* "spark.KryoSerializer").
* Usage: RawNetworkGrep <master> <numStreams> <host> <port> <batchMillis>
* <master> is the Spark master URL
* <numStream> is the number rawNetworkStreams, which should be same as number
* of work nodes in the cluster
* <host> is "localhost".
* <port> is the port on which RawTextSender is running in the worker nodes.
* <batchMillise> is the Spark Streaming batch duration in milliseconds.
*/
/** Benchmark driver: unions several raw socket streams and counts lines
  * containing the word "the" per batch.
  */
object RawNetworkGrep {
  def main(args: Array[String]) {
    if (args.length != 5) {
      System.err.println("Usage: RawNetworkGrep <master> <numStreams> <host> <port> <batchMillis>")
      System.exit(1)
    }
    val Array(master, IntParam(numStreams), host, IntParam(port), IntParam(batchMillis)) = args

    // Create the streaming context.
    val ssc = new StreamingContext(master, "RawNetworkGrep", Milliseconds(batchMillis),
      System.getenv("SPARK_HOME"), Seq(System.getenv("SPARK_EXAMPLES_JAR")))

    // Warm up the JVMs on master and slave for JIT compilation to kick in
    RawTextHelper.warmUp(ssc.sparkContext)

    // One raw socket stream per worker node, all reading from the same sender.
    val rawStreams = Array.fill(numStreams)(
      ssc.rawSocketStream[String](host, port, StorageLevel.MEMORY_ONLY_SER_2))
    val union = ssc.union(rawStreams)
    union.filter(_.contains("the")).count().foreach { r =>
      println("Grep count: " + r.collect().mkString)
    }
    ssc.start()
  }
}
| wgpshashank/spark | examples/src/main/scala/spark/streaming/examples/RawNetworkGrep.scala | Scala | apache-2.0 | 2,692 |
package rdfperftest
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.atomic.AtomicBoolean
import org.log4s._
/** Tracks throughput statistics for one engine/thread-count benchmark run.
  * `count` is the shared statement counter incremented by the reader threads.
  */
class TestStats(rdfEngine: RDFEngine, threads: Int, count: AtomicInteger) {
  private[this] val logger = getLogger

  // Wall-clock time the whole run (including warmup) started.
  val startMillis = System.currentTimeMillis
  def elapsedMillis = System.currentTimeMillis - startMillis

  // Timestamp of the last progress message; refreshed by logUpdate.
  var lastMessageMillis = System.currentTimeMillis

  private val warmedUp = new AtomicBoolean(false)
  def isWarmedUp = warmedUp.get

  // Statement count and elapsed time captured at the moment warmup finished.
  private var warmupCount = 0L
  private var warmupTime = 0L

  // Pluralization suffix for log messages.
  private val s = if (threads == 1) "" else "s"

  /** Marks the warmup phase as finished and records the counters at that point. */
  def setWarmedUp = {
    warmupCount = count.get
    warmupTime = System.currentTimeMillis - startMillis
    warmedUp.set(true)
    logger.info(s"$rdfEngine with $threads thread$s: Warmup finished after reading $warmupCount statements")
  }

  // Statements read since warmup ended.
  def warmedUpCount = count.get - warmupCount

  /** Post-warmup throughput in statements per millisecond; 0 until meaningful. */
  def rate: Int = {
    if (warmupTime == 0 || warmedUpCount == 0) 0
    else {
      val totalElapsed = elapsedMillis - warmupTime
      // Guard against a zero (or clock-skewed negative) window: the unguarded float
      // division previously yielded Infinity, and Infinity.toInt == Int.MaxValue.
      if (totalElapsed <= 0) 0
      else (warmedUpCount.toFloat / totalElapsed.toFloat).toInt
    }
  }

  /** Logs a progress line and remembers when it was emitted. */
  def logUpdate = {
    logger.info(s"$rdfEngine with $threads thread$s ${elapsedMillis}ms elapsed, $warmedUpCount statements read at $rate statements/ms")
    lastMessageMillis = System.currentTimeMillis
  }
}
package org.ai4fm.filehistory.core
import java.io.File
import org.ai4fm.filehistory.FileHistoryProject
import org.ai4fm.filehistory.FileVersion
import org.eclipse.core.runtime.CoreException
import org.eclipse.core.runtime.IProgressMonitor
/**
* @author Andrius Velykis
*/
/** Stores and retrieves historical versions of source files for a history project. */
trait IFileHistoryManager {
  /**
   * Synchronises the given file content into the history store.
   *
   * @param historyProject the project whose history is being recorded
   * @param sourceRootPath root path the source path is relative to
   * @param sourcePath path of the source file being versioned
   * @param textOpt current file content, if available
   * @param syncPointOpt optional synchronisation point within the file
   * @param monitor progress reporting/cancellation
   * @return (syncedFileVersion, changed) a tuple containing the synced file version (either an
   * existing (if file not changed) or a new one. `changed` indicates whether the file
   * version was changed/created.
   */
  @throws(classOf[CoreException])
  def syncFileVersion(historyProject: FileHistoryProject, sourceRootPath: String, sourcePath: String,
    textOpt: Option[String], syncPointOpt: Option[Int], monitor: IProgressMonitor): (FileVersion, Boolean)
  /** Resolves the on-disk file that stores the given version's content. */
  def historyFile(version: FileVersion): File
}
| andriusvelykis/proofprocess | org.ai4fm.filehistory.core/src/org/ai4fm/filehistory/core/IFileHistoryManager.scala | Scala | epl-1.0 | 853 |
package katas.scala.sort.mergesort
import org.scalatest.Matchers
import katas.scala.sort.SeqSortTest
class MergeSort21 extends SeqSortTest with Matchers {
override def sort[T](seq: Seq[T])(implicit ordered: (T) => Ordered[T], tag: ClassManifest[T]): Seq[T] = {
def merge(s1: Seq[T], s2: Seq[T]): Seq[T] = {
if (s1.isEmpty) s2
else if (s2.isEmpty) s1
else if (s1.head < s2.head) s1.head +: merge(s1.tail, s2)
else s2.head +: merge(s1, s2.tail)
}
if (seq.size <= 1) return seq
val mid = seq.size / 2
val (part1, part2) = seq.splitAt(mid)
merge(sort(part1), sort(part2))
}
} | dkandalov/katas | scala/src/katas/scala/sort/mergesort/MergeSort21.scala | Scala | unlicense | 601 |
package sangria.schema
import language.{implicitConversions, existentials}
import sangria.{introspection, ast}
import sangria.validation.{EnumValueCoercionViolation, EnumCoercionViolation, Violation}
import sangria.introspection.{SchemaMetaField, TypeMetaField, TypeNameMetaField}
import scala.annotation.implicitNotFound
import scala.reflect.ClassTag
/** Root of the GraphQL type hierarchy. */
sealed trait Type
// Types usable as argument / input-object field types; `T` is the coerced scala value.
sealed trait InputType[+T] extends Type
// Types usable as field result types.
sealed trait OutputType[+T] extends Type
// Scalars and enums: types with no sub-selections.
sealed trait LeafType extends Type
// Objects, interfaces and unions: types that support sub-selections.
sealed trait CompositeType[T] extends Type with Named with OutputType[T]
// Interfaces and unions: types resolved to a concrete object type at runtime.
sealed trait AbstractType extends Type with Named {
  def name: String
  // Finds the concrete object type of `value` among this type's possible
  // types, using each candidate's runtime-class check (`isInstanceOf`).
  def typeOf[Ctx](value: Any, schema: Schema[Ctx, _]): Option[ObjectType[Ctx, _]] =
    schema.possibleTypes get name flatMap (_.find(_ isInstanceOf value).asInstanceOf[Option[ObjectType[Ctx, _]]])
}
// Marker: type may legally hold `null` (i.e. it is not wrapped in NotNull).
sealed trait NullableType
// Marker: type is not a List/Option wrapper.
sealed trait UnmodifiedType
// Anything with a GraphQL name and optional description.
sealed trait Named {
  def name: String
  def description: Option[String]
}
/** Validation helpers shared by the `fields`/`fieldsFn` factory overloads of
  * `ObjectType`, `InterfaceType` and `InputObjectType`.
  */
object Named {
  /** Ensures a field list is non-empty and contains no duplicate names. */
  private[schema] def checkFields[T <: Seq[Named]](fields: T): T =
    if (fields.isEmpty)
      throw new IllegalArgumentException("No fields provided! You need to provide at least one field to a Type.")
    else if (fields.map(_.name).toSet.size != fields.size)
      throw new IllegalArgumentException("All fields within a Type should have unique names!")
    else fields

  /** Eagerly validates `fields` and wraps the validated list in a thunk.
    *
    * Fix: this overload previously rejected only duplicate names, while the
    * lazy `checkFields(fieldsFn)` path also rejected empty field lists — so an
    * eagerly-built type with no fields slipped through validation. Both paths
    * now enforce the same invariants.
    */
  private[schema] def checkFieldsFn[T <: Seq[Named]](fields: T): () => T = {
    val checked = checkFields(fields)
    () => checked
  }

  /** Defers validation until the thunk is forced (allows recursive type definitions). */
  private[schema] def checkFields[T <: Seq[Named]](fieldsFn: () => T): () => T =
    () => checkFields(fieldsFn())
}
/** GraphQL scalar: a leaf type defined by its three coercion functions
  * (raw user input, output rendering, and AST literal input).
  */
case class ScalarType[T](
  name: String,
  description: Option[String] = None,
  coerceUserInput: Any => Either[Violation, T],
  coerceOutput: T => ast.Value,
  coerceInput: ast.Value => Either[Violation, T]) extends InputType[T] with OutputType[T] with LeafType with NullableType with UnmodifiedType with Named
/** Common behaviour of object and interface types: a (lazily evaluated) field
  * list, inherited interfaces, and name-based field lookup.
  */
sealed trait ObjectLikeType[Ctx, Val] extends OutputType[Val] with CompositeType[Val] with NullableType with UnmodifiedType with Named {
  def interfaces: List[InterfaceType[Ctx, _]]
  def fieldsFn: () => List[Field[Ctx, Val]]
  // Forced once; fieldsFn is a thunk to allow recursive type definitions.
  private lazy val ownFields = fieldsFn()
  // Keeps the first occurrence of each key, preserving order.
  // NOTE(review): `visited contains` makes this O(n^2); fine for typical field
  // counts but worth knowing.
  def removeDuplicates[T, E](list: List[T], valueFn: T => E) =
    list.foldLeft((Nil, Nil): (List[E], List[T])) {
      case (a @ (visited, acc), e) if visited contains valueFn(e) => a
      case ((visited, acc), e) => (visited :+ valueFn(e), acc :+ e)
    }._2
  // Own fields first, then inherited ones; on a name clash the own field wins
  // because removeDuplicates keeps the first occurrence.
  lazy val fields: List[Field[Ctx, _]] = removeDuplicates(
    ownFields ++ interfaces.flatMap(i => i.fields.asInstanceOf[List[Field[Ctx, _]]]),
    (e: Field[Ctx, _]) => e.name)
  private lazy val fieldsByName = fields groupBy (_.name) mapValues (_.head)
  // Resolves a field by name, special-casing the introspection meta fields:
  // __schema/__type only exist on the root query type, __typename everywhere.
  def getField(schema: Schema[_, _], fieldName: String): Option[Field[Ctx, _]] =
    if (fieldName == SchemaMetaField.name && name == schema.query.name) Some(SchemaMetaField.asInstanceOf[Field[Ctx, _]])
    else if (fieldName == TypeMetaField.name && name == schema.query.name) Some(TypeMetaField.asInstanceOf[Field[Ctx, _]])
    else if (fieldName == TypeNameMetaField.name) Some(TypeNameMetaField.asInstanceOf[Field[Ctx, _]])
    else fieldsByName get fieldName
}
/** GraphQL object type. The `ClassTag` is used at runtime to decide whether a
  * value belongs to this type when resolving abstract (interface/union) types.
  * Construct via the companion's factories, which validate the field list.
  */
case class ObjectType[Ctx, Val: ClassTag] private (
  name: String,
  description: Option[String],
  fieldsFn: () => List[Field[Ctx, Val]],
  interfaces: List[InterfaceType[Ctx, _]]
) extends ObjectLikeType[Ctx, Val] {
  // Runtime-class membership test used by AbstractType.typeOf.
  def isInstanceOf(value: Any) = implicitly[ClassTag[Val]].runtimeClass.isAssignableFrom(value.getClass)
}
/** Factory overloads: eager `fields` variants validate immediately, `fieldsFn`
  * variants defer evaluation (for mutually recursive types).
  */
object ObjectType {
  def apply[Ctx, Val: ClassTag](name: String, fields: List[Field[Ctx, Val]]): ObjectType[Ctx, Val] =
    ObjectType(name, None, fieldsFn = Named.checkFieldsFn(fields), Nil)
  def apply[Ctx, Val: ClassTag](name: String, description: String, fields: List[Field[Ctx, Val]]): ObjectType[Ctx, Val] =
    ObjectType(name, Some(description), fieldsFn = Named.checkFieldsFn(fields), Nil)
  def apply[Ctx, Val: ClassTag](name: String, fields: List[Field[Ctx, Val]], interfaces: List[PossibleInterface[Ctx, Val]]): ObjectType[Ctx, Val] =
    ObjectType(name, None, fieldsFn = Named.checkFieldsFn(fields), interfaces map (_.interfaceType))
  def apply[Ctx, Val: ClassTag](name: String, description: String, fields: List[Field[Ctx, Val]], interfaces: List[PossibleInterface[Ctx, Val]]): ObjectType[Ctx, Val] =
    ObjectType(name, Some(description), fieldsFn = Named.checkFieldsFn(fields), interfaces map (_.interfaceType))
  def apply[Ctx, Val: ClassTag](name: String, fieldsFn: () => List[Field[Ctx, Val]]): ObjectType[Ctx, Val] =
    ObjectType(name, None, Named.checkFields(fieldsFn), Nil)
  def apply[Ctx, Val: ClassTag](name: String, description: String, fieldsFn: () => List[Field[Ctx, Val]]): ObjectType[Ctx, Val] =
    ObjectType(name, Some(description), Named.checkFields(fieldsFn), Nil)
  def apply[Ctx, Val: ClassTag](name: String, fieldsFn: () => List[Field[Ctx, Val]], interfaces: List[PossibleInterface[Ctx, Val]]): ObjectType[Ctx, Val] =
    ObjectType(name, None, Named.checkFields(fieldsFn), interfaces map (_.interfaceType))
  def apply[Ctx, Val: ClassTag](name: String, description: String, fieldsFn: () => List[Field[Ctx, Val]], interfaces: List[PossibleInterface[Ctx, Val]]): ObjectType[Ctx, Val] =
    ObjectType(name, Some(description), Named.checkFields(fieldsFn), interfaces map (_.interfaceType))
  // An object type defined with Unit context can be used with any context type.
  implicit def acceptUnitCtx[Ctx, Val](objectType: ObjectType[Unit, Val]): ObjectType[Ctx, Val] =
    objectType.asInstanceOf[ObjectType[Ctx, Val]]
}
/** GraphQL interface type. `manualPossibleTypes` lets the user register
  * implementing object types explicitly (needed when they are not otherwise
  * reachable from the schema roots). Construct via the companion's factories.
  */
case class InterfaceType[Ctx, Val] private (
  name: String,
  description: Option[String] = None,
  fieldsFn: () => List[Field[Ctx, Val]],
  interfaces: List[InterfaceType[Ctx, _]],
  manualPossibleTypes: () => List[ObjectType[_, _]]
) extends ObjectLikeType[Ctx, Val] with AbstractType {
  def withPossibleTypes(possible: PossibleObject[Ctx, Val]*) = copy(manualPossibleTypes = () => possible.toList map (_.objectType))
  def withPossibleTypes(possible: () => List[PossibleObject[Ctx, Val]]) = copy(manualPossibleTypes = () => possible() map (_.objectType))
}
/** Factory overloads mirroring ObjectType's companion: eager `fields` variants
  * validate immediately, `fieldsFn` variants defer evaluation.
  */
object InterfaceType {
  val emptyPossibleTypes: () => List[ObjectType[_, _]] = () => Nil
  def apply[Ctx, Val](name: String, fields: List[Field[Ctx, Val]]): InterfaceType[Ctx, Val] =
    InterfaceType(name, None, fieldsFn = Named.checkFieldsFn(fields), Nil, emptyPossibleTypes)
  def apply[Ctx, Val](name: String, description: String, fields: List[Field[Ctx, Val]]): InterfaceType[Ctx, Val] =
    InterfaceType(name, Some(description), fieldsFn = Named.checkFieldsFn(fields), Nil, emptyPossibleTypes)
  def apply[Ctx, Val](name: String, fields: List[Field[Ctx, Val]], interfaces: List[PossibleInterface[Ctx, Val]]): InterfaceType[Ctx, Val] =
    InterfaceType(name, None, fieldsFn = Named.checkFieldsFn(fields), interfaces map (_.interfaceType), emptyPossibleTypes)
  def apply[Ctx, Val](name: String, description: String, fields: List[Field[Ctx, Val]], interfaces: List[PossibleInterface[Ctx, Val]]): InterfaceType[Ctx, Val] =
    InterfaceType(name, Some(description), fieldsFn = Named.checkFieldsFn(fields), interfaces map (_.interfaceType), emptyPossibleTypes)
  def apply[Ctx, Val](name: String, fieldsFn: () => List[Field[Ctx, Val]]): InterfaceType[Ctx, Val] =
    InterfaceType(name, None, Named.checkFields(fieldsFn), Nil, emptyPossibleTypes)
  def apply[Ctx, Val](name: String, description: String, fieldsFn: () => List[Field[Ctx, Val]]): InterfaceType[Ctx, Val] =
    InterfaceType(name, Some(description), Named.checkFields(fieldsFn), Nil, emptyPossibleTypes)
  def apply[Ctx, Val](name: String, fieldsFn: () => List[Field[Ctx, Val]], interfaces: List[PossibleInterface[Ctx, Val]]): InterfaceType[Ctx, Val] =
    InterfaceType(name, None, Named.checkFields(fieldsFn), interfaces map (_.interfaceType), emptyPossibleTypes)
  def apply[Ctx, Val](name: String, description: String, fieldsFn: () => List[Field[Ctx, Val]], interfaces: List[PossibleInterface[Ctx, Val]]): InterfaceType[Ctx, Val] =
    InterfaceType(name, Some(description), Named.checkFields(fieldsFn), interfaces map (_.interfaceType), emptyPossibleTypes)
}
/** Wrapper proving (via implicit conversions below) that an interface may be
  * implemented by the concrete type `Concrete`.
  */
case class PossibleInterface[Ctx, Concrete](interfaceType: InterfaceType[Ctx, _])
object PossibleInterface extends PossibleInterfaceLowPrioImplicits {
  // Preferred conversion: context types match exactly.
  implicit def apply[Ctx, Abstract, Concrete](interface: InterfaceType[Ctx, Abstract])(implicit ev: PossibleType[Abstract, Concrete]): PossibleInterface[Ctx, Concrete] =
    PossibleInterface[Ctx, Concrete](interface)
}
// Lower-priority fallback so a Unit-context interface is accepted anywhere;
// trait inheritance gives it lower implicit resolution priority than the
// companion's conversion above.
trait PossibleInterfaceLowPrioImplicits {
  implicit def applyUnit[Ctx, Abstract, Concrete](interface: InterfaceType[Ctx, Abstract])(implicit ev: PossibleType[Abstract, Concrete]): PossibleInterface[Unit, Concrete] =
    PossibleInterface[Unit, Concrete](interface.asInstanceOf[InterfaceType[Unit, Abstract]])
}
/** Wrapper proving that an object type is a valid concrete type of the
  * abstract type `Abstract`.
  */
case class PossibleObject[Ctx, Abstract](objectType: ObjectType[Ctx, _])
object PossibleObject {
  implicit def apply[Ctx, Abstract, Concrete](obj: ObjectType[Ctx, Concrete])(implicit ev: PossibleType[Abstract, Concrete]): PossibleObject[Ctx, Abstract] =
    PossibleObject[Ctx, Abstract](obj)
  // Unit-context object types are accepted for any context.
  implicit def applyUnit[Ctx, Abstract, Concrete](obj: ObjectType[Unit, Concrete])(implicit ev: PossibleType[Abstract, Concrete]): PossibleObject[Ctx, Abstract] =
    PossibleObject[Ctx, Abstract](obj.asInstanceOf[ObjectType[Ctx, Concrete]])
}
/** Compile-time evidence that `ConcreteType` can stand in for `AbstractType`. */
trait PossibleType[AbstractType, ConcreteType]
object PossibleType {
  // Single shared instance; the evidence is purely a compile-time marker.
  private object SingletonPossibleType extends PossibleType[AnyRef, AnyRef]
  def create[AbstractType, ConcreteType] = SingletonPossibleType.asInstanceOf[PossibleType[AbstractType, ConcreteType]]
  // Default evidence: plain subtyping.
  implicit def InheritanceBasedPossibleType[Abstract, Concrete](implicit ev: Concrete <:< Abstract): PossibleType[Abstract, Concrete] =
    create[Abstract, Concrete]
}
/** GraphQL union: one of a fixed set of object types, with no common fields. */
case class UnionType[Ctx](
  name: String,
  description: Option[String] = None,
  types: List[ObjectType[Ctx, _]]) extends OutputType[Any] with CompositeType[Any] with AbstractType with NullableType with UnmodifiedType
/** A field of an object or interface type. Construct via the companion's
  * `apply`, which checks the resolver's result type against the declared
  * GraphQL output type at compile time.
  */
case class Field[Ctx, Val] private (
  name: String,
  fieldType: OutputType[_],
  description: Option[String],
  arguments: List[Argument[_]],
  resolve: Context[Ctx, Val] => Action[Ctx, _],
  deprecationReason: Option[String],
  manualPossibleTypes: () => List[ObjectType[_, _]]) extends Named with HasArguments {
  // Registers concrete object types not otherwise reachable from the schema.
  def withPossibleTypes(possible: PossibleObject[Ctx, Val]*) = copy(manualPossibleTypes = () => possible.toList map (_.objectType))
  def withPossibleTypes(possible: () => List[PossibleObject[Ctx, Val]]) = copy(manualPossibleTypes = () => possible() map (_.objectType))
}
object Field {
  // `Res` is what the resolver produces, `Out` what the GraphQL type expects;
  // the ValidOutType evidence ties them together.
  def apply[Ctx, Val, Res, Out](
    name: String,
    fieldType: OutputType[Out],
    description: Option[String] = None,
    arguments: List[Argument[_]] = Nil,
    resolve: Context[Ctx, Val] => Action[Ctx, Res],
    possibleTypes: => List[PossibleObject[_, _]] = Nil,
    deprecationReason: Option[String] = None)(implicit ev: ValidOutType[Res, Out]) =
    Field[Ctx, Val](name, fieldType, description, arguments, resolve, deprecationReason, () => possibleTypes map (_.objectType))
}
/** Evidence that a resolver result type `Res` is acceptable for a field whose
  * GraphQL type expects `Out` (directly, optionally, or as a sequence).
  */
@implicitNotFound(msg = "${Res} is invalid type for the resulting GraphQL type ${Out}.")
trait ValidOutType[-Res, +Out]
object ValidOutType {
  // All instances are the same marker object; only the types matter.
  val valid = new ValidOutType[Any, Any] {}
  implicit def validSubclass[Res, Out](implicit ev: Res <:< Out) = valid.asInstanceOf[ValidOutType[Res, Out]]
  implicit def validNothing[Out] = valid.asInstanceOf[ValidOutType[Nothing, Out]]
  implicit def validOption[Res, Out](implicit ev: Res <:< Out) = valid.asInstanceOf[ValidOutType[Res, Option[Out]]]
  implicit def validSeq[Res, Out](implicit ev: Res <:< Out) = valid.asInstanceOf[ValidOutType[Res, Seq[Out]]]
}
/** Common shape of named input values: arguments and input-object fields. */
trait InputValue[T] {
  def name: String
  def inputValueType: InputType[_]
  def description: Option[String]
  def defaultValue: Option[_]
}
/** A field/directive argument. Construct via the companion's `apply`, which
  * computes the effective scala type `res.Res` (unwrapping Option when a
  * default value makes the argument effectively non-optional).
  */
case class Argument[T] private (
  name: String,
  argumentType: InputType[_],
  description: Option[String],
  defaultValue: Option[_]) extends InputValue[T] with Named {
  def inputValueType = argumentType
}
object Argument {
  def apply[T, Default](
    name: String,
    argumentType: InputType[T],
    description: String,
    defaultValue: Default)(implicit ev: ValidOutType[Default, T], res: ArgumentType[T]): Argument[res.Res] =
    Argument(name, argumentType, Some(description), Some(defaultValue))
  def apply[T, Default](
    name: String,
    argumentType: InputType[T],
    defaultValue: Default)(implicit ev: ValidOutType[Default, T], res: ArgumentType[T]): Argument[res.Res] =
    Argument(name, argumentType, None, Some(defaultValue))
  def apply[T](
    name: String,
    argumentType: InputType[T],
    description: String)(implicit res: ArgumentType[T]): Argument[res.Res] =
    Argument(name, argumentType, Some(description), None)
  def apply[T](
    name: String,
    argumentType: InputType[T])(implicit res: ArgumentType[T]): Argument[res.Res] =
    Argument(name, argumentType, None, None)
}
/** Type-level function mapping a declared argument type to the scala type the
  * resolver sees (`Option[T]` collapses to `T`).
  */
trait ArgumentType[T] {
  type Res
}
object ArgumentType extends ArgumentTypeLowPrio {
  // Higher priority: Option[T] arguments are unwrapped to T.
  implicit def optionArgTpe[T] = new ArgumentType[Option[T]] {
    type Res = T
  }
}
trait ArgumentTypeLowPrio {
  // Fallback: any other type maps to itself.
  implicit def defaultArgTpe[T] = new ArgumentType[T] {
    type Res = T
  }
}
/** GraphQL enum type: a finite set of named values, each mapped to a scala
  * value of type `T`. Coercion results are paired with a Boolean flag telling
  * whether the matched enum value is deprecated.
  */
case class EnumType[T](
  name: String,
  description: Option[String] = None,
  values: List[EnumValue[T]]) extends InputType[T] with OutputType[T] with LeafType with NullableType with UnmodifiedType with Named {
  // Strict lookup maps; on duplicate names/values the first definition wins
  // (groupBy preserves encounter order). The previous `groupBy ... mapValues`
  // produced lazy views that re-ran `.head` on every access, and `byValue`
  // was additionally scanned linearly in coerceUserInput.
  lazy val byName: Map[String, EnumValue[T]] =
    values.groupBy(_.name).map { case (n, vs) => n -> vs.head }
  lazy val byValue: Map[T, EnumValue[T]] =
    values.groupBy(_.value).map { case (v, vs) => v -> vs.head }
  /** Coerces a raw (variable) input: either an enum name or an enum value. */
  def coerceUserInput(value: Any): Either[Violation, (T, Boolean)] = value match {
    case name: String =>
      byName get name map (v => Right(v.value -> v.deprecationReason.isDefined)) getOrElse Left(EnumValueCoercionViolation(name))
    case v =>
      // Single O(1) lookup replaces the former `exists` scan + apply.
      // The cast is erased at runtime; `get` still compares via `==`.
      byValue.get(v.asInstanceOf[T]) match {
        case Some(enumValue) => Right(v.asInstanceOf[T] -> enumValue.deprecationReason.isDefined)
        case None => Left(EnumCoercionViolation)
      }
  }
  /** Coerces an AST literal: only `ast.EnumValue` nodes are accepted. */
  def coerceInput(value: ast.Value): Either[Violation, (T, Boolean)] = value match {
    case ast.EnumValue(name, _) => byName get name map (v => Right(v.value -> v.deprecationReason.isDefined)) getOrElse Left(EnumValueCoercionViolation(name))
    case _ => Left(EnumCoercionViolation)
  }
  // Renders a scala value back to its enum name; throws if the value is not
  // part of this enum (unchanged behaviour).
  def coerceOutput(value: T) = ast.EnumValue(byValue(value).name)
}
/** One member of an [[EnumType]]: a GraphQL name bound to a scala value. */
case class EnumValue[+T](
  name: String,
  description: Option[String] = None,
  value: T,
  deprecationReason: Option[String] = None) extends Named
/** GraphQL input object type. Construct via the companion's factories, which
  * validate the field list; `T` is normally `Map[String, Any]`.
  */
case class InputObjectType[T] private (
  name: String,
  description: Option[String] = None,
  fieldsFn: () => List[InputField[_]]
) extends InputType[T] with NullableType with UnmodifiedType with Named {
  lazy val fields = fieldsFn()
  // NOTE(review): mapValues yields a lazy view here; `.head` is re-evaluated
  // on every lookup (cheap, but worth knowing).
  lazy val fieldsByName = fields groupBy(_.name) mapValues(_.head)
}
object InputObjectType {
  // Scala-side representation of a coerced input object.
  type InputObjectRes = Map[String, Any]
  def apply(name: String, fields: List[InputField[_]]): InputObjectType[InputObjectRes] =
    InputObjectType(name, None, fieldsFn = Named.checkFieldsFn(fields))
  def apply(name: String, description: String, fields: List[InputField[_]]): InputObjectType[InputObjectRes] =
    InputObjectType(name, Some(description), fieldsFn = Named.checkFieldsFn(fields))
  def apply(name: String, fieldsFn: () => List[InputField[_]]): InputObjectType[InputObjectRes] =
    InputObjectType(name, None, Named.checkFields(fieldsFn))
  def apply(name: String, description: String, fieldsFn: () => List[InputField[_]]): InputObjectType[InputObjectRes] =
    InputObjectType(name, Some(description), Named.checkFields(fieldsFn))
}
/** A field of an input object type. */
case class InputField[T](
  name: String,
  fieldType: InputType[T],
  description: Option[String] = None,
  defaultValue: Option[T] = None) extends InputValue[T] with Named {
  def inputValueType = fieldType
}
// List and Option wrappers for output and input positions respectively;
// OptionType/OptionInputType model GraphQL nullability.
case class ListType[T](ofType: OutputType[T]) extends OutputType[Seq[T]] with NullableType
case class ListInputType[T](ofType: InputType[T]) extends InputType[Seq[T]] with NullableType
case class OptionType[T](ofType: OutputType[T]) extends OutputType[Option[T]]
case class OptionInputType[T](ofType: InputType[T]) extends InputType[Option[T]]
// Anything that declares arguments (fields and directives).
sealed trait HasArguments {
  def arguments: List[Argument[_]]
}
/** A GraphQL directive: `shouldInclude` decides field/fragment inclusion, and
  * the three flags declare the locations where the directive may appear.
  */
case class Directive(
  name: String,
  description: Option[String] = None,
  arguments: List[Argument[_]] = Nil,
  shouldInclude: DirectiveContext => Boolean,
  onOperation: Boolean,
  onFragment: Boolean,
  onField: Boolean) extends HasArguments
/** A complete GraphQL schema: the query root, an optional mutation root, extra
  * types not reachable from the roots, and the supported directives. All the
  * lazy vals below are derived indexes over the reachable type graph.
  */
case class Schema[Ctx, Val](
  query: ObjectType[Ctx, Val],
  mutation: Option[ObjectType[Ctx, Val]] = None,
  additionalTypes: List[Type with Named] = Nil,
  directives: List[Directive] = BuiltinDirectives) {
  // All named types reachable from the schema, keyed by name, paired with a
  // priority (4 = built-in scalars, 3 = introspection, 2 = query-reachable,
  // 1 = additional/mutation). First registration of a name wins.
  lazy val types: Map[String, (Int, Type with Named)] = {
    def updated(priority: Int, name: String, tpe: Type with Named, result: Map[String, (Int, Type with Named)]) =
      if (result contains name) result else result.updated(name, priority -> tpe)
    // Depth-first walk of the type graph; unwraps Option/List modifiers and
    // recurses into fields, arguments, interfaces and possible types.
    def collectTypes(priority: Int, tpe: Type, result: Map[String, (Int, Type with Named)]): Map[String, (Int, Type with Named)] = {
      tpe match {
        case t: Named if result contains t.name => result
        case OptionType(ofType) => collectTypes(priority, ofType, result)
        case OptionInputType(ofType) => collectTypes(priority, ofType, result)
        case ListType(ofType) => collectTypes(priority, ofType, result)
        case ListInputType(ofType) => collectTypes(priority, ofType, result)
        case t @ ScalarType(name, _, _, _, _) => updated(priority, name, t, result)
        case t @ EnumType(name, _, _) => updated(priority, name, t, result)
        case t @ InputObjectType(name, _, _) =>
          t.fields.foldLeft(updated(priority, name, t, result)) {case (acc, field) => collectTypes(priority, field.fieldType, acc)}
        case t: ObjectLikeType[_, _] =>
          // Register the type, then everything reachable through its fields:
          // field result types, argument types and manual possible types.
          val own = t.fields.foldLeft(updated(priority, t.name, t, result)) {
            case (acc, field) =>
              val fromArgs = field.arguments.foldLeft(collectTypes(priority, field.fieldType, acc)) {
                case (aacc, arg) => collectTypes(priority, arg.argumentType, aacc)
              }
              field.manualPossibleTypes().foldLeft(fromArgs) {
                case (acc, objectType) => collectTypes(priority, objectType, acc)
              }
          }
          // Interfaces additionally contribute their manually registered
          // implementations (which may be otherwise unreachable).
          val withPossible = t match {
            case i: InterfaceType[_, _] =>
              i.manualPossibleTypes().foldLeft(own) {
                case (acc, objectType) => collectTypes(priority, objectType, acc)
              }
            case _ => own
          }
          t.interfaces.foldLeft(withPossible) {
            case (acc, interface) => collectTypes(priority, interface, acc)
          }
        case t @ UnionType(name, _, types) =>
          types.foldLeft(updated(priority, name, t, result)) {case (acc, tpe) => collectTypes(priority, tpe, acc)}
      }
    }
    val schemaTypes = collectTypes(3, introspection.__Schema, Map(BuiltinScalars map (s => s.name -> (4, s)): _*))
    val queryTypes = collectTypes(2, query, schemaTypes)
    val queryTypesWithAdditions = queryTypes ++ additionalTypes.map(t => t.name -> (1, t))
    val queryAndMutTypes = mutation map (collectTypes(1, _, queryTypesWithAdditions)) getOrElse queryTypesWithAdditions
    queryAndMutTypes
  }
  // Sorted by (priority, name). NOTE(review): `t._1 + t._2.name` is Int +
  // String, i.e. string concatenation ("3Foo"); this only orders correctly
  // while priorities stay single-digit (they are 1..4 today).
  lazy val typeList = types.values.toList.sortBy(t => t._1 + t._2.name).map(_._2)
  // Name-keyed projections of `types`, filtered by kind.
  lazy val allTypes = types collect {case (name, (_, tpe)) => name -> tpe}
  lazy val inputTypes = types collect {case (name, (_, tpe: InputType[_])) => name -> tpe}
  lazy val outputTypes = types collect {case (name, (_, tpe: OutputType[_])) => name -> tpe}
  lazy val scalarTypes = types collect {case (name, (_, tpe: ScalarType[_])) => name -> tpe}
  lazy val unionTypes: Map[String, UnionType[_]] =
    types.filter(_._2._2.isInstanceOf[UnionType[_]]).mapValues(_._2.asInstanceOf[UnionType[_]])
  lazy val directivesByName = directives groupBy (_.name) mapValues (_.head)
  // Resolves an AST type reference to a schema input type. A bare named type
  // is nullable, hence the OptionInputType wrapping; NotNull strips it again.
  def getInputType(tpe: ast.Type): Option[InputType[_]] = tpe match {
    case ast.NamedType(name, _) => inputTypes get name map (OptionInputType(_))
    case ast.NotNullType(ofType, _) => getInputType(ofType) collect {case OptionInputType(ot) => ot}
    case ast.ListType(ofType, _) => getInputType(ofType) map (t => OptionInputType(ListInputType(t)))
  }
  // Same for output types; `topLevel` skips the Option wrapper for roots.
  def getOutputType(tpe: ast.Type, topLevel: Boolean = false): Option[OutputType[_]] = tpe match {
    case ast.NamedType(name, _) => outputTypes get name map (ot => if (topLevel) ot else OptionType(ot))
    case ast.NotNullType(ofType, _) => getOutputType(ofType) collect {case OptionType(ot) => ot}
    case ast.ListType(ofType, _) => getOutputType(ofType) map (ListType(_))
  }
  // Interface name -> types that directly declare it (objects or interfaces).
  lazy val directImplementations: Map[String, List[ObjectLikeType[_, _]]] = {
    typeList
      .collect{case objectLike: ObjectLikeType[_, _] => objectLike}
      .flatMap(objectLike => objectLike.interfaces map (_.name -> objectLike))
      .groupBy(_._1)
      .mapValues(_ map (_._2))
  }
  // Interface name -> all concrete object types implementing it, following
  // interface-extends-interface chains; de-duplicated by name.
  lazy val implementations: Map[String, List[ObjectType[_, _]]] = {
    def findConcreteTypes(tpe: ObjectLikeType[_, _]): List[ObjectType[_, _]] = tpe match {
      case obj: ObjectType[_, _] => obj :: Nil
      case interface: InterfaceType[_, _] => directImplementations(interface.name) flatMap findConcreteTypes
    }
    directImplementations map {
      case (name, directImpls) => name -> directImpls.flatMap(findConcreteTypes).groupBy(_.name).map(_._2.head).toList
    }
  }
  // Abstract type name (interface or union) -> its concrete object types.
  lazy val possibleTypes: Map[String, List[ObjectType[_, _]]] =
    implementations ++ unionTypes.values.map(ut => ut.name -> ut.types)
  def isPossibleType(baseTypeName: String, tpe: ObjectType[_, _]) =
    possibleTypes get baseTypeName exists (_ exists (_.name == tpe.name))
} | narahari92/sangria | src/main/scala/sangria/schema/Schema.scala | Scala | apache-2.0 | 21,951 |
package colossus
package protocols.http
import core._
import org.scalatest._
import akka.util.ByteString
/** Wire-format tests for HTTP request and response encoding. */
class HttpSpec extends WordSpec with MustMatchers{

  "http request" must {
    "encode to bytes" in {
      val reqHead = HttpHead(
        version = HttpVersion.`1.1`,
        url = "/hello",
        method = HttpMethod.Post,
        headers = List("foo" -> "bar")
      )
      val reqWithBody = HttpRequest(reqHead, Some(ByteString("hello")))
      val expectedWire = "POST /hello HTTP/1.1\\r\\nfoo: bar\\r\\n\\r\\nhello"
      reqWithBody.bytes.utf8String must equal(expectedWire)
    }

    "encode request with headers and no body" in {
      val reqHead = HttpHead(
        version = HttpVersion.`1.1`,
        url = "/hello",
        method = HttpMethod.Post,
        headers = List("foo" -> "bar")
      )
      val bodylessReq = HttpRequest(reqHead, None)
      val expectedWire = "POST /hello HTTP/1.1\\r\\nfoo: bar\\r\\n\\r\\n"
      bodylessReq.bytes.utf8String must equal(expectedWire)
    }
  }

  "http response" must {
    "encode basic response" in {
      val content = "Hello World!"
      val response = HttpResponse(HttpVersion.`1.1`, HttpCodes.OK, Vector(), ByteString(content))
      val expectedWire = s"HTTP/1.1 200 OK\\r\\nContent-Length: ${content.length}\\r\\n\\r\\n$content"
      // Encoding must produce an in-memory DataBuffer for a static response.
      response.encode() match {
        case buffer: DataBuffer =>
          ByteString(buffer.takeAll).utf8String must equal(expectedWire)
        case y => throw new Exception(s"expected a DataBuffer, received a $y instead")
      }
    }

    "encode a basic response as a stream" in {
      // Only checks that a static response can be lifted into the streaming API.
      val content = "Hello World!"
      val response = HttpResponse(HttpVersion.`1.1`, HttpCodes.OK, Vector(), ByteString(content))
      val expectedWire = s"HTTP/1.1 200 OK\\r\\nContent-Length: ${content.length}\\r\\n\\r\\n$content"
      val stream: DataReader = StreamingHttpResponse.fromStatic(response).encode()
    }
  }
}
| noikiy/colossus | colossus-tests/src/test/scala/colossus/protocols/http/HttpSpec.scala | Scala | apache-2.0 | 1,926 |
// Minimised compiler regression fixture: the exact shape (a top-level trait
// with a private[this] field captured by a closure) is what triggers the bug,
// so do not restructure it. Presumably tied to scalac ticket SI-5508 --
// confirm against the test suite before changing.
trait TopTrait { // must be nested and a trait
  private[this] val _st : Int = 0 // crashes if TopTrait is not top level
  val escape = { () => _st }
}
| AlexSikia/dotty | tests/untried/pos/t5508-min-okay2.scala | Scala | bsd-3-clause | 152 |
package io.datalayer.randomforest
import io.datalayer.randomforest._
import breeze.linalg._
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.linalg.distributed.{IndexedRow, IndexedRowMatrix}
import org.scalatest.FunSuite
import scala.collection.mutable.ArrayBuffer
//import org.scalatest.ShouldMatchers
//import org.apache.spark.SparkContext
//import org.apache.spark.SparkContext._
//import org.apache.spark.SparkConf
import scala.language.implicitConversions
import io.datalayer.common.SparkContextManager
/** Unit tests for the RowDNA/DataDNA containers. The large commented-out
  * blocks are kept as historical sketches of an older `Data` API and a Spark
  * RDD-backed variant; they reference types/helpers that may no longer exist.
  */
class DataDNATest extends FunSuite {
  /*
  test("Input and labels with different sizes") {
    val data = new Data
    intercept[IncompatibleDataTypeException]{
      data.load(Seq(Seq(1.0,1.1),Seq(2.0,2.1),Seq(3.0,3.1)), Seq(0.0,0.1))
    }
  }
  */
  // A single labeled row: three attributes plus class label 1.
  test("RowDNA test") {
    val rowtest = new RowDNA[Double,Seq[Double], Int](Seq(1.5,2.5,3.5),Some(1))
    assert(rowtest.nb_attributes == 3)
    assert(rowtest.isLabeled == true)
    assert(rowtest.attributes(0) == 1.5)
    assert(rowtest.attributes(1) == 2.5)
    assert(rowtest.attributes(2) == 3.5)
    assert(rowtest.label == Some(1))
  }
  // Dataset of three labeled rows; checks shape, labels and column access.
  test("First DataDNA test") {
    info("Going to be the coolest thing you've ever done!")
    val r1 = new RowDNA[Double,Seq[Double], Int](Seq(1.0,1.35,1.5,1.99),Some(0))
    val r2 = new RowDNA[Double,Seq[Double], Int](Seq(2.0,2.35,2.5,2.99),Some(1))
    val r3 = new RowDNA[Double,Seq[Double], Int](Seq(3.0,3.35,3.5,3.99),Some(2))
    val datatest = new Data(Seq(r1,r2,r3))
    assert(datatest.nb_objects == 3)
    assert(datatest.nb_attributes == 4)
    assert(datatest.nb_classes == 3)
    assert(datatest.labeled == true)
    assert(datatest.getAttribute(1) == List(1.35, 2.35, 3.35))
    assert(datatest.getLabel(1) == 1)
    //println(datatest.findRandomSplit)
    //println(datatest.split(0,1.5)._1)
    //println(datatest.split(0,1.5)._2)
    //println(datatest.getCounts)
    //println(datatest.getLabels(Seq(1,2)))
    //println(datatest.getObject(1))
    //println(datatest.getObjects(Seq(0,2)))
    //println(datatest.getAttributes(Seq(1,2)))
    //println(datatest)
    //println(datatest.map(_.attributes(0)))
    //println(datatest.map(_.label))
    //println(datatest.partition(_.attributes(1) < 2.5))
    /*
    val data = new Data
    data.load(Seq(Seq(1.0,1.1),Seq(2.0,2.1),Seq(3.0,3.1)))
    data.split(0, 1.5)
    data.load(Seq(Seq(1.0,1.1),Seq(2.0,2.1),Seq(3.0,3.1)), Seq(0.0,0.1,0.2))
    data.inputs.foreach(println)
    println(data.labels)
    //data.loadCSV
    // Test split
    val splited = data.split(0, 1.5)
    println(splited._1.labels)
    println(splited._2.labels)
    //data.describe
    data.inputs.map(x => x)
    assert(splited._1.labels == Seq(0.0))
    assert(data.getAttribute(0).inputs == Seq(Seq(1.0),Seq(2.0),Seq(3.0)))
    assert(data.getAttributes(Seq(0)).inputs == Seq(Seq(1.0),Seq(2.0), Seq(3.0)))
    assert(data.getObjects(Seq(0,1)).inputs == Seq(Seq(1.0,1.1),Seq(2.0,2.1)))
    assert(data.getObject(1).inputs == Seq(Seq(2.0,2.1)))
    assert(data.getLabels(Seq(0,1)) == Seq(0.0,0.1))
    assert(data.getLabel(1) == Seq(0.1))
    assert(data.getValue(0,0) == 1.0)
    assert(data.labeled == true)
    assert(data.nb_attributes == 2)
    assert(data.nb_objects == 3)
    */
  }
  // Spark-backed variant; fully disabled (requires a live SparkContext).
  test("DataRDD test") {
    /*
    info("Some serious stuff going on…")
    val sc = SparkContextManager.getSparkContext(8)
    val data = new DataSchemaRDD(sc)
    val train = sc.parallelize(Seq(Array(1.0,1.1), Array(2.0,2.1), Array(3.0,3.1)))
    val labels = sc.parallelize(Seq((1.0, 1.toLong), (2.0, 1.toLong), (1.0, 0.toLong)))
    data.load(train, labels)
    val split = data.split(0, 1.5)
    assert(Seq(Seq(1.0, 1.1)) === split._1.inputs.collect().toSeq.map(_.toSeq))
    assert(Seq(Seq(2.0, 2.1), Seq(3.0, 3.1)) === split._2.inputs.collect().toSeq.map(_.toSeq))
    assert(data.getAttributes(Seq(0)).inputs.collect.toSeq.map(_.toSeq) === Seq(Seq(1.0),Seq(2.0), Seq(3.0)))
    assert(data.getAttributes(Seq(1)).inputs.collect.toSeq.map(_.toSeq) === Seq(Seq(1.1),Seq(2.1), Seq(3.1)))
    assert(data.getObjects(Seq(2)).inputs.collect.toSeq.map(_.toSeq) === Seq(Seq(3.0, 3.1)))
    assert(data.inputs.take(1).take(1)(0)(1) === 1.1)
    // data.loadCSV("/home/manuel/wrk/model/randomforest/src/test/resources/", 0)
    */
  }
}
| 0asa/algorithm | src/test/scala/io/datalayer/randomforest/DataDNATest.scala | Scala | apache-2.0 | 4,333 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.auth.microservice.filters
import org.mockito.Mockito.{verify, when}
import org.mockito.{ArgumentCaptor, Matchers}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.mock.MockitoSugar
import org.scalatest.{WordSpecLike, Matchers => MatchersResults}
import play.api.Routes
import play.api.mvc.Results._
import play.api.mvc.{AnyContentAsEmpty, RequestHeader}
import play.api.test.{FakeHeaders, FakeRequest}
import uk.gov.hmrc.play.auth.controllers.{AuthConfig, AuthParamsControllerConfig, LevelOfAssurance}
import uk.gov.hmrc.play.auth.microservice.connectors._
import scala.concurrent.Future
import scala.concurrent.duration._
/** Verifies that AuthorisationFilter forwards the configured level of
  * assurance to the auth connector when authorising a request.
  */
class AuthorisationFilterLOASpec extends WordSpecLike with MatchersResults with MockitoSugar with ScalaFutures {
  val levelOfAssurance = LevelOfAssurance.LOA_1_5
  val configWithLoa = AuthConfig(levelOfAssurance = levelOfAssurance)
  val authConnectorMock = mock[AuthConnector]
  // Filter under test: always requires auth and always resolves to the
  // LOA-carrying config; authParamsConfig is unused in this spec (???).
  val filter = new AuthorisationFilter {
    override def authConnector: AuthConnector = authConnectorMock
    override def authConfig(rh: RequestHeader): Option[AuthConfig] = Some(configWithLoa)
    override def controllerNeedsAuth(controllerName: String): Boolean = true
    override def authParamsConfig: AuthParamsControllerConfig = ???
  }
  "AuthorisationFilter" should {
    "add the levelOfAssurance when calling auth" in {
      import akka.util.Timeout
      implicit val timeout = Timeout(3 seconds)
      // Stub the connector to grant access so the wrapped action runs.
      when(authConnectorMock.authorise(Matchers.any(), Matchers.any())(Matchers.any())).thenReturn(Future.successful(AuthorisationResult(true, false)))
      val req = FakeRequest("GET", "/myregime/myId", FakeHeaders(), AnyContentAsEmpty, tags = Map(Routes.ROUTE_VERB-> "GET"))
      val result = filter((next: RequestHeader) => Future.successful(Ok("All is good")))(req)
      play.api.test.Helpers.status(result) shouldBe 200
      play.api.test.Helpers.contentAsString(result) shouldBe "All is good"
      // Capture the parameters the filter sent and check the LOA round-trip.
      val captor = ArgumentCaptor.forClass(classOf[AuthRequestParameters])
      verify(authConnectorMock).authorise(Matchers.any(),captor.capture())(Matchers.any())
      captor.getValue().levelOfAssurance shouldBe levelOfAssurance.toString
    }
  }
}
| xnejp03/play-authorisation | src/test/scala/uk/gov/hmrc/play/auth/microservice/filters/AuthorisationFilterLOASpec.scala | Scala | apache-2.0 | 2,832 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.crypto
import com.typesafe.config.ConfigFactory
import org.apache.commons.codec.binary.Base64
import org.mockito.MockitoSugar
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import scala.collection.JavaConverters._
/** Behavioural tests for [[CompositeOneWayCrypto]]: hashing/verification with a
  * configured key, and construction failure when no keys are configured.
  */
class CompositeOneWayCryptoSpec extends AnyWordSpecLike with Matchers with MockitoSugar {

  // Root configuration key under which the hashing key is looked up.
  private val baseConfigKey = "crypto.spec"

  // Fixtures describing the currently active key.
  private object CurrentKey {
    val configKey        = baseConfigKey + ".key"
    val encryptionKey    = Base64.encodeBase64String(Array.tabulate[Byte](16)(_.toByte))
    val aeadText         = Base64.encodeBase64String("some additional text".getBytes)
    val plainMessage     = "this is my message"
    val encryptedMessage = "up/76On5j54pAjzqZR1mqM5E28skTl8Aw0GkKi+zjkk="
  }

  "A correctly constructed one way encrypter" should {
    "encrypt and verify a password" in {
      // Minimal config carrying only the current key.
      val keyEntries = Map(CurrentKey.configKey -> CurrentKey.encryptionKey)
      val conf       = ConfigFactory.parseMap(keyEntries.asJava)

      val crypto = new CompositeOneWayCrypto(baseConfigKey, conf)
      val hashed = crypto.hash(PlainText("myPassword"))

      // A freshly produced hash must verify against the original plaintext.
      crypto.verify(PlainText("myPassword"), hashed) should be(true)
    }
  }

  "Constructing a one way encrypter without current or previous keys" should {
    "throw a SecurityException on construction" in {
      intercept[SecurityException] {
        new CompositeOneWayCrypto(baseConfigKey, ConfigFactory.empty)
      }
    }
  }
}
| hmrc/crypto | src/test/scala/uk/gov/hmrc/crypto/CompositeOneWayCryptoSpec.scala | Scala | apache-2.0 | 2,146 |
/****************************************
* FACADE FOR GOOGLE CHARTS *
****************************************/
package aleastchs.googleCharts.helpers.generalHelp
import scala.scalajs.js
/** Scala.js facade over a native Google Charts chart instance.
  * NOTE(review): member signatures mirror the Google Visualization API —
  * confirm against the charts version actually loaded at runtime.
  */
@js.native
trait GoogleChartTrait extends js.Object {
  // a div element where the chart should be drawn
  val element: js.Dynamic = js.native
  // Draws the chart
  def draw(data: DataTableAPI, options: js.Object): Unit = js.native
  // Clears the chart, and releases all of its allocated resources
  def clearChart(): Unit = js.native
  // Returns an array of the selected chart entities
  def getSelection(): js.Array[js.Any] = js.native
}
/** Factory producing the plain JS object literal consumed by the native side. */
object GoogleChart {
  /** Wraps the target DOM element in a dynamic JS literal ({ element: ... }). */
  def apply(
    element: js.Dynamic
  ) = js.Dynamic.literal(
    element = element
  )
} | aleastChs/scalajs-google-charts | src/main/scala/aleastchs/googleCharts/helpers/generalHelp/GoogleChart.scala | Scala | mit | 781 |
package com.twitter.finatra.http.request
import com.twitter.finagle.http.{MapHeaderMap, Request}
import com.twitter.finatra.http.HttpHeaders
import com.twitter.finatra.http.exceptions.{BadRequestException, NotAcceptableException}
import com.twitter.finatra.request.ContentType
import com.twitter.inject.{Mockito, Test}
/** Unit tests for [[RequestUtils]]: path-URL construction from the Host header
  * and content negotiation via the Accept header.
  */
class RequestUtilsTest
  extends Test
  with Mockito {

  "RequestUtils" should {

    "throw BadRequestException when missing host header for pathUrl" in {
      val req = smartMock[Request]
      req.headerMap returns MapHeaderMap()
      req.host returns None

      // No Host header means the absolute URL cannot be built.
      intercept[BadRequestException] {
        RequestUtils.pathUrl(req)
      }
    }

    "pathUrl with slash" in {
      val req = smartMock[Request]
      req.headerMap returns MapHeaderMap()
      req.host returns Some("www.foo.com")
      req.path returns "/part/next/end/"

      // Trailing slash in the path is preserved verbatim.
      RequestUtils.pathUrl(req) should equal("http://www.foo.com/part/next/end/")
    }

    "respondTo */* content type in request when no accept header is sent" in {
      val req = smartMock[Request]
      req.headerMap returns MapHeaderMap()

      // Absent Accept header behaves like a wildcard match.
      val matched = RequestUtils.respondTo(req) {
        case _ => true
      }
      matched should be(true)
    }

    "respondTo text/html content-type in request" in {
      val req = smartMock[Request]
      req.headerMap returns MapHeaderMap(HttpHeaders.Accept -> ContentType.HTML.toString)

      val matched = RequestUtils.respondTo(req) {
        case ContentType.HTML => true
        case _ => false
      }
      matched should be(true)
    }

    "respondTo application/json content-type in request" in {
      val req = smartMock[Request]
      req.headerMap returns MapHeaderMap(HttpHeaders.Accept -> ContentType.JSON.toString)

      val matched = RequestUtils.respondTo(req) {
        case ContentType.JSON => true
        case _ => false
      }
      matched should be(true)
    }

    "return NotAcceptableException for request" in {
      val req = smartMock[Request]
      // accept application/json
      req.headerMap returns MapHeaderMap(HttpHeaders.Accept -> ContentType.JSON.toString)

      intercept[NotAcceptableException] {
        // only handle text/html
        RequestUtils.respondTo(req) {
          case ContentType.HTML => true
        }
      }
    }
  }
}
| joecwu/finatra | http/src/test/scala/com/twitter/finatra/http/request/RequestUtilsTest.scala | Scala | apache-2.0 | 2,430 |
/*
* Copyright 2015-2016 IBM Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.entitlement
import scala.util.Try
import spray.json.DeserializationException
import spray.json.JsString
import spray.json.JsValue
import spray.json.RootJsonFormat
/** An enumeration of privileges available to subjects. */
protected[core] object Privilege extends Enumeration {
  type Privilege = Value

  // Declaration order fixes the Enumeration ids — do not reorder.
  val READ, PUT, DELETE, ACTIVATE, REJECT = Value

  // Create/read/update/delete privileges.
  val CRUD = Set(READ, PUT, DELETE)
  // CRUD plus activation; REJECT is intentionally excluded.
  val ALL = CRUD + ACTIVATE

  /** JSON (de)serialization: a privilege is encoded as its upper-case name. */
  implicit val serdes = new RootJsonFormat[Privilege] {
    def write(p: Privilege) = JsString(p.toString)

    def read(json: JsValue) = {
      // Any failure (non-string JSON or unknown name) collapses into a
      // DeserializationException via the Try below.
      val parsed = Try {
        json match {
          case JsString(str) => Privilege.withName(str.trim.toUpperCase)
        }
      }
      parsed getOrElse {
        throw new DeserializationException("Privilege must be a valid string")
      }
    }
  }
}
| CrowdFlower/incubator-openwhisk | common/scala/src/main/scala/whisk/core/entitlement/Privilege.scala | Scala | apache-2.0 | 1,417 |
package gui
import java.awt.BorderLayout
import java.awt.event.{ActionEvent, ActionListener, WindowEvent, WindowListener}
import java.io.IOException
import javax.swing._
import connections.ConnectionManager
import connections.network.NetworkDeviceManager
import connections.usb.{Adb, UsbDeviceManager}
import slide.{Device, FileManager, SystemInfo}
import enums.ConnectionMode
import gui.ImageIcons
/** Main application window: shows connected devices and reacts to USB/WiFi
  * device managers. NOTE(review): initialization order of the vals below is
  * significant (the init block runs before the managers are created).
  */
object Frame extends JFrame with WindowListener {
  // One-time UI setup, executed when the object is first referenced.
  {
    /** Setup interface */
    UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName)
    this.setTitle("Slide")
    this.setResizable(false)
    this.addWindowListener(this)
    this.setBounds(100, 100, 0, 0)
    this.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE)
    // Menu bar exposing ADB maintenance actions.
    val menuBar = new MenuBar{
      override def showAdb() = new Console().runAdbProcess(Adb.adbDevices())
      override def restartAdb() = Adb.restartAdb()
    }
    this.setJMenuBar(menuBar)
    if (!SystemInfo.isNetworkIsAvailable)
      showErrorPrompt("Error", "No suitable network interface found.\\nWiFi connection will be unavailable.")
  }

  /** Can implement onDownloadStart, and onDownloadFinish */
  private val fm: FileManager = new FileManager {}

  /** Initialize ADB */
  Adb.init(fm)

  /**
   * Field where device information will appear.
   * The action listener fires when the user requests a connection; USB is
   * preferred when both transports are available.
   */
  private val deviceField: DeviceField = new DeviceField(() => super.pack(),
    new ActionListener {
      override def actionPerformed(e: ActionEvent): Unit = {
        if (ConnectionManager.hasConnection(ConnectionMode.USB) || ConnectionManager.multipleConnections) {
          try {
            usbMan.connect("localhost")
          }
          catch {
            case e@(_: IOException | _: NullPointerException) =>
              showErrorPrompt("Error", "Could not connect over USB.\\nCheck if your device is listed by pressing Alt+A")
          }
        }
        else if (ConnectionManager.hasConnection(ConnectionMode.WIFI)) {
          try {
            networkMan.connect(networkMan.ip)
          } catch {
            case e@(_: IOException | _: NullPointerException) =>
              showErrorPrompt("Error", "Could not connect over LAN.\\nPlease disable any interfering software and try again.")
          }
        }
      }
    })

  /** Adds device field to container */
  this.getContentPane.add(deviceField, BorderLayout.CENTER)

  /** Controls USB connections. */
  private val usbMan: UsbDeviceManager = new UsbDeviceManager {
    override def onUsbConnectionAdded(): Unit = {
      onConnectionAdded(device, ConnectionMode.USB)
    }
    override def onUsbConnectionRemoved(): Unit = {
      onConnectionRemoved(device, ConnectionMode.USB)
    }
    override def throwError(message: String): Unit = {
      showErrorPrompt("Error", message)
    }
  }

  /** Controls WiFi connections. */
  private val networkMan: NetworkDeviceManager = new NetworkDeviceManager {
    override def onWifiConnectionAdded(): Unit = {
      onConnectionAdded(device, ConnectionMode.WIFI)
    }
    override def onWifiConnectionRemoved(): Unit = {
      onConnectionRemoved(device, ConnectionMode.WIFI)
    }
    override def throwError(message: String): Unit = {
      showErrorPrompt("Error", message)
    }
  }

  /**
   * Called when a device is connected.
   * Updates the device icon (USB icon wins when both transports are up)
   * and refreshes the device field.
   * @param device Device information.
   * @param connectionMode Mode of connection.
   */
  private def onConnectionAdded(device: Device, connectionMode: ConnectionMode): Unit = {
    ConnectionManager.addConnection(connectionMode)
    if (ConnectionManager.multipleConnections) {
      device.icon = ImageIcons.usbIcon
    } else {
      connectionMode match {
        case ConnectionMode.USB =>
          device.icon = ImageIcons.usbIcon
        case ConnectionMode.WIFI =>
          device.icon = ImageIcons.wifiIcon
      }
    }
    deviceField.show()
    deviceField.setUi(device)
    pack()
  }

  /**
   * Called when a device is removed.
   * If another transport is still connected the icon switches to it;
   * otherwise the device field is hidden.
   * @param device Device information
   * @param connectionMode Mode of connection
   */
  private def onConnectionRemoved(device: Device, connectionMode: ConnectionMode): Unit = {
    if (ConnectionManager.multipleConnections) {
      deviceField.showDeviceField(visibility = true)
      // Show the icon of the transport that remains connected.
      connectionMode match {
        case ConnectionMode.USB =>
          device.icon = ImageIcons.wifiIcon
        case ConnectionMode.WIFI =>
          device.icon = ImageIcons.usbIcon
      }
    } else {
      deviceField.showDeviceField(visibility = false)
    }
    ConnectionManager.removeConnection(connectionMode)
    if (device == null) {
      deviceField.showDeviceField(visibility = false)
    } else {
      deviceField.setUi(device)
    }
    pack()
  }

  // Start managers
  networkMan.startBackgroundScanner()
  usbMan.startBackgroundScanner()

  /**
   * Displays error prompt.
   * @param title Title of the dialog.
   * @param message Contents of the error message.
   */
  def showErrorPrompt(title: String, message: String): Unit = {
    val errorMessage: ErrorMessage = new ErrorMessage(this, "Connection Error")
    errorMessage.title = title
    errorMessage.message = message
    errorMessage.showDialog()
  }

  // WindowListener callbacks: only windowOpened does real work (initial pack).
  override def windowOpened(e: WindowEvent): Unit = super.pack()
  override def windowDeiconified(e: WindowEvent): Unit = {}
  override def windowClosing(e: WindowEvent): Unit = {}
  override def windowClosed(e: WindowEvent): Unit = {}
  override def windowActivated(e: WindowEvent): Unit = {}
  override def windowDeactivated(e: WindowEvent): Unit = {}
  override def windowIconified(e: WindowEvent): Unit = {}
}
| LorenK96/slide-desktop | src/main/scala/gui/Frame.scala | Scala | gpl-2.0 | 6,252 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.rich
import org.bdgenomics.adam.models.{ ReferencePosition, TagType, Attribute }
import org.bdgenomics.adam.rich.RichAlignmentRecord._
import org.bdgenomics.formats.avro.{ AlignmentRecord, Contig }
import org.scalatest.FunSuite
import org.scalatest.exceptions.TestFailedException
/** Tests for [[RichAlignmentRecord]]: CIGAR-derived lengths, clipping-aware
  * start/end positions, Illumina read-name parsing, per-base reference
  * positions, and reference-position overlap checks.
  */
class RichAlignmentRecordSuite extends FunSuite {

  test("referenceLengthFromCigar") {
    assert(referenceLengthFromCigar("3M") === 3)
    assert(referenceLengthFromCigar("30M") === 30)
    assert(referenceLengthFromCigar("10Y") === 0) // should abort when it hits an illegal operator
    assert(referenceLengthFromCigar("10M1Y") === 10) // same
    // Insertions consume no reference; deletions do; soft clips don't.
    assert(referenceLengthFromCigar("10M1I10M") === 20)
    assert(referenceLengthFromCigar("10M1D10M") === 21)
    assert(referenceLengthFromCigar("1S10M1S") === 10)
  }

  test("Unclipped Start") {
    // Both soft (S) and hard (H) clips extend the unclipped start leftwards.
    val recordWithoutClipping = AlignmentRecord.newBuilder().setReadMapped(true).setCigar("10M").setStart(42L).setEnd(52L).build()
    val recordWithClipping = AlignmentRecord.newBuilder().setReadMapped(true).setCigar("2S8M").setStart(42L).setEnd(50L).build()
    val recordWithHardClipping = AlignmentRecord.newBuilder().setReadMapped(true).setCigar("3H2S5M4S").setStart(42L).setEnd(47L).build()
    assert(recordWithoutClipping.unclippedStart == 42L)
    assert(recordWithClipping.unclippedStart == 40L)
    assert(recordWithHardClipping.unclippedStart == 37L)
  }

  test("Unclipped End") {
    // Trailing clips extend the unclipped end rightwards past setEnd.
    val unmappedRead = AlignmentRecord.newBuilder().setReadMapped(false).setStart(0L).setCigar("10M").setEnd(10L).build()
    val recordWithoutClipping = AlignmentRecord.newBuilder().setReadMapped(true).setCigar("10M").setStart(10L).setEnd(20L).build()
    val recordWithClipping = AlignmentRecord.newBuilder().setReadMapped(true).setCigar("8M2S").setStart(10L).setEnd(18L).build()
    val recordWithHardClipping = AlignmentRecord.newBuilder().setReadMapped(true).setCigar("6M2S2H").setStart(10L).setEnd(16L).build()
    assert(recordWithoutClipping.unclippedEnd == 20L)
    assert(recordWithClipping.unclippedEnd == 20L)
    assert(recordWithHardClipping.unclippedEnd == 20L)
  }

  test("Illumina Optics") {
    // Non-Illumina names yield None; Illumina names parse tile/x/y.
    val nonIlluminaRecord = AlignmentRecord.newBuilder().setReadName("THISISNOTILLUMINA").build()
    assert(nonIlluminaRecord.illuminaOptics == None)
    val illuminaRecord = AlignmentRecord.newBuilder().setReadName("613F0AAXX100423:4:86:16767:3088").build()
    illuminaRecord.illuminaOptics match {
      case Some(optics) =>
        assert(optics.tile == 86)
        assert(optics.x == 16767)
        assert(optics.y == 3088)
      case None => throw new TestFailedException("Failed to parse valid Illumina read name", 4)
    }
  }

  test("Cigar Clipping Sequence") {
    // Soft-clipped leading bases map to positions before setStart.
    val contig = Contig.newBuilder.setContigName("chr1").build
    val softClippedRead = AlignmentRecord.newBuilder().setReadMapped(true).setStart(100).setCigar("10S90M").setContig(contig).build()
    assert(softClippedRead.referencePositions(0).map(_.pos) == Some(90L))
  }

  test("tags contains optional fields") {
    val contig = Contig.newBuilder.setContigName("chr1").build
    val rec = AlignmentRecord.newBuilder().setAttributes("XX:i:3\\tYY:Z:foo").setContig(contig).build()
    assert(rec.tags.size === 2)
    assert(rec.tags(0) === Attribute("XX", TagType.Integer, 3))
    assert(rec.tags(1) === Attribute("YY", TagType.String, "foo"))
  }

  test("Reference Positions") {
    val contig = Contig.newBuilder.setContigName("chr1").build
    // Hard clips contribute no per-base positions.
    val hardClippedRead = AlignmentRecord.newBuilder().setReadMapped(true).setStart(1000).setCigar("90M10H").setContig(contig).build()
    assert(hardClippedRead.referencePositions.length == 90)
    assert(hardClippedRead.referencePositions(0).map(_.pos) == Some(1000L))
    // Soft clips contribute positions extrapolated before the alignment start.
    val softClippedRead = AlignmentRecord.newBuilder().setReadMapped(true).setStart(1000).setCigar("10S90M").setContig(contig).build()
    assert(softClippedRead.referencePositions.length == 100)
    assert(softClippedRead.referencePositions(0).map(_.pos) == Some(990L))
    assert(softClippedRead.referencePositions(10).map(_.pos) == Some(1000L))
    val doubleMatchNonsenseRead = AlignmentRecord.newBuilder().setReadMapped(true).setStart(1000).setCigar("10M10M").setContig(contig).build()
    Range(0, 20).foreach(i => assert(doubleMatchNonsenseRead.referencePositions(i).map(_.pos) == Some(1000 + i)))
    // Deletions skip reference positions; insertions yield None per inserted base.
    val deletionRead = AlignmentRecord.newBuilder().setReadMapped(true).setStart(1000).setCigar("5M5D10M").setContig(contig).build()
    assert(deletionRead.referencePositions.length == 15)
    assert(deletionRead.referencePositions(0).map(_.pos) == Some(1000L))
    assert(deletionRead.referencePositions(5).map(_.pos) == Some(1010L))
    val insertionRead = AlignmentRecord.newBuilder().setReadMapped(true).setStart(1000).setCigar("10M2I10M").setContig(contig).build()
    assert(insertionRead.referencePositions.length == 22)
    assert(insertionRead.referencePositions(0).map(_.pos) == Some(1000L))
    assert(insertionRead.referencePositions(10).map(_.pos) == None)
    assert(insertionRead.referencePositions(12).map(_.pos) == Some(1010L))
    val indelRead = AlignmentRecord.newBuilder().setReadMapped(true).setStart(1000).setCigar("10M3D10M2I").setContig(contig).build()
    assert(indelRead.referencePositions.length == 22)
    assert(indelRead.referencePositions(0).map(_.pos) == Some(1000L))
    assert(indelRead.referencePositions(10).map(_.pos) == Some(1013L))
    assert(indelRead.referencePositions(20).map(_.pos) == None)
    // Realistic mixed CIGAR from an HG00096 read.
    val hg00096read = AlignmentRecord.newBuilder().setReadMapped(true).setStart(1000).setCigar("1S28M1D32M1I15M1D23M").setContig(contig).build()
    assert(hg00096read.referencePositions.length == 100)
    assert(hg00096read.referencePositions(0).map(_.pos) == Some(999L))
    assert(hg00096read.referencePositions(1).map(_.pos) == Some(1000L))
    assert(hg00096read.referencePositions(29).map(_.pos) == Some(1029L))
    assert(hg00096read.referencePositions(61).map(_.pos) == None)
    assert(hg00096read.referencePositions(62).map(_.pos) == Some(1061L))
    assert(hg00096read.referencePositions(78).map(_.pos) == Some(1078L))
    assert(hg00096read.referencePositions(99).map(_.pos) == Some(1099L))
  }

  test("read overlap unmapped read") {
    // Unmapped reads never overlap any reference position.
    val unmappedRead = AlignmentRecord.newBuilder().setReadMapped(false).setStart(0L).setCigar("10M").setEnd(10L).build()
    val overlaps = unmappedRead.overlapsReferencePosition(ReferencePosition("chr1", 10))
    assert(!overlaps)
  }

  test("read overlap reference position") {
    // Overlap is half-open: [start, end).
    val contig = Contig.newBuilder.setContigName("chr1").build
    val record = RichAlignmentRecord(AlignmentRecord.newBuilder().setReadMapped(true).setCigar("10M").setStart(10L).setEnd(20L).setContig(contig).build())
    assert(record.overlapsReferencePosition(ReferencePosition("chr1", 10)) == true)
    assert(record.overlapsReferencePosition(ReferencePosition("chr1", 14)) == true)
    assert(record.overlapsReferencePosition(ReferencePosition("chr1", 19)) == true)
    assert(record.overlapsReferencePosition(ReferencePosition("chr1", 20)) == false)
  }

  test("read overlap same position different contig") {
    val contig = Contig.newBuilder.setContigName("chr1").build
    val record = RichAlignmentRecord(AlignmentRecord.newBuilder().setReadMapped(true).setCigar("10M").setStart(10L).setEnd(20L).setContig(contig).build())
    assert(record.overlapsReferencePosition(ReferencePosition("chr2", 10)) == false)
  }
}
| VinACE/adam | adam-core/src/test/scala/org/bdgenomics/adam/rich/RichAlignmentRecordSuite.scala | Scala | apache-2.0 | 8,260 |
package com.sksamuel.elastic4s.http.explain
import com.sksamuel.elastic4s.explain.ExplainRequest
import com.sksamuel.elastic4s.http.search.queries.QueryBuilderFn
import com.sksamuel.elastic4s.json.{XContentBuilder, XContentFactory}
/** Builds the JSON request body for an Elasticsearch explain call. */
object ExplainBodyFn {
  /**
   * Serializes the request's query into a `{"query": ...}` body.
   *
   * NOTE(review): `v.query.get` assumes the request always carries a query —
   * a `None` would throw NoSuchElementException here; confirm callers set it.
   */
  def apply(v: ExplainRequest): XContentBuilder = {
    val builder = XContentFactory.jsonBuilder()
    builder.rawField("query", QueryBuilderFn(v.query.get))
    // endObject closes the body and is returned as the final builder value.
    builder.endObject()
  }
}
| Tecsisa/elastic4s | elastic4s-http/src/main/scala/com/sksamuel/elastic4s/http/explain/ExplainBodyFn.scala | Scala | apache-2.0 | 446 |
package lila

/** Module-level members for the importer package; mixes in the shared
  * PackageObject helpers and Play integration (WithPlay). */
package object importer extends PackageObject with WithPlay
| bjhaid/lila | modules/importer/src/main/package.scala | Scala | mit | 75 |
/* sbt -- Simple Build Tool
* Copyright 2008 David MacIver, Mark Harrah
*/
package sbt
import scala.collection._
object ReflectUtilities {
  /** Converts the camelCase String `name` to lowercase separated by `separator`.
    * Every upper-case character is replaced by the separator followed by its
    * lower-case form (so a leading capital yields a leading separator).
    */
  def transformCamelCase(name: String, separator: Char): String = {
    val out = new StringBuilder
    name foreach { ch =>
      import java.lang.Character._
      if (isUpperCase(ch)) {
        out append separator
        out append toLowerCase(ch)
      } else
        out append ch
    }
    out.toString
  }

  /** `clazz` followed by its superclasses; stops at AnyRef, or immediately for
    * types that are not AnyRef subtypes (e.g. primitives). */
  def ancestry(clazz: Class[_]): List[Class[_]] =
    if (clazz == classOf[AnyRef] || !classOf[AnyRef].isAssignableFrom(clazz)) clazz :: Nil
    else clazz :: ancestry(clazz.getSuperclass)

  /** All fields declared by `clazz` and its ancestors, keyed by field name. */
  def fields(clazz: Class[_]) = {
    val declared = for (c <- ancestry(clazz); f <- c.getDeclaredFields) yield (f.getName, f)
    mutable.OpenHashMap(declared: _*)
  }

  /**
   * Collects all `val`s of type `T` defined on value `self`.
   * The returned Map maps the name of each `val` to its value.
   * This depends on scalac implementation details to determine what is a `val`
   * using only Java reflection: a zero-argument getter whose name and return
   * type match a declared backing field.
   */
  def allValsC[T](self: AnyRef, clazz: Class[T]): immutable.SortedMap[String, T] = {
    val backingFields = fields(self.getClass)
    var collected = immutable.TreeMap.empty[String, T]
    for (method <- self.getClass.getMethods)
      if (method.getParameterTypes.isEmpty && clazz.isAssignableFrom(method.getReturnType))
        for (field <- backingFields.get(method.getName) if field.getType == method.getReturnType) {
          val value = method.invoke(self).asInstanceOf[T]
          // A null val indicates the getter ran before initialization completed.
          if (value == null) throw new UninitializedVal(method.getName, method.getDeclaringClass.getName)
          collected += ((method.getName, value))
        }
    collected
  }

  /**
   * Collects all `val`s of type `T` defined on value `self`.
   * The returned Map maps the name of each `val` to its value.
   * This requires an available `Manifest` for `T` and depends on scalac
   * implementation details to determine what is a `val` using only Java reflection.
   */
  def allVals[T](self: AnyRef)(implicit mt: scala.reflect.Manifest[T]): immutable.SortedMap[String, T] =
    allValsC(self, mt.runtimeClass).asInstanceOf[immutable.SortedMap[String, T]]
}

/** An exception to indicate that while traversing the `val`s for an instance of `className`, the `val` named `valName` was `null`. */
class UninitializedVal(val valName: String, val className: String) extends RuntimeException("val " + valName + " in class " + className + " was null.\\nThis is probably an initialization problem and a 'lazy val' should be used.")
package scala.models
import com.bryzek.apidoc.generator.v0.models.InvocationForm
import com.bryzek.apidoc.spec.v0.models.Method
import scala.generator.{ScalaOperation, ScalaResource, ScalaService}
import org.scalatest.{ShouldMatchers, FunSpec}
/** Tests for the Play 2 routes generator: route method naming, parameter
  * rendering, path normalization, and full-file output against fixtures.
  */
class Play2RouteGeneratorSpec extends FunSpec with ShouldMatchers {

  /** Looks up a resource by plural name, failing with the available names. */
  def getScalaResource(ssd: ScalaService, plural: String): ScalaResource = {
    ssd.resources.find(_.plural == plural).getOrElse {
      sys.error(s"Could not find $plural resource. Available resources: " + ssd.resources.map(_.plural).mkString(", "))
    }
  }

  /** Looks up an operation by method+path on a resource, failing with a listing. */
  def getScalaMethod(ssd: ScalaService, resourcePlural: String, method: Method, path: String): ScalaOperation = {
    val resource = getScalaResource(ssd, resourcePlural)
    resource.operations.filter { op => op.method == method && op.path == path }.headOption.getOrElse {
      val errorMsg = s"Operations found for $resourcePlural\n" + resource.operations.map { op =>
        "%s %s".format(op.method, op.path)
      }.mkString("\n")
      sys.error(s"Failed to find method[$method] with path[$path] for resource[$resourcePlural]\n$errorMsg")
    }
  }

  it("service with no operations") {
    // NOTE(review): "models" appears twice in this fixture JSON — kept as-is.
    val service = models.TestHelper.service(models.TestHelper.buildJson("""
      "imports": [],
      "headers": [],
      "info": [],
      "models": [],
      "enums": [],
      "unions": [],
      "models": [],
      "resources": []
    """))
    // Generating routes for an operation-less service must produce an error.
    Play2RouteGenerator(InvocationForm(service)).invoke() match {
      case Left(errors) => errors.mkString(", ") should be("Service does not have any resource operations")
      case Right(code) => fail("expected error when generating routes for a service with no operations")
    }
  }

  describe("with reference-api service") {
    lazy val service = models.TestHelper.referenceApiService
    lazy val ssd = new ScalaService(service)

    it("normalizes explicit paths that match resource name") {
      val resource = getScalaResource(ssd, "Organizations")
      val op = getScalaMethod(ssd, "Organizations", Method.Get, "/organizations")
      val r = Play2Route(ssd, op, resource)
      r.method should be("controllers.Organizations.get")
    }

    it("enums are strongly typed") {
      // Enum path params render as the generated enum type, not String.
      val resource = getScalaResource(ssd, "Users")
      val op = getScalaMethod(ssd, "Users", Method.Get, "/users/:age_group")
      val r = Play2Route(ssd, op, resource)
      r.method should be("controllers.Users.getByAgeGroup")
      r.params.mkString("") should be("age_group: com.bryzek.apidoc.reference.api.v0.models.AgeGroup")
    }

    it("supports multiple query parameters") {
      val echoResource = getScalaResource(ssd, "Echoes")
      val op = getScalaMethod(ssd, "Echoes", Method.Get, "/echoes")
      val r = Play2Route(ssd, op, echoResource)
      r.method should be("controllers.Echoes.get")
      r.params.mkString(", ") should be("foo: _root_.scala.Option[String], optional_messages: _root_.scala.Option[List[String]], required_messages: List[String]")
      // Full-file comparison against the checked-in routes fixture.
      Play2RouteGenerator(InvocationForm(service)).invoke() match {
        case Left(errors) => fail(errors.mkString(", "))
        case Right(sourceFiles) => {
          sourceFiles.size shouldBe 1
          models.TestHelper.assertEqualsFile(
            "/generators/play-2-route-reference-api.routes",
            sourceFiles.head.contents
          )
        }
      }
    }

    it("camel cases hypen in route") {
      val echoResource = getScalaResource(ssd, "Echoes")
      val op = getScalaMethod(ssd, "Echoes", Method.Get, "/echoes/arrays-only")
      val r = Play2Route(ssd, op, echoResource)
      r.method should be("controllers.Echoes.getArraysOnly")
    }
  }

  describe("with quality service example") {
    lazy val quality = ScalaService(models.TestHelper.parseFile("/examples/quality.json"))

    it("correctly orders parameters defined in path and parameters") {
      val op = getScalaMethod(quality, "Teams", Method.Get, "/:org/teams/:key")
      op.parameters.map(_.name) should be(Seq("org", "key"))
      op.parameters.map(_.`type`.name) should be(Seq("string", "string"))
    }
  }

  describe("with apidoc service") {
    lazy val service = models.TestHelper.apidocApiService
    lazy val ssd = new ScalaService(service)

    describe("users resource") {
      lazy val userResource = getScalaResource(ssd, "Users")

      it("GET w/ default path, parameters") {
        val op = userResource.operations.filter { op => op.method == Method.Get && op.path == "/users" }.head
        val r = Play2Route(ssd, op, userResource)
        r.verb should be(Method.Get)
        r.url should be("/users")
        r.method should be("controllers.Users.get")
        r.params.mkString(", ") should be("guid: _root_.scala.Option[_root_.java.util.UUID], email: _root_.scala.Option[String], token: _root_.scala.Option[String]")
      }

      it("GET w/ path, guid path param, no additional parameters") {
        val op = userResource.operations.filter { op => op.method == Method.Get && op.path == "/users/:guid" }.head
        val r = Play2Route(ssd, op, userResource)
        r.verb should be(Method.Get)
        r.url should be("/users/:guid")
        r.method should be("controllers.Users.getByGuid")
        r.params.mkString(", ") should be("guid: _root_.java.util.UUID")
      }

      it("POST w/ default path, no parameters") {
        val op = userResource.operations.filter { op => op.method == Method.Post && op.path == "/users" }.head
        val r = Play2Route(ssd, op, userResource)
        r.verb should be(Method.Post)
        r.url should be("/users")
        r.method should be("controllers.Users.post")
        r.params.mkString(", ") should be("")
      }

      it("PUT w/ guid in path, no parameters") {
        val op = userResource.operations.filter { op => op.method == Method.Put && op.path == "/users/:guid" }.head
        val r = Play2Route(ssd, op, userResource)
        r.verb should be(Method.Put)
        r.url should be("/users/:guid")
        r.method should be("controllers.Users.putByGuid")
        r.params.mkString(", ") should be("guid: _root_.java.util.UUID")
      }
    }

    describe("membership_request resource") {
      lazy val membershipRequestResource = getScalaResource(ssd, "MembershipRequests")

      it("POST /membership_requests/:guid/accept") {
        val op = membershipRequestResource.operations.filter { op => op.method == Method.Post && op.path == "/membership_requests/:guid/accept" }.head
        val r = Play2Route(ssd, op, membershipRequestResource)
        r.verb should be(Method.Post)
        r.url should be("/membership_requests/:guid/accept")
        r.method should be("controllers.MembershipRequests.postAcceptByGuid")
        r.params.mkString(", ") should be("guid: _root_.java.util.UUID")
      }
    }

    describe("application resource") {
      it("GET /:orgKey") {
        val resource = getScalaResource(ssd, "Applications")
        val op = getScalaMethod(ssd, "Applications", Method.Get, "/:orgKey")
        val r = Play2Route(ssd, op, resource)
        r.method should be("controllers.Applications.getByOrgKey")
      }
    }
  }
}
| krschultz/apidoc-generator | scala-generator/src/test/scala/models/Play2RouteGeneratorSpec.scala | Scala | mit | 7,128 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.fs
import slamdata.Predef._
import quasar.{BackendCapability, BackendRef}
import quasar.contrib.pathy._
import quasar.fp.free, free._
import scalaz._
import scalaz.concurrent.Task
/** A backend description paired with its (optional) filesystem-under-test
  * interpreters, both chrooted and non-chrooted. */
final case class SupportedFs[S[_]](
  ref: BackendRef,
  impl: Option[FileSystemUT[S]],
  implNonChrooted: Option[FileSystemUT[S]] = None
) {
  /** Lifts both interpreters (when present) into a coproduct with `Task`. */
  def liftIO: SupportedFs[Coproduct[Task, S, ?]] =
    SupportedFs(ref, impl map (_.liftIO), implNonChrooted map (_.liftIO))
}
/** FileSystem Under Test
  *
  * @param ref description of the filesystem
  * @param testInterp an interpreter of the filesystem into the `Task` monad
  * @param setupInterp a second interpreter which has the ability to insert
  *   and otherwise write to the filesystem, even if `testInterp` does not
  * @param testDir a directory in the filesystem tests may use for temp data
  * @param close an effect to clean up any resources created when the
  *   interpreters are used. Need not be called if no interpreted effect was
  *   ever run, but it's safe to call it either way.
  */
final case class FileSystemUT[S[_]](
  ref: BackendRef,
  testInterp: S ~> Task,
  setupInterp: S ~> Task,
  testDir: ADir,
  close: Task[Unit]
) {
  // Free programs over the filesystem algebra S.
  type F[A] = Free[S, A]

  /** Whether the described backend supports the given capability. */
  def supports(bc: BackendCapability): Boolean =
    ref supports bc

  /** Widens both interpreters to a coproduct with Task, interpreting the
    * Task side with the identity transformation. */
  def liftIO: FileSystemUT[Coproduct[Task, S, ?]] =
    FileSystemUT(
      ref,
      NaturalTransformation.refl[Task] :+: testInterp,
      NaturalTransformation.refl[Task] :+: setupInterp,
      testDir,
      close)

  /** Adapts the UT to a smaller algebra T via a transformation into S. */
  def contramap[T[_]](f: T ~> S): FileSystemUT[T] =
    FileSystemUT(ref, testInterp compose f, setupInterp compose f, testDir, close)

  /** Adapts the UT via a transformation into Free programs over S. */
  def contramapF[T[_]](f: T ~> Free[S, ?]): FileSystemUT[T] =
    FileSystemUT(
      ref,
      foldMapNT(testInterp) compose f,
      foldMapNT(setupInterp) compose f,
      testDir,
      close)

  // Interpreters for whole Free programs, derived from the per-op interpreters.
  val testInterpM: F ~> Task = foldMapNT(testInterp)
  val setupInterpM: F ~> Task = foldMapNT(setupInterp)
}
| drostron/quasar | it/src/test/scala/quasar/fs/FileSystemUT.scala | Scala | apache-2.0 | 2,605 |
package scala.pickling.internal
import scala.collection.concurrent.TrieMap
import scala.collection.mutable
import scala.pickling.spi.PicklerRegistry._
import scala.reflect.runtime.universe.Mirror
import scala.pickling._
import scala.pickling.runtime.CustomRuntime
import scala.pickling.spi.{PicklerRegistry, RuntimePicklerGenerator}
import scala.pickling.util.ClassMapper
/** Default pickler registry: backed by concurrent `TrieMap`s for lock-free
  * thread-safe lookup/registration, delegating creation of missing
  * (un)picklers to a [[RuntimePicklerGenerator]].
  */
final class DefaultPicklerRegistry(generator: RuntimePicklerGenerator)
  extends PicklerRegistry with CustomRuntime {

  type PicklerGenerator = FastTypeTag[_] => Pickler[_]
  type UnpicklerGenerator = FastTypeTag[_] => Unpickler[_]

  // Keyed by fully-applied type-tag key (e.g. "scala.Tuple2[Int,String]").
  private val picklerMap: mutable.Map[String, Pickler[_]] = new TrieMap[String, Pickler[_]]
  private val unpicklerMap: mutable.Map[String, Unpickler[_]] = new TrieMap[String, Unpickler[_]]
  // Keyed by bare type-constructor name (e.g. "scala.Tuple2"); generators
  // build (un)picklers for concrete applied types on demand.
  private val picklerGenMap: mutable.Map[String, PicklerGenerator] = new TrieMap[String, PicklerGenerator]
  private val unpicklerGenMap: mutable.Map[String, UnpicklerGenerator] = new TrieMap[String, UnpicklerGenerator]

  // Tuple (un)picklers are generated structurally; register their generators
  // for scala.Tuple2 and all of its specialized variants at construction time.
  val tupleGenerators: (PicklerGen[Any], UnpicklerGen[Any]) =
    (tuplePicklerGenerator.asInstanceOf[PicklerGen[Any]],
      tupleUnpicklerGenerator.asInstanceOf[UnpicklerGen[Any]])
  val templatesToRegister =
    Vector("scala.Tuple2" -> tupleGenerators) ++
      ClassMapper.specializedTupleNamesFor("scala.Tuple2")
        .map(_ -> tupleGenerators)
  registerTemplatesAtInit(templatesToRegister)

  /** Returns the unpickler for `tagKey`, generating and caching one when absent. */
  override def genUnpickler(mirror: Mirror, tagKey: String)(implicit share: refs.Share): Unpickler[_] = {
    lookupUnpickler(tagKey) match {
      case Some(p) => p
      case None =>
        // TODO - This should probably just be taking the `tagKey` and no mirror or share, the mirror/share
        // should be configured by the default runtime.
        val p = generator.genUnpickler(mirror, tagKey)
        registerUnpickler(tagKey, p)
        p
    }
  }

  /** Returns the pickler for `tag`, generating and caching one when absent. */
  def genPickler(classLoader: ClassLoader, clazz: Class[_], tag: FastTypeTag[_])(implicit share: refs.Share): Pickler[_] = {
    lookupPickler(tag.key) match {
      case Some(p) => p
      case None =>
        // TODO - genPickler should probably just be using the tag and `currentMirror` of internal.
        val p = generator.genPickler(classLoader, clazz, tag)
        registerPickler(tag.key, p)
        p
    }
  }

  /** Registers a pickler with this registry for future use. */
  override def registerPickler[T](key: String, p: Pickler[T]): Unit =
    picklerMap.put(key, p)

  /** Registers an unpickler with this registry for future use. */
  override def registerUnpickler[T](key: String, p: Unpickler[T]): Unit =
    unpicklerMap.put(key, p)

  /** Removes any directly-registered pickler and unpickler for the tagged type. */
  override private[pickling] def clearRegisteredPicklerUnpicklerFor[T: FastTypeTag]: Unit = {
    val tag = implicitly[FastTypeTag[T]]
    picklerMap -= tag.key
    unpicklerMap -= tag.key
  }

  override val isLookupEnabled = true

  /** Checks the existence of a pickler ignoring the registered generators. */
  override def lookupExistingPickler(key: String): Option[Pickler[_]] =
    picklerMap.get(key)

  /** Checks the existence of an unpickler ignoring the registered generators. */
  override def lookupExistingUnpickler(key: String): Option[Unpickler[_]] =
    unpicklerMap.get(key)

  /** Checks the existence of an unpickler, falling back to a registered
    * type-constructor generator (and caching its result) when no exact
    * key match is found.
    */
  override def lookupUnpickler(key: String): Option[Unpickler[_]] = {
    // NOTE: extractor pattern instead of a type pattern on Some[Unpickler[_]],
    // which would be unchecked due to erasure.
    unpicklerMap.get(key) match {
      case found @ Some(_) => found
      case None =>
        // Now we use the typeConstructor registry
        FastTypeTag(key) match {
          case a @ FastTypeTag(typename, _) =>
            unpicklerGenMap.get(typename) match {
              case Some(gen) =>
                // Generate, register and return an unpickler
                val up = gen(a)
                registerUnpickler(key, up)
                Some(up)
              case None => None
            }
          case _ => None // This key is not an applied type.
        }
    }
  }

  /** Looks for a pickler with the given FastTypeTag string, falling back to a
    * registered type-constructor generator (and caching its result).
    */
  override def lookupPickler(key: String): Option[Pickler[_]] = {
    picklerMap.get(key) match {
      case found @ Some(_) => found
      case None =>
        FastTypeTag(key) match {
          case a @ FastTypeTag(typename, _) =>
            picklerGenMap.get(typename) match {
              case Some(gen) =>
                // Generate the pickler, register it with ourselves for future lookup, and return it.
                val up = gen(a)
                registerPickler(key, up)
                Some(up)
              case None => None
            }
          case _ => None // This key is not an applied type.
        }
    }
  }

  /** Registers a pickler and unpickler for a type with this registry for future use.
    *
    * @param key The type key for the pickler.
    *            Note: In reflective scenarios this may not include type
    *            parameters. In those situations, the pickler should be able
    *            to handle arbitrary (existential) type parameters.
    * @param p The combined pickler/unpickler to register.
    */
  override def registerPicklerUnpickler[T](key: String, p: (Pickler[T] with Unpickler[T])): Unit = {
    registerPickler(key, p)
    registerUnpickler(key, p)
  }

  /** Registers a function which can generate unpicklers for a given type constructor.
    *
    * @param typeConstructorKey The type constructor. e.g. "scala.List" for something that can make scala.List[A] unpicklers.
    * @param generator A function which takes an applied type string (your type + arguments) and returns an unpickler for
    *                  this type.
    */
  override def registerUnpicklerGenerator[T](typeConstructorKey: String, generator: UnpicklerGen[T]): Unit =
    unpicklerGenMap.put(typeConstructorKey, generator)

  /** Registers a function which can generate picklers for a given type constructor.
    *
    * @param typeConstructorKey The type constructor. e.g. "scala.List" for something that can make scala.List[A] picklers.
    * @param generator A function which takes an applied type string (your type + arguments) and returns a pickler for
    *                  this type.
    */
  override def registerPicklerGenerator[T](typeConstructorKey: String, generator: PicklerGen[T]): Unit =
    picklerGenMap.put(typeConstructorKey, generator)

  /** Registers a function which can generate picklers and unpicklers for a given type constructor.
    *
    * @param typeConstructorKey The type constructor. e.g. "scala.List" for something that can make scala.List[A] picklers.
    * @param generator A function which takes an applied type string (your type + arguments) and returns a pickler for
    *                  this type.
    */
  override def registerPicklerUnpicklerGenerator[T](typeConstructorKey: String, generator: PicklerUnpicklerGen[T]): Unit = {
    registerPicklerGenerator(typeConstructorKey, generator)
    registerUnpicklerGenerator(typeConstructorKey, generator)
  }

  /** Transfer the "state" between different [[scala.pickling.spi.PicklingRuntime]]s.
    *
    * Watch out, this operation is not thread-safe.
    *
    * Make a new [[scala.pickling.spi.PicklingRuntime]] aware of
    * the already registered [[Pickler]]s and [[Unpickler]]s present
    * in the one that will be replaced.
    */
  private[pickling] def dumpStateTo(r: PicklerRegistry): Unit = {
    for(p <- picklerMap) r.registerPickler(p._1, p._2.asInstanceOf[Pickler[Any]])
    for(p <- picklerGenMap) r.registerPicklerGenerator(p._1, p._2.asInstanceOf[PicklerGen[Any]])
    for(u <- unpicklerMap) r.registerUnpickler(u._1, u._2.asInstanceOf[Unpickler[Any]])
    for(u <- unpicklerGenMap) r.registerUnpicklerGenerator(u._1, u._2.asInstanceOf[UnpicklerGen[Any]])
  }
}
| scala/pickling | core/src/main/scala/scala/pickling/internal/DefaultPicklerRegistry.scala | Scala | bsd-3-clause | 7,824 |
//inner class
// NOTE(review): extract-method refactoring test fixture. The /*start*/ and
// /*end*/ markers delimit the region to extract, and the block comment that
// follows this class records the expected refactoring result — any change
// here (even comments) must be mirrored there or the test will fail.
class ReturnSeveralOutput1 {
  def foo(i: Int): Int = {
    /*start*/
    val x = i
    if (true) return x
    val y = "a"
    val z = 1
    val zz = "1"
    /*end*/
    println(x + y + z + zz)
    i
  }
}
/*
//inner class
class ReturnSeveralOutput1 {
def foo(i: Int): Int = {
val testMethodNameResult: TestMethodNameResult = testMethodName(i) match {
case Left(toReturn) => return toReturn
case Right(result) => result
}
val x: Int = testMethodNameResult.x
val y: String = testMethodNameResult.y
val z: Int = testMethodNameResult.z
val zz: String = testMethodNameResult.zz
println(x + y + z + zz)
i
}
class TestMethodNameResult(val x: Int, val y: String, val z: Int, val zz: String)
def testMethodName(i: Int): Either[Int, TestMethodNameResult] = {
val x = i
if (true) return Left(x)
val y = "a"
val z = 1
val zz = "1"
Right(new TestMethodNameResult(x, y, z, zz))
}
}
*/ | ilinum/intellij-scala | testdata/extractMethod/innerClass/ReturnSeveralOutput1.scala | Scala | apache-2.0 | 970 |
package domala.tests.entity
import domala._
import domala.jdbc.Config
import domala.tests.{TestConfig, holder}
import org.scalatest.{BeforeAndAfter, FunSuite}
import domala.jdbc.Result
/** Round-trip DAO tests for the Scala basic value types in four encodings:
  * raw primitives, Option-wrapped primitives, @Holder wrappers, and AnyVal
  * wrappers — each checked for present and (where applicable) absent values.
  */
class ValueTypeTestSuite extends FunSuite with BeforeAndAfter {
  implicit val config: Config = TestConfig

  val dao: ValueTypeTestDao = ValueTypeTestDao.impl

  // Recreate the backing table before, and drop it after, every test.
  before {
    Required {
      dao.create()
    }
  }

  after {
    Required {
      dao.drop()
    }
  }

  test("select basic value type") {
    Required {
      assert(dao.selectBasic(0) === ValueTypeBasic(0, boolean = false, 0: Byte, 0: Short, 0, 0, 0.0f, 0.0))
    }
  }

  test("insert basic value type") {
    Required {
      dao.insertBasic(ValueTypeBasic(1, boolean = true, 1: Byte, 1: Short, 1, 1, 1.0f, 1.0))
      assert(dao.selectBasic(1) === ValueTypeBasic(1, boolean = true, 1: Byte, 1: Short, 1, 1, 1.0f, 1.0))
    }
  }

  test("select basic value type option") {
    Required {
      assert(dao.selectOption(0) === ValueTypeOption(0, Some(false), Some(0: Byte), Some(0: Short), Some(0), Some(0), Some(0.0f), Some(0.0)))
    }
  }

  test("insert basic value type option") {
    Required {
      val entity = ValueTypeOption(1, Some(true), Some(1: Byte), Some(1: Short), Some(1), Some(1), Some(1.0f), Some(1.0))
      dao.insertOption(entity)
      assert(dao.selectOption(1) === entity)
    }
  }

  // None fields are expected to round-trip through SQL NULL.
  test("insert basic value type none") {
    Required {
      val entity = ValueTypeOption(1, None, None, None, None, None, None, None)
      dao.insertOption(entity)
      assert(dao.selectOption(1) === entity)
    }
  }

  test("select basic value holder") {
    Required {
      assert(dao.selectHolder(0) === ValueTypeHolder(0, BooleanHolder(false), ByteHolder(0: Byte), ShortHolder(0: Short), IntHolder(0), LongHolder(0), FloatHolder(0.0f), DoubleHolder(0.0)))
    }
  }

  test("insert basic value holder") {
    Required {
      val entity = ValueTypeHolder(1, BooleanHolder(true), ByteHolder(1: Byte), ShortHolder(1: Short), IntHolder(1), LongHolder(1), FloatHolder(1.0f), DoubleHolder(1.0))
      dao.insertHolder(entity)
      assert(dao.selectHolder(1) === entity)
    }
  }

  test("select basic value holder option") {
    Required {
      assert(dao.selectHolderOption(0) === ValueTypeHolderOption(0, Some(BooleanHolder(false)), Some(ByteHolder(0: Byte)), Some(ShortHolder(0: Short)), Some(IntHolder(0)), Some(LongHolder(0)), Some(FloatHolder(0.0f)), Some(DoubleHolder(0.0))))
    }
  }

  test("insert basic value holder option") {
    Required {
      val entity = ValueTypeHolderOption(1, Some(BooleanHolder(true)), Some(ByteHolder(1: Byte)), Some(ShortHolder(1: Short)), Some(IntHolder(1)), Some(LongHolder(1)), Some(FloatHolder(1.0f)), Some(DoubleHolder(1.0)))
      dao.insertHolderOption(entity)
      assert(dao.selectHolderOption(1) === entity)
    }
  }

  test("insert basic value holder none") {
    Required {
      val entity = ValueTypeHolderOption(1, None, None, None, None, None, None, None)
      dao.insertHolderOption(entity)
      assert(dao.selectHolderOption(1) === entity)
    }
  }

  test("select basic value AnyVal") {
    Required {
      assert(dao.selectVal(0) === ValueTypeVal(0, BooleanVal(false), ByteVal(0: Byte), ShortVal(0: Short), IntVal(0), LongVal(0), FloatVal(0.0f), DoubleVal(0.0)))
    }
  }

  test("insert basic value AnyVal") {
    Required {
      val entity = ValueTypeVal(1, BooleanVal(true), ByteVal(1: Byte), ShortVal(1: Short), IntVal(1), LongVal(1), FloatVal(1.0f), DoubleVal(1.0))
      dao.insertVal(entity)
      assert(dao.selectVal(1) === entity)
    }
  }

  test("select basic value AnyVal option") {
    Required {
      assert(dao.selectValOption(0) === ValueTypeValOption(0, Some(BooleanVal(false)), Some(ByteVal(0: Byte)), Some(ShortVal(0: Short)), Some(IntVal(0)), Some(LongVal(0)), Some(FloatVal(0.0f)), Some(DoubleVal(0.0))))
    }
  }

  test("insert basic value AnyVal option") {
    Required {
      val entity = ValueTypeValOption(1, Some(BooleanVal(true)), Some(ByteVal(1: Byte)), Some(ShortVal(1: Short)), Some(IntVal(1)), Some(LongVal(1)), Some(FloatVal(1.0f)), Some(DoubleVal(1.0)))
      dao.insertValOption(entity)
      assert(dao.selectValOption(1) === entity)
    }
  }

  test("insert basic value AnyVal none") {
    Required {
      val entity = ValueTypeValOption(1, None, None, None, None, None, None, None)
      dao.insertValOption(entity)
      assert(dao.selectValOption(1) === entity)
    }
  }
}
// Row of the value_types table with raw primitive columns.
@Entity
@Table(name = "value_types")
case class ValueTypeBasic(
  id: Int,
  boolean: Boolean,
  byte: Byte,
  short: Short,
  int: Int,
  long: Long,
  float: Float,
  double: Double
)
// Option-wrapped variant of the same row; None maps to SQL NULL.
@Entity
@Table(name = "value_types")
case class ValueTypeOption(
  id: Int,
  boolean: Option[Boolean],
  byte: Option[Byte],
  short: Option[Short],
  int: Option[Int],
  long: Option[Long],
  float: Option[Float],
  double: Option[Double]
)
// Single-field @Holder wrappers, one per basic value type, used by the
// ValueTypeHolder / ValueTypeHolderOption entities.
@Holder
case class BooleanHolder(value: Boolean)
@Holder
case class ByteHolder(value: Byte)
@Holder
case class ShortHolder(value: Short)
@Holder
case class IntHolder(value: Int)
@Holder
case class LongHolder(value: Long)
@Holder
case class FloatHolder(value: Float)
@Holder
case class DoubleHolder(value: Double)
// Row variant whose columns are typed with the @Holder wrappers above.
@Entity
@Table(name = "value_types")
case class ValueTypeHolder(
  id: Int,
  boolean: BooleanHolder,
  byte: ByteHolder,
  short: ShortHolder,
  int: IntHolder,
  long: LongHolder,
  float: FloatHolder,
  double: DoubleHolder
)
// Option-of-holder variant; None maps to SQL NULL.
@Entity
@Table(name = "value_types")
case class ValueTypeHolderOption(
  id: Int,
  boolean: Option[BooleanHolder],
  byte: Option[ByteHolder],
  short: Option[ShortHolder],
  int: Option[IntHolder],
  long: Option[LongHolder],
  float: Option[FloatHolder],
  double: Option[DoubleHolder]
)
// AnyVal (value class) wrappers covering the same primitives without a
// runtime allocation.
case class BooleanVal(value: Boolean) extends AnyVal
case class ByteVal(value: Byte) extends AnyVal
case class ShortVal(value: Short) extends AnyVal
case class IntVal(value: Int) extends AnyVal
case class LongVal(value: Long) extends AnyVal
case class FloatVal(value: Float) extends AnyVal
case class DoubleVal(value: Double) extends AnyVal
// Row variant whose columns are typed with the AnyVal wrappers.
@Entity
@Table(name = "value_types")
case class ValueTypeVal(
  id: Int,
  boolean: BooleanVal,
  byte: ByteVal,
  short: ShortVal,
  int: IntVal,
  long: LongVal,
  float: FloatVal,
  double: DoubleVal
)
// Option-of-AnyVal variant; None maps to SQL NULL.
// Fix: dropped the stray trailing comma after the last parameter — it was
// inconsistent with every sibling entity and does not compile before
// Scala 2.12.2.
@Entity
@Table(name = "value_types")
case class ValueTypeValOption(
  id: Int,
  boolean: Option[BooleanVal],
  byte: Option[ByteVal],
  short: Option[ShortVal],
  int: Option[IntVal],
  long: Option[LongVal],
  float: Option[FloatVal],
  double: Option[DoubleVal]
)
/** DAO under test: DDL scripts plus a select/insert pair for each of the
  * four entity encodings, all against the single value_types table.
  * Row id 0 is pre-seeded with all-zero values by create().
  */
@Dao(config = TestConfig)
trait ValueTypeTestDao {
  // Creates the table and seeds row id=0 with zero values.
  @Script(sql =
    """
    create table value_types(
      id int not null identity primary key,
      boolean boolean,
      byte tinyint,
      short smallint,
      int int,
      long bigint,
      float real,
      double double
    );
    insert into value_types (id, boolean, byte, short, int, long, float, double) values(0, false, 0, 0, 0, 0, 0, 0);
    """)
  def create()

  @Script(sql =
    """
    drop table value_types;
    """)
  def drop()

  // Raw primitive encoding.
  @Select(sql=
    """
    select * from value_types where id = /* id */0
    """
  )
  def selectBasic(id: Int): ValueTypeBasic

  @Insert
  def insertBasic(entity: ValueTypeBasic): Result[ValueTypeBasic]

  // Option-wrapped encoding.
  @Select(sql=
    """
    select * from value_types where id = /* id */0
    """
  )
  def selectOption(id: Int): ValueTypeOption

  @Insert
  def insertOption(entity: ValueTypeOption): Result[ValueTypeOption]

  // @Holder wrapper encoding.
  @Select(sql=
    """
    select * from value_types where id = /* id */0
    """
  )
  def selectHolder(id: Int): ValueTypeHolder

  @Insert
  def insertHolder(entity: ValueTypeHolder): Result[ValueTypeHolder]

  @Select(sql=
    """
    select * from value_types where id = /* id */0
    """
  )
  def selectHolderOption(id: Int): ValueTypeHolderOption

  @Insert
  def insertHolderOption(entity: ValueTypeHolderOption): Result[ValueTypeHolderOption]

  // AnyVal wrapper encoding.
  @Select(sql=
    """
    select * from value_types where id = /* id */0
    """
  )
  def selectVal(id: Int): ValueTypeVal

  @Insert
  def insertVal(entity: ValueTypeVal): Result[ValueTypeVal]

  @Select(sql=
    """
    select * from value_types where id = /* id */0
    """
  )
  def selectValOption(id: Int): ValueTypeValOption

  @Insert
  def insertValOption(entity: ValueTypeValOption): Result[ValueTypeValOption]
}
| bakenezumi/domala | paradise/src/test/scala/domala/tests/entity/ValueTypeTestSuite.scala | Scala | apache-2.0 | 8,253 |
package validation
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import WorkingWithLists.P11._
/** Validates P11 (encodeModified): run-length encoding in which elements
  * without duplicates are kept as-is instead of being wrapped in a
  * (count, element) tuple. Covers repeated runs, singletons and empty input.
  */
@RunWith(classOf[JUnitRunner])
class P11TestSuite extends FunSuite {
  test("encodeModified an ordinary list: encodeModified(List('a, 'a, 'a, 'a, 'b, 'c, 'c, 'a, 'a, 'd, 'e, 'e, 'e, 'e)) = List((4,'a), 'b, (2,'c), (2,'a), 'd, (4,'e))") {
    assert(P11.encodeModified(List('a, 'a, 'a, 'a, 'b, 'c, 'c, 'a, 'a, 'd, 'e, 'e, 'e, 'e)) === List((4,'a), 'b, (2,'c), (2,'a), 'd, (4,'e)))
  }

  test("encodeModified an ordinary list: encodeModified(List(1, 1, 2, 1, 3, 5, 8)) = List((2, 1), 2, 1, 3, 5, 8)") {
    assert(P11.encodeModified(List(1, 1, 2, 1, 3, 5, 8)) === List((2, 1), 2, 1, 3, 5, 8))
  }

  test("encodeModified an one-item list: encodeModified(List(3)) = List(3)") {
    assert(P11.encodeModified(List(3)) === List(3))
  }

  test("encodeModified an empty list: encodeModified(List()) = List()") {
    assert(P11.encodeModified(List()) === List())
  }
}
| ihac/Ninety-Nine-Scala-Problems | src/test/scala/validation/P11TestSuite.scala | Scala | gpl-3.0 | 1,013 |
package c2corg
import scala.concurrent.duration._
import io.gatling.core.Predef._
import io.gatling.http.Predef._
/** Gatling scenario fragment: view a waypoint page, with the id/lang pair
  * drawn at random from waypoints.csv, using the shared HTML headers and
  * basic-auth credentials from C2corgConf.
  */
object Waypoint {
  // Random CSV row per execution; supplies the ${id} and ${lang} session keys.
  val feeder = csv("waypoints.csv").random

  val view = feed(feeder).exec(
    http("View waypoint")
      .get("/waypoints/${id}/${lang}/foo")
      .headers(C2corgConf.header_html)
      .basicAuth(C2corgConf.basic_auth_username, C2corgConf.basic_auth_password)
  )
}
| c2corg/v6_api | c2corg_api/scripts/loadtests/gatling/user-files/simulations/c2corg/Waypoint.scala | Scala | agpl-3.0 | 406 |
package blended.updater.config
import java.io.File
import com.typesafe.config.{Config, ConfigFactory}
import scala.util.Try
/** (De)serialises a [[ResolvedProfile]] to and from Typesafe Config.
  * Cleanups: consistent `name: Type` spacing, removed stray trailing comma.
  */
object ResolvedProfileCompanion {

  /** Reads a resolved profile from the "profile" section of the given config.
    * Fails (via the `Try` and the `.get`) when the section is missing or
    * malformed.
    *
    * @param featureDir currently unused by this implementation; kept so the
    *                   signature stays source-compatible for callers.
    *                   NOTE(review): confirm whether feature resolution was
    *                   meant to consult this directory.
    */
  def fromConfig(config: Config, featureDir: File): Try[ResolvedProfile] = Try {
    ResolvedProfile(
      profile = ProfileCompanion.read(config.getObject("profile").toConfig()).get
    )
  }

  /** Writes the profile back under the top-level "profile" key. */
  def toConfig(resolvedProfile: ResolvedProfile): Config = {
    val profileCfg: Config = ProfileCompanion.toConfig(resolvedProfile.profile)
    ConfigFactory.empty().withValue("profile", profileCfg.root())
  }
}
| woq-blended/blended | blended.updater.config/jvm/src/main/scala/blended/updater/config/ResolvedProfileCompanion.scala | Scala | apache-2.0 | 574 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.createTable
import java.io.File
import java.util
import org.apache.avro
import org.apache.commons.io.FileUtils
import org.apache.spark.sql.Row
import org.apache.spark.sql.test.util.QueryTest
import org.junit.Assert
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.util.{CarbonProperties, CarbonTestUtil}
import org.apache.carbondata.sdk.file.CarbonWriter
/** SDK-writer (non-transactional) table tests for deeply nested complex
  * types: avro records are written with CarbonWriter to a local directory,
  * then read back through an external carbondata table and checked with
  * projections over the nested fields.
  */
class TestNonTransactionalCarbonTableWithComplexType extends QueryTest with BeforeAndAfterAll {

  // Output directory for the SDK writer; the external tables point here.
  var writerPath = new File(this.getClass.getResource("/").getPath +
                            "../../target/SparkCarbonFileFormat/WriterOutput/").getCanonicalPath
  // getCanonicalPath gives path with \\, but the code expects /.
  writerPath = writerPath.replace("\\\\", "/")

  private def cleanTestData() = {
    FileUtils.deleteDirectory(new File(writerPath))
  }

  override def beforeAll(): Unit = {
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
        CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
    sql("DROP TABLE IF EXISTS sdkOutputTable")
  }

  override def afterAll(): Unit = {
    sql("DROP TABLE IF EXISTS sdkOutputTable")
  }

  // Writes `rows` copies of the single json record (conforming to mySchema)
  // through the avro CarbonWriter, optionally with local dictionary enabled.
  private def WriteFilesWithAvroWriter(rows: Int,
      mySchema: String,
      json: String,
      isLocalDictionary: Boolean): Unit = {
    // conversion to GenericData.Record
    val nn = new avro.Schema.Parser().parse(mySchema)
    val record = testUtil.jsonToAvro(json, mySchema)
    try {
      val writer = if (isLocalDictionary) {
        CarbonWriter.builder
          .outputPath(writerPath).enableLocalDictionary(true)
          .localDictionaryThreshold(2000)
          .uniqueIdentifier(System.currentTimeMillis())
          .withAvroInput(nn)
          .writtenBy("TestNonTransactionalCarbonTableWithComplexType")
          .build()
      } else {
        CarbonWriter.builder
          .outputPath(writerPath)
          .uniqueIdentifier(System.currentTimeMillis())
          .withAvroInput(nn)
          .writtenBy("TestNonTransactionalCarbonTableWithComplexType")
          .build()
      }
      var i = 0
      while (i < rows) {
        writer.write(record)
        i = i + 1
      }
      writer.close()
    }
    catch {
      case e: Exception =>
        e.printStackTrace()
        Assert.fail(e.getMessage)
    }
  }

  // test multi level -- 4 levels [array of array of array of struct]
  def buildAvroTestDataMultiLevel4(rows: Int,
      options: util.Map[String, String],
      isLocalDictionary: Boolean): Any = {
    FileUtils.deleteDirectory(new File(writerPath))

    val mySchema = """ {
                     |	"name": "address",
                     |	"type": "record",
                     |	"fields": [
                     |		{
                     |			"name": "name",
                     |			"type": "string"
                     |		},
                     |		{
                     |			"name": "age",
                     |			"type": "int"
                     |		},
                     |		{
                     |			"name": "BuildNum",
                     |			"type": {
                     |				"type": "array",
                     |				"items": {
                     |					"name": "FloorNum",
                     |					"type": "array",
                     |					"items": {
                     |						"name": "doorNum",
                     |						"type": "array",
                     |						"items": {
                     |							"name": "my_address",
                     |							"type": "record",
                     |							"fields": [
                     |								{
                     |									"name": "street",
                     |									"type": "string"
                     |								},
                     |								{
                     |									"name": "city",
                     |									"type": "string"
                     |								},
                     |								{
                     |									"name": "Temperature",
                     |									"type": "double"
                     |								},
                     |								{
                     |									"name": "WindSpeed",
                     |									"type": "string"
                     |								},
                     |								{
                     |									"name": "year",
                     |									"type": "string"
                     |								}
                     |							]
                     |						}
                     |					}
                     |				}
                     |			}
                     |		}
                     |	]
                     |} """.stripMargin

    val json =
      """ {
        |	"name": "bob",
        |	"age": 10,
        |	"BuildNum": [
        |		[
        |			[
        |				{"street":"abc", "city":"city1", "Temperature":12.6,
        |				"WindSpeed":"1234.56", "year":"2018-05-10"},
        |				{"street":"def", "city":"city2", "Temperature":13.6,
        |				"WindSpeed":"1234.56", "year":"2018-05-10"},
        |				{"street":"cfg", "city":"city3", "Temperature":14.6,
        |				"WindSpeed":"1234.56", "year":"2018-05-10"}
        |			],
        |			[
        |				{"street":"abc1", "city":"city3", "Temperature":12.6,
        |				"WindSpeed":"1234.56", "year":"2018-05-10"},
        |				{"street":"def1", "city":"city4", "Temperature":12.6,
        |				"WindSpeed":"1234.56", "year":"2018-05-10"},
        |				{"street":"cfg1", "city":"city5", "Temperature":12.6,
        |				"WindSpeed":"1234.56", "year":"2018-05-10"}
        |			]
        |		],
        |		[
        |			[
        |				{"street":"abc2", "city":"cityx", "Temperature":12.6,
        |				"WindSpeed":"1234.56", "year":"2018-05-10"},
        |				{"street":"abc3", "city":"cityy", "Temperature":12.6,
        |				"WindSpeed":"1234.56", "year":"2018-05-10"},
        |				{"street":"abc4", "city":"cityz", "Temperature":12.6,
        |				"WindSpeed":"1234.56", "year":"2018-05-10"}
        |			],
        |			[
        |				{"street":"a1bc", "city":"cityA", "Temperature":12.6,
        |				"WindSpeed":"1234.56", "year":"2018-05-10"},
        |				{"street":"a1bc", "city":"cityB", "Temperature":12.6,
        |				"WindSpeed":"1234.56", "year":"2018-05-10"},
        |				{"street":"a1bc", "city":"cityc", "Temperature":12.6,
        |				"WindSpeed":"1234.56", "year":"2018-05-10"}
        |			]
        |		]
        |	]
        |} """.stripMargin

    WriteFilesWithAvroWriter(rows, mySchema, json, isLocalDictionary)
  }

  def buildAvroTestDataMultiLevel4Type(isLocalDictionary: Boolean): Any = {
    FileUtils.deleteDirectory(new File(writerPath))
    buildAvroTestDataMultiLevel4(3, null, isLocalDictionary)
  }

  // test multi level -- 4 levels [array of array of array of struct]
  test("test multi level support : array of array of array of struct") {
    buildAvroTestDataMultiLevel4Type(false)
    assert(new File(writerPath).exists())
    sql("DROP TABLE IF EXISTS sdkOutputTable")
    sql(
      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED AS carbondata LOCATION
         |'$writerPath' """.stripMargin)

    sql("select * from sdkOutputTable").collect()

    // TODO: Add a validation

    sql("DROP TABLE sdkOutputTable")
    // drop table should not delete the files
    cleanTestData()
  }

  test("test local dictionary for complex datatype") {
    buildAvroTestDataMultiLevel4Type(true)
    assert(new File(writerPath).exists())
    sql("DROP TABLE IF EXISTS localComplex")
    sql(
      s"""CREATE EXTERNAL TABLE localComplex STORED AS carbondata LOCATION
         |'$writerPath' """.stripMargin)
    assert(FileFactory.getCarbonFile(writerPath).exists())
    // Verify the written blocklet actually used a local dictionary page.
    assert(CarbonTestUtil.checkForLocalDictionary(CarbonTestUtil.getDimRawChunk(writerPath, 0)))
    sql("describe formatted localComplex").collect
    val descLoc = sql("describe formatted localComplex").collect
    descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
      case Some(row) => assert(row.get(1).toString.contains("true"))
      case None => assert(false)
    }

    // TODO: Add a validation

    sql("DROP TABLE localComplex")
    // drop table should not delete the files
    cleanTestData()
  }

  test("test multi level support : array of array of array of with Double data type") {
    cleanTestData()
    val mySchema = """ {
                     |	"name": "address",
                     |	"type": "record",
                     |	"fields": [
                     |		{
                     |			"name": "name",
                     |			"type": "string"
                     |		},
                     |		{
                     |			"name": "age",
                     |			"type": "int"
                     |		},
                     |		{
                     |			"name" :"my_address",
                     |			"type" :{
                     |				"name": "my_address",
                     |				"type": "record",
                     |				"fields": [
                     |					{
                     |						"name": "Temperaturetest",
                     |						"type": "double"
                     |					}
                     |				]
                     |			}
                     |		}
                     |	]
                     |} """.stripMargin

    val jsonvalue =
      """{
        |"name" :"babu",
        |"age" :12,
        |"my_address" :{ "Temperaturetest" :123 }
        |}
      """.stripMargin
    // NOTE(review): Schema.parse is deprecated in avro; new Schema.Parser()
    // (as used in WriteFilesWithAvroWriter) is the replacement.
    val pschema = org.apache.avro.Schema.parse(mySchema)
    val records = testUtil.jsonToAvro(jsonvalue, mySchema)
    val writer = CarbonWriter.builder()
      .outputPath(writerPath)
      .withAvroInput(pschema)
      .writtenBy("TestNonTransactionalCarbonTableWithComplexType")
      .build()
    writer.write(records)
    writer.close()
    sql("DROP TABLE IF EXISTS sdkOutputTable")
    sql(
      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED AS carbondata LOCATION
         |'$writerPath' """.stripMargin)

    sql("select * from sdkOutputTable").collect()

    // TODO: Add a validation

    sql("DROP TABLE sdkOutputTable")
    // drop table should not delete the files
    cleanTestData()
  }

  // test multi level -- 4 levels [array of array of array of struct]
  test("test ComplexDataType projection for array of array of array of struct") {
    buildAvroTestDataMultiLevel4Type(false)
    assert(new File(writerPath).exists())
    sql("DROP TABLE IF EXISTS sdkOutputTable")
    sql(
      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED AS carbondata LOCATION
         |'$writerPath' """.stripMargin)
    checkAnswer(sql("select BuildNum[0][0][0].street from sdkOutputTable"),
      Seq(Row("abc"), Row("abc"), Row("abc")))
    checkAnswer(sql("select BuildNum[1][0][0].street from sdkOutputTable"),
      Seq(Row("abc2"), Row("abc2"), Row("abc2")))
    sql("DROP TABLE sdkOutputTable")
    // drop table should not delete the files
    cleanTestData()
  }

  def buildAvroTestDataMultiLevel6Type(isLocalDictionary: Boolean): Any = {
    FileUtils.deleteDirectory(new File(writerPath))
    buildAvroTestDataMultiLevel6(1, null, isLocalDictionary)
  }

  // test multi level -- 6 levels (struct nested five deep inside a record)
  def buildAvroTestDataMultiLevel6(rows: Int,
      options: util.Map[String, String],
      isLocalDictionary: Boolean): Any = {
    FileUtils.deleteDirectory(new File(writerPath))
    val mySchema =
      """ {
        |"type": "record",
        | "name": "UserInfo",
        | "namespace": "com.apache.schema.schemalevel6_struct",
        | "fields": [
        | {
        | "name": "username",
        | "type": "string",
        | "default": "NONE"
        | },
        | {
        | "name": "age",
        | "type": "int",
        | "default": -1
        | },
        | {
        | "name": "phone",
        | "type": "string",
        | "default": "NONE"
        | },
        | {
        | "name": "housenum",
        | "type": "string",
        | "default": "NONE"
        | },
        | {
        | "name": "address",
        | "type": {
        | "type": "record",
        | "name": "Mailing_Address",
        | "fields": [
        | {
        | "name": "Address_Detail",
        | "type": {
        | "type": "record",
        | "name": "Address_Detail",
        | "fields": [
        | {
        | "name": "Building_Detail",
        | "type": {
        | "type": "record",
        | "name": "Building_Address",
        | "fields": [
        | {
        | "name": "Society_name",
        | "type": "string"
        | },
        | {
        | "name": "building_no",
        | "type": "string"
        | },
        | {
        | "name": "house_no",
        | "type": "int"
        | },
        | {
        | "name": "Building_Type",
        | "type": {
        | "type": "record",
        | "name": "Building_Type",
        | "fields": [
        | {
        | "name":"Buildingname",
        | "type":"string"
        | },
        | {
        | "name":"buildingArea",
        | "type":"int"
        | },
        | {
        | "name":"Building_Criteria",
        | "type":{
        | "type":"record",
        | "name":"BuildDet",
        | "fields":[
        | {
        | "name":"f1",
        | "type":"int"
        | },
        | {
        | "name":"f2",
        | "type":"string"
        | },
        | {
        | "name":"BuildDetInner",
        | "type":
        | {
        | "type":"record",
        | "name":"BuildInner",
        | "fields":[
        | {
        | "name": "duplex",
        | "type": "boolean"
        | },
        | {
        | "name": "Price",
        | "type": "int"
        | },
        | {
        | "name": "TotalCost",
        | "type": "int"
        | },
        | {
        | "name": "Floor",
        | "type": "int"
        | },
        | {
        | "name": "PhoneNo",
        | "type": "long"
        | },
        | {
        | "name": "value",
        | "type": "string"
        | }
        | ]
        | }
        | }
        | ]
        | }
        | }
        | ]
        | }
        | }
        | ]
        | }
        | }
        | ]
        | }
        | }
        | ]
        |} """.stripMargin

    val json =
      """ {
        |"username": "DON",
        |"age": 21,
        |"phone": "9888",
        |"housenum": "44",
        |"address": {
        |"Address_Detail": {
        |"Building_Detail": {
        |"Society_name": "TTTT",
        |"building_no": "5",
        |"house_no": 78,
        |"Building_Type": {
        |"Buildingname": "Amaranthus",
        |"buildingArea": 34,
        |"Building_Criteria": {
        |"f1": 23,
        |"f2": "RRR",
        |"BuildDetInner": {
        |"duplex": true,
        |"Price": 3434,
        |"TotalCost": 7777,
        |"Floor": 4,
        |"PhoneNo": 5656,
        |"value":"Value"
        |}
        |}
        |}
        |}
        |}
        |}
        |} """.stripMargin
    WriteFilesWithAvroWriter(rows, mySchema, json, isLocalDictionary)
  }

  test("test ComplexDataType projection for struct of struct -6 levels") {
    buildAvroTestDataMultiLevel6Type(false)
    assert(new File(writerPath).exists())
    sql("DROP TABLE IF EXISTS sdkOutputTable")
    sql(
      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED AS carbondata LOCATION
         |'$writerPath' """.stripMargin)
    // scalastyle:off lineLength
    checkAnswer(sql("select * from sdkOutputTable"),
      Seq(Row("DON", 21, "9888", "44", Row(Row(Row("TTTT", "5", 78, Row("Amaranthus", 34,
        Row(23, "RRR", Row(true, 3434, 7777, 4, 5656, "Value")))))))))
    checkAnswer(sql("select address from sdkOutputTable"),
      Seq(Row(Row(Row(Row("TTTT",
        "5",
        78,
        Row("Amaranthus", 34, Row(23, "RRR", Row(true, 3434, 7777, 4, 5656, "Value")))))))))
    checkAnswer(sql("select address.Address_Detail from sdkOutputTable"),
      Seq(Row(Row(Row("TTTT",
        "5",
        78,
        Row("Amaranthus", 34, Row(23, "RRR", Row(true, 3434, 7777, 4, 5656, "Value"))))))))
    checkAnswer(sql("select address.Address_Detail.Building_Detail from sdkOutputTable"),
      Seq(Row(Row("TTTT",
        "5",
        78,
        Row("Amaranthus", 34, Row(23, "RRR", Row(true, 3434, 7777, 4, 5656, "Value")))))))
    checkAnswer(sql(
      "select address.Address_Detail.Building_Detail.Building_Type from sdkOutputTable"),
      Seq(Row(Row("Amaranthus", 34, Row(23, "RRR", Row(true, 3434, 7777, 4, 5656, "Value"))))))
    checkAnswer(sql(
      "select address.Address_Detail.Building_Detail.Building_Type.Building_Criteria from " +
      "sdkOutputTable"), Seq(Row(Row(23, "RRR", Row(true, 3434, 7777, 4, 5656, "Value")))))
    checkAnswer(sql(
      "select address.Address_Detail.Building_Detail.Building_Type.Building_Criteria" +
      ".BuildDetInner.duplex from sdkOutputTable"), Seq(Row(true)))
    checkAnswer(sql(
      "select address.Address_Detail.Building_Detail.Building_Type.Building_Criteria" +
      ".BuildDetInner.price from sdkOutputTable"), Seq(Row(3434)))
    checkAnswer(sql(
      "select address.Address_Detail.Building_Detail.Building_Type.Building_Criteria" +
      ".BuildDetInner.totalcost from sdkOutputTable"), Seq(Row(7777)))
    checkAnswer(sql(
      "select address.Address_Detail.Building_Detail.Building_Type.Building_Criteria" +
      ".BuildDetInner.floor from sdkOutputTable"), Seq(Row(4)))
    checkAnswer(sql(
      "select address.Address_Detail.Building_Detail.Building_Type.Building_Criteria" +
      ".BuildDetInner.phoneNo from sdkOutputTable"), Seq(Row(5656)))
    checkAnswer(sql(
      "select address.Address_Detail.Building_Detail.Building_Type.Building_Criteria" +
      ".BuildDetInner.value from sdkOutputTable"), Seq(Row("Value")))
    checkAnswer(sql("select address,address.Address_Detail from sdkOutputTable"),
      Seq(Row(Row(Row(Row("TTTT",
        "5",
        78,
        Row("Amaranthus", 34, Row(23, "RRR", Row(true, 3434, 7777, 4, 5656, "Value"))))))
        ,
        Row(Row("TTTT",
          "5",
          78,
          Row("Amaranthus", 34, Row(23, "RRR", Row(true, 3434, 7777, 4, 5656, "Value"))))))))
    checkAnswer(sql(
      "select address.Address_Detail.Building_Detail.Building_Type,address.Address_Detail" +
      ".Building_Detail.Building_Type.Building_Criteria from sdkOutputTable"),
      Seq(Row(Row("Amaranthus", 34, Row(23, "RRR", Row(true, 3434, 7777, 4, 5656, "Value"))),
        Row(23, "RRR", Row(true, 3434, 7777, 4, 5656, "Value")))))
    checkAnswer(sql(
      "select address.Address_Detail,address.Address_Detail.Building_Detail.Building_Type" +
      ".Building_Criteria from sdkOutputTable"),
      Seq(Row(Row(Row("TTTT",
        "5",
        78,
        Row("Amaranthus", 34, Row(23, "RRR", Row(true, 3434, 7777, 4, 5656, "Value"))))),
        Row(23, "RRR", Row(true, 3434, 7777, 4, 5656, "Value")))))
    checkAnswer(sql(
      "select address.Address_Detail,address.Address_Detail.Building_Detail.Society_name,address" +
      ".Address_Detail.Building_Detail.Building_Type.Building_Criteria.f1 from sdkOutputTable"),
      Seq(Row(Row(Row("TTTT",
        "5",
        78,
        Row("Amaranthus", 34, Row(23, "RRR", Row(true, 3434, 7777, 4, 5656, "Value"))))),
        "TTTT",
        23)))
    checkAnswer(sql(
      "select address.Address_Detail.Building_Detail.Society_name,address.Address_Detail" +
      ".Building_Detail.building_no from sdkOutputTable"),
      Seq(Row("TTTT", "5")))
    sql("select address.Address_Detail.Building_Detail.Society_name," +
        "address.Address_Detail.Building_Detail.building_no from sdkOutputTable " +
        "where address.Address_Detail.Building_Detail.Society_name ='TTTT'").collect()
    sql("DROP TABLE sdkOutputTable")
    // scalastyle:on lineLength
    cleanTestData()
  }
}
| zzcclp/carbondata | integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableWithComplexType.scala | Scala | apache-2.0 | 24,375 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600a.v3
import org.joda.time.LocalDate
import uk.gov.hmrc.cato.time.DateHelper
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600a.v3.retriever.CT600ABoxRetriever
/** CT600A v3 box LPQ07 ("When do you plan to file your return?").
  * Validation only applies when box LPQ04 is answered true: the date is then
  * mandatory and must fall between today and two years from today, inclusive.
  */
case class LPQ07(value: Option[LocalDate]) extends CtBoxIdentifier(name = "When do you plan to file your return?") with CtOptionalDate with Input with ValidatableBox[CT600ABoxRetriever] {

  override def validate(boxRetriever: CT600ABoxRetriever): Set[CtValidation] = {
    // failIf yields no errors unless LPQ04 is true (orFalse treats None as false).
    failIf(boxRetriever.lpq04().orFalse) {
      validateDateAsMandatory("LPQ07", this) ++
        validateDateAsBetweenInclusive("LPQ07", this, DateHelper.now(), DateHelper.now().plusYears(2))
    }
  }
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600a/v3/LPQ07.scala | Scala | apache-2.0 | 1,275 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.