code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/**
* Copyright (c) 2013, The National Archives <digitalpreservation@nationalarchives.gov.uk>
* http://www.nationalarchives.gov.uk
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package uk.gov.nationalarchives.csv.validator
import scalaz._, Scalaz._
import uk.gov.nationalarchives.csv.validator.schema.Optional
import uk.gov.nationalarchives.csv.validator.schema.Rule
import uk.gov.nationalarchives.csv.validator.schema.Schema
import uk.gov.nationalarchives.csv.validator.schema.Warning
import uk.gov.nationalarchives.csv.validator.metadata.Cell
import uk.gov.nationalarchives.csv.validator.metadata.Row
import scala.annotation.tailrec
// Validator that collects *all* validation failures across the whole file
// (rather than failing fast), accumulating per-row results with scalaz's
// applicative `sequence`.
trait AllErrorsMetaDataValidator extends MetaDataValidator {
// Validates every row drawn from the iterator and combines the per-row
// results into a single accumulated MetaDataValidation.
override def validateRows(rows: Iterator[Row], schema: Schema): MetaDataValidation[Any] = {
// Tail-recursively consumes the iterator, accumulating results in reverse
// and restoring the original row order at the end.
@tailrec
def validateRows(results: List[MetaDataValidation[Any]] = List.empty[MetaDataValidation[Any]]) : List[MetaDataValidation[Any]] = {
if(!rows.hasNext) {
results.reverse
} else {
val row = rows.next()
// NOTE(review): `Some(rows.hasNext)` is true when more rows remain, so this
// flag looks inverted relative to the name `mayBeLast` — confirm against
// MetaDataValidator.validateRow's expected semantics.
val result = validateRow(row, schema, Some(rows.hasNext))
validateRows(result :: results)
}
}
val v = validateRows()
v.sequence[MetaDataValidation, Any]
}
// Builds the validation for one row: validates each cell against its column
// definition, then sequences the per-cell results.
override protected def rules(row: Row, schema: Schema, mayBeLast: Option[Boolean] = None): MetaDataValidation[List[Any]] = {
// `lift` turns positional access into Option, so a missing cell yields None
// instead of throwing.
val cells: (Int) => Option[Cell] = row.cells.lift
val v = schema.columnDefinitions.zipWithIndex.map {
case (columnDefinition, columnIndex) =>
validateCell(columnIndex, cells, row, schema, mayBeLast)
}
v.sequence[MetaDataValidation, Any]
}
// Evaluates every rule of one column against one cell. An empty cell of an
// @optional column succeeds immediately; otherwise each rule failure is
// converted to a warning or an error depending on the column's directives.
override protected def rulesForCell(columnIndex: Int, row: Row, schema: Schema, mayBeLast: Option[Boolean] = None): MetaDataValidation[Any] = {
val columnDefinition = schema.columnDefinitions(columnIndex)
def isWarningDirective: Boolean = columnDefinition.directives.contains(Warning())
def isOptionDirective: Boolean = columnDefinition.directives.contains(Optional())
if(row.cells(columnIndex).value.trim.isEmpty && isOptionDirective) true.successNel
else columnDefinition.rules.map{rule =>
rule.evaluate(columnIndex, row, schema, mayBeLast)
}.map{ ruleResult:Rule#RuleValidation[Any] => {
if(isWarningDirective) toWarnings(ruleResult, row.lineNumber, columnIndex) else toErrors(ruleResult, row.lineNumber, columnIndex)
}}.sequence[MetaDataValidation, Any]
}
} | valydia/csv-validator | csv-validator-core/src/main/scala/uk/gov/nationalarchives/csv/validator/AllErrorsMetaDataValidator.scala | Scala | mpl-2.0 | 2,604 |
package org.lambdaunbound.ppt
// Header row of a whitespace-separated table: one string per column title.
case class TitleLine(line:Seq[String])
// Data row: each cell is Left(numeric value) when parseable as Double,
// otherwise Right(raw token).
case class DataLine(line:Seq[Either[Double,String]])
/** Parsing helpers for whitespace-separated title/data lines. */
object LineOps {

  /** Parses `s` as a Double, returning None when it is not a valid number.
    * Only NumberFormatException is caught, so fatal errors are no longer
    * swallowed (the original used an untyped catch-all `case _`). */
  def parseDouble(s: String): Option[Double] =
    try Some(s.toDouble) catch { case _: NumberFormatException => None }

  /** Splits a trimmed line on runs of whitespace into its tokens.
    * Regex fixed: the original literal matched a backslash followed by "s"
    * rather than the whitespace character class. */
  def processStringLine(s: String): Seq[String] =
    s.trim.split("\\s+").toSeq

  /** Splits a trimmed line on whitespace, representing each token as
    * Left(value) when numeric and Right(token) otherwise. */
  def processMixedLine(s: String): Seq[Either[Double, String]] =
    s.trim.split("\\s+").toSeq.map { w =>
      parseDouble(w) match {
        case Some(d) => Left(d)
        case None => Right(w)
      }
    }

  /** Builds a DataLine from a raw text line. */
  def createDataLine(s: String) = DataLine(processMixedLine(s))

  /** Builds a TitleLine from a raw text line. */
  def createTitleLine(s: String) = TitleLine(processStringLine(s))

  /** Predicate over DataLines testing column `index` with `f`;
    * non-numeric (Right) cells never match. */
  def createFilter(index: Int, f: Double => Boolean): (DataLine => Boolean) =
    { dl: DataLine => dl.line(index).fold(f, _ => false) }
}
/** A parsed table: a title row plus its data rows. */
case class DataSet(title: TitleLine, data: Seq[DataLine]) {
  /** Returns a copy of this data set keeping only the rows matching `f`. */
  def filter(f: DataLine => Boolean): DataSet = copy(data = data.filter(f))
}
/** An axis over one numeric column of a DataSet. Concrete axes override `index`. */
trait Axis {
  /** Column index this axis reads; must be overridden by implementations. */
  def index: Int = ???

  /** Normalises column `index` of `ds` into [0, 1].
    *
    * Fixes two bugs in the original:
    *  - `fold(x => x, ???)` evaluated `???` eagerly as an argument, so the
    *    method always threw NotImplementedError; non-numeric cells now raise
    *    a descriptive RuntimeException instead.
    *  - `data.map(_ - min / range)` parsed as `x - (min / range)`; the
    *    intended normalisation is `(x - min) / range`.
    *
    * NOTE: a constant column gives range 0 and the results are NaN/Infinity;
    * callers should avoid such columns (unchanged behaviour in that respect).
    */
  def transform(ds: DataSet): Seq[Double] = {
    val data = ds.data.map(_.line(index).fold(identity, s => sys.error(s"Non-numeric value '$s' in column $index")))
    val max = data.max
    val min = data.min
    val range = max - min
    data.map(x => (x - min) / range)
  }
}
object PPT {
// Entry point: reads the file named by the first command-line argument.
// NOTE(review): `using` is neither defined nor imported in this file and the
// resource block is empty — this object looks unfinished and will not compile
// as-is (its closing brace is also missing from this chunk of the file).
def main(args:Array[String]) {
val fileName = args(0)
using(new java.io.BufferedReader(new java.io.FileReader(fileName))){in =>
}
}
}
| ggovan/ppt | src/main/scala/org/lambdaunbound/ppt/DataSet.scala | Scala | gpl-2.0 | 1,306 |
/**
* Copyright 2013 wuhaixing (wuhaixing at gmail dot com) - weibo: @数据水墨
* qiuzhanghua (qiuzhanghua at gmail.com) - weibo: qiuzhanghua
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package securesocial.core.providers
import play.api.libs.ws.WSResponse
import securesocial.core._
import securesocial.core.services.{ CacheService, RoutesService }
import scala.concurrent.Future
/**
* A Weibo provider
*
*/
/**
 * OAuth2 authentication provider for Sina Weibo.
 */
class WeiboProvider(
routesService: RoutesService,
cacheService: CacheService,
client: OAuth2Client)
extends OAuth2Provider(routesService, client, cacheService) {
// Endpoint returning the authenticated user's profile, keyed by uid + token.
val GetAuthenticatedUser = "https://api.weibo.com/2/users/show.json?uid=%s&access_token=%s"
val AccessToken = "access_token"
// JSON field carrying an error message in Weibo responses.
val Message = "error"
val UId = "uid"
val Id = "idstr"
val Name = "name"
val AvatarUrl = "profile_image_url"
// Endpoint returning the authenticated user's e-mail address.
val GetUserEmail = "https://api.weibo.com/2/account/profile/email.json?access_token=%s"
val Email = "email"
override val id = WeiboProvider.Weibo
/**
 * Weibo's access-token response carries a `uid` field instead of a standard
 * `token_type`, so the uid is stored in OAuth2Info's tokenType slot.
 * Check http://open.weibo.com/wiki/OAuth2/access_token before changing this.
 */
override protected def buildInfo(response: WSResponse): OAuth2Info = {
val json = response.json
logger.debug("[securesocial] got json back [" + json + "]")
//UId occupied TokenType in the weibo.com provider
OAuth2Info(
(json \\ OAuth2Constants.AccessToken).as[String],
(json \\ UId).asOpt[String],
(json \\ OAuth2Constants.ExpiresIn).asOpt[Int],
(json \\ OAuth2Constants.RefreshToken).asOpt[String])
}
/**
 * Populates a BasicProfile with profile information fetched from Weibo.
 * Fails the returned Future with AuthenticationException when the uid is
 * missing or Weibo reports an error.
 *
 * @param info The OAuth2Info
 * @return the profile of the authenticated user
 */
def fillProfile(info: OAuth2Info): Future[BasicProfile] = {
// The Weibo uid was stashed in tokenType by buildInfo above.
val weiboUserId = info.tokenType.getOrElse {
logger.error("[securesocial] Can't found weiboUserId")
throw new AuthenticationException()
}
client.retrieveProfile(GetAuthenticatedUser.format(weiboUserId, info.accessToken)).flatMap { me =>
(me \\ Message).asOpt[String] match {
case Some(msg) =>
logger.error("[securesocial] error retrieving profile information from Weibo. Message = %s".format(msg))
throw new AuthenticationException()
case _ =>
val userId = (me \\ Id).as[String]
val displayName = (me \\ Name).asOpt[String]
val avatarUrl = (me \\ AvatarUrl).asOpt[String]
getEmail(info.accessToken).map { email =>
BasicProfile(id, userId, None, None, displayName, email, avatarUrl, authMethod, None, Some(info))
}
}
} recover {
case e =>
logger.error("[securesocial] error retrieving profile information from weibo", e)
throw new AuthenticationException()
}
}
// Fetches the user's e-mail address; resolves to None (rather than failing)
// when Weibo reports an error, the field is empty, or the request throws.
def getEmail(accessToken: String): Future[Option[String]] = {
client.httpService.url(GetUserEmail.format(accessToken)).get().map { response =>
val me = response.json
(me \\ Message).asOpt[String] match {
case Some(msg) =>
logger.error("[securesocial] error retrieving email information from Weibo. Message = %s".format(msg))
None
case _ =>
(me \\ Email).asOpt[String].filter(!_.isEmpty)
}
} recover {
case e: Exception =>
logger.error("[securesocial] error retrieving profile information from weibo", e)
None
}
}
}
// Companion holding the provider's registration key.
object WeiboProvider {
val Weibo = "weibo"
}
| jaliss/securesocial | module-code/app/securesocial/core/providers/WeiboProvider.scala | Scala | apache-2.0 | 4,194 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.examples.scala.graph
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.api.scala._
import org.apache.flink.examples.java.graph.util.ConnectedComponentsData
import org.apache.flink.util.Collector
// Naive transitive closure of a directed edge set using Flink's bulk
// iteration: every round joins the known paths with the edge set to extend
// them by one hop, and terminates once no new path is produced.
object TransitiveClosureNaive {
def main (args: Array[String]): Unit = {
val params: ParameterTool = ParameterTool.fromArgs(args)
// set up execution environment
val env = ExecutionEnvironment.getExecutionEnvironment
// make parameters available in the web interface
env.getConfig.setGlobalJobParameters(params)
// Edge input: either a space-delimited file of (src, dst) pairs given via
// --edges, or the bundled example data set.
val edges =
if (params.has("edges")) {
env.readCsvFile[(Long, Long)](
filePath = params.get("edges"),
fieldDelimiter = " ",
includedFields = Array(0, 1))
.map { x => (x._1, x._2)}
} else {
println("Executing TransitiveClosure example with default edges data set.")
println("Use --edges to specify file input.")
val edgeData = ConnectedComponentsData.EDGES map {
case Array(x, y) => (x.asInstanceOf[Long], y.asInstanceOf[Long])
}
env.fromCollection(edgeData)
}
val maxIterations = params.getInt("iterations", 10)
val paths = edges.iterateWithTermination(maxIterations) { prevPaths: DataSet[(Long, Long)] =>
// Extend each known path (a, b) with each edge (b, c) to get (a, c),
// keep the old paths, and de-duplicate via groupBy + reduce.
val nextPaths = prevPaths
.join(edges)
.where(1).equalTo(0) {
(left, right) => (left._1,right._2)
}.withForwardedFieldsFirst("_1").withForwardedFieldsSecond("_2")
.union(prevPaths)
.groupBy(0, 1)
.reduce((l, r) => l).withForwardedFields("_1; _2")
// Termination criterion: paths present in nextPaths but not in prevPaths;
// the iteration stops as soon as this data set is empty.
val terminate = prevPaths
.coGroup(nextPaths)
.where(0).equalTo(0) {
(prev, next, out: Collector[(Long, Long)]) => {
val prevPaths = prev.toSet
for (n <- next)
if (!prevPaths.contains(n)) out.collect(n)
}
}.withForwardedFieldsSecond("*")
(nextPaths, terminate)
}
if (params.has("output")) {
paths.writeAsCsv(params.get("output"), "\\n", " ")
env.execute("Scala Transitive Closure Example")
} else {
println("Printing result to stdout. Use --output to specify output path.")
paths.print()
}
}
}
| WangTaoTheTonic/flink | flink-examples/flink-examples-batch/src/main/scala/org/apache/flink/examples/scala/graph/TransitiveClosureNaive.scala | Scala | apache-2.0 | 3,073 |
package gie
import com.github.nscala_time.time.Imports.DateTime
//import scala.slick.driver.H2Driver.simple._
import scala.slick.lifted.MappedTypeMapper.base
import scala.slick.lifted.TypeMapper
//////////////////////////////////////////////////////////////////////////////////
// Slick column mapping between Joda-Time DateTime and java.sql.Timestamp.
object DateTimeMapper {
// Convenience conversion. NOTE(review): duplicates the forward direction of
// the implicit mapper below and appears unused here — confirm before removing.
def map(t:DateTime) = new java.sql.Timestamp(t.getMillis)
// Implicit mapper so DateTime columns can be used directly in Slick tables;
// stored as a SQL TIMESTAMP via the epoch-millis value.
implicit val date2dateTime = base[DateTime, java.sql.Timestamp] (
dateTime => new java.sql.Timestamp(dateTime.getMillis),
date => new DateTime(date)
)
}
//////////////////////////////////////////////////////////////////////////////////
| igorge/gbb | src/main/scala/code/db/db_joda.scala | Scala | gpl-2.0 | 614 |
package org.jetbrains.plugins.scala.codeInsight.implicits
import java.awt.event._
import java.awt.{Cursor, Point}
import com.intellij.codeInsight.hint.{HintManager, HintManagerImpl, HintUtil}
import com.intellij.openapi.command.CommandProcessor
import com.intellij.openapi.components.ProjectComponent
import com.intellij.openapi.editor.event._
import com.intellij.openapi.editor.{Editor, EditorFactory, Inlay}
import com.intellij.openapi.project.Project
import com.intellij.openapi.startup.StartupManager
import com.intellij.openapi.util.{Key, SystemInfo}
import com.intellij.ui.{AncestorListenerAdapter, LightweightHint}
import com.intellij.util.ui.{JBUI, UIUtil}
import javax.swing.SwingUtilities
import javax.swing.event.AncestorEvent
import org.jetbrains.plugins.scala.codeInsight.implicits.MouseHandler.EscKeyListenerKey
import org.jetbrains.plugins.scala.components.HighlightingAdvisor
import org.jetbrains.plugins.scala.extensions.ObjectExt
import org.jetbrains.plugins.scala.settings.ScalaProjectSettings
import scala.collection.JavaConverters._
// Project component that drives mouse interaction with implicit-hint inlays:
// Ctrl/Cmd-hover turns hint text into a hyperlink, click navigates or expands
// a hint, and matching parentheses inside a hint line are highlighted.
class MouseHandler(project: Project,
startupManager: StartupManager,
editorFactory: EditorFactory) extends ProjectComponent {
// Currently hyperlinked (inlay, text) pair, if any.
private var activeHyperlink = Option.empty[(Inlay, Text)]
// Pair of matching-bracket texts currently highlighted.
private var highlightedMatches = Set.empty[(Inlay, Text)]
private var hyperlinkTooltip = Option.empty[LightweightHint]
private var errorTooltip = Option.empty[LightweightHint]
// Records the press location and dispatches clicks: Ctrl/Cmd+left-click
// navigates, plain left-click expands a collapsible hint, middle-click
// navigates without requiring the modifier.
private val mousePressListener = new EditorMouseAdapter {
override def mousePressed(e: EditorMouseEvent): Unit = {
if (handlingRequired) {
MouseHandler.mousePressLocation = e.getMouseEvent.getPoint
}
}
override def mouseClicked(e: EditorMouseEvent): Unit = {
if (handlingRequired && !e.isConsumed && project.isInitialized && !project.isDisposed) {
val editor = e.getEditor
val event = e.getMouseEvent
if (SwingUtilities.isLeftMouseButton(event)) {
if (SystemInfo.isMac && event.isMetaDown || event.isControlDown) {
hyperlinkAt(editor, event.getPoint).foreach { case (_, text) =>
e.consume()
deactivateActiveHyperlink(editor)
navigateTo(text)
}
} else {
// Plain click: expand the hint under the cursor in place.
expandableAt(editor, event.getPoint).foreach { case (inlay, text) =>
inlay.getRenderer.asOptionOf[TextRenderer].foreach { renderer =>
renderer.expand(text)
inlay.updateSize()
// Esc collapses again, unless hints are globally expanded.
if (!ImplicitHints.expanded) {
addEscKeyListenerTo(editor)
}
}
}
}
} else if (SwingUtilities.isMiddleMouseButton(event) && activeHyperlink.isEmpty) {
hyperlinkAt(editor, event.getPoint).foreach { case (_, text) =>
e.consume()
navigateTo(text)
}
} else {
deactivateActiveHyperlink(editor)
}
}
}
}
// Tracks hover: with Ctrl/Cmd held, activates hyperlinks and bracket-match
// highlighting; otherwise shows a hint's error tooltip when present.
private val mouseMovedListener = new EditorMouseMotionAdapter {
override def mouseMoved(e: EditorMouseEvent): Unit = {
if (handlingRequired && !e.isConsumed && project.isInitialized && !project.isDisposed) {
val textAtPoint = textAt(e.getEditor, e.getMouseEvent.getPoint)
if (SystemInfo.isMac && e.getMouseEvent.isMetaDown || e.getMouseEvent.isControlDown) {
textAtPoint match {
case Some((inlay, text)) if text.navigatable.isDefined =>
if (!activeHyperlink.contains((inlay, text))) {
deactivateActiveHyperlink (e.getEditor)
activateHyperlink(e.getEditor, inlay, text, e.getMouseEvent)
}
case _ =>
deactivateActiveHyperlink(e.getEditor)
}
textAtPoint match {
case Some((inlay, text)) =>
highlightMatches(e.getEditor, inlay, text)
case None =>
clearHighlightedMatches()
}
} else {
textAtPoint.foreach { case (_, text) =>
if (text.errorTooltip.nonEmpty && !errorTooltip.exists(_.isVisible)) {
errorTooltip = text.errorTooltip.map(showTooltip(e.getEditor, e.getMouseEvent, _))
errorTooltip.foreach(_.addHintListener(_ => errorTooltip = None))
}
}
deactivateActiveHyperlink(e.getEditor)
}
}
}
}
override def projectOpened(): Unit = {
val multicaster = editorFactory.getEventMulticaster
multicaster.addEditorMouseListener(mousePressListener, project)
multicaster.addEditorMouseMotionListener(mouseMovedListener, project)
}
// Unregisters listeners and drops all mutable UI state.
override def projectClosed(): Unit = {
val multicaster = editorFactory.getEventMulticaster
multicaster.removeEditorMouseListener(mousePressListener)
multicaster.removeEditorMouseMotionListener(mouseMovedListener)
activeHyperlink = None
highlightedMatches = Set.empty
hyperlinkTooltip = None
errorTooltip = None
}
// Mouse handling is needed when implicit hints are shown, or when
// "not found implicit arguments" highlighting is enabled in the settings.
private def handlingRequired = ImplicitHints.enabled ||
(HighlightingAdvisor.getInstance(project).enabled &&
ScalaProjectSettings.getInstance(project).isShowNotFoundImplicitArguments)
// Turns the given hint text into a hyperlink (hand cursor + optional
// tooltip); a temporary key listener deactivates it on any key change.
private def activateHyperlink(editor: Editor, inlay: Inlay, text: Text, event: MouseEvent): Unit = {
text.hyperlink = true
activeHyperlink = Some(inlay, text)
inlay.repaint()
UIUtil.setCursor(editor.getContentComponent, Cursor.getPredefinedCursor(Cursor.HAND_CURSOR))
if (!hyperlinkTooltip.exists(_.isVisible)) {
hyperlinkTooltip = text.tooltip.map(showTooltip(editor, event, _))
}
editor.getContentComponent.addKeyListener(new KeyAdapter {
override def keyPressed(keyEvent: KeyEvent): Unit = {
// Why, in Windows, Control key press events are generated on mouse movement?
if (keyEvent.getKeyCode != KeyEvent.VK_CONTROL) {
handle()
}
}
override def keyReleased(keyEvent: KeyEvent): Unit = handle()
private def handle(): Unit = {
editor.getContentComponent.removeKeyListener(this)
deactivateActiveHyperlink(editor)
}
})
}
private def deactivateActiveHyperlink(editor: Editor): Unit = {
activeHyperlink.foreach { case (inlay, text) =>
text.hyperlink = false
inlay.repaint()
editor.getContentComponent.setCursor(Cursor.getPredefinedCursor(Cursor.TEXT_CURSOR))
}
activeHyperlink = None
hyperlinkTooltip.foreach(_.hide())
hyperlinkTooltip = None
}
// Navigates to the text's target inside a command, as required for actions
// that change editors.
private def navigateTo(text: Text): Unit = {
CommandProcessor.getInstance.executeCommand(project,
() => text.navigatable.filter(_.canNavigate).foreach(_.navigate(true)), null, null)
}
private def expandableAt(editor: Editor, point: Point): Option[(Inlay, Text)] = textAt(editor, point).filter {
case (_, text) => text.expansion.isDefined
}
private def hyperlinkAt(editor: Editor, point: Point): Option[(Inlay, Text)] = textAt(editor, point).filter {
case (_, text) => text.navigatable.isDefined
}
// Resolves the hint-text part (if any) under the given editor point.
private def textAt(editor: Editor, point: Point): Option[(Inlay, Text)] =
Option(editor.getInlayModel.getElementAt(point)).flatMap { inlay =>
inlay.getRenderer.asOptionOf[TextRenderer].flatMap { renderer =>
val inlayPoint = editor.visualPositionToXY(inlay.getVisualPosition)
renderer.textAt(editor, point.x - inlayPoint.x).map((inlay, _))
}
}
// Installs a per-editor Esc listener (at most one, tracked via user data)
// that collapses expanded hints; it cleans itself up when the editor's
// component is removed from the hierarchy.
private def addEscKeyListenerTo(editor: Editor): Unit = {
if (editor.getUserData(EscKeyListenerKey) == null) {
val keyListener = new KeyAdapter {
override def keyTyped(keyEvent: KeyEvent): Unit = {
if (keyEvent.getKeyChar == KeyEvent.VK_ESCAPE) {
ImplicitHints.collapseIn(editor)
}
}
}
val component = editor.getContentComponent
component.addKeyListener(keyListener)
editor.putUserData(EscKeyListenerKey, keyListener)
component.addAncestorListener(new AncestorListenerAdapter {
override def ancestorRemoved(event: AncestorEvent): Unit = {
component.removeKeyListener(keyListener)
editor.putUserData(EscKeyListenerKey, null)
component.removeAncestorListener(this)
}
})
}
}
// Shows a lightweight hint above the mouse position, horizontally anchored
// to the pointer's on-screen x coordinate.
private def showTooltip(editor: Editor, e: MouseEvent, text: String): LightweightHint = {
val hint = {
val label = HintUtil.createInformationLabel(text)
label.setBorder(JBUI.Borders.empty(6, 6, 5, 6))
new LightweightHint(label)
}
val constraint = HintManager.ABOVE
val point = {
val p = HintManagerImpl.getHintPosition(hint, editor,
editor.xyToVisualPosition(e.getPoint), constraint)
p.x = e.getXOnScreen - editor.getContentComponent.getTopLevelAncestor.getLocationOnScreen.x
p
}
val manager = HintManagerImpl.getInstanceImpl
manager.showEditorHint(hint, editor, point,
HintManager.HIDE_BY_ANY_KEY | HintManager.HIDE_BY_TEXT_CHANGE | HintManager.HIDE_BY_SCROLLING, 0, false,
HintManagerImpl.createHintHint(editor, point, hint, constraint).setContentActive(false))
hint
}
// Highlights the text and its matching bracket (if found on the same line);
// a temporary key listener clears the highlight on key release.
private def highlightMatches(editor: Editor, inlay: Inlay, text: Text): Unit = {
findPairFor(editor, (inlay, text)) match {
case Some((pairInlay, pairText)) =>
val matches = Set((inlay, text), (pairInlay, pairText))
if (highlightedMatches != matches) {
clearHighlightedMatches()
text.highlighted = true
pairText.highlighted = true
inlay.repaint()
pairInlay.repaint()
highlightedMatches = matches
editor.getContentComponent.addKeyListener(new KeyAdapter {
override def keyReleased(keyEvent: KeyEvent): Unit = {
editor.getContentComponent.removeKeyListener(this)
clearHighlightedMatches()
}
})
}
case _ =>
clearHighlightedMatches()
}
}
// Searches the element's document line for the matching "(" / ")" among all
// inlay text parts on that line.
private def findPairFor(editor: Editor, element: (Inlay, Text)): Option[(Inlay, Text)] = {
val (lineStart, lineEnd) = {
val line = editor.getDocument.getLineNumber(element._1.getOffset)
(editor.getDocument.getLineStartOffset(line), editor.getDocument.getLineEndOffset(line))
}
val elements = editor.getInlayModel.getInlineElementsInRange(lineStart, lineEnd).asScala.toSeq.flatMap { inlay =>
inlay.getRenderer.asOptionOf[TextRenderer].toSeq.flatMap(_.parts.map((inlay, _)))
}
pairFor[(Inlay, Text)](element, elements, _._2.string == "(", _._2.string == ")")
}
private def clearHighlightedMatches(): Unit = {
highlightedMatches.foreach { case (inlay, text) =>
text.highlighted = false
inlay.repaint()
}
highlightedMatches = Set.empty
}
}
// Companion holding cross-editor state for MouseHandler.
private object MouseHandler {
// User-data key under which each editor's Esc listener is stored, so that it
// can be looked up and removed later.
private val EscKeyListenerKey: Key[KeyListener] = Key.create[KeyListener]("SCALA_IMPLICIT_HINTS_KEY_LISTENER")
// Last mouse-press location; written by mousePressed when handling is active.
var mousePressLocation: Point = new Point(0, 0)
// Removes the Esc listener from every currently open editor.
def removeEscKeyListeners(): Unit = {
EditorFactory.getInstance.getAllEditors.foreach(removeEscKeyListenerFrom)
}
private def removeEscKeyListenerFrom(editor: Editor): Unit = {
Option(editor.getUserData(MouseHandler.EscKeyListenerKey)).foreach { listener =>
editor.getContentComponent.removeKeyListener(listener)
editor.putUserData(MouseHandler.EscKeyListenerKey, null)
}
}
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/codeInsight/implicits/MouseHandler.scala | Scala | apache-2.0 | 11,352 |
package fr.renoux.gaston.engine.assignment
import com.typesafe.scalalogging.Logger
import fr.renoux.gaston.model._
import fr.renoux.gaston.util.Context
import fr.renoux.gaston.util.Context.chrono
import scala.annotation.tailrec
import scala.collection.concurrent.TrieMap
import scala.collection.immutable.Queue
import scala.collection.mutable
import scala.util.Random
/**
* Improves an existing Schedule by moving persons around. Does not reschedule topics, or remove them.
*/
/**
 * Improves an existing Schedule by moving persons around. Does not reschedule topics, or remove them.
 */
final class AssignmentImprover(implicit private val problem: Problem, private val ctx: Context) {
// Memoises the improved schedule per planning. NOTE(review): both caches grow
// without bound for the lifetime of this instance — confirm that is intended.
private val cache: mutable.Map[Schedule.Planning, Schedule] = TrieMap[Schedule.Planning, Schedule]()
// Memoises fully-perfected slot schedules by their topic set.
private val slotCache: mutable.Map[Set[Topic], SlotSchedule] = TrieMap[Set[Topic], SlotSchedule]()
private val log = Logger[AssignmentImprover]
val defaultRoundsCount = 1000
/** Main method. Returns a schedule that's better than the initial one. Ends either because the schedule can't be
* perfected any more or because the limit number of rounds has been reached. */
def improve(scoredSchedule: Schedule, rounds: Int = defaultRoundsCount)(implicit rand: Random): Schedule =
chrono("PersonPlacementImprover > improve") {
cache.getOrElseUpdate(scoredSchedule.planning, recImprove(scoredSchedule, rounds))
}
/** Recursive method improving the schedule. Works a bit on a slot before getting to the next one (slotRoundsLimit is
* the parameter controlling how much we work on a single slot before going on, so that if we hit the global limit we
* had a good pass at all slots anyway). */
@tailrec
private def recImprove(
schedule: Schedule,
maxRounds: Int,
slots: Queue[Slot] = Queue(problem.slotsList: _*),
slotRoundsLimit: Int = 1000
)(implicit rand: Random): Schedule =
if (maxRounds == 0) {
log.warn("Stopping improvement because max number of rounds was reached")
schedule
} else if (slots.isEmpty) {
log.debug(s"Stopping improvement because all slots are perfect ($maxRounds rounds left)")
schedule
} else {
val (slot, slotsTail) = slots.dequeue
val slotSchedule = schedule.on(slot)
slotCache.get(slotSchedule.topicsSet) match {
case Some(ss) =>
recImprove(schedule.set(ss), maxRounds - 1, slotsTail) // slot read from the cache, go to the next one
case None => goodMoveOnSlot(schedule, slot) match {
case None =>
/* can't improve this slot any more ! Store in cache, then go to next slot */
slotCache.update(slotSchedule.topicsSet, slotSchedule)
recImprove(schedule, maxRounds - 1, slotsTail)
case Some(candidate) =>
/* The slot was perfected! If there are rounds left stay on the same slot, otherwise move to the next one */
if (slotRoundsLimit > 0) recImprove(candidate, maxRounds - 1, slotsTail.enqueue(slot), slotRoundsLimit - 1)
else recImprove(candidate, maxRounds - 1, slotsTail.enqueue(slot))
}
}
}
/** Returns the first move or swap it finds that makes the schedule better, or None if there is nothing to do on that
* slot anymore. */
private def goodMoveOnSlot(currentSchedule: Schedule, slot: Slot)(implicit rand: Random): Option[Schedule] =
chrono("PersonPlacementImprover > improve > goodMoveOnSlot") {
val slotSchedule = currentSchedule.on(slot)
// Lazily shuffled so candidates are explored in random order, but only
// evaluated when actually needed (all are lazy views below).
lazy val records: Iterable[Record] = rand.shuffle(slotSchedule.records)
lazy val recordsRemovable: Iterable[Record] = rand.shuffle(slotSchedule.recordsThatCanRemovePersons)
lazy val recordsAddable: Iterable[Record] = rand.shuffle(slotSchedule.recordsThatCanAddPersons)
/* All schedules on which we swapped two persons */
lazy val swappedSchedules = for {
r1 <- records.view
r2 <- records.view if r1 < r2 // avoiding duplicates (cases where we just swap r1 and r2)
t1 = r1.topic
t2 = r2.topic
p1 <- (r1.optionalPersons -- t2.forbidden).view
p2 <- (r2.optionalPersons -- t1.forbidden).view
scoreImprovement = currentSchedule.deltaScoreIfSwapPerson(slot, (t1, p1), (t2, p2))
if scoreImprovement.value > 0
/*_ = {
if (math.abs(improvedSchedule.score.value - currentSchedule.score.value - scoreImprovement.value) < 0.01) ()
else throw new IllegalStateException(s"${improvedSchedule.score} - ${currentSchedule.score} <> $scoreImprovement")
}*/
} yield currentSchedule.swapPersons(slot, (t1, p1), (t2, p2))
/* All schedules on which we moved one person from one topic to another */
lazy val movedSchedules = for {
r1 <- recordsRemovable.view
r2 <- recordsAddable.view if r1 != r2
t1 = r1.topic
t2 = r2.topic
p <- (r1.optionalPersons -- t2.forbidden).view
improvedSchedule = currentSchedule.movePerson(slot, t1, t2, p)
if improvedSchedule.score > currentSchedule.score
} yield improvedSchedule
// Swaps are tried first; moves only when no improving swap exists.
swappedSchedules.headOption orElse movedSchedules.headOption
}
}
| gaelrenoux/gaston | src/main/scala/fr/renoux/gaston/engine/assignment/AssignmentImprover.scala | Scala | apache-2.0 | 5,116 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.tradingpremises
import jto.validation.{From, Rule, Write}
import jto.validation.forms.UrlFormEncoded
import play.api.libs.json.Json
// Answer to the "is this the correct address?" question for a trading premises.
case class ConfirmAddress(confirmAddress: Boolean)
object ConfirmAddress {
// JSON (de)serialisation.
implicit val formats = Json.format[ConfirmAddress]
import utils.MappingUtils.Implicits._
// Form binding: requires the "confirmAddress" boolean field, reporting the
// keyed message when it is missing or invalid.
implicit val formRule: Rule[UrlFormEncoded, ConfirmAddress] =
From[UrlFormEncoded] { __ =>
import jto.validation.forms.Rules._
(__ \\ "confirmAddress").read[Boolean].withMessage("error.required.tp.confirm.address") map ConfirmAddress.apply
}
// Form unbinding: writes the boolean back as a single-valued form field.
implicit val formWrites: Write[ConfirmAddress, UrlFormEncoded] =
Write {
case ConfirmAddress(b) =>
Map("confirmAddress" -> Seq(b.toString))
}
} | hmrc/amls-frontend | app/models/tradingpremises/ConfirmAddress.scala | Scala | apache-2.0 | 1,356 |
package com.hj.examples
import com.hj.constant.Const
import org.apache.spark.{SparkConf, SparkContext}
/**
 * Applies the RDFS7 entailment rule with Spark:
 *   (s p o) and (p rdfs:subPropertyOf q)  =>  (s q o)
 */
object RDFS7 {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("RDFS7").setMaster("local[2]")
    val context = new SparkContext(sparkConf)

    // Parse each "s p o" input line into a triple.
    val triples = context.textFile("input/RDFS7.in").map { line =>
      val parts = line.split(" ")
      (parts(0), parts(1), parts(2))
    }

    // (p, q) pairs taken from the rdfs:subPropertyOf statements.
    val subPropertyPairs = triples
      .filter { case (_, p, _) => p == Const.RDFS_SUBPROPERTY_OF }
      .map { case (p, _, q) => (p, q) }

    // Key every triple by its predicate, join with the sub-property pairs,
    // and emit the entailed triple (s q o).
    val inferred = triples
      .map { case (s, p, o) => (p, (s, o)) }
      .join(subPropertyPairs)
      .map { case (_, ((s, o), q)) => (s, q, o) }

    inferred.foreach(t => println(t))
  }
}
| huangjuegeek/SparkSRE | src/main/scala/com/hj/examples/RDFS7.scala | Scala | apache-2.0 | 770 |
package is.hail.annotations
import java.io.{ObjectInputStream, ObjectOutputStream}
import com.esotericsoftware.kryo.{Kryo, KryoSerializable}
import com.esotericsoftware.kryo.io.{Input, Output}
import is.hail.types.virtual._
import is.hail.types.physical._
import is.hail.utils._
import is.hail.variant.Locus
import org.apache.spark.sql.Row
import sun.reflect.generics.reflectiveObjects.NotImplementedException
// Marker mixin that makes Kryo serialization of the mixing class fail fast:
// region-backed values must never be serialized with Kryo.
// NOTE(review): this throws sun.reflect...NotImplementedException (a
// JDK-internal class); scala.NotImplementedError (???) would be conventional,
// but changing it would alter the exception type observed by callers.
trait UnKryoSerializable extends KryoSerializable {
def write(kryo: Kryo, output: Output): Unit = {
throw new NotImplementedException()
}
def read(kryo: Kryo, input: Input): Unit = {
throw new NotImplementedException()
}
}
// IndexedSeq view over a region-backed array value: elements are decoded
// lazily from the region on every access (no caching).
class UnsafeIndexedSeq(
val t: PContainer,
val region: Region, val aoff: Long) extends IndexedSeq[Annotation] with UnKryoSerializable {
// Element count, read once from the array header at `aoff`.
val length: Int = t.loadLength(aoff)
def apply(i: Int): Annotation = {
if (i < 0 || i >= length)
throw new IndexOutOfBoundsException(i.toString)
// Missing (undefined) elements decode to null.
if (t.isElementDefined(aoff, i)) {
UnsafeRow.read(t.elementType, region, t.loadElement(aoff, length, i))
} else
null
}
override def toString: String = s"[${this.mkString(",")}]"
}
// Decoders from region-encoded physical values to JVM Annotation values.
object UnsafeRow {
def readBinary(boff: Long, t: PBinary): Array[Byte] =
t.loadBytes(boff)
def readArray(t: PContainer, region: Region, aoff: Long): IndexedSeq[Any] =
new UnsafeIndexedSeq(t, region, aoff)
def readBaseStruct(t: PBaseStruct, region: Region, offset: Long): UnsafeRow =
new UnsafeRow(t, region, offset)
def readString(boff: Long, t: PString): String =
new String(readBinary(boff, t.binaryRepresentation))
def readLocus(offset: Long, t: PLocus): Locus = {
Locus(
t.contig(offset),
t.position(offset))
}
def readNDArray(offset: Long, region: Region, nd: PNDArray): UnsafeNDArray = {
new UnsafeNDArray(nd, region, offset)
}
def readAnyRef(t: PType, region: Region, offset: Long): AnyRef = read(t, region, offset).asInstanceOf[AnyRef]
// Dispatches on the physical type to decode the value encoded at `offset`.
// Container and struct types are wrapped in lazy views rather than copied.
def read(t: PType, region: Region, offset: Long): Any = {
t match {
case _: PBoolean =>
Region.loadBoolean(offset)
case _: PInt32 | _: PCall => Region.loadInt(offset)
case _: PInt64 => Region.loadLong(offset)
case _: PFloat32 => Region.loadFloat(offset)
case _: PFloat64 => Region.loadDouble(offset)
case t: PArray =>
readArray(t, region, offset)
case t: PSet =>
readArray(t, region, offset).toSet
case t: PString => readString(offset, t)
case t: PBinary => readBinary(offset, t)
// Dicts are encoded as arrays of (key, value) structs.
case td: PDict =>
val a = readArray(td, region, offset)
a.asInstanceOf[IndexedSeq[Row]].map(r => (r.get(0), r.get(1))).toMap
case t: PBaseStruct => readBaseStruct(t, region, offset)
case x: PLocus => readLocus(offset, x)
case x: PInterval =>
// Missing endpoints decode to null.
val start: Annotation =
if (x.startDefined(offset))
read(x.pointType, region, x.loadStart(offset))
else
null
val end =
if (x.endDefined(offset))
read(x.pointType, region, x.loadEnd(offset))
else
null
val includesStart = x.includesStart(offset)
val includesEnd = x.includesEnd(offset)
Interval(start, end, includesStart, includesEnd)
case nd: PNDArray => {
readNDArray(offset, region, nd)
}
}
}
}
// Spark Row view over a region-encoded struct value. Mutable `region` and
// `offset` allow a single instance to be re-pointed at successive values.
// Java serialization and Kryo are both disabled (see below / mixin).
class UnsafeRow(val t: PBaseStruct,
var region: Region, var offset: Long) extends Row with UnKryoSerializable {
// Renders structs as {name: value, ...} and tuples as (value, ...);
// any other PBaseStruct falls back to the default Row rendering.
override def toString: String = {
if (t.isInstanceOf[PStruct]) {
val sb = new StringBuilder()
var i = 0
sb += '{'
while (i < t.size) {
if (i != 0) {
sb ++= ", "
}
sb ++= t.fieldNames(i)
sb ++= ": "
val x = get(i)
sb ++= (if (x == null) "null" else x.toString())
i += 1
}
sb += '}'
sb.toString
} else if (t.isInstanceOf[PTuple]) {
val sb = new StringBuilder()
var i = 0
sb += '('
while (i < t.size) {
if (i != 0) {
sb ++= ", "
}
val x = get(i)
sb ++= (if (x == null) "null" else x.toString())
i += 1
}
sb += ')'
sb.toString
} else {
super.toString
}
}
def this(t: PBaseStruct, rv: RegionValue) = this(t, rv.region, rv.offset)
def this(t: PBaseStruct) = this(t, null, 0)
def this() = this(null, null, 0)
// Re-points this row at another encoded value without reallocating.
def set(newRegion: Region, newOffset: Long) {
region = newRegion
offset = newOffset
}
def set(rv: RegionValue): Unit = set(rv.region, rv.offset)
def length: Int = t.size
// Primitive getters below require the field to be present.
private def assertDefined(i: Int) {
if (isNullAt(i))
throw new NullPointerException(s"null value at index $i")
}
// Generic accessor: missing fields yield null; present fields are decoded
// lazily via UnsafeRow.read.
def get(i: Int): Any = {
if (isNullAt(i))
null
else
UnsafeRow.read(t.types(i), region, t.loadField(offset, i))
}
// Shallow copy: shares the same region and offset.
def copy(): Row = new UnsafeRow(t, region, offset)
def pretty(): String = Region.pretty(t, offset)
override def getInt(i: Int): Int = {
assertDefined(i)
Region.loadInt(t.loadField(offset, i))
}
override def getLong(i: Int): Long = {
assertDefined(i)
Region.loadLong(t.loadField(offset, i))
}
override def getFloat(i: Int): Float = {
assertDefined(i)
Region.loadFloat(t.loadField(offset, i))
}
override def getDouble(i: Int): Double = {
assertDefined(i)
Region.loadDouble(t.loadField(offset, i))
}
override def getBoolean(i: Int): Boolean = {
assertDefined(i)
Region.loadBoolean(t.loadField(offset, i))
}
override def getByte(i: Int): Byte = {
assertDefined(i)
Region.loadByte(t.loadField(offset, i))
}
override def isNullAt(i: Int): Boolean = {
if (i < 0 || i >= t.size)
throw new IndexOutOfBoundsException(i.toString)
!t.isFieldDefined(offset, i)
}
// Java serialization is deliberately disabled: region-backed values cannot
// be serialized safely.
private def writeObject(s: ObjectOutputStream): Unit = {
throw new NotImplementedException()
}
private def readObject(s: ObjectInputStream): Unit = {
throw new NotImplementedException()
}
}
object SafeRow {
  /**
   * Deep-copies the region-backed struct at `off` into an ordinary,
   * garbage-collected Row.
   */
  def apply(t: PBaseStruct, off: Long): Row = {
    Annotation.copy(t.virtualType, new UnsafeRow(t, null, off)).asInstanceOf[Row]
  }

  // NOTE(review): rv.region is deliberately dropped here (the Long overload
  // constructs the UnsafeRow with region = null) — confirm the copy only needs
  // absolute offsets.
  def apply(t: PBaseStruct, rv: RegionValue): Row = SafeRow(t, rv.offset)

  // Deep-copies only the fields selected by `selectIdx`, in the given order.
  def selectFields(t: PBaseStruct, region: Region, off: Long)(selectIdx: Array[Int]): Row = {
    val fullRow = new UnsafeRow(t, region, off)
    Row.fromSeq(selectIdx.map(i => Annotation.copy(t.types(i).virtualType, fullRow.get(i))))
  }

  def selectFields(t: PBaseStruct, rv: RegionValue)(selectIdx: Array[Int]): Row =
    SafeRow.selectFields(t, rv.region, rv.offset)(selectIdx)

  // Reads and deep-copies a single value of any PType.
  def read(t: PType, off: Long): Annotation =
    Annotation.copy(t.virtualType, UnsafeRow.read(t, null, off))

  def read(t: PType, rv: RegionValue): Annotation =
    read(t, rv.offset)

  /**
   * Returns true iff `a` contains no region-backed (Unsafe*) values anywhere
   * in its structure; rows, sequences, intervals and ndarrays are checked
   * recursively.
   */
  def isSafe(a: Any): Boolean = {
    a match {
      // The Unsafe* cases must precede the general Row / IndexedSeq / NDArray
      // cases: UnsafeRow extends Row, and the other wrappers presumably extend
      // the general types as well — reordering would change the result.
      case _: UnsafeRow => false
      case _: UnsafeIndexedSeq => false
      case _: UnsafeNDArray => false
      case r: Row =>
        r.toSeq.forall(isSafe)
      case a: IndexedSeq[_] =>
        a.forall(isSafe)
      case i: Interval =>
        isSafe(i.start) && isSafe(i.end)
      case nd: NDArray =>
        nd.getRowMajorElements().forall(isSafe)
      case _ => true
    }
  }
}
object SafeIndexedSeq {
  /** Deep-copies the region-backed array at `off` into an ordinary IndexedSeq. */
  def apply(t: PArray, off: Long): IndexedSeq[Annotation] = {
    val unsafe = new UnsafeIndexedSeq(t, null, off)
    val copied = Annotation.copy(t.virtualType, unsafe)
    copied.asInstanceOf[IndexedSeq[Annotation]]
  }

  /** Convenience overload reading from a RegionValue's offset. */
  def apply(t: PArray, rv: RegionValue): IndexedSeq[Annotation] = apply(t, rv.offset)
}
/**
 * A Row view exposing a subset of `old`'s fields, reordered according to
 * `fieldMapping`: position i reads old(fieldMapping(i)). The underlying row
 * can be swapped in place via `set` to avoid reallocation.
 */
class SelectFieldsRow(
  private[this] var old: Row,
  private[this] val fieldMapping: Array[Int]
) extends Row {
  // Builds the mapping by looking up each new field name in the old type.
  def this(
    old: Row,
    oldPType: TStruct,
    newPType: TStruct
  ) = this(old, newPType.fieldNames.map(name => oldPType.fieldIdx(name)))
  def this(
    old: Row,
    oldPType: PStruct,
    newPType: PStruct
  ) = {
    // An auxiliary constructor must begin with a this(...) call, so the
    // require(...) is smuggled in as the first element of a tuple whose
    // second element (`._2`) is the actual field mapping.
    this(old,
      (require(
        oldPType.fields.length <= old.length &&
          newPType.fields.length <= old.length,
        s"${oldPType}, ${newPType} ${old.length} $old")
        ->
        newPType.fieldNames.map(name => oldPType.fieldIdx(name)))._2
    )
  }

  // Invariant: every mapped index must be readable from the wrapped row.
  require(fieldMapping.forall(x => x < old.length),
    s"${fieldMapping.toSeq}, ${old.length} $old")

  override def length = fieldMapping.length
  override def get(i: Int) = old.get(fieldMapping(i))
  override def isNullAt(i: Int) = old.isNullAt(fieldMapping(i))
  override def copy(): Row = new SelectFieldsRow(old.copy(), fieldMapping)

  // Mutates this view in place to wrap a different underlying row; returns
  // `this` for call chaining.
  def set(newRow: Row): SelectFieldsRow = {
    old = newRow
    this
  }
}
/**
 * Minimal n-dimensional array abstraction: a shape plus element access in
 * row-major order. Equality is structural (same shape, same row-major
 * elements), so implementations with different storage compare equal.
 */
trait NDArray {
  val shape: IndexedSeq[Long]

  // Total element count: the product of all dimension sizes (1 for rank 0).
  lazy val numElements = shape.foldLeft(1L)(_ * _)

  def lookupElement(indices: IndexedSeq[Long]): Annotation

  def getRowMajorElements(): IndexedSeq[Annotation]

  def forall(pred: Annotation => Boolean): Boolean

  // Pattern match replaces the original isInstanceOf/asInstanceOf pair;
  // a null `that` falls through to the default case, as before.
  override def equals(that: Any): Boolean = that match {
    case thatNd: NDArray =>
      this.shape == thatNd.shape && this.getRowMajorElements() == thatNd.getRowMajorElements()
    case _ =>
      false
  }

  // equals is overridden above, so hashCode must be overridden consistently:
  // the previous version inherited identity hashCode, breaking the
  // equals/hashCode contract for hash-based collections.
  override def hashCode(): Int = (shape, getRowMajorElements()).hashCode()
}
/**
 * NDArray view over region-backed ndarray storage: elements are decoded from
 * `ndAddr` on demand. Only valid while `region` is live.
 */
class UnsafeNDArray(val pnd: PNDArray, val region: Region, val ndAddr: Long) extends NDArray {
  val shape: IndexedSeq[Long] = (0 until pnd.nDims).map(i => pnd.loadShape(ndAddr, i))
  val elementType = pnd.elementType.virtualType
  // NOTE(review): not referenced within this class; kept to preserve the public API.
  val coordStorageArray = new Array[Long](shape.size)

  def lookupElement(indices: IndexedSeq[Long]): Annotation = {
    val elementAddress = pnd.getElementAddress(indices, ndAddr)
    UnsafeRow.read(pnd.elementType, region, pnd.elementType.unstagedLoadFromNested(elementAddress))
  }

  /**
   * Visits every element in row-major order, stopping early when `visit`
   * returns false; returns true iff every visited element returned true.
   * Consolidates the index-odometer logic that was previously duplicated by
   * getRowMajorElements and forall.
   */
  private def visitRowMajor(visit: Annotation => Boolean): Boolean = {
    val indices = (0 until pnd.nDims).map(_ => 0L).toArray
    var curIdx = indices.size - 1
    var visited = 0L
    while (visited < numElements) {
      if (!visit(lookupElement(indices)))
        return false
      // Reset any trailing dimensions that have reached their bound.
      while (curIdx >= 0 && indices(curIdx) >= shape(curIdx) - 1) {
        indices(curIdx) = 0L
        curIdx -= 1
      }
      // Increment the first non-exhausted dimension, unless iteration is done.
      if (curIdx >= 0) {
        indices(curIdx) += 1L
        curIdx = indices.size - 1
      }
      visited += 1L
    }
    true
  }

  def getRowMajorElements(): IndexedSeq[Annotation] = {
    // Bounds check BEFORE allocating: the original checked after
    // `new Array[...](numElements.toInt)`, where an oversized count would
    // already have wrapped and failed with a confusing allocation error.
    if (numElements > Int.MaxValue) {
      throw new IllegalArgumentException(s"Cannot make an UnsafeNDArray with greater than Int.MaxValue entries. Shape was ${shape}")
    }
    val flat = new Array[Annotation](numElements.toInt)
    var idxIntoFlat = 0
    visitRowMajor { elem =>
      flat(idxIntoFlat) = elem
      idxIntoFlat += 1
      true
    }
    flat
  }

  override def forall(pred: Annotation => Boolean): Boolean =
    visitRowMajor(pred)

  override def toString: String = {
    s"UnsafeNDArray of shape (${shape.mkString(", ")}) with elements ${getRowMajorElements()}"
  }
}
/**
 * NDArray backed by an ordinary in-memory element sequence (row-major order);
 * safe to retain after any originating region is freed.
 */
case class SafeNDArray(val shape: IndexedSeq[Long], rowMajorElements: IndexedSeq[Annotation]) extends NDArray {
  // The element count must match the shape's volume.
  assert(shape.foldLeft(1L)(_ * _) == rowMajorElements.size)

  override def getRowMajorElements: IndexedSeq[Annotation] = rowMajorElements

  override def lookupElement(indices: IndexedSeq[Long]): Annotation = {
    // Row-major flattening: idx = ((i0 * d1 + i1) * d2 + i2) * ...
    // The previous fold computed sum(index_i * dim_i), which is not a valid
    // row-major index (e.g. shape (2,3), index (1,2) gave 8 for a 6-element
    // array instead of 5).
    val flatIdx = indices.zip(shape).foldLeft(0L) { case (acc, (index, dimSize)) =>
      acc * dimSize + index
    }
    rowMajorElements(flatIdx.toInt)
  }

  override def forall(pred: Annotation => Boolean): Boolean = this.rowMajorElements.forall(pred)
}
| hail-is/hail | hail/src/main/scala/is/hail/annotations/UnsafeRow.scala | Scala | mit | 11,861 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.dstream
import scala.util.control.NonFatal
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.NextIterator
import scala.reflect.ClassTag
import java.io._
import java.net.{UnknownHostException, Socket}
import org.apache.spark.Logging
import org.apache.spark.streaming.receiver.Receiver
/**
 * Input DStream whose receiver reads objects from a TCP socket at host:port,
 * deserializing the raw InputStream with the caller-supplied `bytesToObjects`.
 */
private[streaming]
class SocketInputDStream[T: ClassTag](
    @transient ssc_ : StreamingContext,
    host: String,
    port: Int,
    bytesToObjects: InputStream => Iterator[T],
    storageLevel: StorageLevel
  ) extends ReceiverInputDStream[T](ssc_) {

  // Creates a fresh receiver; called by the streaming framework for each start.
  def getReceiver(): Receiver[T] = {
    new SocketReceiver(host, port, bytesToObjects, storageLevel)
  }
}
/**
 * Receiver that connects to host:port, converts the socket's InputStream to
 * an iterator via `bytesToObjects`, and stores each produced record.
 */
private[streaming]
class SocketReceiver[T: ClassTag](
    host: String,
    port: Int,
    bytesToObjects: InputStream => Iterator[T],
    storageLevel: StorageLevel
  ) extends Receiver[T](storageLevel) with Logging {

  def onStart() {
    // Start the thread that receives data over a connection.
    // Runs as a daemon so it cannot keep the JVM alive on shutdown.
    new Thread("Socket Receiver") {
      setDaemon(true)
      override def run() { receive() }
    }.start()
  }

  def onStop() {
    // There is nothing much to do as the thread calling receive()
    // is designed to stop by itself isStopped() returns false
  }

  /**
   * Create a socket connection and receive data until receiver is stopped
   * */
  def receive() {
    var socket: Socket = null
    try {
      logInfo("Connecting to " + host + ":" + port)
      socket = new Socket(host, port)
      logInfo("Connected to " + host + ":" + port)
      val iterator = bytesToObjects(socket.getInputStream())
      // Store records until either the receiver is stopped or the stream ends.
      while(!isStopped && iterator.hasNext) {
        store(iterator.next)
      }
      if (!isStopped()) {
        // Stream ended while we were still running: ask the supervisor to restart.
        restart("Socket data stream had no more data")
      } else {
        logInfo("Stopped receiving")
      }
    } catch {
      case e: java.net.ConnectException =>
        restart("Error connecting to " + host + ":" + port, e)
      case NonFatal(e) =>
        logWarning("Error receiving data", e)
        restart("Error receiving data", e)
    } finally {
      // Always release the socket, on both success and error/restart paths.
      if (socket != null) {
        socket.close()
        logInfo("Closed socket to " + host + ":" + port)
      }
    }
  }
}
private[streaming]
object SocketReceiver  {

  /**
   * This methods translates the data from an inputstream (say, from a socket)
   * to '\n' delimited strings and returns an iterator to access the strings.
   */
  def bytesToLines(inputStream: InputStream): Iterator[String] = {
    val dataInputStream = new BufferedReader(new InputStreamReader(inputStream, "UTF-8"))
    new NextIterator[String] {
      protected override def getNext() = {
        val nextValue = dataInputStream.readLine()
        // readLine() returns null at end-of-stream, which terminates the iterator.
        if (nextValue == null) {
          finished = true
        }
        nextValue
      }

      protected override def close() {
        dataInputStream.close()
      }
    }
  }
}
| tophua/spark1.52 | streaming/src/main/scala/org/apache/spark/streaming/dstream/SocketInputDStream.scala | Scala | apache-2.0 | 4,209 |
package controllers
import play.api.mvc._
object AppController extends Controller {

  // Serves the application's entry page rendered from the `app` view template.
  def index = Action {
    Ok(views.html.app())
  }
}
| nagirrab/Karaoke | app/controllers/AppController.scala | Scala | mit | 140 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.execution.Ack
import scala.util.control.NonFatal
import monix.reactive.Observable.Operator
import monix.reactive.observers.Subscriber
import scala.concurrent.Future
/**
 * Operator that mirrors the source, except that a source completing without
 * emitting any element first emits the value produced by `default()`.
 */
private[reactive] final class DefaultIfEmptyOperator[A](default: () => A) extends Operator[A, A] {

  def apply(out: Subscriber[A]): Subscriber[A] =
    new Subscriber[A] {
      implicit val scheduler = out.scheduler
      // Flips to false on the first emitted element.
      private[this] var isEmpty = true

      def onNext(elem: A): Future[Ack] = {
        if (isEmpty) isEmpty = false
        out.onNext(elem)
      }

      def onError(ex: Throwable): Unit =
        out.onError(ex)

      def onComplete(): Unit = {
        if (!isEmpty) out.onComplete()
        else {
          // `streamErrors` ensures onError is signaled only for exceptions
          // thrown by `default()` itself — never for exceptions escaping the
          // downstream onNext/onComplete calls (which would violate the
          // subscriber protocol after signaling has started).
          var streamErrors = true
          try {
            val value = default()
            streamErrors = false
            out.onNext(value)
            out.onComplete()
          } catch {
            case NonFatal(ex) if streamErrors =>
              out.onError(ex)
          }
        }
      }
    }
}
| alexandru/monifu | monix-reactive/shared/src/main/scala/monix/reactive/internal/operators/DefaultIfEmptyOperator.scala | Scala | apache-2.0 | 1,736 |
import sbt._
class LibTestProject(info: ProjectInfo) extends DefaultProject(info)
{
  // NOTE(review): referencing injar.Test.other for its side effect exercises
  // the jar on the classpath; the trailing None presumably signals "no error"
  // in sbt 0.7's task DSL — confirm against the sbt 0.7 documentation.
  lazy val useJar = task { injar.Test.other; None }
}
package com.softwaremill.codebrag.usecases.user
import org.scalatest.{BeforeAndAfter, FlatSpec}
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.mock.MockitoSugar
import com.softwaremill.codebrag.dao.user.UserDAO
import org.mockito.Mockito._
import com.softwaremill.codebrag.domain.builder.UserAssembler
import com.softwaremill.codebrag.finders.user.{LoggedInUserView, UserFinder}
import com.softwaremill.codebrag.finders.browsingcontext.UserBrowsingContext
/**
 * Unit tests for LoginUserUseCase: authentication must be skipped for
 * inactive or unknown users, a successful login yields the logged-in user
 * view, and failed authentication raises LoginFailedException.
 */
class LoginUserUseCaseSpec extends FlatSpec with ShouldMatchers with BeforeAndAfter with MockitoSugar {

  val userDao = mock[UserDAO]
  val userFinder = mock[UserFinder]

  val loginUseCase = new LoginUserUseCase(userDao, userFinder)

  before {
    reset(userDao, userFinder)
  }

  it should "not try to authenticate when user is inactive" in {
    // given
    val inactiveUser = UserAssembler.randomUser.withActive(set = false).get
    when(userDao.findByLoginOrEmail(inactiveUser.emailLowerCase)).thenReturn(Some(inactiveUser))

    // when
    val loginForm = LoginForm(inactiveUser.emailLowerCase, "dummy", false)
    val Left(result) = loginUseCase.execute(loginForm) {
      // typo fixed: "Authenticatin" -> "Authentication"
      fail("Authentication block should not be called when user is inactive")
    }

    // then
    val expectedErrors = Map("general" -> List("User account inactive"))
    result should be(expectedErrors)
  }

  it should "not try to authenticate when user not found by login/email" in {
    // given
    val nonExistingUser = UserAssembler.randomUser.withActive(set = false).get
    when(userDao.findByLoginOrEmail(nonExistingUser.emailLowerCase)).thenReturn(None)

    // when
    val loginForm = LoginForm(nonExistingUser.emailLowerCase, "dummy", false)
    val exceptionCaught = intercept[LoginFailedException] {
      loginUseCase.execute(loginForm) {
        // message corrected: this scenario is a missing user, not an inactive one
        fail("Authentication block should not be called when user does not exist")
      }
    }

    // then
    exceptionCaught.msg should be("Invalid login credentials")
  }

  it should "return logged in user view" in {
    // given
    val user = UserAssembler.randomUser.get
    val userContext = UserBrowsingContext(user.id, "codebrag", "master")
    when(userDao.findByLoginOrEmail(user.emailLowerCase)).thenReturn(Some(user))
    val loggedInUserView = LoggedInUserView(user, userContext)
    when(userFinder.findLoggedInUser(user)).thenReturn(loggedInUserView)

    // when
    val loginForm = LoginForm(user.emailLowerCase, "dummy", false)
    val Right(loggedInUser) = loginUseCase.execute(loginForm) { Some(user) }

    // then
    loggedInUser should be(loggedInUserView)
  }

  it should "raise exception when user cannot authenticate due to bad credentials" in {
    // given
    val user = UserAssembler.randomUser.get
    when(userDao.findByLoginOrEmail(user.emailLowerCase)).thenReturn(Some(user))

    // when
    val loginForm = LoginForm(user.emailLowerCase, "dummy", false)
    val exceptionCaught = intercept[LoginFailedException] {
      loginUseCase.execute(loginForm) { None }
    }

    // then
    exceptionCaught.msg should be("Invalid login credentials")
  }
}
| softwaremill/codebrag | codebrag-service/src/test/scala/com/softwaremill/codebrag/usecases/user/LoginUserUseCaseSpec.scala | Scala | agpl-3.0 | 3,101 |
import leon.lang._
import leon.collection._
import leon.annotation._
import leon.proof._
import scala.language.postfixOps
import ListSpecs._
object LZW {
  // Binary alphabet: every input/output symbol is one of the two bits
  // Z ("zero") or O ("one").
  sealed abstract class Bit
  case object Z extends Bit
  case object O extends Bit

  /** This method attempts to prove
   * the main theorem of the assignment:
   * decompressing a compressed stream recovers the original data.
   */
  def theorem(data: List[Bit], table: List[List[Bit]]): Boolean = {
    require(algorithmProperties(data, table))
    data == decompress(compress(data, table), table)
  }.holds because tableTheorem(data, table)

  // Proof helper: the compressor's and decompressor's tables evolve in
  // lock-step, so they remain equal after processing the whole stream.
  def tableTheorem(data: List[Bit], table: List[List[Bit]]): Boolean = {
    require(algorithmProperties(data, table))
    def subTableTheorem(subData: List[Bit], cmpTable: List[List[Bit]], subCmpData: List[BigInt], decmpTable: List[List[Bit]]): (List[List[Bit]], List[List[Bit]]) = {
      require(cmpTable == decmpTable && algorithmProperties(subData, cmpTable) &&
        compressedDataProperties(subCmpData, decmpTable) && algorithmProperties(Nil[Bit](), decmpTable) &&
        subData.isEmpty == subCmpData.isEmpty)
      subData match {
        case Nil() => (cmpTable, decmpTable)
        case Cons(x, xs) => {
          val cmpStep: (BigInt, List[Bit], List[Bit]) = compressStep(subData, cmpTable)
          val newCmpTable: List[List[Bit]] = compressStepTable(subData, cmpTable)
          val decmpStep: (List[Bit], BigInt, List[BigInt]) = decompressStep(subCmpData, decmpTable)
          val newDecmpTable: List[List[Bit]] = decompressStepTable(subCmpData, decmpTable)
          subTableTheorem(cmpStep._3, newCmpTable, decmpStep._3, newDecmpTable)
        }
      }
    } ensuring {
      res: (List[List[Bit]], List[List[Bit]]) =>
        res._1 == res._2 && algorithmProperties(subData, res._1) &&
        algorithmProperties(Nil[Bit](), res._2)
    }
    val tables: (List[List[Bit]], List[List[Bit]]) = subTableTheorem(data, table, compress(data, table), table)
    tables._1 == tables._2
  }.holds

  /** This method simply compresses
   * 'data' according to the LZW
   * algorithm and returns the final
   * list of indexes
   */
  @induct
  def compress(data: List[Bit], table: List[List[Bit]]): List[BigInt] = {
    require(algorithmProperties(data, table))
    data match {
      /** While data is not Nil():
       * 1- call compressStep (to get next compressed index)
       * 2- call compressStepTable (to get new table)
       * 3- call compress recursively, using the above methods'
       * return values as parameters
       */
      case Nil() => Nil[BigInt]()
      case Cons(x, xs) => {
        val cmpStep: (BigInt, List[Bit], List[Bit]) = compressStep(data, table)
        val newTable: List[List[Bit]] = compressStepTable(data, table)
        cmpStep._1 :: compress(cmpStep._3, newTable)
      }
    }
  } ensuring {
    res: List[BigInt] => decompress(res, table) == data && compressedDataProperties(res, table)
  }
  /** POSTCONDITIONS:
   *
   * 1- decompress(res, table) == data
   */

  /** This method performs one
   * compression step and returns a
   * tuple containing:
   *
   * 1- compressed index
   * 2- compressed sequence
   * 3- remaining uncompressed data
   */
  def compressStep(data: List[Bit], table: List[List[Bit]]): (BigInt, List[Bit], List[Bit]) = {
    require(algorithmProperties(data, table))
    // Greedily extends the matched prefix one bit at a time while the
    // extended sequence is still present in the table.
    @induct
    def subCompressStep(matchedSeqIndex: BigInt, matchedSeq: List[Bit], unmatchedSeq: List[Bit]): (BigInt, List[Bit], List[Bit]) = {
      require(matchedSeq ++ unmatchedSeq == data because appendProperty(matchedSeq, unmatchedSeq) &&
        (matchedSeq.isEmpty || (uniqueConditions(matchedSeqIndex, matchedSeq, table) && uniqueProperty(matchedSeqIndex, matchedSeq, table))))
      unmatchedSeq match {
        case Nil() => (matchedSeqIndex, matchedSeq, unmatchedSeq)
        case Cons(x, xs) => {
          val newSeq: List[Bit] = matchedSeq :+ x
          if (table.contains(newSeq)) subCompressStep(table.indexOf(newSeq), newSeq, xs) else (matchedSeqIndex, matchedSeq, unmatchedSeq)
        }
      }
    } ensuring {
      res: (BigInt, List[Bit], List[Bit]) => res._2 ++ res._3 == data because appendProperty(res._2, res._3) &&
        (res._2.isEmpty || uniqueProperty(res._1, res._2, table))
    }
    /** POSTCONDITIONS:
     *
     * 1- matchedSeq ++ unmatchedSeq == data
     * 2- matchedSeq.isEmpty || table.indexOf(matchedSeq) == matchedSeqIndex
     */
    subCompressStep(0, Nil[Bit](), data)
  } ensuring {
    res: (BigInt, List[Bit], List[Bit]) => res._2 ++ res._3 == data &&
      (data.isEmpty || (uniqueConditions(res._1, res._2, table) && uniqueProperty(res._1, res._2, table) &&
      decompressStep(List[BigInt](res._1), table)._1 == res._2))
  }
  /** POSTCONDITIONS:
   *
   * 1- table(res._1) == res._2
   * 2- decompressStep(res._1, table)._1 == res._2
   * 3- res._2 ++ res._3 == data
   */

  /** This method simply calculates
   * the new entry for the table and
   * returns an already updated table
   */
  def compressStepTable(data: List[Bit], table: List[List[Bit]]): List[List[Bit]] = {
    require(algorithmProperties(data, table))
    // Computes the shortest prefix of `data` that is NOT yet in the table
    // (the longest matched prefix extended by one bit).
    @induct
    def subCompressStepTable(matchedSeq: List[Bit], unmatchedSeq: List[Bit]): List[Bit] = {
      unmatchedSeq match {
        case Nil() => Nil[Bit]()
        case Cons(x, xs) => {
          val newSeq: List[Bit] = matchedSeq ++ List[Bit](x)
          if (table.contains(newSeq)) subCompressStepTable(newSeq, xs) else newSeq
        }
      }
    } ensuring {
      res: List[Bit] => !table.contains(res)
    }
    val newTableEntry: List[Bit] = subCompressStepTable(Nil[Bit](), data)
    updateTable(newTableEntry, table)
  } ensuring {
    res: List[List[Bit]] => algorithmProperties(data, res)
  }
  /** POSTCONDITIONS:
   *
   * 1- re-check every condition for 'TABLE RULES'
   * 2- check that res contains every element of table (?)
   */

  /** This method simply decompresses
   * 'cmpData' according to the LZW
   * algorithm and returns the final
   * list of Bits
   */
  @induct
  def decompress(cmpData: List[BigInt], table: List[List[Bit]]): List[Bit] = {
    require(compressedDataProperties(cmpData, table) && algorithmProperties(Nil[Bit](), table))
    cmpData match {
      /** While data is not Nil():
       * 1- call decompressStep (to get next decompressed sequence)
       * 2- call decompressStepTable (to get new table)
       * 3- call decompress recursively, using the above methods'
       * return values as parameters
       */
      case Nil() => Nil[Bit]()
      case Cons(x, xs) => {
        val decmpStep: (List[Bit], BigInt, List[BigInt]) = decompressStep(cmpData, table)
        val newTable: List[List[Bit]] = decompressStepTable(cmpData, table)
        decmpStep._1 ++ decompress(decmpStep._3, newTable)
      }
    }
  }

  /** This method performs one
   * decompression step and returns a
   * tuple containing:
   *
   * 1- decompressed sequence
   * 2- decompressed sequence's index
   * 3- remaining compressed data
   */
  def decompressStep(cmpData: List[BigInt], table: List[List[Bit]]): (List[Bit], BigInt, List[BigInt]) = {
    require(cmpData.nonEmpty && isValidIndex(cmpData.head, table) && algorithmProperties(Nil[Bit](), table))
    (table(cmpData.head), cmpData.head, cmpData.tail)
  } ensuring {
    res: (List[Bit], BigInt, List[BigInt]) => uniqueProperty(res._2, res._1, table) &&
      res._2 :: res._3 == cmpData because prependProperty(res._2, res._3)
  }
  /** POSTCONDITIONS:
   *
   * 1- table.indexOf(res._1) == res._2
   * 2- res._2 :: res._3 == cmpData
   */

  /** This method simply calculates
   * the new entry for the table and
   * returns an already updated table
   */
  def decompressStepTable(cmpData: List[BigInt], table: List[List[Bit]]): List[List[Bit]] = {
    require(algorithmProperties(Nil[Bit](), table) &&
      compressedDataProperties(cmpData, table))
    // The new entry is table(x1) extended by the head of the NEXT decoded
    // sequence; when x2 is not (yet) a valid index, table(x1)'s own head is
    // used instead (the classic LZW "cScSc" special case).
    def subDecompressStepTable(): List[Bit] = (cmpData match {
      case Cons(x1, Cons(x2, xs)) => {
        if(isValidIndex(x2, table)) table(x1) :+ table(x2).head
        else table(x1) :+ table(x1).head
      }
      case Nil() => Nil[Bit]()
      case Cons(x, xs) => Nil[Bit]()
    }) ensuring {
      res: List[Bit] => !table.contains(res)
    }
    val newEntry: List[Bit] = subDecompressStepTable()
    updateTable(newEntry, table)
  } ensuring {
    res: List[List[Bit]] => algorithmProperties(Nil[Bit](), res)
  }
  /** POSTCONDITIONS:
   *
   * 1- re-check every condition for 'TABLE RULES'
   * 2- check that res contains every element of table
   */

  /** ----------------------
   * Auxiliary Methods
   * ----------------------
   */

  // Appends a non-empty new entry; an empty entry leaves the table unchanged.
  def updateTable(newEntry: List[Bit], table: List[List[Bit]]): List[List[Bit]] = {
    require(algorithmProperties(Nil[Bit](), table) && !table.contains(newEntry))
    newEntry match {
      case Nil() => table
      case Cons(x, xs) => table :+ newEntry
    }
  } ensuring {
    res: List[List[Bit]] => table.forall(tableElement => res.contains(tableElement) && algorithmProperties(Nil[Bit](), res))
  }
  /** POSTCONDITIONS:
   *
   * 1- check that res contains every element of table
   */

  /** ALGORITHM RULES:
   *
   * 1- Table cannot be Nil()
   * 2- Table cannot have duplicate entries
   * 3- No element of table is Nil()
   * 4- Each element of data has to be present in table
   */
  def algorithmProperties[T](data: List[T], table: List[List[T]]): Boolean = {
    table.nonEmpty &&
    isUnique(table) &&
    table.forall(tableElement => tableElement.nonEmpty) &&
    data.forall(dataElement => table.contains(List[T](dataElement)))
  }

  def isValidIndex[T](index: BigInt, table: List[T]): Boolean = {
    index >= 0 && index < table.size
  }

  // Lemma: appending element-by-element agrees with list append (++).
  def appendProperty[T](left: List[T], right: List[T]): Boolean = (right match {
    case Nil() => true
    case Cons(x, xs) => ((left :+ x) ++ xs) == (left ++ right) because {
      snocIsAppend(left, x) && appendAssoc(left, List[T](x), xs)
    }
  }).holds

  // Lemma: cons is the same as prepending a singleton with ++.
  def prependProperty[T](element: T, list: List[T]): Boolean = {
    element :: list == List[T](element) ++ list && appendProperty(List[T](element), list)
  }

  // Compressed data is well-formed when it is empty or its head is a valid
  // index into the current table.
  def compressedDataProperties(cmpData: List[BigInt], table: List[List[Bit]]): Boolean = {
    def validHeadIndex(list: List[BigInt]): Boolean = {
      list.nonEmpty && isValidIndex(list.head, table)
    }
    cmpData.isEmpty || validHeadIndex(cmpData)
  }

  /** Proves that 'findApplyCycle' holds
   * for lists without duplicate elements
   */
  def uniqueProperty[T](index: BigInt, element: T, list: List[T]): Boolean = {
    require(uniqueConditions(index, element, list))
    findApplyCycle(index, element, list)
  }.holds

  // indexOf and apply are mutual inverses at (index, element).
  def findApplyCycle[T](index: BigInt, element: T, list: List[T]): Boolean = {
    require(isValidIndex(index, list))
    list.indexOf(element) == index &&
    list(index) == element
  }

  // A list is "unique" when it equals its deduplicated form.
  def isUnique[T](list: List[T]): Boolean = {
    list == list.unique
  }

  def uniqueConditions[T](index: BigInt, element: T, list: List[T]): Boolean = {
    isValidIndex(index, list) && isUnique(list) && (list.indexOf(element) == index || list(index) == element)
  }
}
| regb/leon | testcases/verification/algorithms/LZW.scala | Scala | gpl-3.0 | 12,414 |
/*
* MIT License
*
* Copyright (c) 2017 Gonçalo Marques
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.byteslounge.slickrepo.test.postgres
import com.byteslounge.slickrepo.test.{LongLocalDateTimeVersionedRepositoryTest, PostgresConfig}
// Runs the shared Long-id / LocalDateTime-versioned repository test suite
// against a PostgreSQL backend (configuration supplied by PostgresConfig).
class PostgresLongLocalDateTimeVersionedRepositoryTest extends LongLocalDateTimeVersionedRepositoryTest(PostgresConfig.config)
| gonmarques/slick-repo | src/test/scala/com/byteslounge/slickrepo/test/postgres/PostgresLongLocalDateTimeVersionedRepositoryTest.scala | Scala | mit | 1,414 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.knockdata.spark.highcharts.demo
import java.io.PrintWriter
import com.knockdata.spark.highcharts._
import com.knockdata.spark.highcharts.model.Chart
import org.apache.spark.sql.functions._
import org.junit.Test
// # Drilldown Demo
//
// Based on [Column With Drilldown](http://www.highcharts.com/demo/column-drilldown)
//
class DemoDrillDown {
  // Bank marketing sample DataFrame shared by all demos below.
  val bank = DataSet.dfBank

  // ## Drilldown Basic
  //
  // Based on [Column With Drilldown](http://www.highcharts.com/demo/column-drilldown)
  //
  // A line chart with
  //
  // * x axis data from column $"marital"
  // * y axis aggregated the average balance
  //
  // Then it drilldown to
  //
  // * x axis data from column $"job"
  // * y axis aggregated the average balance
  @Test
  def demoDrilldownBasic(): Unit = {
    val chart = highcharts(bank
      .series("name" -> "marital",
        "y" -> avg(col("balance")))
      .drilldown("name" -> "job",
        "y" -> avg(col("balance"))))
      .chart(Chart.column)

    chart.plot()

    // Persist the generated chart definition for inspection.
    new PrintWriter("target/demoDrilldownBasic.json") { write(chart.replaced); close }
  }

  // ## Drilldown 2 Levels
  //
  // Based on [Column With Drilldown](http://www.highcharts.com/demo/column-drilldown)
  //
  // A line chart with
  //
  // * x axis data from column $"marital"
  // * y axis aggregated the average balance
  //
  // Then it drilldown to
  //
  // * x axis data from column $"job"
  // * y axis aggregated the average balance
  //
  // Then it drill down to
  //
  // * x axis data from column $"education"
  // * y axis aggregated the max balance
  //
  // with 3 levels, the output is pretty big
  // number of data point is
  // size(marital) + size(marital) * size(balance)
  //   + size(marital) * size(balance) + size(education)
  @Test
  def demoDrilldown2Level(): Unit = {
    val chart = highcharts(bank
      .series("name" -> "marital",
        "y" -> avg(col("balance")))
      .drilldown("name" -> "job",
        "y" -> avg(col("balance")))
      .drilldown("name" -> "education",
        "y" -> max(col("balance"))))
      .chart(Chart.column)

    chart.plot()

    new PrintWriter("target/demoDrilldown2Level.json") { write(chart.replaced); close }
  }

  // ## Drilldown Multiple Series Chart
  //
  // Based on [Column With Drilldown](http://www.highcharts.com/demo/column-drilldown)
  //
  // A line chart with
  //
  // * multiple series from column $"marital"
  // * x axis data from column $"job"
  // * y axis aggregated the average balance
  //
  // Then it drill down to
  //
  // * x axis data from column $"education"
  // * y axis aggregated the max balance
  //
  // series with one level drilldown, the output is pretty big
  // number of data point is
  // size(marital) + size(marital) * size(balance)
  //   + size(marital) * size(balance) + size(education)
  //
  // NOTE(review): the method name suggests a basic line demo, but the body
  // builds a multi-series drilldown chart — consider renaming in a follow-up
  // (kept as-is here to preserve the public test API).
  @Test
  def demoLineBasicDesc(): Unit = {
    val chart = highcharts(bank
      .seriesCol("marital")
      .series("name" -> "job",
        "y" -> avg(col("balance")))
      .drilldown("name" -> "education",
        "y" -> avg(col("balance"))))

    chart.plot()

    new PrintWriter("target/demoLineBasicDesc.json") { write(chart.replaced); close }
  }
}
| knockdata/spark-highcharts | src/test/scala/com/knockdata/spark/highcharts/demo/DemoDrillDown.scala | Scala | apache-2.0 | 3,974 |
/*******************************************************************************
* Copyright (c) 2014 Łukasz Szpakowski.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
******************************************************************************/
package pl.luckboy.purfuncor.frontend.instant
import scalaz._
import scalaz.Scalaz._
// Type-class-style interface over an instantiation environment of type E,
// parameterized by combinator location type L and instance key type M.
trait InstantiationEnvironmental[E, L, M]
{
  // Produces a copy of the given environment.
  def copyEnvironment(env: E): E

  // Looks up the instantiation lambda info stored at `lambdaIdx`, if any.
  def getLambdaInfoFromEnvironment(env: E)(lambdaIdx: Int): Option[InstantiationLambdaInfo[L]]

  // Sets the current combinator location, returning the resulting environment.
  def withCurrentCombinatorLocation(env: E)(loc: Option[L]): E

  // Extracts the global instance tree carried by the environment.
  def treeGlobalInstanceTreeFromEnvironment(env: E): InstanceTree[AbstractPolyFunction[L], M, GlobalInstance[L]]

  // Extracts the instance-argument table carried by the environment.
  // NOTE(review): the name contains a doubled "From" ("FromFrom") — renaming
  // would break implementors, so it is kept as-is.
  def instanceArgTableFromFromEnvironment(env: E): InstanceArgTable[L, M]
}
| luckboy/Purfuncor | src/main/scala/pl/luckboy/purfuncor/frontend/instant/InstantiationEnvironmental.scala | Scala | mpl-2.0 | 932 |
package scuff.web
import javax.servlet.http.HttpServletRequest
/** Typed accessor for a single named attribute on an HttpServletRequest.
  *
  * @param name the request-attribute key this instance reads and writes
  */
class Attribute[T](name: String) {
  /** Deletes the attribute from the request. */
  def remove(req: HttpServletRequest): Unit = req.removeAttribute(name)
  /** Stores `value` under this attribute's name. */
  def set(req: HttpServletRequest, value: T) = req.setAttribute(name, value)
  /** Reads the attribute; a `null` stored value maps to `None`. */
  def get(req: HttpServletRequest): Option[T] =
    Option(req.getAttribute(name)).map(_.asInstanceOf[T])
}
| nilskp/scuff | src/main/scala/scuff/web/Attribute.scala | Scala | mit | 415 |
package com.tehasdf.discord
import akka.actor.{ActorRef, Actor, ActorSystem}
import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.pattern.ask
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{SourceQueue, ActorMaterializer, OverflowStrategy}
import akka.testkit.TestKit
import akka.util.Timeout
import org.scalatest.concurrent.{IntegrationPatience, Eventually}
import scala.collection.immutable
import scala.concurrent.{Future, Await}
import scala.concurrent.duration._
import com.tehasdf.discord.ClientActor.ConnectionState
import org.scalatest._
// Replays recorded websocket traces into a ClientActor and checks both the
// messages it forwards to the test actor and its resulting connection state.
class ClientActorSpec(_sys: ActorSystem) extends TestKit(_sys)
  with FlatSpecLike with BeforeAndAfterAll with ShouldMatchers with Eventually {
  implicit val materializer = ActorMaterializer()
  implicit val iTimeout = Timeout(5 seconds)
  val defaultTimeout = 5 seconds

  def this() = this(ActorSystem("discord-akka-test"))

  import system.dispatcher

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }

  "Discord Client" should "handle a standard payload" in {
    withTrace("/traces/basic.txt") { (queue, client, rest) =>
      // Feed the remaining trace messages into the client's inbound queue.
      rest foreach queue.offer
      // The two forwarded messages may arrive in either order, so only set
      // membership is asserted.
      val expected = Set("khionu", "ceezy")
      val msg1 = expectMsgAnyClassOf(classOf[Api.Message])
      expected should contain(msg1.user)
      val msg2 = expectMsgAnyClassOf(classOf[Api.Message])
      expected should contain(msg2.user)
      val stateF = (client ? Api.GetState).mapTo[ConnectionState]
      val state = Await.result(stateF, defaultTimeout)
      state.guilds.values.head.presences(BigInt(91377040883744768L).underlying)._2.status shouldBe "idle"
    }
  }

  it should "handle a guild add message" in {
    withTrace("/traces/add_membership.txt") { (queue, client, rest) =>
      val stateF = (client ? Api.GetState).mapTo[ConnectionState]
      val state = Await.result(stateF, defaultTimeout)
      // The member must be absent before the trace is replayed...
      state.guilds.values.head.members.get(BigInt(71671589866831872L).underlying) shouldBe None
      Await.result(Future.sequence(rest map queue.offer), defaultTimeout)
      // ...and must eventually appear once the client processes the add event.
      eventually {
        val state2F = (client ? Api.GetState).mapTo[ConnectionState]
        val state2 = Await.result(state2F, defaultTimeout)
        val membership = state2.guilds.values.head.members(BigInt(71671589866831872L).underlying)
        membership.user.username shouldBe "FMendonca"
      }
    }
  }

  // Builds a ClientActor wired to a queue-backed source, offers the first
  // trace message (expected to yield Api.Ready), then hands the queue, the
  // client ref and the remaining messages to `f`.
  private def withTrace(filename: String)(f: (SourceQueue[Message], ActorRef, Iterable[Message]) => Unit) = {
    // dummyQueue only satisfies ClientActor's outbound-queue parameter;
    // whatever it receives is discarded by Sink.ignore.
    val dummyQueue = Source.queue[Message](64, OverflowStrategy.fail).to(Sink.ignore).run()
    val clientProps = ClientActor.props(dummyQueue, testActor, "test")
    val sink = Sink.actorSubscriber(clientProps)
    val src = Source.queue[Message](64, OverflowStrategy.fail)
    val (queue, client) = src.toMat(sink)(Keep.both).run()
    val messages = readTrace(filename)
    val (init, rest) = (messages.head, messages.tail)
    queue.offer(init)
    expectMsg(Api.Ready)
    f(queue, client, rest)
  }

  // Reads a classpath resource line by line, wrapping each line in a
  // websocket TextMessage.
  private def readTrace(filename: String): immutable.Iterable[Message] = {
    immutable.Seq(io.Source.fromInputStream(getClass.getResourceAsStream(filename)).getLines().map(TextMessage(_)).toSeq: _*)
  }
}
| eaceaser/discord-akka | src/test/scala/ClientActorSpec.scala | Scala | mit | 3,203 |
package com.twitter.finagle.http.netty
import java.nio.charset.Charset
import org.jboss.netty.buffer._
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
// Test assumptions of how Netty works
@RunWith(classOf[JUnitRunner])
class NettyTest extends FunSuite {
test("compose buffers") {
val bufferA = ChannelBuffers.wrappedBuffer("A".getBytes)
assert(bufferA.readableBytes == 1)
val bufferB = ChannelBuffers.wrappedBuffer("B".getBytes)
assert(bufferB.readableBytes == 1)
val bufferAB = ChannelBuffers.wrappedBuffer(bufferA, bufferB)
assert(bufferAB.readableBytes == 2)
assert(bufferAB.toString(Charset.forName("UTF-8")) == "AB")
val bufferC = ChannelBuffers.wrappedBuffer("C".getBytes)
val bufferABC = ChannelBuffers.wrappedBuffer(bufferAB, bufferC)
assert(bufferABC.readableBytes == 3)
assert(bufferABC.toString(Charset.forName("UTF-8")) == "ABC")
}
test("empty buffers are immutable") {
assert {
try {
ChannelBuffers.EMPTY_BUFFER.writeInt(23)
false
} catch {
case _: IndexOutOfBoundsException => true
case _: Throwable => false
}
}
}
}
| koshelev/finagle | finagle-http/src/test/scala/com/twitter/finagle/http/netty/NettyTest.scala | Scala | apache-2.0 | 1,206 |
/*
* Copyright © 2015-2019 the contributors (see Contributors.md).
*
* This file is part of Knora.
*
* Knora is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Knora is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with Knora. If not, see <http://www.gnu.org/licenses/>.
*/
package org.knora.webapi.util
import akka.actor.{ActorRef, Status}
import akka.event.LoggingAdapter
import akka.http.scaladsl.util.FastFuture
import akka.util.Timeout
import org.knora.webapi._
import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}
object ActorUtil {

    /**
      * A convenience function that simplifies and centralises error-handling in the `receive` method of supervised Akka
      * actors that expect to receive messages sent using the `ask` pattern.
      *
      * Such an actor should handle errors by returning a [[Status.Failure]] message to the sender. If this is not done,
      * the sender's `ask` times out, and the sender has no way of finding out why. This function ensures that a
      * [[Status.Failure]] is sent.
      *
      * If an error occurs that isn't the client's fault, the actor will also want to report it to the actor's supervisor,
      * so the supervisor can carry out its own error-handling policy.
      *
      * It is also useful to give the sender the original exception object so that a stack trace can be
      * logged. However, some exception classes are not serializable and therefore cannot be sent as Akka messages. This
      * function ensures that stack traces are logged in those cases.
      *
      * The function takes as arguments the sender of the `ask` request, and a [[Future]] representing the result
      * of processing the request. It first converts the `Future` to a reply message, which it sends back to the sender.
      * If the `Future` succeeded, the reply message is the object it contains. If the `Future` failed, the reply message
      * is a [[Status.Failure]]. It will contain the original exception if it is serializable; otherwise, the original
      * exception is logged, and a [[WrapperException]] is sent instead. The [[WrapperException]] will contain the result
      * of calling `toString` on the original exception.
      *
      * After the reply message is sent, if the `Future` succeeded, or contained a [[RequestRejectedException]],
      * nothing more is done. If it contained any other exception, the function triggers the supervisor's error-handling
      * policy by throwing whatever exception was returned to the sender.
      *
      * @param sender the actor that made the request in the `ask` pattern.
      * @param future a [[Future]] that will provide the result of the sender's request.
      * @param log    a [[LoggingAdapter]] for logging non-serializable exceptions.
      */
    def future2Message[ReplyT](sender: ActorRef, future: Future[ReplyT], log: LoggingAdapter)(implicit executionContext: ExecutionContext): Unit = {
        future.onComplete {
            tryObj: Try[ReplyT] =>
                try2Message(
                    sender = sender,
                    tryObj = tryObj,
                    log = log
                )
        }
    }

    /**
      * Like `future2Message`, but takes a `Try` instead of a `Future`.
      *
      * @param sender the actor that made the request in the `ask` pattern.
      * @param tryObj a [[Try]] that will provide the result of the sender's request.
      * @param log    a [[LoggingAdapter]] for logging non-serializable exceptions.
      */
    def try2Message[ReplyT](sender: ActorRef, tryObj: Try[ReplyT], log: LoggingAdapter)(implicit executionContext: ExecutionContext): Unit = {
        tryObj match {
            case Success(result) => sender ! result

            case Failure(e) => e match {
                case rejectedEx: RequestRejectedException =>
                    // The error was the client's fault, so just tell the client.
                    // NOTE(review): the log message says "future2Message" even when
                    // reached via try2Message directly; kept for log-grepping continuity.
                    log.debug("future2Message - rejectedException: {}", rejectedEx)
                    sender ! akka.actor.Status.Failure(rejectedEx)

                case otherEx: Exception =>
                    // The error wasn't the client's fault. Log the exception, and also
                    // let the client know.
                    val exToReport = ExceptionUtil.logAndWrapIfNotSerializable(otherEx, log)
                    log.debug("future2Message - otherException: {}", exToReport)
                    sender ! akka.actor.Status.Failure(exToReport)
                    // Rethrow after replying so the actor's supervisor sees the failure
                    // and can apply its supervision strategy.
                    throw exToReport

                case otherThrowable: Throwable =>
                    // Don't try to recover from a Throwable that isn't an Exception.
                    throw otherThrowable
            }
        }
    }

    /**
      * An actor that expects to receive messages sent using the `ask` pattern can use this method to handle
      * unexpected request messages in a consistent way.
      *
      * @param sender  the actor that made the request in the `ask` pattern.
      * @param message the message that was received.
      * @param log     a [[LoggingAdapter]].
      */
    def handleUnexpectedMessage(sender: ActorRef, message: Any, log: LoggingAdapter, who: String)(implicit executionContext: ExecutionContext): Unit = {
        // NOTE(review): the `log` and implicit `executionContext` parameters are
        // currently unused here; only a Status.Failure reply is sent.
        val unexpectedMessageException = UnexpectedMessageException(s"$who received an unexpected message $message of type ${message.getClass.getCanonicalName}")
        sender ! akka.actor.Status.Failure(unexpectedMessageException)
    }

    /**
      * Converts a [[Map]] containing futures of values into a future containing a [[Map]] of values.
      *
      * @param mapToSequence the [[Map]] to be converted.
      * @return a future that will provide the results of the futures that were in the [[Map]].
      */
    def sequenceFuturesInMap[KeyT: ClassTag, ValueT](mapToSequence: Map[KeyT, Future[ValueT]])(implicit timeout: Timeout, executionContext: ExecutionContext): Future[Map[KeyT, ValueT]] = {
        // The type ascriptions in the case pattern are erased at runtime and
        // always match; they only document the shapes involved.
        Future.sequence {
            mapToSequence.map {
                case (key: KeyT, futureValue: Future[ValueT]) =>
                    futureValue.map {
                        value => key -> value
                    }
            }
        }.map(_.toMap)
    }

    /**
      * Converts a [[Map]] containing futures of sequences into a future containing a [[Map]] containing sequences.
      *
      * @param mapToSequence the [[Map]] to be converted.
      * @return a future that will provide the results of the futures that were in the [[Map]].
      */
    def sequenceFutureSeqsInMap[KeyT: ClassTag, ElemT](mapToSequence: Map[KeyT, Future[Seq[ElemT]]])(implicit timeout: Timeout, executionContext: ExecutionContext): Future[Map[KeyT, Seq[ElemT]]] = {
        // See http://stackoverflow.com/a/17479415
        Future.sequence {
            mapToSequence.map {
                case (key: KeyT, futureSeq: Future[Seq[ElemT]]) =>
                    futureSeq.map {
                        elements: Seq[ElemT] => (key, elements)
                    }
            }
        }.map(_.toMap)
    }

    /**
      * Converts a [[Map]] containing sequences of futures into a future containing a [[Map]] containing sequences.
      *
      * @param mapToSequence the [[Map]] to be converted.
      * @return a future that will provide the results of the futures that were in the [[Map]].
      */
    def sequenceSeqFuturesInMap[KeyT: ClassTag, ElemT](mapToSequence: Map[KeyT, Seq[Future[ElemT]]])(implicit timeout: Timeout, executionContext: ExecutionContext): Future[Map[KeyT, Seq[ElemT]]] = {
        // First collapse each Seq[Future[ElemT]] into a Future[Seq[ElemT]],
        // then reuse sequenceFutureSeqsInMap for the outer flattening.
        val transformedMap: Map[KeyT, Future[Seq[ElemT]]] = mapToSequence.map {
            case (key: KeyT, seqFuture: Seq[Future[ElemT]]) => key -> Future.sequence(seqFuture)
        }

        sequenceFutureSeqsInMap(transformedMap)
    }

    /**
      * Converts an option containing a future to a future containing an option.
      *
      * @param optionFuture an option containing a future.
      * @return a future containing an option.
      */
    def optionFuture2FutureOption[A](optionFuture: Option[Future[A]])(implicit executionContext: ExecutionContext): Future[Option[A]] = {
        optionFuture match {
            case Some(f) => f.map(Some(_))
            case None => Future.successful(None)
        }
    }

    /**
      * Runs a sequence of tasks.
      *
      * @param firstTask the first task in the sequence.
      * @tparam T the type of the underlying task result.
      * @return the result of the last task in the sequence.
      */
    def runTasks[T](firstTask: Task[T])(implicit timeout: Timeout, executionContext: ExecutionContext): Future[TaskResult[T]] = {
        runTasksRec(previousResult = None, nextTask = firstTask)
    }

    /**
      * Recursively runs a sequence of tasks.
      *
      * Note: the recursion happens inside Future combinators, so stack depth is
      * not proportional to the number of tasks.
      *
      * @param previousResult the previous result or `None` if this is the first task in the sequence.
      * @param nextTask       the next task to be run.
      * @tparam T the type of the underlying task result.
      * @return the result of the last task in the sequence.
      */
    private def runTasksRec[T](previousResult: Option[TaskResult[T]], nextTask: Task[T])(implicit timeout: Timeout, executionContext: ExecutionContext): Future[TaskResult[T]] = {
        for {
            taskResult: TaskResult[T] <- nextTask.runTask(previousResult)

            recResult: TaskResult[T] <- taskResult.nextTask match {
                case Some(definedNextTask) => runTasksRec(previousResult = Some(taskResult), nextTask = definedNextTask)
                case None => FastFuture.successful(taskResult)
            }
        } yield recResult
    }
}
/**
* Represents the result of a task in a sequence of tasks.
*
* @tparam T the type of the underlying task result.
*/
/**
  * Represents the result of a task in a sequence of tasks, carrying both the
  * computed value and a pointer to the task to run next (if any).
  *
  * @tparam T the type of the underlying task result.
  */
trait TaskResult[T] {
    /**
      * Returns the underlying result of this task.
      */
    def underlyingResult: T

    /**
      * Returns the next task, or `None` if this was the last task.
      */
    def nextTask: Option[Task[T]]
}
/**
* Represents a task in a sequence of tasks.
*
* @tparam T the type of the underlying task result.
*/
/**
  * Represents a task in a sequence of tasks. Each task receives the result of
  * its predecessor, allowing state to be threaded through the sequence.
  *
  * @tparam T the type of the underlying task result.
  */
trait Task[T] {
    /**
      * Runs the task.
      *
      * @param previousResult the result of the previous task, or `None` if this is the first task in the sequence.
      * @return the result of this task.
      */
    def runTask(previousResult: Option[TaskResult[T]])(implicit timeout: Timeout, executionContext: ExecutionContext): Future[TaskResult[T]]
}
| musicEnfanthen/Knora | webapi/src/main/scala/org/knora/webapi/util/ActorUtil.scala | Scala | agpl-3.0 | 11,034 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2014 MineFormers
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package de.mineformers.core.mod
import scala.reflect.ClassTag
/**
* HoldsBlocks
*
* @author PaleoCrafter
*/
/**
 * HoldsBlocks
 *
 * Mixin for mods that own a [[BlockHolder]]: `initBlocks` reflectively
 * instantiates the holder type `B` via its no-arg constructor and calls
 * its `init()`.
 *
 * @author PaleoCrafter
 */
trait HoldsBlocks[B <: BlockHolder] extends MFMod {
  def initBlocks()(implicit ev: ClassTag[B]): Unit = {
    // Class.newInstance is deprecated: it silently rethrows checked exceptions
    // from the constructor. Going through getDeclaredConstructor() wraps them
    // in InvocationTargetException instead, as recommended.
    _blocks = ev.runtimeClass.getDeclaredConstructor().newInstance().asInstanceOf[B]
    blocks.init()
  }

  // Backing field; populated by initBlocks and exposed read-only via `blocks`.
  private var _blocks: B = _

  def blocks = _blocks
}
| MineFormers/MFCore | src/main/scala/de/mineformers/core/mod/HoldsBlocks.scala | Scala | mit | 1,503 |
package lila.shutup
/**
* - words are automatically pluralized. "tit" will also match "tits"
* - words are automatically leetified. "tit" will also match "t1t", "t-i-t", and more.
* - words do not partial match. "anal" will NOT match "analysis".
*/
object Dictionary {

  // English word list. Each non-empty line of the raw block below is one
  // pattern; pluralization and leetification are applied elsewhere (see the
  // object-level comment above). The lines are flush-left on purpose: they
  // live inside a string literal and any leading whitespace would become part
  // of the pattern.
  def en: List[String] = dict("""
(c|k)oc?k(y|suc?ker|)
(c|k)um(shot|)
(c|k)unt(ing|)
(f+|ph)(u{1,}|a{1,}|e{1,})c?k(er|r|u|k|ed|d|t|ing?|en|tard?|face|off?|)
fck(er|r|u|k|ed|d|t|ing?|tard?|face|off?|)
abortion
adol(f|ph)
anal(plug|sex|)
anus
arse(hole|wipe|)
ass
ass?(hole|fag)
aus?c?hwitz
ball
bastard?
bewb
bimbo
bitche?
blow
blowjob
blumpkin
bollock
boner
boob
bugger
buk?kake
bull?shit
cancer
cawk
chink
choad
clit
clitoris
clown
condom
coon
cooter
cornhole
coward?
crap
cunn?ilingu
dic?k(head|face|suc?ker|)
dildo
dogg?ystyle
dong
douche(bag|)
dyke
(f|ph)ag
(f|ph)agg?ot
fanny
(f|ph)art
foreskin
gangbang
gay
genital
genitalia
gobshite?
gook
gypo
handjob
hell
hitler+
homm?o(sexual|)
honkey
hooker
hore
horny
humping
idiot
incest
jerk
jizz?(um|)
kaffir
kike
labia
lesbo
masturbat(e|ion|ing)
milf
molest
moron
mother
motherfuc?k(er|)
mthrfckr
muff
nazi
negro
nigg?(er|a|ah)
nonce
nutsac?k
pa?edo
pa?edo(f|ph)ile
paki
pecker
pederast
pen(1|i)s
pig
pimp
piss
poof
poon
poop
porn
pric?k
pron
prostitute
punani
puss(i|y|ie)
queef
queer
quim
raped?
rapist
rect(al|um)
retard(ed|)
rimjob
schlong
screw(d|ed|)
scrotum
scum(bag|)
semen
sex
shag
shemale
shit(z|e|y|ty|bag|)
sister
slag
slut
spastic
spaz
sperm
spick
spoo
spooge
spunk
stfu
stripper
stupid
suc?k
taint
tart
terrorist
tit(s|ies|ties|ty)(fuc?k)
tosser
turd
twat
vag
vagin(a|al|)
vibrator
vulva
wanc?k(er|)
wetback
whore?
wog
""")

  // Splits the raw blob into one pattern per non-empty line (no trimming).
  // NOTE(review): on Scala 2.13 / JDK 11+ `String#lines` clashes with
  // java.lang.String#lines (a java.util.stream.Stream) — confirm the build's
  // Scala version before changing this; `linesIterator` is the safe spelling.
  private def dict(words: String) = words.lines.filter(_.nonEmpty).toList
}
| Happy0/lila | modules/shutup/src/main/Dictionary.scala | Scala | mit | 1,740 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package reflect
package internal
import scala.annotation.unchecked.uncheckedStable
import settings.MutableSettings
/** Provides delegates to the reporter doing the actual work.
* All forwarding methods should be marked final,
* but some subclasses out of our reach still override them.
*
* Eventually, this interface should be reduced to one method: `reporter`,
* and clients should indirect themselves (reduce duplication of forwarders).
*/
trait Reporting { self : Positions =>
  def reporter: Reporter
  def currentRun: RunReporting

  trait RunReporting {
    val reporting: PerRunReporting = PerRunReporting
  }

  // Concrete subclasses refine this to their own per-run reporting type and
  // provide the factory below.
  type PerRunReporting <: PerRunReportingBase
  protected def PerRunReporting: PerRunReporting
  abstract class PerRunReportingBase {
    def deprecationWarning(pos: Position, msg: String, since: String, site: String, origin: String): Unit

    /** Have we already supplemented the error message of a compiler crash? */
    private[this] var supplementedError = false
    // Supplement at most once per run: subsequent calls return the message
    // unchanged.
    def supplementErrorMessage(errorMessage: String): String =
      if (supplementedError) errorMessage
      else {
        supplementedError = true
        supplementTyperState(errorMessage)
      }

  }

  // overridden in Global
  def supplementTyperState(errorMessage: String): String = errorMessage
  def supplementErrorMessage(errorMessage: String) = currentRun.reporting.supplementErrorMessage(errorMessage)

  @deprecatedOverriding("This forwards to the corresponding method in reporter -- override reporter instead", "2.11.2")
  def inform(msg: String): Unit = inform(NoPosition, msg)
  @deprecatedOverriding("This forwards to the corresponding method in reporter -- override reporter instead", "2.11.2")
  @deprecated("Use `runReporting.warning` instead", since = "2.13.4")
  def warning(msg: String): Unit = warning(NoPosition, msg)
  // globalError(msg: String) used to abort -- not sure that was a good idea, so I made it more regular
  // (couldn't find any uses that relied on old behavior)
  @deprecatedOverriding("This forwards to the corresponding method in reporter -- override reporter instead", "2.11.2")
  def globalError(msg: String): Unit = globalError(NoPosition, msg)

  // Reports the error (so the compile fails) and then throws a FatalError
  // carrying the supplemented message.
  def abort(msg: String): Nothing = {
    val augmented = supplementErrorMessage(msg)
    // Needs to call error to make sure the compile fails.
    globalError(augmented)
    throw new FatalError(augmented)
  }

  @deprecatedOverriding("This forwards to the corresponding method in reporter -- override reporter instead", "2.11.2")
  def inform(pos: Position, msg: String) = reporter.echo(pos, msg)
  @deprecatedOverriding("This forwards to the corresponding method in reporter -- override reporter instead", "2.11.2")
  @deprecated("Use `runReporting.warning` instead", since = "2.13.4")
  def warning(pos: Position, msg: String) = reporter.warning(pos, msg)
  @deprecatedOverriding("This forwards to the corresponding method in reporter -- override reporter instead", "2.11.2")
  def globalError(pos: Position, msg: String) = reporter.error(pos, msg)
}
import util.Position
/** Report information, warnings and errors.
*
* This describes the internal interface for issuing information, warnings and errors.
* Implementers of scala.tools.nsc.Reporter such as sbt/ide must define info0.
* Implementers of scala.tools.nsc.FilteringReporter must define its extension point doReport.
*/
abstract class Reporter {
  private[this] var _errorCount = 0
  private[this] var _warningCount = 0

  // sbt source compatibility
  final type Severity = Reporter.Severity
  @uncheckedStable final def INFO: Severity = Reporter.INFO
  @uncheckedStable final def WARNING: Severity = Reporter.WARNING
  @uncheckedStable final def ERROR: Severity = Reporter.ERROR

  // TODO: rename to `doReport`, remove the `force` parameter.
  // Note: `force` is ignored. It used to mean: if `!force`, the reporter may skip INFO messages.
  // If `force`, INFO messages were always printed. Now, INFO messages are always printed.
  protected def info0(pos: Position, msg: String, severity: Severity, force: Boolean): Unit

  /** @return Reporter.Display (0): display and count; Reporter.Count (1):
    *         count only; Reporter.Suppress (2): neither. Override to filter.
    */
  def filter(pos: Position, msg: String, severity: Severity): Int = Reporter.Display

  final def echo(msg: String): Unit                = echo(util.NoPosition, msg)
  // echo bypasses counting entirely: only the filter's Display/non-Display
  // decision applies.
  final def echo(pos: Position, msg: String): Unit = if (filter(pos, msg, INFO) == 0) info0(pos, msg, INFO, force = true)

  final def warning(pos: Position, msg: String): Unit = filteredInfo(pos, msg, WARNING)
  final def error(pos: Position, msg: String): Unit   = filteredInfo(pos, msg, ERROR)

  // Applies the filter code: <= 1 (Display or Count) bumps the counter,
  // == 0 (Display) additionally emits the message.
  private def filteredInfo(pos: Position, msg: String, severity: Severity): Unit = {
    val f = filter(pos, msg, severity)
    if (f <= 1) increment(severity)
    if (f == 0) info0(pos, msg, severity, force = false)
  }

  def increment(severity: Severity): Unit = severity match {
    case _: Reporter.ERROR.type   => _errorCount += 1
    case _: Reporter.WARNING.type => _warningCount += 1
    case _                        =>
  }

  def errorCount: Int   = _errorCount
  def warningCount: Int = _warningCount

  def hasErrors: Boolean   = errorCount > 0
  def hasWarnings: Boolean = warningCount > 0

  // Resets only the counters; subclasses with extra state override and call super.
  def reset(): Unit = {
    _errorCount   = 0
    _warningCount = 0
  }

  def flush(): Unit = ()

  /** Finish reporting: print summaries, release resources. */
  def finish(): Unit = ()

  /** After reporting, offer advice on getting more details.
    *  Does not access `this`, but not static because it's overridden in ReplReporterImpl.
    */
  def rerunWithDetails(setting: MutableSettings#Setting, name: String): String =
    setting.value match {
      case b: Boolean if !b => s"; re-run with ${name} for details"
      case _                => s"; re-run enabling ${name} for details, or try -help"
    }
}
object Reporter {
  // Filter result codes understood by Reporter.filter / filteredInfo.
  final val Display  = 0 // display and count toward hasWarnings
  final val Count    = 1 // no display but count toward hasWarnings
  final val Suppress = 2 // no display, does not bump count for hasWarnings

  // Severity ADT; `id` gives the ordering INFO < WARNING < ERROR.
  sealed abstract class Severity(val id: Int, override val toString: String)
  object INFO    extends Severity(0, "INFO")
  object WARNING extends Severity(1, "WARNING")
  object ERROR   extends Severity(2, "ERROR")
}
| scala/scala | src/reflect/scala/reflect/internal/Reporting.scala | Scala | apache-2.0 | 6,600 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.tools.accumulo.commands
import java.io._
import java.util.Locale
import java.util.zip.{Deflater, GZIPOutputStream}
import com.beust.jcommander.{JCommander, Parameter, ParameterException, Parameters}
import com.typesafe.scalalogging.LazyLogging
import org.geotools.data.Query
import org.geotools.data.simple.SimpleFeatureCollection
import org.geotools.filter.text.ecql.ECQL
import org.locationtech.geomesa.accumulo.data.AccumuloFeatureStore
import org.locationtech.geomesa.tools.accumulo.Utils.Formats
import org.locationtech.geomesa.tools.accumulo.Utils.Formats._
import org.locationtech.geomesa.tools.accumulo._
import org.locationtech.geomesa.tools.accumulo.commands.ExportCommand.ExportParameters
import org.locationtech.geomesa.tools.common.{FeatureTypeNameParam, OptionalCQLFilterParam}
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
import org.opengis.filter.Filter
import scala.util.{Failure, Success, Try}
// CLI command that queries a GeoMesa feature type and writes the results in
// one of several export formats (csv/tsv/shp/json/gml/bin/avro).
class ExportCommand(parent: JCommander) extends CommandWithCatalog(parent) with LazyLogging {
  override val command = "export"
  override val params = new ExportParameters

  override def execute() = {
    val fmt = Formats.withName(params.format.toLowerCase(Locale.US))
    val features = getFeatureCollection(fmt)
    // Only computed for AVRO; null --gzip means "library default" there.
    lazy val avroCompression = Option(params.gzip).map(_.toInt).getOrElse(Deflater.DEFAULT_COMPRESSION)
    val exporter: FeatureExporter = fmt match {
      case CSV | TSV      => new DelimitedExport(getWriter(), fmt)
      case SHP            => new ShapefileExport(checkShpFile())
      case GeoJson | JSON => new GeoJsonExport(getWriter())
      case GML            => new GmlExport(createOutputStream())
      case BIN            => BinFileExport(createOutputStream(), params)
      case AVRO           => new AvroExport(createOutputStream(true), features.getSchema, avroCompression)
      // shouldn't happen unless someone adds a new format and doesn't implement it here
      case _ => throw new UnsupportedOperationException(s"Format $fmt can't be exported")
    }

    try {
      exporter.write(features)
      logger.info(s"Feature export complete to ${Option(params.file).map(_.getPath).getOrElse("standard out")}")
    } finally {
      // NOTE(review): if flush() throws, close() and ds.dispose() are skipped —
      // confirm whether that leak is acceptable before restructuring.
      exporter.flush()
      exporter.close()
      ds.dispose()
    }
  }

  // Dispatches on format: SHP and BIN need the attribute list rewritten
  // before querying; everything else queries as-is.
  def getFeatureCollection(fmt: Formats): SimpleFeatureCollection = {
    lazy val sft = ds.getSchema(params.featureName)
    fmt match {
      case SHP =>
        val schemaString =
          if (params.attributes == null) {
            ShapefileExport.modifySchema(sft)
          } else {
            ShapefileExport.replaceGeomInAttributesString(params.attributes, sft)
          }
        getFeatureCollection(Some(schemaString))
      case BIN =>
        // Overrides BIN's default time field with the schema's dtg field, if present.
        sft.getDtgField.foreach(BinFileExport.DEFAULT_TIME = _)
        getFeatureCollection(Some(BinFileExport.getAttributeList(params)))
      case _ => getFeatureCollection()
    }
  }

  def getFeatureCollection(overrideAttributes: Option[String] = None): SimpleFeatureCollection = {
    val filter = Option(params.cqlFilter).map(ECQL.toFilter).getOrElse(Filter.INCLUDE)
    logger.debug(s"Applying CQL filter ${filter.toString}")
    val q = new Query(params.featureName, filter)
    Option(params.maxFeatures).foreach(q.setMaxFeatures(_))

    // If there are override attributes given as an arg or via command line params
    // split attributes by "," meanwhile allowing to escape it by "\\,".
    // NOTE(review): the regex lookbehind and the replacement literal both
    // target TWO backslashes before the comma (triple-quoted strings do not
    // process escapes) — verify against the intended single-backslash escape
    // documented in the parameter help before changing.
    overrideAttributes.orElse(Option(params.attributes)).foreach { attributes =>
      val splitAttrs = attributes.split("""(?<!\\\\),""").map(_.trim.replace("\\\\,", ","))
      logger.debug("Attributes used for query transform: " + splitAttrs.mkString("|"))
      q.setPropertyNames(splitAttrs)
    }

    // get the feature store used to query the GeoMesa data
    val fs = ds.getFeatureSource(params.featureName).asInstanceOf[AccumuloFeatureStore]

    // and execute the query
    Try(fs.getFeatures(q)) match {
      case Success(fc) => fc
      case Failure(ex) =>
        throw new Exception("Error: Could not create a SimpleFeatureCollection to export. Please ensure " +
          "that all arguments are correct in the previous command.", ex)
    }
  }

  // Opens the output target (file or stdout), optionally gzip-wrapped.
  // skipCompression is used by AVRO, which compresses internally.
  def createOutputStream(skipCompression: Boolean = false): OutputStream = {
    val out = if (params.file == null) System.out else new FileOutputStream(params.file)
    val compressed = if (skipCompression || params.gzip == null) out else new GZIPOutputStream(out) {
      `def`.setLevel(params.gzip) // hack to access the protected deflate level
    }
    new BufferedOutputStream(compressed)
  }

  // noinspection AccessorLikeMethodIsEmptyParen
  def getWriter(): Writer = new OutputStreamWriter(createOutputStream())

  // Shapefile export cannot stream to stdout, so a file is mandatory.
  def checkShpFile(): File = {
    if (params.file != null) {
      params.file
    } else {
      throw new ParameterException("Error: -o or --output for file-based output is required for " +
        "shapefile export (stdout not supported for shape files)")
    }
  }
}
object ExportCommand {
  // JCommander parameter container for the export command; mixes in the
  // shared connection, feature-type-name and optional-CQL-filter params.
  @Parameters(commandDescription = "Export a GeoMesa feature")
  class ExportParameters extends GeoMesaConnectionParams
    with FeatureTypeNameParam
    with OptionalCQLFilterParam {

    @Parameter(names = Array("-F", "--format"), description = "Format to export (csv|tsv|gml|json|shp|bin|avro)")
    var format: String = "csv"

    @Parameter(names = Array("-m", "--max-features"), description = "Maximum number of features to return. default: Unlimited")
    var maxFeatures: Integer = null

    @Parameter(names = Array("-a", "--attributes"), description = "Attributes from feature to export " +
      "(comma-separated)...Comma-separated expressions with each in the format " +
      "attribute[=filter_function_expression]|derived-attribute=filter_function_expression. " +
      "filter_function_expression is an expression of filter function applied to attributes, literals " +
      "and filter functions, i.e. can be nested")
    var attributes: String = null

    @Parameter(names = Array("--gzip"), description = "level of gzip compression to apply to output, from 1-9")
    var gzip: Integer = null

    // The five attribute-name overrides below are consumed by the BIN export.
    @Parameter(names = Array("--id-attribute"), description = "name of the id attribute to export")
    var idAttribute: String = null

    @Parameter(names = Array("--lat-attribute"), description = "name of the latitude attribute to export")
    var latAttribute: String = null

    @Parameter(names = Array("--lon-attribute"), description = "name of the longitude attribute to export")
    var lonAttribute: String = null

    @Parameter(names = Array("--dt-attribute"), description = "name of the date attribute to export")
    var dateAttribute: String = null

    @Parameter(names = Array("--label-attribute"), description = "name of the attribute to use as a bin file label")
    var labelAttribute: String = null

    @Parameter(names = Array("-o", "--output"), description = "name of the file to output to instead of std out")
    var file: File = null
  }
}
| mdzimmerman/geomesa | geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/accumulo/commands/ExportCommand.scala | Scala | apache-2.0 | 7,521 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.async
package run
package exceptions
import scala.async.Async.{async, await}
import scala.concurrent.{Future, ExecutionContext, Await}
import ExecutionContext.Implicits._
import scala.concurrent.duration._
import scala.reflect.ClassTag
import org.junit.Test
// Verifies that exceptions raised inside async blocks, before or after an
// await, surface as failures of the resulting Future.
class ExceptionsSpec {

  @Test
  def `uncaught exception within async`(): Unit = {
    val fut = async { throw new Exception("problem") }
    intercept[Exception] { Await.result(fut, 2.seconds) }
  }

  @Test
  def `uncaught exception within async after await`(): Unit = {
    val base = Future { "five!".length }
    val fut = async {
      val len = await(base)
      throw new Exception(s"illegal length: $len")
    }
    intercept[Exception] { Await.result(fut, 2.seconds) }
  }

  @Test
  def `await failing future within async`(): Unit = {
    // The awaited future itself fails; the failure must propagate out of async.
    val base = Future[Int] { throw new Exception("problem") }
    val fut = async {
      val x = await(base)
      x * 2
    }
    intercept[Exception] { Await.result(fut, 2.seconds) }
  }

  @Test
  def `await failing future within async after await`(): Unit = {
    val base = Future[Any] { "five!".length }
    val fut = async {
      val a = await(base.mapTo[Int])                          // result: 5
      // mapTo[Int] on a String-producing future fails the chain here:
      val b = await((Future { (a * 2).toString }).mapTo[Int]) // result: ClassCastException
      val c = await(Future { (7 * 2).toString })              // result: "14"
      b + "-" + c
    }
    intercept[ClassCastException] { Await.result(fut, 2.seconds) }
  }
}
| scala/async | src/test/scala/scala/async/run/exceptions/ExceptionsSpec.scala | Scala | bsd-3-clause | 1,789 |
package im.tox.antox.tox
import java.sql.Timestamp
import im.tox.antox.data.State
import im.tox.antox.utils.{Friend, FriendInfo, FriendRequest}
import rx.lang.scala.subjects.BehaviorSubject
/**
 * Global Rx state shared across the UI. Each BehaviorSubject replays its most
 * recent value to new subscribers, so screens can render current state
 * immediately on subscription.
 */
object Reactive {

  // Whether a chat window is currently in the foreground; mirrored into State.
  val chatActive = BehaviorSubject[Boolean](false)

  val chatActiveSub = chatActive.subscribe(x => State.chatActive(x))

  // Key of the currently open conversation, if any; mirrored into State.
  val activeKey = BehaviorSubject[Option[String]](None)

  val activeKeySub = activeKey.subscribe(x => State.activeKey(x))

  val friendList = BehaviorSubject[Array[Friend]](new Array[Friend](0))

  val friendRequests = BehaviorSubject[Array[FriendRequest]](new Array[FriendRequest](0))

  // Latest message text and timestamp, keyed by friend client id.
  val lastMessages = BehaviorSubject[Map[String, (String, Timestamp)]](Map.empty[String, (String, Timestamp)])

  // Unread message count, keyed by friend client id.
  val unreadCounts = BehaviorSubject[Map[String, Integer]](Map.empty[String, Integer])

  val typing = BehaviorSubject[Boolean](false)

  val updatedMessages = BehaviorSubject[Boolean](true)

  // Combines the friend list with the last-message and unread-count maps into
  // FriendInfo values; re-emits whenever any of the three source subjects emits.
  val friendInfoList = friendList
    .combineLatestWith(lastMessages)((fl, lm) => (fl, lm))
    .combineLatestWith(unreadCounts)((tup, uc) => {
      tup match {
        case (fl, lm) => {
          fl.map(f => {
            val lastMessageTup: Option[(String, Timestamp)] = lm.get(f.clientId)
            val unreadCount: Option[Integer] = uc.get(f.clientId)
            (lastMessageTup, unreadCount) match {
              case (Some((lastMessage, lastMessageTimestamp)), Some(unreadCount)) => {
                new FriendInfo(f.isOnline, f.name, f.status, f.statusMessage, f.clientId, lastMessage, lastMessageTimestamp, unreadCount, f.alias)
              }
              // Known last message but no unread entry: treat as zero unread.
              case (Some((lastMessage, lastMessageTimestamp)), None) => {
                new FriendInfo(f.isOnline, f.name, f.status, f.statusMessage, f.clientId, lastMessage, lastMessageTimestamp, 0, f.alias)
              }
              // No last message known: empty message and zero unread.
              // NOTE(review): this Timestamp constructor is deprecated and its
              // year argument is 1900-relative, so this sentinel represents the
              // year 1900 — confirm consumers treat it purely as "no message".
              case _ => {
                new FriendInfo(f.isOnline, f.name, f.status, f.statusMessage, f.clientId, "", new Timestamp(0, 0, 0, 0, 0, 0, 0), 0, f.alias)
              }
            }
          })
        }
      }
    })

  val friendListAndRequests = friendInfoList.combineLatestWith(friendRequests)((fi, fr) => (fi, fr))
}
| 0xPoly/Antox | app/src/main/scala/im/tox/antox/tox/Reactive.scala | Scala | gpl-3.0 | 2,142 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.admin
import joptsimple.OptionParser
import org.I0Itec.zkclient.ZkClient
import kafka.utils._
import scala.collection.Map
import kafka.common.TopicAndPartition
object CheckReassignmentStatus extends Logging {

  /**
   * Command line tool that reads a partition reassignment JSON file and
   * reports, for each partition, whether the reassignment has completed,
   * failed, or is still in progress according to ZooKeeper.
   */
  def main(args: Array[String]): Unit = {
    val parser = new OptionParser
    val jsonFileOpt = parser.accepts("path-to-json-file", "REQUIRED: The JSON file with the list of partitions and the " +
      "new replicas they should be reassigned to")
      .withRequiredArg
      .describedAs("partition reassignment json file path")
      .ofType(classOf[String])
    val zkConnectOpt = parser.accepts("zookeeper", "REQUIRED: The connection string for the zookeeper connection in the " +
      "form host:port. Multiple URLS can be given to allow fail-over.")
      .withRequiredArg
      .describedAs("urls")
      .ofType(classOf[String])

    val options = parser.parse(args : _*)

    // Both options are mandatory; print usage and exit if either is missing.
    for (arg <- List(jsonFileOpt, zkConnectOpt)) {
      if (!options.has(arg)) {
        System.err.println("Missing required argument \"" + arg + "\"")
        parser.printHelpOn(System.err)
        System.exit(1)
      }
    }

    val jsonFile = options.valueOf(jsonFileOpt)
    val zkConnect = options.valueOf(zkConnectOpt)
    val jsonString = Utils.readFileAsString(jsonFile)
    val zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer)

    try {
      // Parse the reassignment description: a collection of
      // {topic, partition, replicas} entries.
      // NOTE(review): the cast to Array[Map[String, String]] looks suspicious —
      // JSON parsers typically yield a List, and "replicas" is unlikely to be a
      // String-to-String value. Confirm against the format actually produced by
      // the reassignment tool.
      val partitionsToBeReassigned = Json.parseFull(jsonString) match {
        case Some(reassignedPartitions) =>
          val partitions = reassignedPartitions.asInstanceOf[Array[Map[String, String]]]
          partitions.map { m =>
            val topic = m.asInstanceOf[Map[String, String]].get("topic").get
            val partition = m.asInstanceOf[Map[String, String]].get("partition").get.toInt
            val replicasList = m.asInstanceOf[Map[String, String]].get("replicas").get
            val newReplicas = replicasList.split(",").map(_.toInt)
            (TopicAndPartition(topic, partition), newReplicas.toSeq)
          }.toMap
        case None => Map.empty[TopicAndPartition, Seq[Int]]
      }

      val reassignedPartitionsStatus = checkIfReassignmentSucceeded(zkClient, partitionsToBeReassigned)
      reassignedPartitionsStatus.foreach { partition =>
        partition._2 match {
          case ReassignmentCompleted =>
            println("Partition %s reassignment completed successfully".format(partition._1))
          case ReassignmentFailed =>
            println("Partition %s reassignment failed".format(partition._1))
          case ReassignmentInProgress =>
            println("Partition %s reassignment in progress".format(partition._1))
        }
      }
    } finally {
      // Always release the ZooKeeper connection, even when parsing or the
      // status checks throw (previously the client was never closed).
      zkClient.close()
    }
  }

  /**
   * Determines the reassignment status of every partition listed in
   * `partitionsToBeReassigned`.
   */
  def checkIfReassignmentSucceeded(zkClient: ZkClient, partitionsToBeReassigned: Map[TopicAndPartition, Seq[Int]])
  :Map[TopicAndPartition, ReassignmentStatus] = {
    val partitionsBeingReassigned = ZkUtils.getPartitionsBeingReassigned(zkClient).mapValues(_.newReplicas)
    // for all partitions whose replica reassignment is complete, check the status
    partitionsToBeReassigned.map { topicAndPartition =>
      (topicAndPartition._1, checkIfPartitionReassignmentSucceeded(zkClient, topicAndPartition._1,
        topicAndPartition._2, partitionsToBeReassigned, partitionsBeingReassigned))
    }
  }

  /**
   * Status of a single partition:
   *  - still listed under the reassignment path   -> in progress
   *  - assigned replicas equal the requested ones -> completed
   *  - otherwise                                  -> failed
   *
   * `reassignedReplicas` is currently unused (the target replicas are re-read
   * from `partitionsToBeReassigned`); kept to preserve the public signature.
   */
  def checkIfPartitionReassignmentSucceeded(zkClient: ZkClient, topicAndPartition: TopicAndPartition,
                                            reassignedReplicas: Seq[Int],
                                            partitionsToBeReassigned: Map[TopicAndPartition, Seq[Int]],
                                            partitionsBeingReassigned: Map[TopicAndPartition, Seq[Int]]): ReassignmentStatus = {
    val newReplicas = partitionsToBeReassigned(topicAndPartition)
    partitionsBeingReassigned.get(topicAndPartition) match {
      case Some(partition) => ReassignmentInProgress
      case None =>
        // check if AR == RAR
        val assignedReplicas = ZkUtils.getReplicasForPartition(zkClient, topicAndPartition.topic, topicAndPartition.partition)
        if (assignedReplicas == newReplicas)
          ReassignmentCompleted
        else
          ReassignmentFailed
    }
  }
}
package graphique.backends
import java.nio.file.Path
import graphique.images
import org.apache.commons.codec.digest.DigestUtils
/**
* The single authoritative point for where files should be stored.
*/
class Paths(val pathPrefix: Path) {

  /** The path at which the image with the given id is stored. */
  def ofImage(id: ImageId): Path = pathPrefix.resolve(id)

  /**
   * The scheme in which the given image is stored in the cache: the directory
   * that holds the images, paired with the filename prefix shared by every
   * file belonging to the given tag.
   */
  def imagePathScheme(imageTag: String): (Path, String) =
    (pathPrefix, s"$imageTag-")
}
| amrhassan/graphique | src/main/scala/graphique/backends/Paths.scala | Scala | mit | 674 |
/*
* Copyright 2010-2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalaz.camel.core
import org.scalatest.{WordSpec, BeforeAndAfterAll}
import org.scalatest.matchers.MustMatchers
import scalaz._
import scalaz.concurrent.Strategy._
/**
 * Integration tests for non-blocking request-response routing over Jetty
 * endpoints, built with the scalaz-camel Kleisli-composition (`>=>`) DSL.
 *
 * @author Martin Krasser
 */
class CamelJettyTest extends CamelTestContext with WordSpec with MustMatchers with BeforeAndAfterAll {
  import Scalaz._

  // Run processors with the naive strategy (no extra thread hand-off).
  processorConcurrencyStrategy = Naive

  // Start/stop the Camel router around the whole suite.
  override def beforeAll = router.start
  override def afterAll = router.stop

  def support = afterWord("support")

  "scalaz.camel.core.Camel" should support {
    "non-blocking routing with asynchronous Jetty endpoints" in {
      // non-blocking server route with asynchronous CPS processors
      // and a Jetty endpoint using Jetty continuations.
      from("jetty:http://localhost:8766/test") {
        convertBodyToString >=> repeatBody
      }

      // non-blocking server route with asynchronous CPS processors
      // and a Jetty endpoint using an asynchronous HTTP client.
      from("direct:test-1") {
        to("jetty:http://localhost:8766/test") >=> appendToBody("-done")
      }

      // the only blocking operation here (waits for an answer)
      template.requestBody("direct:test-1", "test") must equal("testtest-done")
    }

    "passing the latest update of messages to error handlers" in {
      // latest update before failure is conversion of body to string
      from("jetty:http://localhost:8761/test") {
        attempt {
          convertBodyToString >=> failWithMessage("failure")
        } fallback {
          case e: Exception => appendToBody("-handled")
        }
      }

      // The fallback sees the already-converted (string) body, hence "test-handled".
      template.requestBody("http://localhost:8761/test", "test", classOf[String]) must equal ("test-handled")
    }
  }
}
package com.monsanto.arch.cloudformation.model.resource
import com.monsanto.arch.cloudformation.model.{Template, `Fn::GetAtt`, ResourceRef}
import org.scalatest.{FunSpec, Matchers}
import spray.json
import spray.json._
class Route53_UT extends FunSpec with Matchers {

  describe("Custom::RemoteRecordSet"){
    it ("should serialize as expected") {
      // The CloudFormation JSON the custom resource is expected to render to.
      val expected =
        """
          |{
          |  "Resources": {
          |    "TestRecord": {
          |      "Properties": {
          |        "DestinationRole": "TestDestinationRole",
          |        "Name": "TestHostName",
          |        "ServiceToken": "TestServiceToken",
          |        "HostedZoneName": "TestZone",
          |        "ResourceRecords": [
          |          "cnn.com"
          |        ],
          |        "TTL": "60",
          |        "Type": "CNAME"
          |      },
          |      "Type": "Custom::RemoteRoute53RecordSet"
          |    }
          |  }
          |}
        """.stripMargin.parseJson

      // A general CNAME record pointing at an external host.
      val recordSet = `Custom::RemoteRoute53RecordSet`.generalRecord(
        "TestRecord",
        "TestServiceToken",
        "TestDestinationRole",
        "TestHostName",
        Route53RecordType.CNAME,
        "TestZone",
        Seq("cnn.com"),
        "60")

      val actual = Template.fromResource(recordSet).toJson
      actual should be (expected)
    }
  }
}
| MonsantoCo/cloudformation-template-generator | src/test/scala/com/monsanto/arch/cloudformation/model/resource/Route53_UT.scala | Scala | bsd-3-clause | 1,347 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.main
import slamdata.Predef._
import java.io.File
import scala.collection.Seq // uh, yeah
import scalaz.IList
import scalaz.concurrent.Task
import scalaz.syntax.traverse._
sealed trait BackendConfig extends Product with Serializable

/**
 * APaths are relative to the real filesystem root.
 */
object BackendConfig {

  /**
   * A configuration under which no backends at all will be loaded.
   * This should only be used for testing purposes.
   */
  val Empty: BackendConfig = ExplodedDirs(IList.empty)

  /**
   * Builds an [[ExplodedDirs]] configuration from backend class names and
   * their classpaths, dropping classpath entries that do not exist on disk.
   */
  def fromBackends(backends: IList[(String, Seq[File])]): Task[BackendConfig] =
    backends
      .traverse {
        case (name, paths) =>
          Task delay {
            val existing = paths.filter(_.exists())
            ClassName(name) -> ClassPath(IList(existing: _*))
          }
      }
      .map(ExplodedDirs(_))

  /**
   * A single directory containing jars, each of which will be
   * loaded as a backend. With each jar, the `BackendModule` class
   * name will be determined from the `Manifest.mf` file.
   */
  final case class JarDirectory(dir: File) extends BackendConfig

  /**
   * Any files in the classpath will be loaded as jars; any directories
   * will be assumed to contain class files (e.g. the target output of
   * SBT compile). The class name should be the fully qualified Java
   * class name of the `BackendModule` implemented as a Scala object.
   * In most cases, this means the class name here will end with a `$`
   */
  final case class ExplodedDirs(backends: IList[(ClassName, ClassPath)]) extends BackendConfig
}
| drostron/quasar | interface/src/main/scala/quasar/main/BackendConfig.scala | Scala | apache-2.0 | 2,181 |
package rpgboss.editor.dialog.db
import scala.swing.GridPanel
import scala.swing.Window
import javax.swing.BorderFactory
import rpgboss.editor.Internationalized.getMessage
import rpgboss.editor.Internationalized.getMessageColon
import rpgboss.editor.StateMaster
import rpgboss.editor.dialog.DatabaseDialog
import rpgboss.editor.resourceselector.SpriteField
import rpgboss.editor.uibase.DesignGridPanel
import rpgboss.editor.uibase.SwingUtils.boolField
import rpgboss.editor.uibase.SwingUtils.lbl
import rpgboss.editor.uibase.SwingUtils.textField
import rpgboss.model.Constants
class VehiclesPane(
owner: Window,
sm: StateMaster,
val dbDiag: DatabaseDialog)
extends GridPanel(2, 2)
with DatabasePanel {
def panelName = getMessage("Vehicles")
for (i <- 0 until Constants.NUM_VEHICLES) {
val fPanel = new DesignGridPanel {
border =
BorderFactory.createTitledBorder(getMessage("Vehicle_%d".format(i)))
val model = dbDiag.model.vehicles(i)
val fSprite = new SpriteField(owner, sm, model.sprite, model.sprite = _)
val fName = textField(model.name, model.name = _)
val fCanFly =
boolField(getMessage("Can_Fly"), model.canFly, model.canFly = _)
row().grid(lbl(getMessageColon("Sprite"))).add(fSprite)
row().grid(lbl(getMessageColon("Name"))).add(fName)
row().grid().add(fCanFly)
}
contents += fPanel
}
} | toastythought/rpgboss | editor/src/main/scala/rpgboss/editor/dialog/db/VehiclesPanel.scala | Scala | agpl-3.0 | 1,393 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.it.http.parsing
import java.util.concurrent.TimeUnit
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.util.ByteString
import com.fasterxml.jackson.databind.JsonNode
import com.fasterxml.jackson.databind.ObjectMapper
import org.specs2.execute.Failure
import org.specs2.matcher.Matchers
import play.api.Application
import play.api.Configuration
import play.api.test._
import play.libs.F
import play.mvc.BodyParser
import play.mvc.Http
import play.mvc.Result
import play.test.Helpers
class JacksonJsonBodyParserSpec extends PlaySpecification with Matchers {
// Jackson Json support in Play relies on static
// global variables so these tests must run sequentially
sequential
private def tolerantJsonBodyParser(implicit app: Application): BodyParser[JsonNode] =
app.injector.instanceOf(classOf[BodyParser.TolerantJson])
"The JSON body parser" should {
def parse(json: String)(
implicit mat: Materializer,
app: Application
): F.Either[Result, JsonNode] = {
val encoding: String = "utf-8"
val bodyParser = tolerantJsonBodyParser
val fakeRequest: Http.RequestHeader = Helpers.fakeRequest().header(CONTENT_TYPE, "application/json").build()
await(
bodyParser(fakeRequest).asScala().run(Source.single(ByteString(json.getBytes(encoding))))
)
}
"uses JacksonJsonNodeModule" in new WithApplication() {
private val mapper: ObjectMapper = implicitly[Application].injector.instanceOf[ObjectMapper]
mapper.getRegisteredModuleIds.contains("play.utils.JacksonJsonNodeModule") must_== true
}
"parse a simple JSON body with custom Jackson json-read-features" in new WithApplication(
guiceBuilder =>
guiceBuilder.configure(
"akka.serialization.jackson.play.json-read-features.ALLOW_SINGLE_QUOTES" -> "true"
)
) {
val configuration: Configuration = implicitly[Application].configuration
configuration.get[Boolean]("akka.serialization.jackson.play.json-read-features.ALLOW_SINGLE_QUOTES") must beTrue
val either: F.Either[Result, JsonNode] = parse("""{ 'field1':'value1' }""")
either.left.ifPresent(verboseFailure)
either.right.get().get("field1").asText() must_=== "value1"
}
"parse very deep JSON bodies" in new WithApplication() {
val depth = 50000
private val either: F.Either[Result, JsonNode] = parse(s"""{"foo": ${"[" * depth} "asdf" ${"]" * depth} }""")
private var node: JsonNode = either.right.get().at("/foo")
while (node.isArray) {
node = node.get(0)
}
node.asText() must_== "asdf"
}
}
def verboseFailure(result: Result)(implicit mat: Materializer): Failure = {
val errorMessage = s"""Parse failure. Play-produced error HTML page:
| ${resultToString(result)}
|""".stripMargin
failure(errorMessage)
}
def resultToString(r: Result)(implicit mat: Materializer): String = {
r.body()
.consumeData(mat)
.toCompletableFuture
.get(6, TimeUnit.SECONDS)
.utf8String
}
}
| playframework/playframework | core/play-integration-test/src/it/scala/play/it/http/parsing/JacksonJsonBodyParserSpec.scala | Scala | apache-2.0 | 3,294 |
package net.rrm.ehour.ui.common.chart
import net.rrm.ehour.AbstractSpec
import org.joda.time.{DateTimeConstants, LocalDate}
import com.google.gson.JsonElement
class DateTimeSerializerSpec extends AbstractSpec {
  "DateTime Serializer" should {
    "serialize localDate's to yyyy-mm-dd format" in {
      // 14 March 2014, at the start of the day.
      val dateTime = new LocalDate(2014, DateTimeConstants.MARCH, 14).toDateTimeAtStartOfDay
      val serialized: JsonElement = new DateTimeSerializer().serialize(dateTime, null, null)

      serialized.getAsString should equal("2014-03-14")
    }
  }
}
| momogentoo/ehour | eHour-wicketweb/src/test/scala/net/rrm/ehour/ui/common/chart/DateTimeSerializerSpec.scala | Scala | gpl-2.0 | 519 |
package com.gu.management
import management.{ MemoryPoolMXBean, ManagementFactory }
import collection.JavaConversions._
import org.slf4j.{ Logger, LoggerFactory }
object JvmMetrics {
  private val logger = LoggerFactory.getLogger("com.gu.management.JvmMetrics")

  /** Every JVM metric that could be initialised on this platform. */
  lazy val all = numThreads.toList ::: totalThreads.toList ::: gcRates ::: memoryUsage

  /** Gauge for the number of currently live threads, if the thread MXBean is usable. */
  lazy val numThreads =
    try {
      // Probe the MXBean eagerly so unsupported platforms fail here, at
      // initialisation time, rather than on every metric collection.
      ManagementFactory.getThreadMXBean.getThreadCount
      Some(
        new GaugeMetric(
          group = "jvm",
          name = "num_threads",
          title = "Number of active threads",
          description = "Number of threads currently active as reported by the jvm",
          getValue = () => ManagementFactory.getThreadMXBean.getThreadCount
        )
      )
    } catch {
      case e: Exception =>
        // Include the cause, consistent with the gc/memory metrics below.
        logger.debug("Failed to initialise active threads metric", e)
        None
    }

  /** Gauge for the total number of threads started since the JVM started. */
  lazy val totalThreads =
    try {
      ManagementFactory.getThreadMXBean.getTotalStartedThreadCount
      Some(
        new GaugeMetric(
          group = "jvm",
          name = "total_threads",
          title = "Thread started threads",
          description = "Threads started since the application started as reported by the jvm",
          getValue = () => ManagementFactory.getThreadMXBean.getTotalStartedThreadCount
        )
      )
    } catch {
      case e: Exception =>
        logger.debug("Failed to initialise total threads metric", e)
        None
    }

  /** One timing metric per garbage collector, exposing collection count and total time. */
  lazy val gcRates =
    try {
      ManagementFactory.getGarbageCollectorMXBeans.toList map { gc =>
        new TimingMetric(
          group = "jvm",
          name = "gc_" + gc.getName.toLowerCase.replace(' ', '_'),
          title = "GC " + gc.getName,
          description = "Collection rates for the " + gc.getName + " garbage collector"
        ) {
          override def count = gc.getCollectionCount
          override def totalTimeInMillis = gc.getCollectionTime
        }
      }
    } catch {
      case e: Exception =>
        logger.trace("Failed to initialise gc metrics", e)
        Nil
    }

  /** One gauge per memory pool, reporting used memory as a percentage of the pool maximum. */
  lazy val memoryUsage =
    try {
      ManagementFactory.getMemoryPoolMXBeans().toList flatMap { memPool: MemoryPoolMXBean =>
        // Pools that report no usage at all are skipped entirely.
        Option(memPool.getUsage) map { _ =>
          new GaugeMetric(
            group = "jvm",
            name = "%s memory usage".format(memPool.getName),
            title = "%s Memory Usage".format(memPool.getName),
            description = "%s memory usage as percentage of max value".format(memPool.getName),
            getValue = () => {
              val currentUsage = memPool.getUsage
              // NOTE(review): getMax can be -1 when a pool has no defined maximum,
              // which would make this percentage negative — confirm whether such
              // pools should also be filtered out.
              math.round(currentUsage.getUsed.toFloat / currentUsage.getMax * 100)
            }
          )
        }
      }
    } catch {
      case e: Exception =>
        logger.trace("Failed to initialise memory usage metrics", e)
        Nil
    }
}
| guardian/guardian-management | management/src/main/scala/com/gu/management/JvmMetrics.scala | Scala | apache-2.0 | 2,951 |
package com.twitter.finagle.client
import com.twitter.finagle._
import com.twitter.finagle.stats.InMemoryStatsReceiver
import com.twitter.finagle.util.{StackRegistry, TestParam, TestParam2}
import com.twitter.util.{Var, Return, Activity, Future, Await}
import com.twitter.util.registry.{GlobalRegistry, SimpleRegistry, Entry}
import com.twitter.conversions.time.intToTimeableNumber
import org.junit.runner.RunWith
import org.mockito.Matchers.anyObject
import org.mockito.Mockito
import org.mockito.Mockito.{never, times, verify, when}
import org.scalatest.{BeforeAndAfter, FunSuite}
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
import java.net.SocketAddress
// Shared address variable the tests mutate to drive resolution of names
// looked up through the `crtnamer` Namer below.
object crtnamer {
  val va = Var[Addr](Addr.Pending)
}
// Namer resolved via the "/$/com.twitter.finagle.client.crtnamer" path in the
// tests below; every lookup binds to the shared `va` address variable.
class crtnamer extends Namer {
  import crtnamer._

  def enum(prefix: Path): Activity[Dtab] = Activity.pending

  def lookup(path: Path): Activity[NameTree[Name]] = {
    Activity(Var.value(Activity.Ok(NameTree.Leaf(Name.Bound(va, new Object())))))
  }
}
/**
 * Tests for `ClientRegistry`: tracking of client name resolution
 * (`expAllRegisteredClientsResolved`) and registration/deregistration of
 * client stacks in the `GlobalRegistry`.
 */
@RunWith(classOf[JUnitRunner])
class ClientRegistryTest extends FunSuite
  with StringClient
  with Eventually
  with IntegrationPatience
  with BeforeAndAfter
  with MockitoSugar {

  // Per-test fixture: a fresh stats receiver and a string client labelled "fancy".
  trait Ctx {
    val sr = new InMemoryStatsReceiver
    val stackClient = stringClient
      .configured(param.Stats(sr))
      .configured(param.ProtocolLibrary("fancy"))
  }

  before {
    // Every test starts from an empty client registry.
    ClientRegistry.clear()
  }

  test("ClientRegistry.expAllRegisteredClientsResolved zero clients")(new Ctx {
    // With no clients registered, the future is immediately satisfied.
    val allResolved0 = ClientRegistry.expAllRegisteredClientsResolved()
    assert(allResolved0.poll == Some(Return(Set())))
  })

  test("ClientRegistry.expAllRegisteredClientsResolved handles Addr.Bound")(new Ctx {
    val va = Var[Addr](Addr.Pending)
    val c = stackClient.newClient(Name.Bound(va, new Object()), "foo")

    // Unsatisfied while the client's address is still pending...
    val allResolved = ClientRegistry.expAllRegisteredClientsResolved()
    assert(allResolved.poll == None)

    // ...and satisfied once it becomes bound.
    va() = Addr.Bound(Set.empty[SocketAddress])
    eventually {
      assert(allResolved.poll == Some(Return(Set("foo"))))
    }
  })

  test("ClientRegistry.expAllRegisteredClientsResolved handles Addr.Failed")(new Ctx {
    val va = Var[Addr](Addr.Pending)
    val c = stackClient.newClient(Name.Bound(va, new Object()), "foo")

    val allResolved = ClientRegistry.expAllRegisteredClientsResolved()
    assert(allResolved.poll == None)

    // A failed resolution still counts as "resolved" here.
    va() = Addr.Failed(new Exception("foo"))
    eventually {
      assert(allResolved.poll == Some(Return(Set("foo"))))
    }
  })

  test("ClientRegistry.expAllRegisteredClientsResolved handles Addr.Neg")(new Ctx {
    val va = Var[Addr](Addr.Pending)
    val c = stackClient.newClient(Name.Bound(va, new Object()), "foo")

    val allResolved = ClientRegistry.expAllRegisteredClientsResolved()
    assert(allResolved.poll == None)

    // A negative resolution also counts as "resolved".
    va() = Addr.Neg
    eventually {
      assert(allResolved.poll == Some(Return(Set("foo"))))
    }
  })

  test("ClientRegistry.expAllRegisteredClientsResolved more than one client")(new Ctx {
    val va0 = Var[Addr](Addr.Pending)
    val va1 = Var[Addr](Addr.Pending)

    val c0 = stackClient.newClient(Name.Bound(va0, new Object()), "foo")
    val allResolved0 = ClientRegistry.expAllRegisteredClientsResolved()
    assert(allResolved0.poll == None)

    va0() = Addr.Bound(Set.empty[SocketAddress])
    eventually {
      assert(allResolved0.poll == Some(Return(Set("foo"))))
    }

    // Registering a second client leaves a fresh future unresolved until that
    // client's address is also bound.
    val c1 = stackClient.newClient(Name.Bound(va1, new Object()), "bar")
    val allResolved1 = ClientRegistry.expAllRegisteredClientsResolved()
    assert(allResolved1.poll == None)

    va1() = Addr.Bound(Set.empty[SocketAddress])
    eventually {
      assert(allResolved1.poll == Some(Return(Set("foo", "bar"))))
    }
  })

  test("ClientRegistry.expAllRegisteredClientsResolved handles Name.Path")(new Ctx {
    // Resolved through the `crtnamer` Namer defined above.
    val path = Path.read("/$/com.twitter.finagle.client.crtnamer/foo")
    val c = stackClient.newClient(Name.Path(path), "foo")

    val allResolved = ClientRegistry.expAllRegisteredClientsResolved()
    assert(allResolved.poll == None)

    crtnamer.va() = Addr.Bound(Set.empty[SocketAddress])
    eventually {
      assert(allResolved.poll == Some(Return(Set("foo"))))
    }
  })

  test("ClientRegistry registers clients in registry")(new Ctx {
    val path = Path.read("/$/com.twitter.finagle.client.crtnamer/foo")
    val simple = new SimpleRegistry
    GlobalRegistry.withRegistry(simple) {
      val c = stackClient.newClient(Name.Path(path), "foo")
      // Registry entries are keyed by client/<protocol>/<label>/<dest>/<module>.
      val prefix = Seq("client", "fancy", "foo", "/$/com.twitter.finagle.client.crtnamer/foo", "Pool")
      val filtered = GlobalRegistry.get.toSet.filter { e =>
        e.key.startsWith(prefix)
      }
      val expected = Seq(
        "high" -> "2147483647",
        "low" -> "0",
        "idleTime" -> "Duration.Top",
        "maxWaiters" -> "2147483647"
      ).map { case (key, value) => Entry(prefix :+ key, value) }
      expected.foreach { entry =>
        assert(filtered.contains(entry))
      }
    }
  })

  // copied from StackRegistryTest
  val headRole = Stack.Role("head")
  val nameRole = Stack.Role("name")

  val param1 = TestParam(999)

  // Builds a minimal two-module Int => Int stack: a "name" module that adds
  // p1 to each response, atop a "head" leaf backed by a constant mock service.
  def newStack(): Stack[ServiceFactory[Int, Int]] = {
    val mockSvc = mock[Service[Int, Int]]
    when(mockSvc.apply(anyObject[Int])).thenReturn(Future.value(10))

    val factory = ServiceFactory.const(mockSvc)

    val stack = new StackBuilder(Stack.Leaf(new Stack.Head {
      def role: Stack.Role = headRole
      def description: String = "the head!!"
      def parameters: Seq[Stack.Param[_]] = Seq(TestParam2.param)
    }, factory))
    val stackable: Stackable[ServiceFactory[Int, Int]] = new Stack.Module1[TestParam, ServiceFactory[Int, Int]] {
      def make(p: TestParam, l: ServiceFactory[Int, Int]): ServiceFactory[Int, Int] = l.map { _.map { _ + p.p1 }}

      val description: String = "description"
      val role: Stack.Role = nameRole
    }
    stack.push(stackable)
    stack.result
  }

  test("RegistryEntryLifecycle module registers a Stack and then deregisters it") {
    val stk = newStack()
    val params = Stack.Params.empty + param1 + param.Label("foo") + param.ProtocolLibrary("fancy")
    val simple = new SimpleRegistry()
    GlobalRegistry.withRegistry(simple) {
      val factory = (RegistryEntryLifecycle.module[Int, Int] +: stk).make(params)
      val expected = {
        Set(
          Entry(Seq("client", "fancy", "foo", "/$/fail", "name", "p1"), "999"),
          Entry(Seq("client", "fancy", "foo", "/$/fail", "head", "p2"), "1")
        )
      }
      assert(GlobalRegistry.get.toSet == expected)

      // Closing the factory must remove the entries again.
      Await.result(factory.close())
      assert(GlobalRegistry.get.isEmpty)
    }
  }

  test("RegistryEntryLifecycle module cleans up duplicates after service closes") {
    val stk = newStack()
    val params = Stack.Params.empty + param.Label("foo")

    // Two registrations under the same label create duplicate entries.
    ClientRegistry.register("first", stk, params)
    ClientRegistry.register("second", stk, params)
    val factory = (RegistryEntryLifecycle.module[Int, Int] +: stk).make(params)

    assert(ClientRegistry.registeredDuplicates.size == 2)
    assert(ClientRegistry.registeredDuplicates(0).name == "foo")
    assert(ClientRegistry.registeredDuplicates(0).addr == "second")
    assert(ClientRegistry.registeredDuplicates(1).name == "foo")
    assert(ClientRegistry.registeredDuplicates(1).addr == "/$/fail")

    // Closing the factory removes one of the duplicate entries.
    factory.close()
    assert(ClientRegistry.registeredDuplicates.size == 1)
    assert(ClientRegistry.registeredDuplicates(0).name == "foo")
    assert(ClientRegistry.registeredDuplicates(0).addr == "/$/fail")
  }
}
| liamstewart/finagle | finagle-core/src/test/scala/com/twitter/finagle/client/ClientRegistryTest.scala | Scala | apache-2.0 | 7,614 |
package sangria.validation.rules
import sangria.ast
import sangria.ast.AstVisitorCommand
import sangria.validation._
import scala.collection.mutable.{Set ⇒ MutableSet, ListBuffer}
import scala.language.postfixOps
/**
* No unused fragments
*
* A GraphQL document is only valid if all fragment definitions are spread
* within operations, or spread within other fragments spread within operations.
*/
class NoUnusedFragments extends ValidationRule {
  override def visitor(ctx: ValidationContext) = new AstValidatingVisitor {
    // Gathered while entering the document's top-level definitions and
    // examined once the whole document has been traversed.
    val fragmentDefs = ListBuffer[ast.FragmentDefinition]()
    val operationDefs = ListBuffer[ast.OperationDefinition]()

    override val onEnter: ValidationVisit = {
      case od: ast.OperationDefinition ⇒
        operationDefs += od
        // Fragment spreads inside operations are discovered later via the
        // document analyzer, so there is no need to descend here.
        AstVisitorCommand.RightSkip

      case fd: ast.FragmentDefinition ⇒
        fragmentDefs += fd
        AstVisitorCommand.RightSkip
    }

    override def onLeave: ValidationVisit = {
      case ast.Document(_, _, _, _) ⇒
        // A fragment counts as used if it is reachable (transitively) from any operation.
        val usedFragmentNames = MutableSet[String]()

        for (operation ← operationDefs)
          ctx.documentAnalyzer.getRecursivelyReferencedFragments(operation).foreach { fragment ⇒
            usedFragmentNames += fragment.name
          }

        val errors = fragmentDefs.toVector.collect {
          case fd if !usedFragmentNames.contains(fd.name) ⇒
            UnusedFragmentViolation(fd.name, ctx.sourceMapper, fd.position.toList)
        }

        if (errors.isEmpty) AstVisitorCommand.RightContinue else Left(errors)
    }
  }
}
package models.service.visualization
import models.database.alias.Artist
import models.database.facade.ArtistFacade
import models.database.facade.service._
import models.util.GroupMeasureConversion
import play.api.libs.json.{JsValue, Json}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
class ServiceData(identifier:Either[Int,String]) extends GroupMeasureConversion {

  // Facades for every streaming service whose album data is aggregated.
  val serviceAlbumFacadeList:List[ServiceAlbumFacade] = List(SpotifyAlbumFacade, DeezerAlbumFacade, NapsterAlbumFacade)
  val serviceArtistFacadeList: List[ServiceArtistTrait] = List(SpotifyArtistFacade, DeezerArtistFacade, NapsterArtistFacade)

  /**
   * Collects, per streaming service, the album counts for the user's favourite
   * artists and renders everything (user artists, per-service counts, totals
   * and missing-album counts) as a single JSON object.
   */
  def retrieve() = {
    val favouriteArtists = ArtistFacade(identifier).usersFavouriteArtistsWithTrackCountAndScore()
    val artistIds = favouriteArtists.map(_._1.id)

    // One future per service, each yielding (serviceId, counts keyed by group measure).
    val countsPerService: Future[List[(String, Map[Long, Long])]] =
      Future.traverse(serviceArtistFacadeList) { facade =>
        Future {
          val (serviceId, counts) = facade.countArtistsAlbums(artistIds)
          serviceId -> toMap(counts)
        }
      }

    for {
      albumCounts <- countsPerService
      missingCounts <- Future(missingAlbumCounts(artistIds))
    } yield {
      val totals = mergeMaps(albumCounts.map(_._2))
      val perServiceJson = albumCounts.foldLeft(Json.obj()) {
        case (acc, (serviceId, counts)) => acc + (serviceId, toJson(counts))
      }
      Json.obj(
        "user" -> artistsToJson(favouriteArtists),
        "total" -> toJson(totals),
        "missing" -> missingCounts
      ) ++ perServiceJson
    }
  }

  // One JSON object per (artist, track count, score) triple.
  private def artistsToJson(artists:List[(Artist,Long,Double)]): List[JsValue] =
    for ((artist, trackCount, score) <- artists)
      yield Json.obj(
        "id" -> artist.id,
        "name" -> artist.name,
        "trackCount" -> trackCount,
        "score" -> score
      )

  // Per service: the number of albums by the given artists the user does not have.
  private def missingAlbumCounts(artistIds: List[Long]): JsValue =
    serviceAlbumFacadeList.foldLeft(Json.obj()) { (json, facade) =>
      json + (facade.serviceId, Json.toJson(facade(identifier).countMissingUserAlbums(artistIds)))
    }
}
| haffla/stream-compare | app/models/service/visualization/ServiceData.scala | Scala | gpl-3.0 | 2,184 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.stream.sql
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.data.RowData
import org.apache.flink.table.planner.runtime.utils.{StreamingTestBase, TestingAppendRowDataSink}
import org.apache.flink.table.runtime.typeutils.InternalTypeInfo
import org.apache.flink.table.types.logical.{IntType, VarCharType}
import org.junit.Assert._
import org.junit.Test
/** Streaming IT case covering SQL inline VALUES clauses. */
class ValuesITCase extends StreamingTestBase {

  @Test
  def testValues(): Unit = {
    // Inline VALUES producing two (INT, VARCHAR(5)) rows sharing the same key.
    val sqlQuery = "SELECT * FROM (VALUES (1, 'Bob'), (1, 'Alice')) T(a, b)"

    // Physical row type matching the VALUES schema above.
    val rowType = InternalTypeInfo.ofFields(new IntType(), new VarCharType(5))

    val stream = tEnv.sqlQuery(sqlQuery).toAppendStream[RowData]
    val appendSink = new TestingAppendRowDataSink(rowType)
    stream.addSink(appendSink).setParallelism(1)
    env.execute()

    // Emission order is not guaranteed, so compare sorted renderings.
    val expected = List("+I(1,Alice)", "+I(1,Bob)")
    assertEquals(expected.sorted, appendSink.getAppendResults.sorted)
  }
}
| apache/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/runtime/stream/sql/ValuesITCase.scala | Scala | apache-2.0 | 1,849 |
/*
* Copyright 2001-2015 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.examples.asyncfunspec.loanfixture
import java.util.concurrent.ConcurrentHashMap
import scala.concurrent.Future
import scala.concurrent.ExecutionContext
object DbServer { // Simulating a database server
  type Db = StringBuffer

  // Registry of live databases keyed by name; ConcurrentHashMap keeps it thread-safe.
  private final val databases = new ConcurrentHashMap[String, Db]

  /** Creates (or replaces) the database registered under `name` and returns it. */
  def createDb(name: String): Db = {
    val freshDb = new StringBuffer // java.lang.StringBuffer is thread-safe
    databases.put(name, freshDb)
    freshDb
  }

  /** Unregisters the database with the given `name`, if present. */
  def removeDb(name: String): Unit = {
    databases.remove(name)
  }
}
// Defining actor messages: the sealed StringOp hierarchy covers the
// fire-and-forget commands handled by StringActor's `!` method.
sealed abstract class StringOp
case object Clear extends StringOp
case class Append(value: String) extends StringOp
// GetValue is deliberately NOT part of StringOp: it is the query message,
// accepted only by the `?` method (typed as GetValue.type), so keeping it
// outside the sealed hierarchy keeps the `!` match exhaustive.
case object GetValue
// Simulating an actor: every access to the internal buffer is serialized
// by synchronizing on this instance.
class StringActor {
  private final val sb = new StringBuilder

  /** Fire-and-forget message send: applies `op` to the buffer under the lock. */
  def !(op: StringOp): Unit = synchronized {
    op match {
      case Clear         => sb.clear()
      case Append(value) => sb.append(value)
    }
  }

  /** Query message: asynchronously snapshots the buffer's current contents. */
  def ?(get: GetValue.type)(implicit c: ExecutionContext): Future[String] =
    Future(synchronized(sb.toString))
}
import org.scalatest._
import DbServer._
import java.util.UUID.randomUUID
/**
 * Demonstrates ScalaTest's loan-fixture pattern in an async spec: each
 * `withX` method creates a fixture, "loans" it to the test code, and uses
 * `complete ... lastly` so cleanup runs once the returned future completes,
 * whether the test passes or fails.
 */
class ExampleSpec extends AsyncFunSpec {
  /**
   * Creates a uniquely named Db, seeds it with "ScalaTest is ", loans the
   * populated-Db future to `testCode`, and removes the Db afterwards.
   */
  def withDatabase(testCode: Future[Db] => Future[Assertion]) = {
    val dbName = randomUUID.toString // generate a unique db name
    val futureDb = Future { createDb(dbName) } // create the fixture
    complete {
      // StringBuffer.append returns the buffer itself, so this stays Future[Db].
      val futurePopulatedDb =
        futureDb map { db =>
          db.append("ScalaTest is ") // perform setup
        }
      testCode(futurePopulatedDb) // "loan" the fixture to the test code
    } lastly {
      removeDb(dbName) // ensure the fixture will be cleaned up
    }
  }
  /**
   * Creates a StringActor, seeds it with "ScalaTest is ", loans it to
   * `testCode`, and clears it afterwards.
   */
  def withActor(testCode: StringActor => Future[Assertion]) = {
    val actor = new StringActor
    complete {
      actor ! Append("ScalaTest is ") // set up the fixture
      testCode(actor) // "loan" the fixture to the test code
    } lastly {
      actor ! Clear // ensure the fixture will be cleaned up
    }
  }
  describe("Testing") {
    // This test needs the actor fixture
    it("should be productive") {
      withActor { actor =>
        actor ! Append("productive!")
        val futureString = actor ? GetValue
        futureString map { s =>
          assert(s == "ScalaTest is productive!")
        }
      }
    }
  }
  describe("Test code") {
    // This test needs the database fixture
    it("should be readable") {
      withDatabase { futureDb =>
        futureDb map { db =>
          db.append("readable!")
          assert(db.toString == "ScalaTest is readable!")
        }
      }
    }
    // This test needs both the actor and the database
    it("should be clear and concise") {
      withDatabase { futureDb =>
        withActor { actor => // loan-fixture methods compose
          actor ! Append("concise!")
          val futureString = actor ? GetValue
          // zip pairs the two independent fixture results into one future.
          val futurePair: Future[(Db, String)] =
            futureDb zip futureString
          futurePair map { case (db, s) =>
            db.append("clear!")
            assert(db.toString == "ScalaTest is clear!")
            assert(s == "ScalaTest is concise!")
          }
        }
      }
    }
  }
}
| dotty-staging/scalatest | examples/src/test/scala/org/scalatest/examples/asyncfunspec/loanfixture/ExampleSpec.scala | Scala | apache-2.0 | 3,802 |
package vaadin.scala
import vaadin.scala.mixins.FormLayoutMixin
// Marker mixin placing FormLayout in the wrapper's mixin hierarchy.
package mixins {
  trait FormLayoutMixin extends AbstractOrderedLayoutMixin
}
/**
 * Scala wrapper around `com.vaadin.ui.FormLayout`; `p` is the wrapped peer
 * component (a mixin-enhanced Vaadin FormLayout by default).
 */
class FormLayout(override val p: com.vaadin.ui.FormLayout with FormLayoutMixin = new com.vaadin.ui.FormLayout with FormLayoutMixin) extends AbstractOrderedLayout(p)
package regolic.sat
import scala.collection.mutable.HashMap
import scala.collection.mutable.HashSet
import scala.collection.mutable.Stack
import scala.collection.mutable.ArrayBuffer
/** Renders a resolution proof (an array of inferences) as Graphviz dot or plain text. */
object ProofPrinter {
  /**
   * Produces a Graphviz digraph: one vertex per inference (labeled with its
   * clause), and edges from each resolution's two premises to its conclusion.
   * NOTE(review): keying the map by inference assumes inferences in the array
   * are pairwise distinct (by equality) — duplicates would collapse; confirm.
   */
  def toDot(inferences: Array[Inference]): String = {
    def nodeId(id: Int): String = "v" + id
    // Map each inference to its position so premises can be looked up by index.
    val infToIndex = inferences.zipWithIndex.toMap
    val vertexLabels: Seq[String] = infToIndex.map(p =>
      nodeId(p._2) + " [label=\\"" + p._1.clause.mkString(", ") + "\\"];").toSeq
    val edges: Seq[String] = infToIndex.flatMap{
      case (ResolutionInference(cl, left, right), i) => List(
        nodeId(infToIndex(left)) + " -> " + nodeId(i) + ";",
        nodeId(infToIndex(right)) + " -> " + nodeId(i) + ";")
      // Non-resolution inferences (e.g. inputs) contribute no edges.
      case (_, _) => List()
    }.toSeq
    "digraph proof {\\n" +
    vertexLabels.mkString(" ", "\\n ", "\\n") +
    edges.mkString(" ", "\\n ", "\\n") + "}"
  }
  /**
   * Plain-text listing: one numbered line per inference, marking inputs as
   * INPUT and resolutions as RESOL with their premises' indices.
   */
  def toString(inferences: Array[Inference]): String = {
    val infToIndex = inferences.zipWithIndex.toMap
    inferences.zipWithIndex.map{
      case (InputInference(cl), i) =>
        "[" + i + "] " +
        cl.mkString(", ") +
        " INPUT"
      case (ResolutionInference(cl, left, right), i) =>
        "[" + i + "] " +
        cl.mkString(", ") +
        " RESOL {" + infToIndex(left) + ", " + infToIndex(right) + "}"
    }.mkString("\\n")
  }
}
| regb/scabolic | src/main/scala/regolic/sat/ProofPrinter.scala | Scala | mit | 1,366 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.internal
import java.util.Locale
import java.util.concurrent.TimeUnit
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.metrics.GarbageCollectionMetrics
import org.apache.spark.network.shuffle.Constants
import org.apache.spark.network.util.ByteUnit
import org.apache.spark.scheduler.{EventLoggingListener, SchedulingMode}
import org.apache.spark.shuffle.sort.io.LocalDiskShuffleDataIO
import org.apache.spark.storage.{DefaultTopologyMapper, RandomBlockReplicationPolicy}
import org.apache.spark.unsafe.array.ByteArrayMethods
import org.apache.spark.util.Utils
import org.apache.spark.util.collection.unsafe.sort.UnsafeSorterSpillReader.MAX_BUFFER_SIZE_BYTES
package object config {
  // Shared prefixes used to build related configuration keys below.
  private[spark] val SPARK_DRIVER_PREFIX = "spark.driver"
  private[spark] val SPARK_EXECUTOR_PREFIX = "spark.executor"
  private[spark] val SPARK_TASK_PREFIX = "spark.task"
  // Prefix for listener-bus event-queue configuration keys.
  private[spark] val LISTENER_BUS_EVENT_QUEUE_PREFIX = "spark.scheduler.listenerbus.eventqueue"
private[spark] val RESOURCES_DISCOVERY_PLUGIN =
ConfigBuilder("spark.resources.discoveryPlugin")
.doc("Comma-separated list of class names implementing" +
"org.apache.spark.api.resource.ResourceDiscoveryPlugin to load into the application." +
"This is for advanced users to replace the resource discovery class with a " +
"custom implementation. Spark will try each class specified until one of them " +
"returns the resource information for that resource. It tries the discovery " +
"script last if none of the plugins return information for that resource.")
.version("3.0.0")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val DRIVER_RESOURCES_FILE =
ConfigBuilder("spark.driver.resourcesFile")
.internal()
.doc("Path to a file containing the resources allocated to the driver. " +
"The file should be formatted as a JSON array of ResourceAllocation objects. " +
"Only used internally in standalone mode.")
.version("3.0.0")
.stringConf
.createOptional
private[spark] val DRIVER_CLASS_PATH =
ConfigBuilder(SparkLauncher.DRIVER_EXTRA_CLASSPATH)
.version("1.0.0")
.stringConf
.createOptional
private[spark] val DRIVER_JAVA_OPTIONS =
ConfigBuilder(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS)
.withPrepended(SparkLauncher.DRIVER_DEFAULT_JAVA_OPTIONS)
.version("1.0.0")
.stringConf
.createOptional
private[spark] val DRIVER_LIBRARY_PATH =
ConfigBuilder(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH)
.version("1.0.0")
.stringConf
.createOptional
private[spark] val DRIVER_USER_CLASS_PATH_FIRST =
ConfigBuilder("spark.driver.userClassPathFirst")
.version("1.3.0")
.booleanConf
.createWithDefault(false)
private[spark] val DRIVER_CORES = ConfigBuilder("spark.driver.cores")
.doc("Number of cores to use for the driver process, only in cluster mode.")
.version("1.3.0")
.intConf
.createWithDefault(1)
private[spark] val DRIVER_MEMORY = ConfigBuilder(SparkLauncher.DRIVER_MEMORY)
.doc("Amount of memory to use for the driver process, in MiB unless otherwise specified.")
.version("1.1.1")
.bytesConf(ByteUnit.MiB)
.createWithDefaultString("1g")
private[spark] val DRIVER_MEMORY_OVERHEAD = ConfigBuilder("spark.driver.memoryOverhead")
.doc("The amount of non-heap memory to be allocated per driver in cluster mode, " +
"in MiB unless otherwise specified.")
.version("2.3.0")
.bytesConf(ByteUnit.MiB)
.createOptional
private[spark] val DRIVER_LOG_DFS_DIR =
ConfigBuilder("spark.driver.log.dfsDir").version("3.0.0").stringConf.createOptional
private[spark] val DRIVER_LOG_LAYOUT =
ConfigBuilder("spark.driver.log.layout")
.version("3.0.0")
.stringConf
.createOptional
private[spark] val DRIVER_LOG_PERSISTTODFS =
ConfigBuilder("spark.driver.log.persistToDfs.enabled")
.version("3.0.0")
.booleanConf
.createWithDefault(false)
private[spark] val DRIVER_LOG_ALLOW_EC =
ConfigBuilder("spark.driver.log.allowErasureCoding")
.version("3.0.0")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_ENABLED = ConfigBuilder("spark.eventLog.enabled")
.version("1.0.0")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_DIR = ConfigBuilder("spark.eventLog.dir")
.version("1.0.0")
.stringConf
.createWithDefault(EventLoggingListener.DEFAULT_LOG_DIR)
private[spark] val EVENT_LOG_COMPRESS =
ConfigBuilder("spark.eventLog.compress")
.version("1.0.0")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_BLOCK_UPDATES =
ConfigBuilder("spark.eventLog.logBlockUpdates.enabled")
.version("2.3.0")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_ALLOW_EC =
ConfigBuilder("spark.eventLog.erasureCoding.enabled")
.version("3.0.0")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_TESTING =
ConfigBuilder("spark.eventLog.testing")
.internal()
.version("1.0.1")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_OUTPUT_BUFFER_SIZE = ConfigBuilder("spark.eventLog.buffer.kb")
.doc("Buffer size to use when writing to output streams, in KiB unless otherwise specified.")
.version("1.0.0")
.bytesConf(ByteUnit.KiB)
.createWithDefaultString("100k")
private[spark] val EVENT_LOG_STAGE_EXECUTOR_METRICS =
ConfigBuilder("spark.eventLog.logStageExecutorMetrics")
.doc("Whether to write per-stage peaks of executor metrics (for each executor) " +
"to the event log.")
.version("3.0.0")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_GC_METRICS_YOUNG_GENERATION_GARBAGE_COLLECTORS =
ConfigBuilder("spark.eventLog.gcMetrics.youngGenerationGarbageCollectors")
.doc("Names of supported young generation garbage collector. A name usually is " +
" the return of GarbageCollectorMXBean.getName. The built-in young generation garbage " +
s"collectors are ${GarbageCollectionMetrics.YOUNG_GENERATION_BUILTIN_GARBAGE_COLLECTORS}")
.version("3.0.0")
.stringConf
.toSequence
.createWithDefault(GarbageCollectionMetrics.YOUNG_GENERATION_BUILTIN_GARBAGE_COLLECTORS)
private[spark] val EVENT_LOG_GC_METRICS_OLD_GENERATION_GARBAGE_COLLECTORS =
ConfigBuilder("spark.eventLog.gcMetrics.oldGenerationGarbageCollectors")
.doc("Names of supported old generation garbage collector. A name usually is " +
"the return of GarbageCollectorMXBean.getName. The built-in old generation garbage " +
s"collectors are ${GarbageCollectionMetrics.OLD_GENERATION_BUILTIN_GARBAGE_COLLECTORS}")
.version("3.0.0")
.stringConf
.toSequence
.createWithDefault(GarbageCollectionMetrics.OLD_GENERATION_BUILTIN_GARBAGE_COLLECTORS)
private[spark] val EVENT_LOG_OVERWRITE =
ConfigBuilder("spark.eventLog.overwrite")
.version("1.0.0")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_CALLSITE_LONG_FORM =
ConfigBuilder("spark.eventLog.longForm.enabled")
.version("2.4.0")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_ENABLE_ROLLING =
ConfigBuilder("spark.eventLog.rolling.enabled")
.doc("Whether rolling over event log files is enabled. If set to true, it cuts down " +
"each event log file to the configured size.")
.version("3.0.0")
.booleanConf
.createWithDefault(false)
private[spark] val EVENT_LOG_ROLLING_MAX_FILE_SIZE =
ConfigBuilder("spark.eventLog.rolling.maxFileSize")
.doc(s"When ${EVENT_LOG_ENABLE_ROLLING.key}=true, specifies the max size of event log file" +
" to be rolled over.")
.version("3.0.0")
.bytesConf(ByteUnit.BYTE)
.checkValue(_ >= ByteUnit.MiB.toBytes(10), "Max file size of event log should be " +
"configured to be at least 10 MiB.")
.createWithDefaultString("128m")
private[spark] val EXECUTOR_ID =
ConfigBuilder("spark.executor.id").version("1.2.0").stringConf.createOptional
private[spark] val EXECUTOR_CLASS_PATH =
ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_CLASSPATH)
.version("1.0.0")
.stringConf
.createOptional
private[spark] val EXECUTOR_HEARTBEAT_DROP_ZERO_ACCUMULATOR_UPDATES =
ConfigBuilder("spark.executor.heartbeat.dropZeroAccumulatorUpdates")
.internal()
.version("3.0.0")
.booleanConf
.createWithDefault(true)
private[spark] val EXECUTOR_HEARTBEAT_INTERVAL =
ConfigBuilder("spark.executor.heartbeatInterval")
.version("1.1.0")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("10s")
private[spark] val EXECUTOR_HEARTBEAT_MAX_FAILURES =
ConfigBuilder("spark.executor.heartbeat.maxFailures")
.internal()
.version("1.6.2")
.intConf
.createWithDefault(60)
private[spark] val EXECUTOR_PROCESS_TREE_METRICS_ENABLED =
ConfigBuilder("spark.executor.processTreeMetrics.enabled")
.doc("Whether to collect process tree metrics (from the /proc filesystem) when collecting " +
"executor metrics.")
.version("3.0.0")
.booleanConf
.createWithDefault(false)
private[spark] val EXECUTOR_METRICS_POLLING_INTERVAL =
ConfigBuilder("spark.executor.metrics.pollingInterval")
.doc("How often to collect executor metrics (in milliseconds). " +
"If 0, the polling is done on executor heartbeats. " +
"If positive, the polling is done at this interval.")
.version("3.0.0")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("0")
private[spark] val EXECUTOR_METRICS_FILESYSTEM_SCHEMES =
ConfigBuilder("spark.executor.metrics.fileSystemSchemes")
.doc("The file system schemes to report in executor metrics.")
.version("3.1.0")
.stringConf
.createWithDefaultString("file,hdfs")
private[spark] val EXECUTOR_JAVA_OPTIONS =
ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_JAVA_OPTIONS)
.withPrepended(SparkLauncher.EXECUTOR_DEFAULT_JAVA_OPTIONS)
.version("1.0.0")
.stringConf
.createOptional
private[spark] val EXECUTOR_LIBRARY_PATH =
ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_LIBRARY_PATH)
.version("1.0.0")
.stringConf
.createOptional
private[spark] val EXECUTOR_USER_CLASS_PATH_FIRST =
ConfigBuilder("spark.executor.userClassPathFirst")
.version("1.3.0")
.booleanConf
.createWithDefault(false)
private[spark] val EXECUTOR_CORES = ConfigBuilder(SparkLauncher.EXECUTOR_CORES)
.version("1.0.0")
.intConf
.createWithDefault(1)
private[spark] val EXECUTOR_MEMORY = ConfigBuilder(SparkLauncher.EXECUTOR_MEMORY)
.doc("Amount of memory to use per executor process, in MiB unless otherwise specified.")
.version("0.7.0")
.bytesConf(ByteUnit.MiB)
.createWithDefaultString("1g")
private[spark] val EXECUTOR_MEMORY_OVERHEAD = ConfigBuilder("spark.executor.memoryOverhead")
.doc("The amount of non-heap memory to be allocated per executor, in MiB unless otherwise" +
" specified.")
.version("2.3.0")
.bytesConf(ByteUnit.MiB)
.createOptional
private[spark] val CORES_MAX = ConfigBuilder("spark.cores.max")
.doc("When running on a standalone deploy cluster or a Mesos cluster in coarse-grained " +
"sharing mode, the maximum amount of CPU cores to request for the application from across " +
"the cluster (not from each machine). If not set, the default will be " +
"`spark.deploy.defaultCores` on Spark's standalone cluster manager, or infinite " +
"(all available cores) on Mesos.")
.version("0.6.0")
.intConf
.createOptional
private[spark] val MEMORY_OFFHEAP_ENABLED = ConfigBuilder("spark.memory.offHeap.enabled")
.doc("If true, Spark will attempt to use off-heap memory for certain operations. " +
"If off-heap memory use is enabled, then spark.memory.offHeap.size must be positive.")
.version("1.6.0")
.withAlternative("spark.unsafe.offHeap")
.booleanConf
.createWithDefault(false)
private[spark] val MEMORY_OFFHEAP_SIZE = ConfigBuilder("spark.memory.offHeap.size")
.doc("The absolute amount of memory which can be used for off-heap allocation, " +
" in bytes unless otherwise specified. " +
"This setting has no impact on heap memory usage, so if your executors' total memory " +
"consumption must fit within some hard limit then be sure to shrink your JVM heap size " +
"accordingly. This must be set to a positive value when spark.memory.offHeap.enabled=true.")
.version("1.6.0")
.bytesConf(ByteUnit.BYTE)
.checkValue(_ >= 0, "The off-heap memory size must not be negative")
.createWithDefault(0)
private[spark] val MEMORY_STORAGE_FRACTION = ConfigBuilder("spark.memory.storageFraction")
.doc("Amount of storage memory immune to eviction, expressed as a fraction of the " +
"size of the region set aside by spark.memory.fraction. The higher this is, the " +
"less working memory may be available to execution and tasks may spill to disk more " +
"often. Leaving this at the default value is recommended. ")
.version("1.6.0")
.doubleConf
.checkValue(v => v >= 0.0 && v < 1.0, "Storage fraction must be in [0,1)")
.createWithDefault(0.5)
private[spark] val MEMORY_FRACTION = ConfigBuilder("spark.memory.fraction")
.doc("Fraction of (heap space - 300MB) used for execution and storage. The " +
"lower this is, the more frequently spills and cached data eviction occur. " +
"The purpose of this config is to set aside memory for internal metadata, " +
"user data structures, and imprecise size estimation in the case of sparse, " +
"unusually large records. Leaving this at the default value is recommended. ")
.version("1.6.0")
.doubleConf
.createWithDefault(0.6)
private[spark] val STORAGE_SAFETY_FRACTION = ConfigBuilder("spark.storage.safetyFraction")
.version("1.1.0")
.doubleConf
.createWithDefault(0.9)
private[spark] val STORAGE_UNROLL_MEMORY_THRESHOLD =
ConfigBuilder("spark.storage.unrollMemoryThreshold")
.doc("Initial memory to request before unrolling any block")
.version("1.1.0")
.longConf
.createWithDefault(1024 * 1024)
private[spark] val STORAGE_REPLICATION_PROACTIVE =
ConfigBuilder("spark.storage.replication.proactive")
.doc("Enables proactive block replication for RDD blocks. " +
"Cached RDD block replicas lost due to executor failures are replenished " +
"if there are any existing available replicas. This tries to " +
"get the replication level of the block to the initial number")
.version("2.2.0")
.booleanConf
.createWithDefault(true)
private[spark] val STORAGE_MEMORY_MAP_THRESHOLD =
ConfigBuilder("spark.storage.memoryMapThreshold")
.doc("Size in bytes of a block above which Spark memory maps when " +
"reading a block from disk. " +
"This prevents Spark from memory mapping very small blocks. " +
"In general, memory mapping has high overhead for blocks close to or below " +
"the page size of the operating system.")
.version("0.9.2")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("2m")
private[spark] val STORAGE_REPLICATION_POLICY =
ConfigBuilder("spark.storage.replication.policy")
.version("2.1.0")
.stringConf
.createWithDefaultString(classOf[RandomBlockReplicationPolicy].getName)
private[spark] val STORAGE_REPLICATION_TOPOLOGY_MAPPER =
ConfigBuilder("spark.storage.replication.topologyMapper")
.version("2.1.0")
.stringConf
.createWithDefaultString(classOf[DefaultTopologyMapper].getName)
private[spark] val STORAGE_CACHED_PEERS_TTL = ConfigBuilder("spark.storage.cachedPeersTtl")
.version("1.1.1")
.intConf
.createWithDefault(60 * 1000)
private[spark] val STORAGE_MAX_REPLICATION_FAILURE =
ConfigBuilder("spark.storage.maxReplicationFailures")
.version("1.1.1")
.intConf
.createWithDefault(1)
private[spark] val STORAGE_DECOMMISSION_ENABLED =
ConfigBuilder("spark.storage.decommission.enabled")
.doc("Whether to decommission the block manager when decommissioning executor")
.version("3.1.0")
.booleanConf
.createWithDefault(false)
private[spark] val STORAGE_DECOMMISSION_SHUFFLE_BLOCKS_ENABLED =
ConfigBuilder("spark.storage.decommission.shuffleBlocks.enabled")
.doc("Whether to transfer shuffle blocks during block manager decommissioning. Requires " +
"a migratable shuffle resolver (like sort based shuffle)")
.version("3.1.0")
.booleanConf
.createWithDefault(false)
private[spark] val STORAGE_DECOMMISSION_SHUFFLE_MAX_THREADS =
ConfigBuilder("spark.storage.decommission.shuffleBlocks.maxThreads")
.doc("Maximum number of threads to use in migrating shuffle files.")
.version("3.1.0")
.intConf
.checkValue(_ > 0, "The maximum number of threads should be positive")
.createWithDefault(8)
private[spark] val STORAGE_DECOMMISSION_RDD_BLOCKS_ENABLED =
ConfigBuilder("spark.storage.decommission.rddBlocks.enabled")
.doc("Whether to transfer RDD blocks during block manager decommissioning.")
.version("3.1.0")
.booleanConf
.createWithDefault(false)
private[spark] val STORAGE_DECOMMISSION_MAX_REPLICATION_FAILURE_PER_BLOCK =
ConfigBuilder("spark.storage.decommission.maxReplicationFailuresPerBlock")
.internal()
.doc("Maximum number of failures which can be handled for the replication of " +
"one RDD block when block manager is decommissioning and trying to move its " +
"existing blocks.")
.version("3.1.0")
.intConf
.createWithDefault(3)
private[spark] val STORAGE_DECOMMISSION_REPLICATION_REATTEMPT_INTERVAL =
ConfigBuilder("spark.storage.decommission.replicationReattemptInterval")
.internal()
.doc("The interval of time between consecutive cache block replication reattempts " +
"happening on each decommissioning executor (due to storage decommissioning).")
.version("3.1.0")
.timeConf(TimeUnit.MILLISECONDS)
.checkValue(_ > 0, "Time interval between two consecutive attempts of " +
"cache block replication should be positive.")
.createWithDefaultString("30s")
private[spark] val STORAGE_DECOMMISSION_FALLBACK_STORAGE_PATH =
ConfigBuilder("spark.storage.decommission.fallbackStorage.path")
.doc("The location for fallback storage during block manager decommissioning. " +
"For example, `s3a://spark-storage/`. In case of empty, fallback storage is disabled. " +
"The storage should be managed by TTL because Spark will not clean it up.")
.version("3.1.0")
.stringConf
.checkValue(_.endsWith(java.io.File.separator), "Path should end with separator.")
.createOptional
private[spark] val STORAGE_DECOMMISSION_FALLBACK_STORAGE_CLEANUP =
ConfigBuilder("spark.storage.decommission.fallbackStorage.cleanUp")
.doc("If true, Spark cleans up its fallback storage data during shutting down.")
.version("3.2.0")
.booleanConf
.createWithDefault(false)
private[spark] val STORAGE_DECOMMISSION_SHUFFLE_MAX_DISK_SIZE =
ConfigBuilder("spark.storage.decommission.shuffleBlocks.maxDiskSize")
.doc("Maximum disk space to use to store shuffle blocks before rejecting remote " +
"shuffle blocks. Rejecting remote shuffle blocks means that an executor will not receive " +
"any shuffle migrations, and if there are no other executors available for migration " +
"then shuffle blocks will be lost unless " +
s"${STORAGE_DECOMMISSION_FALLBACK_STORAGE_PATH.key} is configured.")
.version("3.2.0")
.bytesConf(ByteUnit.BYTE)
.createOptional
private[spark] val STORAGE_REPLICATION_TOPOLOGY_FILE =
ConfigBuilder("spark.storage.replication.topologyFile")
.version("2.1.0")
.stringConf
.createOptional
private[spark] val STORAGE_EXCEPTION_PIN_LEAK =
ConfigBuilder("spark.storage.exceptionOnPinLeak")
.version("1.6.2")
.booleanConf
.createWithDefault(false)
private[spark] val STORAGE_BLOCKMANAGER_TIMEOUTINTERVAL =
ConfigBuilder("spark.storage.blockManagerTimeoutIntervalMs")
.version("0.7.3")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("60s")
private[spark] val STORAGE_BLOCKMANAGER_MASTER_DRIVER_HEARTBEAT_TIMEOUT =
ConfigBuilder("spark.storage.blockManagerMasterDriverHeartbeatTimeoutMs")
.doc("A timeout used for block manager master's driver heartbeat endpoint.")
.version("3.2.0")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("10m")
private[spark] val STORAGE_BLOCKMANAGER_HEARTBEAT_TIMEOUT =
ConfigBuilder("spark.storage.blockManagerHeartbeatTimeoutMs")
.version("0.7.0")
.withAlternative("spark.storage.blockManagerSlaveTimeoutMs")
.timeConf(TimeUnit.MILLISECONDS)
.createOptional
private[spark] val STORAGE_CLEANUP_FILES_AFTER_EXECUTOR_EXIT =
ConfigBuilder("spark.storage.cleanupFilesAfterExecutorExit")
.doc("Whether or not cleanup the files not served by the external shuffle service " +
"on executor exits.")
.version("2.4.0")
.booleanConf
.createWithDefault(true)
private[spark] val DISKSTORE_SUB_DIRECTORIES =
ConfigBuilder("spark.diskStore.subDirectories")
.doc("Number of subdirectories inside each path listed in spark.local.dir for " +
"hashing Block files into.")
.version("0.6.0")
.intConf
.checkValue(_ > 0, "The number of subdirectories must be positive.")
.createWithDefault(64)
private[spark] val BLOCK_FAILURES_BEFORE_LOCATION_REFRESH =
ConfigBuilder("spark.block.failures.beforeLocationRefresh")
.doc("Max number of failures before this block manager refreshes " +
"the block locations from the driver.")
.version("2.0.0")
.intConf
.createWithDefault(5)
private[spark] val IS_PYTHON_APP =
ConfigBuilder("spark.yarn.isPython")
.internal()
.version("1.5.0")
.booleanConf
.createWithDefault(false)
private[spark] val CPUS_PER_TASK =
ConfigBuilder("spark.task.cpus").version("0.5.0").intConf.createWithDefault(1)
private[spark] val DYN_ALLOCATION_ENABLED =
ConfigBuilder("spark.dynamicAllocation.enabled")
.version("1.2.0")
.booleanConf
.createWithDefault(false)
private[spark] val DYN_ALLOCATION_TESTING =
ConfigBuilder("spark.dynamicAllocation.testing")
.version("1.2.0")
.booleanConf
.createWithDefault(false)
private[spark] val DYN_ALLOCATION_MIN_EXECUTORS =
ConfigBuilder("spark.dynamicAllocation.minExecutors")
.version("1.2.0")
.intConf
.createWithDefault(0)
private[spark] val DYN_ALLOCATION_INITIAL_EXECUTORS =
ConfigBuilder("spark.dynamicAllocation.initialExecutors")
.version("1.3.0")
.fallbackConf(DYN_ALLOCATION_MIN_EXECUTORS)
private[spark] val DYN_ALLOCATION_MAX_EXECUTORS =
ConfigBuilder("spark.dynamicAllocation.maxExecutors")
.version("1.2.0")
.intConf
.createWithDefault(Int.MaxValue)
private[spark] val DYN_ALLOCATION_EXECUTOR_ALLOCATION_RATIO =
ConfigBuilder("spark.dynamicAllocation.executorAllocationRatio")
.version("2.4.0")
.doubleConf
.createWithDefault(1.0)
  private[spark] val DYN_ALLOCATION_CACHED_EXECUTOR_IDLE_TIMEOUT =
    ConfigBuilder("spark.dynamicAllocation.cachedExecutorIdleTimeout")
      .version("1.4.0")
      .timeConf(TimeUnit.SECONDS)
      .checkValue(_ >= 0L, "Timeout must be >= 0.")
      // Integer.MAX_VALUE seconds (~68 years): effectively "never" by default.
      .createWithDefault(Integer.MAX_VALUE)
private[spark] val DYN_ALLOCATION_EXECUTOR_IDLE_TIMEOUT =
ConfigBuilder("spark.dynamicAllocation.executorIdleTimeout")
.version("1.2.0")
.timeConf(TimeUnit.SECONDS)
.checkValue(_ >= 0L, "Timeout must be >= 0.")
.createWithDefault(60)
private[spark] val DYN_ALLOCATION_SHUFFLE_TRACKING_ENABLED =
ConfigBuilder("spark.dynamicAllocation.shuffleTracking.enabled")
.version("3.0.0")
.booleanConf
.createWithDefault(false)
private[spark] val DYN_ALLOCATION_SHUFFLE_TRACKING_TIMEOUT =
ConfigBuilder("spark.dynamicAllocation.shuffleTracking.timeout")
.version("3.0.0")
.timeConf(TimeUnit.MILLISECONDS)
.checkValue(_ >= 0L, "Timeout must be >= 0.")
.createWithDefault(Long.MaxValue)
private[spark] val DYN_ALLOCATION_SCHEDULER_BACKLOG_TIMEOUT =
ConfigBuilder("spark.dynamicAllocation.schedulerBacklogTimeout")
.version("1.2.0")
.timeConf(TimeUnit.SECONDS).createWithDefault(1)
private[spark] val DYN_ALLOCATION_SUSTAINED_SCHEDULER_BACKLOG_TIMEOUT =
ConfigBuilder("spark.dynamicAllocation.sustainedSchedulerBacklogTimeout")
.version("1.2.0")
.fallbackConf(DYN_ALLOCATION_SCHEDULER_BACKLOG_TIMEOUT)
private[spark] val LEGACY_LOCALITY_WAIT_RESET =
ConfigBuilder("spark.locality.wait.legacyResetOnTaskLaunch")
.doc("Whether to use the legacy behavior of locality wait, which resets the delay timer " +
"anytime a task is scheduled. See Delay Scheduling section of TaskSchedulerImpl's class " +
"documentation for more details.")
.internal()
.version("3.1.0")
.booleanConf
.createWithDefault(false)
private[spark] val LOCALITY_WAIT = ConfigBuilder("spark.locality.wait")
.version("0.5.0")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("3s")
private[spark] val SHUFFLE_SERVICE_ENABLED =
ConfigBuilder("spark.shuffle.service.enabled")
.version("1.2.0")
.booleanConf
.createWithDefault(false)
private[spark] val SHUFFLE_SERVICE_FETCH_RDD_ENABLED =
ConfigBuilder(Constants.SHUFFLE_SERVICE_FETCH_RDD_ENABLED)
.doc("Whether to use the ExternalShuffleService for fetching disk persisted RDD blocks. " +
"In case of dynamic allocation if this feature is enabled executors having only disk " +
"persisted blocks are considered idle after " +
"'spark.dynamicAllocation.executorIdleTimeout' and will be released accordingly.")
.version("3.0.0")
.booleanConf
.createWithDefault(false)
private[spark] val SHUFFLE_SERVICE_DB_ENABLED =
ConfigBuilder("spark.shuffle.service.db.enabled")
.doc("Whether to use db in ExternalShuffleService. Note that this only affects " +
"standalone mode.")
.version("3.0.0")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_SERVICE_PORT =
ConfigBuilder("spark.shuffle.service.port").version("1.2.0").intConf.createWithDefault(7337)
private[spark] val SHUFFLE_SERVICE_NAME =
ConfigBuilder("spark.shuffle.service.name")
.doc("The configured name of the Spark shuffle service the client should communicate with. " +
"This must match the name used to configure the Shuffle within the YARN NodeManager " +
"configuration (`yarn.nodemanager.aux-services`). Only takes effect when " +
s"$SHUFFLE_SERVICE_ENABLED is set to true.")
.version("3.2.0")
.stringConf
.createWithDefault("spark_shuffle")
// ---- Kerberos -------------------------------------------------------------
private[spark] val KEYTAB = ConfigBuilder("spark.kerberos.keytab")
.doc("Location of user's keytab.")
.version("3.0.0")
.stringConf.createOptional
private[spark] val PRINCIPAL = ConfigBuilder("spark.kerberos.principal")
.doc("Name of the Kerberos principal.")
.version("3.0.0")
.stringConf
.createOptional
// How often to attempt a re-login from the keytab.
private[spark] val KERBEROS_RELOGIN_PERIOD = ConfigBuilder("spark.kerberos.relogin.period")
.version("3.0.0")
.timeConf(TimeUnit.SECONDS)
.createWithDefaultString("1m")
// Restricted to the two supported sources via checkValues below.
private[spark] val KERBEROS_RENEWAL_CREDENTIALS =
ConfigBuilder("spark.kerberos.renewal.credentials")
.doc(
"Which credentials to use when renewing delegation tokens for executors. Can be either " +
"'keytab', the default, which requires a keytab to be provided, or 'ccache', which uses " +
"the local credentials cache.")
.version("3.0.0")
.stringConf
.checkValues(Set("keytab", "ccache"))
.createWithDefault("keytab")
private[spark] val KERBEROS_FILESYSTEMS_TO_ACCESS =
ConfigBuilder("spark.kerberos.access.hadoopFileSystems")
.doc("Extra Hadoop filesystem URLs for which to request delegation tokens. The filesystem " +
"that hosts fs.defaultFS does not need to be listed here.")
.version("3.0.0")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val YARN_KERBEROS_FILESYSTEM_RENEWAL_EXCLUDE =
ConfigBuilder("spark.yarn.kerberos.renewal.excludeHadoopFileSystems")
.doc("The list of Hadoop filesystem URLs whose hosts will be excluded from " +
"delegation token renewal at resource scheduler. Currently this is known to " +
"work under YARN, so YARN Resource Manager won't renew tokens for the application. " +
"Note that as resource scheduler does not renew token, so any application running " +
"longer than the original token expiration that tries to use that token will likely fail.")
.version("3.2.0")
.stringConf
.toSequence
.createWithDefault(Nil)
// ---- Executors, Python distribution, task limits and the task reaper ------
private[spark] val EXECUTOR_INSTANCES = ConfigBuilder("spark.executor.instances")
.version("1.0.0")
.intConf
.createOptional
// Internal: Python files to distribute via YARN
// (NOTE(review): presumably populated by the submission machinery — confirm).
private[spark] val PY_FILES = ConfigBuilder("spark.yarn.dist.pyFiles")
.internal()
.version("2.2.1")
.stringConf
.toSequence
.createWithDefault(Nil)
// Default is 1 MiB (1L << 20).
private[spark] val TASK_MAX_DIRECT_RESULT_SIZE =
ConfigBuilder("spark.task.maxDirectResultSize")
.version("2.0.0")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(1L << 20)
private[spark] val TASK_MAX_FAILURES =
ConfigBuilder("spark.task.maxFailures")
.version("0.8.0")
.intConf
.createWithDefault(4)
private[spark] val TASK_REAPER_ENABLED =
ConfigBuilder("spark.task.reaper.enabled")
.version("2.0.3")
.booleanConf
.createWithDefault(false)
// NOTE(review): the -1 default presumably disables the kill timeout — confirm
// against the reaper implementation.
private[spark] val TASK_REAPER_KILL_TIMEOUT =
ConfigBuilder("spark.task.reaper.killTimeout")
.version("2.0.3")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefault(-1)
private[spark] val TASK_REAPER_POLLING_INTERVAL =
ConfigBuilder("spark.task.reaper.pollingInterval")
.version("2.0.3")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("10s")
private[spark] val TASK_REAPER_THREAD_DUMP =
ConfigBuilder("spark.task.reaper.threadDump")
.version("2.0.3")
.booleanConf
.createWithDefault(true)
// ---- Exclude-on-failure (formerly "blacklist") ----------------------------
// Each entry keeps its pre-3.1 "spark.blacklist.*" key as an alternative for
// backward compatibility.
private[spark] val EXCLUDE_ON_FAILURE_ENABLED =
ConfigBuilder("spark.excludeOnFailure.enabled")
.version("3.1.0")
.withAlternative("spark.blacklist.enabled")
.booleanConf
.createOptional
private[spark] val MAX_TASK_ATTEMPTS_PER_EXECUTOR =
ConfigBuilder("spark.excludeOnFailure.task.maxTaskAttemptsPerExecutor")
.version("3.1.0")
.withAlternative("spark.blacklist.task.maxTaskAttemptsPerExecutor")
.intConf
.createWithDefault(1)
private[spark] val MAX_TASK_ATTEMPTS_PER_NODE =
ConfigBuilder("spark.excludeOnFailure.task.maxTaskAttemptsPerNode")
.version("3.1.0")
.withAlternative("spark.blacklist.task.maxTaskAttemptsPerNode")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILURES_PER_EXEC =
ConfigBuilder("spark.excludeOnFailure.application.maxFailedTasksPerExecutor")
.version("3.1.0")
.withAlternative("spark.blacklist.application.maxFailedTasksPerExecutor")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILURES_PER_EXEC_STAGE =
ConfigBuilder("spark.excludeOnFailure.stage.maxFailedTasksPerExecutor")
.version("3.1.0")
.withAlternative("spark.blacklist.stage.maxFailedTasksPerExecutor")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILED_EXEC_PER_NODE =
ConfigBuilder("spark.excludeOnFailure.application.maxFailedExecutorsPerNode")
.version("3.1.0")
.withAlternative("spark.blacklist.application.maxFailedExecutorsPerNode")
.intConf
.createWithDefault(2)
private[spark] val MAX_FAILED_EXEC_PER_NODE_STAGE =
ConfigBuilder("spark.excludeOnFailure.stage.maxFailedExecutorsPerNode")
.version("3.1.0")
.withAlternative("spark.blacklist.stage.maxFailedExecutorsPerNode")
.intConf
.createWithDefault(2)
private[spark] val EXCLUDE_ON_FAILURE_TIMEOUT_CONF =
ConfigBuilder("spark.excludeOnFailure.timeout")
.version("3.1.0")
.withAlternative("spark.blacklist.timeout")
.timeConf(TimeUnit.MILLISECONDS)
.createOptional
private[spark] val EXCLUDE_ON_FAILURE_KILL_ENABLED =
ConfigBuilder("spark.excludeOnFailure.killExcludedExecutors")
.version("3.1.0")
.withAlternative("spark.blacklist.killBlacklistedExecutors")
.booleanConf
.createWithDefault(false)
private[spark] val EXCLUDE_ON_FAILURE_DECOMMISSION_ENABLED =
ConfigBuilder("spark.excludeOnFailure.killExcludedExecutors.decommission")
.doc("Attempt decommission of excluded nodes instead of going directly to kill")
.version("3.2.0")
.booleanConf
.createWithDefault(false)
// Internal legacy knob, kept with its even older alternative key.
private[spark] val EXCLUDE_ON_FAILURE_LEGACY_TIMEOUT_CONF =
ConfigBuilder("spark.scheduler.executorTaskExcludeOnFailureTime")
.internal()
.version("3.1.0")
.withAlternative("spark.scheduler.executorTaskBlacklistTime")
.timeConf(TimeUnit.MILLISECONDS)
.createOptional
private[spark] val EXCLUDE_ON_FAILURE_FETCH_FAILURE_ENABLED =
ConfigBuilder("spark.excludeOnFailure.application.fetchFailure.enabled")
.version("3.1.0")
.withAlternative("spark.blacklist.application.fetchFailure.enabled")
.booleanConf
.createWithDefault(false)
// ---- Fetch failures, listener bus, metrics, PySpark, history UI -----------
private[spark] val UNREGISTER_OUTPUT_ON_HOST_ON_FETCH_FAILURE =
ConfigBuilder("spark.files.fetchFailure.unRegisterOutputOnHost")
.doc("Whether to un-register all the outputs on the host in condition that we receive " +
" a FetchFailure. This is set default to false, which means, we only un-register the " +
" outputs related to the exact executor(instead of the host) on a FetchFailure.")
.version("2.3.0")
.booleanConf
.createWithDefault(false)
private[spark] val LISTENER_BUS_EVENT_QUEUE_CAPACITY =
ConfigBuilder("spark.scheduler.listenerbus.eventqueue.capacity")
.doc("The default capacity for event queues. Spark will try to initialize " +
"an event queue using capacity specified by `spark.scheduler.listenerbus" +
".eventqueue.queueName.capacity` first. If it's not configured, Spark will " +
"use the default capacity specified by this config.")
.version("2.3.0")
.intConf
.checkValue(_ > 0, "The capacity of listener bus event queue must be positive")
.createWithDefault(10000)
private[spark] val LISTENER_BUS_METRICS_MAX_LISTENER_CLASSES_TIMED =
ConfigBuilder("spark.scheduler.listenerbus.metrics.maxListenerClassesTimed")
.internal()
.version("2.3.0")
.intConf
.createWithDefault(128)
private[spark] val LISTENER_BUS_LOG_SLOW_EVENT_ENABLED =
ConfigBuilder("spark.scheduler.listenerbus.logSlowEvent")
.internal()
.doc("When enabled, log the event that takes too much time to process. This helps us " +
"discover the event types that cause performance bottlenecks. The time threshold is " +
"controlled by spark.scheduler.listenerbus.logSlowEvent.threshold.")
.version("3.0.0")
.booleanConf
.createWithDefault(true)
// Nanosecond-granularity threshold paired with the flag above.
private[spark] val LISTENER_BUS_LOG_SLOW_EVENT_TIME_THRESHOLD =
ConfigBuilder("spark.scheduler.listenerbus.logSlowEvent.threshold")
.internal()
.doc("The time threshold of whether a event is considered to be taking too much time to " +
s"process. Log the event if ${LISTENER_BUS_LOG_SLOW_EVENT_ENABLED.key} is true.")
.version("3.0.0")
.timeConf(TimeUnit.NANOSECONDS)
.createWithDefaultString("1s")
// This property sets the root namespace for metrics reporting
private[spark] val METRICS_NAMESPACE = ConfigBuilder("spark.metrics.namespace")
.version("2.1.0")
.stringConf
.createOptional
// Path to the metrics properties file, if any.
private[spark] val METRICS_CONF = ConfigBuilder("spark.metrics.conf")
.version("0.8.0")
.stringConf
.createOptional
private[spark] val METRICS_EXECUTORMETRICS_SOURCE_ENABLED =
ConfigBuilder("spark.metrics.executorMetricsSource.enabled")
.doc("Whether to register the ExecutorMetrics source with the metrics system.")
.version("3.0.0")
.booleanConf
.createWithDefault(true)
private[spark] val METRICS_STATIC_SOURCES_ENABLED =
ConfigBuilder("spark.metrics.staticSources.enabled")
.doc("Whether to register static sources with the metrics system.")
.version("3.0.0")
.booleanConf
.createWithDefault(true)
// Python executable overrides for the driver and for executors respectively.
private[spark] val PYSPARK_DRIVER_PYTHON = ConfigBuilder("spark.pyspark.driver.python")
.version("2.1.0")
.stringConf
.createOptional
private[spark] val PYSPARK_PYTHON = ConfigBuilder("spark.pyspark.python")
.version("2.1.0")
.stringConf
.createOptional
// To limit how many applications are shown in the History Server summary ui
private[spark] val HISTORY_UI_MAX_APPS =
ConfigBuilder("spark.history.ui.maxApplications")
.version("2.0.1")
.intConf
.createWithDefault(Integer.MAX_VALUE)
// ---- IO encryption, driver endpoints, file reading ------------------------
private[spark] val IO_ENCRYPTION_ENABLED = ConfigBuilder("spark.io.encryption.enabled")
.version("2.1.0")
.booleanConf
.createWithDefault(false)
private[spark] val IO_ENCRYPTION_KEYGEN_ALGORITHM =
ConfigBuilder("spark.io.encryption.keygen.algorithm")
.version("2.1.0")
.stringConf
.createWithDefault("HmacSHA1")
// Only standard AES key sizes are accepted (see checkValues).
private[spark] val IO_ENCRYPTION_KEY_SIZE_BITS = ConfigBuilder("spark.io.encryption.keySizeBits")
.version("2.1.0")
.intConf
.checkValues(Set(128, 192, 256))
.createWithDefault(128)
private[spark] val IO_CRYPTO_CIPHER_TRANSFORMATION =
ConfigBuilder("spark.io.crypto.cipher.transformation")
.internal()
.version("2.1.0")
.stringConf
.createWithDefaultString("AES/CTR/NoPadding")
// Default is the local canonical hostname, resolved at config-load time.
private[spark] val DRIVER_HOST_ADDRESS = ConfigBuilder("spark.driver.host")
.doc("Address of driver endpoints.")
.version("0.7.0")
.stringConf
.createWithDefault(Utils.localCanonicalHostName())
// 0 means "pick any free port".
private[spark] val DRIVER_PORT = ConfigBuilder("spark.driver.port")
.doc("Port of driver endpoints.")
.version("0.7.0")
.intConf
.createWithDefault(0)
private[spark] val DRIVER_SUPERVISE = ConfigBuilder("spark.driver.supervise")
.doc("If true, restarts the driver automatically if it fails with a non-zero exit status. " +
"Only has effect in Spark standalone mode or Mesos cluster deploy mode.")
.version("1.3.0")
.booleanConf
.createWithDefault(false)
// Falls back to DRIVER_HOST_ADDRESS when not set explicitly.
private[spark] val DRIVER_BIND_ADDRESS = ConfigBuilder("spark.driver.bindAddress")
.doc("Address where to bind network listen sockets on the driver.")
.version("2.1.0")
.fallbackConf(DRIVER_HOST_ADDRESS)
private[spark] val BLOCK_MANAGER_PORT = ConfigBuilder("spark.blockManager.port")
.doc("Port to use for the block manager when a more specific setting is not provided.")
.version("1.1.0")
.intConf
.createWithDefault(0)
private[spark] val DRIVER_BLOCK_MANAGER_PORT = ConfigBuilder("spark.driver.blockManager.port")
.doc("Port to use for the block manager on the driver.")
.version("2.1.0")
.fallbackConf(BLOCK_MANAGER_PORT)
private[spark] val IGNORE_CORRUPT_FILES = ConfigBuilder("spark.files.ignoreCorruptFiles")
.doc("Whether to ignore corrupt files. If true, the Spark jobs will continue to run when " +
"encountering corrupted or non-existing files and contents that have been read will still " +
"be returned.")
.version("2.1.0")
.booleanConf
.createWithDefault(false)
private[spark] val IGNORE_MISSING_FILES = ConfigBuilder("spark.files.ignoreMissingFiles")
.doc("Whether to ignore missing files. If true, the Spark jobs will continue to run when " +
"encountering missing files and the contents that have been read will still be returned.")
.version("2.4.0")
.booleanConf
.createWithDefault(false)
private[spark] val APP_CALLER_CONTEXT = ConfigBuilder("spark.log.callerContext")
.version("2.2.0")
.stringConf
.createOptional
// Default is 128 MiB.
private[spark] val FILES_MAX_PARTITION_BYTES = ConfigBuilder("spark.files.maxPartitionBytes")
.doc("The maximum number of bytes to pack into a single partition when reading files.")
.version("2.1.0")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(128 * 1024 * 1024)
// Default is 4 MiB.
private[spark] val FILES_OPEN_COST_IN_BYTES = ConfigBuilder("spark.files.openCostInBytes")
.doc("The estimated cost to open a file, measured by the number of bytes could be scanned in" +
" the same time. This is used when putting multiple files into a partition. It's better to" +
" over estimate, then the partitions with small files will be faster than partitions with" +
" bigger files.")
.version("2.1.0")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(4 * 1024 * 1024)
private[spark] val HADOOP_RDD_IGNORE_EMPTY_SPLITS =
ConfigBuilder("spark.hadoopRDD.ignoreEmptySplits")
.internal()
.doc("When true, HadoopRDD/NewHadoopRDD will not create partitions for empty input splits.")
.version("2.3.0")
.booleanConf
.createWithDefault(true)
// ---- Redaction and authentication -----------------------------------------
// Case-insensitive regex (note the (?i) inline flag) used to scrub sensitive
// config/env values from UIs and logs.
private[spark] val SECRET_REDACTION_PATTERN =
ConfigBuilder("spark.redaction.regex")
.doc("Regex to decide which Spark configuration properties and environment variables in " +
"driver and executor environments contain sensitive information. When this regex matches " +
"a property key or value, the value is redacted from the environment UI and various logs " +
"like YARN and event logs.")
.version("2.1.2")
.regexConf
.createWithDefault("(?i)secret|password|token|access[.]key".r)
private[spark] val STRING_REDACTION_PATTERN =
ConfigBuilder("spark.redaction.string.regex")
.doc("Regex to decide which parts of strings produced by Spark contain sensitive " +
"information. When this regex matches a string part, that string part is replaced by a " +
"dummy value. This is currently used to redact the output of SQL explain commands.")
.version("2.2.0")
.regexConf
.createOptional
private[spark] val AUTH_SECRET =
ConfigBuilder("spark.authenticate.secret")
.version("1.0.0")
.stringConf
.createOptional
private[spark] val AUTH_SECRET_BIT_LENGTH =
ConfigBuilder("spark.authenticate.secretBitLength")
.version("1.6.0")
.intConf
.createWithDefault(256)
private[spark] val NETWORK_AUTH_ENABLED =
ConfigBuilder("spark.authenticate")
.version("1.0.0")
.booleanConf
.createWithDefault(false)
private[spark] val SASL_ENCRYPTION_ENABLED =
ConfigBuilder("spark.authenticate.enableSaslEncryption")
.version("1.4.0")
.booleanConf
.createWithDefault(false)
private[spark] val AUTH_SECRET_FILE =
ConfigBuilder("spark.authenticate.secret.file")
.doc("Path to a file that contains the authentication secret to use. The secret key is " +
"loaded from this path on both the driver and the executors if overrides are not set for " +
"either entity (see below). File-based secret keys are only allowed when using " +
"Kubernetes.")
.version("3.0.0")
.stringConf
.createOptional
// Driver- and executor-specific overrides both fall back to AUTH_SECRET_FILE.
private[spark] val AUTH_SECRET_FILE_DRIVER =
ConfigBuilder("spark.authenticate.secret.driver.file")
.doc("Path to a file that contains the authentication secret to use. Loaded by the " +
"driver. In Kubernetes client mode it is often useful to set a different secret " +
"path for the driver vs. the executors, since the driver may not be running in " +
"a pod unlike the executors. If this is set, an accompanying secret file must " +
"be specified for the executors. The fallback configuration allows the same path to be " +
"used for both the driver and the executors when running in cluster mode. File-based " +
"secret keys are only allowed when using Kubernetes.")
.version("3.0.0")
.fallbackConf(AUTH_SECRET_FILE)
private[spark] val AUTH_SECRET_FILE_EXECUTOR =
ConfigBuilder("spark.authenticate.secret.executor.file")
.doc("Path to a file that contains the authentication secret to use. Loaded by the " +
"executors only. In Kubernetes client mode it is often useful to set a different " +
"secret path for the driver vs. the executors, since the driver may not be running " +
"in a pod unlike the executors. If this is set, an accompanying secret file must be " +
"specified for the executors. The fallback configuration allows the same path to be " +
"used for both the driver and the executors when running in cluster mode. File-based " +
"secret keys are only allowed when using Kubernetes.")
.version("3.0.0")
.fallbackConf(AUTH_SECRET_FILE)
// ---- Buffers and checkpointing --------------------------------------------
// Default is 64 MiB; capped at the max JVM array length to keep each chunk
// allocatable as a single array.
private[spark] val BUFFER_WRITE_CHUNK_SIZE =
ConfigBuilder("spark.buffer.write.chunkSize")
.internal()
.doc("The chunk size in bytes during writing out the bytes of ChunkedByteBuffer.")
.version("2.3.0")
.bytesConf(ByteUnit.BYTE)
.checkValue(_ <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH,
"The chunk size during writing out the bytes of ChunkedByteBuffer should" +
s" be less than or equal to ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.")
.createWithDefault(64 * 1024 * 1024)
private[spark] val CHECKPOINT_COMPRESS =
ConfigBuilder("spark.checkpoint.compress")
.doc("Whether to compress RDD checkpoints. Generally a good idea. Compression will use " +
"spark.io.compression.codec.")
.version("2.2.0")
.booleanConf
.createWithDefault(false)
// Expiry (in minutes) for the cache of a checkpointed RDD's preferred
// locations; unset (the default) disables caching entirely.
private[spark] val CACHE_CHECKPOINT_PREFERRED_LOCS_EXPIRE_TIME =
  ConfigBuilder("spark.rdd.checkpoint.cachePreferredLocsExpireTime")
    .internal()
    // Fixed missing space at the first concatenation boundary: the previous
    // text rendered as "checkpointed RDD.Caching preferred...".
    .doc("Expire time in minutes for caching preferred locations of checkpointed RDD. " +
      "Caching preferred locations can relieve query loading to DFS and save the query " +
      "time. The drawback is that the cached locations can be possibly outdated and " +
      "lose data locality. If this config is not specified, it will not cache.")
    .version("3.0.0")
    .timeConf(TimeUnit.MINUTES)
    .checkValue(_ > 0, "The expire time for caching preferred locations cannot be non-positive.")
    .createOptional
// ---- Shuffle sizing, registration and fetch limits ------------------------
// Default is 100 MiB.
private[spark] val SHUFFLE_ACCURATE_BLOCK_THRESHOLD =
ConfigBuilder("spark.shuffle.accurateBlockThreshold")
.doc("Threshold in bytes above which the size of shuffle blocks in " +
"HighlyCompressedMapStatus is accurately recorded. This helps to prevent OOM " +
"by avoiding underestimating shuffle block size when fetch shuffle blocks.")
.version("2.2.1")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(100 * 1024 * 1024)
private[spark] val SHUFFLE_REGISTRATION_TIMEOUT =
ConfigBuilder("spark.shuffle.registration.timeout")
.doc("Timeout in milliseconds for registration to the external shuffle service.")
.version("2.3.0")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefault(5000)
private[spark] val SHUFFLE_REGISTRATION_MAX_ATTEMPTS =
ConfigBuilder("spark.shuffle.registration.maxAttempts")
.doc("When we fail to register to the external shuffle service, we will " +
"retry for maxAttempts times.")
.version("2.3.0")
.intConf
.createWithDefault(3)
private[spark] val SHUFFLE_MAX_ATTEMPTS_ON_NETTY_OOM =
ConfigBuilder("spark.shuffle.maxAttemptsOnNettyOOM")
.doc("The max attempts of a shuffle block would retry on Netty OOM issue before throwing " +
"the shuffle fetch failure.")
.version("3.2.0")
.internal()
.intConf
.createWithDefault(10)
private[spark] val REDUCER_MAX_BLOCKS_IN_FLIGHT_PER_ADDRESS =
ConfigBuilder("spark.reducer.maxBlocksInFlightPerAddress")
.doc("This configuration limits the number of remote blocks being fetched per reduce task " +
"from a given host port. When a large number of blocks are being requested from a given " +
"address in a single fetch or simultaneously, this could crash the serving executor or " +
"Node Manager. This is especially useful to reduce the load on the Node Manager when " +
"external shuffle is enabled. You can mitigate the issue by setting it to a lower value.")
.version("2.2.1")
.intConf
.checkValue(_ > 0, "The max no. of blocks in flight cannot be non-positive.")
.createWithDefault(Int.MaxValue)
private[spark] val MAX_REMOTE_BLOCK_SIZE_FETCH_TO_MEM =
ConfigBuilder("spark.network.maxRemoteBlockSizeFetchToMem")
.doc("Remote block will be fetched to disk when size of the block is above this threshold " +
"in bytes. This is to avoid a giant request takes too much memory. Note this " +
"configuration will affect both shuffle fetch and block manager remote block fetch. " +
"For users who enabled external shuffle service, this feature can only work when " +
"external shuffle service is at least 2.3.0.")
.version("3.0.0")
.bytesConf(ByteUnit.BYTE)
// fetch-to-mem is guaranteed to fail if the message is bigger than 2 GB, so we might
// as well use fetch-to-disk in that case. The message includes some metadata in addition
// to the block data itself (in particular UploadBlock has a lot of metadata), so we leave
// extra room.
.checkValue(
_ <= Int.MaxValue - 512,
"maxRemoteBlockSizeFetchToMem cannot be larger than (Int.MaxValue - 512) bytes.")
.createWithDefaultString("200m")
private[spark] val TASK_METRICS_TRACK_UPDATED_BLOCK_STATUSES =
ConfigBuilder("spark.taskMetrics.trackUpdatedBlockStatuses")
.doc("Enable tracking of updatedBlockStatuses in the TaskMetrics. Off by default since " +
"tracking the block statuses can use a lot of memory and its not used anywhere within " +
"spark.")
.version("2.3.0")
.booleanConf
.createWithDefault(false)
// Default plugin is the local-disk implementation shipped with Spark.
private[spark] val SHUFFLE_IO_PLUGIN_CLASS =
ConfigBuilder("spark.shuffle.sort.io.plugin.class")
.doc("Name of the class to use for shuffle IO.")
.version("3.0.0")
.stringConf
.createWithDefault(classOf[LocalDiskShuffleDataIO].getName)
private[spark] val SHUFFLE_FILE_BUFFER_SIZE =
ConfigBuilder("spark.shuffle.file.buffer")
.doc("Size of the in-memory buffer for each shuffle file output stream, in KiB unless " +
"otherwise specified. These buffers reduce the number of disk seeks and system calls " +
"made in creating intermediate shuffle files.")
.version("1.4.0")
.bytesConf(ByteUnit.KiB)
.checkValue(v => v > 0 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024,
s"The file buffer size must be positive and less than or equal to" +
s" ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024}.")
.createWithDefaultString("32k")
// Buffer size for the file output stream used by the unsafe shuffle writer.
private[spark] val SHUFFLE_UNSAFE_FILE_OUTPUT_BUFFER_SIZE =
  ConfigBuilder("spark.shuffle.unsafe.file.output.buffer")
    // Reworded: the previous text ("The file system for this buffer size
    // after each partition is written...") was ungrammatical and unclear.
    .doc("Size of the buffer used for the file output stream after each partition is " +
      "written in the unsafe shuffle writer. In KiB unless otherwise specified.")
    .version("2.3.0")
    .bytesConf(ByteUnit.KiB)
    .checkValue(v => v > 0 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024,
      s"The buffer size must be positive and less than or equal to" +
      s" ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024}.")
    .createWithDefaultString("32k")
// Lower bound of 12 bytes: NOTE(review) presumably the minimum record framing
// overhead the disk writer needs — confirm against the sorter implementation.
private[spark] val SHUFFLE_DISK_WRITE_BUFFER_SIZE =
ConfigBuilder("spark.shuffle.spill.diskWriteBufferSize")
.doc("The buffer size, in bytes, to use when writing the sorted records to an on-disk file.")
.version("2.3.0")
.bytesConf(ByteUnit.BYTE)
.checkValue(v => v > 12 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH,
s"The buffer size must be greater than 12 and less than or equal to " +
s"${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.")
.createWithDefault(1024 * 1024)
// ---- Memory store unrolling -----------------------------------------------
private[spark] val UNROLL_MEMORY_CHECK_PERIOD =
ConfigBuilder("spark.storage.unrollMemoryCheckPeriod")
.internal()
.doc("The memory check period is used to determine how often we should check whether "
+ "there is a need to request more memory when we try to unroll the given block in memory.")
.version("2.3.0")
.longConf
.createWithDefault(16)
private[spark] val UNROLL_MEMORY_GROWTH_FACTOR =
ConfigBuilder("spark.storage.unrollMemoryGrowthFactor")
.internal()
.doc("Memory to request as a multiple of the size that used to unroll the block.")
.version("2.3.0")
.doubleConf
.createWithDefault(1.5)
private[spark] val FORCE_DOWNLOAD_SCHEMES =
ConfigBuilder("spark.yarn.dist.forceDownloadSchemes")
.doc("Comma-separated list of schemes for which resources will be downloaded to the " +
"local disk prior to being added to YARN's distributed cache. For use in cases " +
"where the YARN service does not support schemes that are supported by Spark, like http, " +
"https and ftp, or jars required to be in the local YARN client's classpath. Wildcard " +
"'*' is denoted to download resources for all the schemes.")
.version("2.3.0")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val EXTRA_LISTENERS = ConfigBuilder("spark.extraListeners")
.doc("Class names of listeners to add to SparkContext during initialization.")
.version("1.3.0")
.stringConf
.toSequence
.createOptional
// ---- Spill thresholds, result size and credential renewal -----------------
private[spark] val SHUFFLE_SPILL_NUM_ELEMENTS_FORCE_SPILL_THRESHOLD =
ConfigBuilder("spark.shuffle.spill.numElementsForceSpillThreshold")
.internal()
.doc("The maximum number of elements in memory before forcing the shuffle sorter to spill. " +
"By default it's Integer.MAX_VALUE, which means we never force the sorter to spill, " +
"until we reach some limitations, like the max page size limitation for the pointer " +
"array in the sorter.")
.version("1.6.0")
.intConf
.createWithDefault(Integer.MAX_VALUE)
private[spark] val SHUFFLE_MAP_OUTPUT_PARALLEL_AGGREGATION_THRESHOLD =
ConfigBuilder("spark.shuffle.mapOutput.parallelAggregationThreshold")
.internal()
.doc("Multi-thread is used when the number of mappers * shuffle partitions is greater than " +
"or equal to this threshold. Note that the actual parallelism is calculated by number of " +
"mappers * shuffle partitions / this threshold + 1, so this threshold should be positive.")
.version("2.3.0")
.intConf
.checkValue(v => v > 0, "The threshold should be positive.")
.createWithDefault(10000000)
private[spark] val MAX_RESULT_SIZE = ConfigBuilder("spark.driver.maxResultSize")
.doc("Size limit for results.")
.version("1.2.0")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("1g")
// Fetch new tokens once 75% of the current token's lifetime has elapsed.
private[spark] val CREDENTIALS_RENEWAL_INTERVAL_RATIO =
ConfigBuilder("spark.security.credentials.renewalRatio")
.doc("Ratio of the credential's expiration time when Spark should fetch new credentials.")
.version("2.4.0")
.doubleConf
.createWithDefault(0.75d)
private[spark] val CREDENTIALS_RENEWAL_RETRY_WAIT =
ConfigBuilder("spark.security.credentials.retryWait")
.doc("How long to wait before retrying to fetch new credentials after a failure.")
.version("2.4.0")
.timeConf(TimeUnit.SECONDS)
.createWithDefaultString("1h")
private[spark] val SHUFFLE_SORT_INIT_BUFFER_SIZE =
ConfigBuilder("spark.shuffle.sort.initialBufferSize")
.internal()
.version("2.1.0")
.bytesConf(ByteUnit.BYTE)
.checkValue(v => v > 0 && v <= Int.MaxValue,
s"The buffer size must be greater than 0 and less than or equal to ${Int.MaxValue}.")
.createWithDefault(4096)
// ---- Shuffle compression and spill behavior -------------------------------
private[spark] val SHUFFLE_COMPRESS =
ConfigBuilder("spark.shuffle.compress")
.doc("Whether to compress shuffle output. Compression will use " +
"spark.io.compression.codec.")
.version("0.6.0")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_SPILL_COMPRESS =
ConfigBuilder("spark.shuffle.spill.compress")
.doc("Whether to compress data spilled during shuffles. Compression will use " +
"spark.io.compression.codec.")
.version("0.9.0")
.booleanConf
.createWithDefault(true)
private[spark] val MAP_STATUS_COMPRESSION_CODEC =
ConfigBuilder("spark.shuffle.mapStatus.compression.codec")
.internal()
.doc("The codec used to compress MapStatus, which is generated by ShuffleMapTask. " +
"By default, Spark provides four codecs: lz4, lzf, snappy, and zstd. You can also " +
"use fully qualified class names to specify the codec.")
.version("3.0.0")
.stringConf
.createWithDefault("zstd")
// Default is 5 MiB.
private[spark] val SHUFFLE_SPILL_INITIAL_MEM_THRESHOLD =
ConfigBuilder("spark.shuffle.spill.initialMemoryThreshold")
.internal()
.doc("Initial threshold for the size of a collection before we start tracking its " +
"memory usage.")
.version("1.1.1")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(5 * 1024 * 1024)
private[spark] val SHUFFLE_SPILL_BATCH_SIZE =
ConfigBuilder("spark.shuffle.spill.batchSize")
.internal()
.doc("Size of object batches when reading/writing from serializers.")
.version("0.9.0")
.longConf
.createWithDefault(10000)
private[spark] val SHUFFLE_SORT_BYPASS_MERGE_THRESHOLD =
ConfigBuilder("spark.shuffle.sort.bypassMergeThreshold")
.doc("In the sort-based shuffle manager, avoid merge-sorting data if there is no " +
"map-side aggregation and there are at most this many reduce partitions")
.version("1.1.1")
.intConf
.createWithDefault(200)
// Short name ("sort") or a fully qualified ShuffleManager class name.
private[spark] val SHUFFLE_MANAGER =
ConfigBuilder("spark.shuffle.manager")
.version("1.1.0")
.stringConf
.createWithDefault("sort")
// ---- Shuffle locality, map output dispatch and corruption detection -------
private[spark] val SHUFFLE_REDUCE_LOCALITY_ENABLE =
ConfigBuilder("spark.shuffle.reduceLocality.enabled")
.doc("Whether to compute locality preferences for reduce tasks")
.version("1.5.0")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_MAPOUTPUT_MIN_SIZE_FOR_BROADCAST =
ConfigBuilder("spark.shuffle.mapOutput.minSizeForBroadcast")
.doc("The size at which we use Broadcast to send the map output statuses to the executors.")
.version("2.0.0")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("512k")
private[spark] val SHUFFLE_MAPOUTPUT_DISPATCHER_NUM_THREADS =
ConfigBuilder("spark.shuffle.mapOutput.dispatcher.numThreads")
.version("2.0.0")
.intConf
.createWithDefault(8)
private[spark] val SHUFFLE_DETECT_CORRUPT =
ConfigBuilder("spark.shuffle.detectCorrupt")
.doc("Whether to detect any corruption in fetched blocks.")
.version("2.2.0")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_DETECT_CORRUPT_MEMORY =
ConfigBuilder("spark.shuffle.detectCorrupt.useExtraMemory")
.doc("If enabled, part of a compressed/encrypted stream will be de-compressed/de-crypted " +
"by using extra memory to detect early corruption. Any IOException thrown will cause " +
"the task to be retried once and if it fails again with same exception, then " +
"FetchFailedException will be thrown to retry previous stage")
.version("3.0.0")
.booleanConf
.createWithDefault(false)
private[spark] val SHUFFLE_SYNC =
ConfigBuilder("spark.shuffle.sync")
.doc("Whether to force outstanding writes to disk.")
.version("0.8.0")
.booleanConf
.createWithDefault(false)
private[spark] val SHUFFLE_UNSAFE_FAST_MERGE_ENABLE =
ConfigBuilder("spark.shuffle.unsafe.fastMergeEnabled")
.doc("Whether to perform a fast spill merge.")
.version("1.4.0")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_SORT_USE_RADIXSORT =
ConfigBuilder("spark.shuffle.sort.useRadixSort")
.doc("Whether to use radix sort for sorting in-memory partition ids. Radix sort is much " +
"faster, but requires additional memory to be reserved memory as pointers are added.")
.version("2.0.0")
.booleanConf
.createWithDefault(true)
private[spark] val SHUFFLE_MIN_NUM_PARTS_TO_HIGHLY_COMPRESS =
ConfigBuilder("spark.shuffle.minNumPartitionsToHighlyCompress")
.internal()
.doc("Number of partitions to determine if MapStatus should use HighlyCompressedMapStatus")
.version("2.4.0")
.intConf
.checkValue(v => v > 0, "The value should be a positive integer.")
.createWithDefault(2000)
// Compatibility shim for fetching from pre-3.0 external shuffle services.
private[spark] val SHUFFLE_USE_OLD_FETCH_PROTOCOL =
ConfigBuilder("spark.shuffle.useOldFetchProtocol")
.doc("Whether to use the old protocol while doing the shuffle block fetching. " +
"It is only enabled while we need the compatibility in the scenario of new Spark " +
"version job fetching shuffle blocks from old version external shuffle service.")
.version("3.0.0")
.booleanConf
.createWithDefault(false)
// Read shuffle blocks from same-host block managers directly off disk instead
// of fetching them over the network.
private[spark] val SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED =
  ConfigBuilder("spark.shuffle.readHostLocalDisk")
    // Fixed unbalanced parenthesis: "(and `...` is disabled," never closed.
    .doc(s"If enabled (and `${SHUFFLE_USE_OLD_FETCH_PROTOCOL.key}` is disabled), shuffle " +
      "blocks requested from those block managers which are running on the same host are " +
      "read from the disk directly instead of being fetched as remote blocks over the network.")
    .version("3.0.0")
    .booleanConf
    .createWithDefault(true)
// ---- Local-dirs cache, barrier scheduling, unschedulable task sets --------
private[spark] val STORAGE_LOCAL_DISK_BY_EXECUTORS_CACHE_SIZE =
ConfigBuilder("spark.storage.localDiskByExecutors.cacheSize")
.doc("The max number of executors for which the local dirs are stored. This size is " +
"both applied for the driver and both for the executors side to avoid having an " +
"unbounded store. This cache will be used to avoid the network in case of fetching disk " +
s"persisted RDD blocks or shuffle blocks " +
s"(when `${SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED.key}` is set) from the same host.")
.version("3.0.0")
.intConf
.createWithDefault(1000)
private[spark] val MEMORY_MAP_LIMIT_FOR_TESTS =
ConfigBuilder("spark.storage.memoryMapLimitForTests")
.internal()
.doc("For testing only, controls the size of chunks when memory mapping a file")
.version("2.3.0")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH)
// Default "365d" equals the 31536000 seconds mentioned in the doc text.
private[spark] val BARRIER_SYNC_TIMEOUT =
ConfigBuilder("spark.barrier.sync.timeout")
.doc("The timeout in seconds for each barrier() call from a barrier task. If the " +
"coordinator didn't receive all the sync messages from barrier tasks within the " +
"configured time, throw a SparkException to fail all the tasks. The default value is set " +
"to 31536000(3600 * 24 * 365) so the barrier() call shall wait for one year.")
.version("2.4.0")
.timeConf(TimeUnit.SECONDS)
.checkValue(v => v > 0, "The value should be a positive time value.")
.createWithDefaultString("365d")
// Also part of the excludeOnFailure rename; keeps the legacy blacklist key.
private[spark] val UNSCHEDULABLE_TASKSET_TIMEOUT =
ConfigBuilder("spark.scheduler.excludeOnFailure.unschedulableTaskSetTimeout")
.doc("The timeout in seconds to wait to acquire a new executor and schedule a task " +
"before aborting a TaskSet which is unschedulable because all executors are " +
"excluded due to failures.")
.version("3.1.0")
.withAlternative("spark.scheduler.blacklist.unschedulableTaskSetTimeout")
.timeConf(TimeUnit.SECONDS)
.checkValue(v => v >= 0, "The value should be a non negative time value.")
.createWithDefault(120)
private[spark] val BARRIER_MAX_CONCURRENT_TASKS_CHECK_INTERVAL =
ConfigBuilder("spark.scheduler.barrier.maxConcurrentTasksCheck.interval")
.doc("Time in seconds to wait between a max concurrent tasks check failure and the next " +
"check. A max concurrent tasks check ensures the cluster can launch more concurrent " +
"tasks than required by a barrier stage on job submitted. The check can fail in case " +
"a cluster has just started and not enough executors have registered, so we wait for a " +
"little while and try to perform the check again. If the check fails more than a " +
"configured max failure times for a job then fail current job submission. Note this " +
"config only applies to jobs that contain one or more barrier stages, we won't perform " +
"the check on non-barrier jobs.")
.version("2.4.0")
.timeConf(TimeUnit.SECONDS)
.createWithDefaultString("15s")
private[spark] val BARRIER_MAX_CONCURRENT_TASKS_CHECK_MAX_FAILURES =
ConfigBuilder("spark.scheduler.barrier.maxConcurrentTasksCheck.maxFailures")
.doc("Number of max concurrent tasks check failures allowed before fail a job submission. " +
"A max concurrent tasks check ensures the cluster can launch more concurrent tasks than " +
"required by a barrier stage on job submitted. The check can fail in case a cluster " +
"has just started and not enough executors have registered, so we wait for a little " +
"while and try to perform the check again. If the check fails more than a configured " +
"max failure times for a job then fail current job submission. Note this config only " +
"applies to jobs that contain one or more barrier stages, we won't perform the check on " +
"non-barrier jobs.")
.version("2.4.0")
.intConf
.checkValue(v => v > 0, "The max failures should be a positive value.")
.createWithDefault(40)
private[spark] val UNSAFE_EXCEPTION_ON_MEMORY_LEAK =
ConfigBuilder("spark.unsafe.exceptionOnMemoryLeak")
.internal()
.version("1.4.0")
.booleanConf
.createWithDefault(false)
private[spark] val UNSAFE_SORTER_SPILL_READ_AHEAD_ENABLED =
ConfigBuilder("spark.unsafe.sorter.spill.read.ahead.enabled")
.internal()
.version("2.3.0")
.booleanConf
.createWithDefault(true)
private[spark] val UNSAFE_SORTER_SPILL_READER_BUFFER_SIZE =
ConfigBuilder("spark.unsafe.sorter.spill.reader.buffer.size")
.internal()
.version("2.1.0")
.bytesConf(ByteUnit.BYTE)
.checkValue(v => 1024 * 1024 <= v && v <= MAX_BUFFER_SIZE_BYTES,
s"The value must be in allowed range [1,048,576, ${MAX_BUFFER_SIZE_BYTES}].")
.createWithDefault(1024 * 1024)
private[spark] val DEFAULT_PLUGINS_LIST = "spark.plugins.defaultList"
private[spark] val PLUGINS =
ConfigBuilder("spark.plugins")
.withPrepended(DEFAULT_PLUGINS_LIST, separator = ",")
.doc("Comma-separated list of class names implementing " +
"org.apache.spark.api.plugin.SparkPlugin to load into the application.")
.version("3.0.0")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val CLEANER_PERIODIC_GC_INTERVAL =
ConfigBuilder("spark.cleaner.periodicGC.interval")
.version("1.6.0")
.timeConf(TimeUnit.SECONDS)
.createWithDefaultString("30min")
private[spark] val CLEANER_REFERENCE_TRACKING =
ConfigBuilder("spark.cleaner.referenceTracking")
.version("1.0.0")
.booleanConf
.createWithDefault(true)
private[spark] val CLEANER_REFERENCE_TRACKING_BLOCKING =
ConfigBuilder("spark.cleaner.referenceTracking.blocking")
.version("1.0.0")
.booleanConf
.createWithDefault(true)
private[spark] val CLEANER_REFERENCE_TRACKING_BLOCKING_SHUFFLE =
ConfigBuilder("spark.cleaner.referenceTracking.blocking.shuffle")
.version("1.1.1")
.booleanConf
.createWithDefault(false)
private[spark] val CLEANER_REFERENCE_TRACKING_CLEAN_CHECKPOINTS =
ConfigBuilder("spark.cleaner.referenceTracking.cleanCheckpoints")
.version("1.4.0")
.booleanConf
.createWithDefault(false)
private[spark] val EXECUTOR_LOGS_ROLLING_STRATEGY =
ConfigBuilder("spark.executor.logs.rolling.strategy")
.version("1.1.0")
.stringConf
.createWithDefault("")
private[spark] val EXECUTOR_LOGS_ROLLING_TIME_INTERVAL =
ConfigBuilder("spark.executor.logs.rolling.time.interval")
.version("1.1.0")
.stringConf
.createWithDefault("daily")
private[spark] val EXECUTOR_LOGS_ROLLING_MAX_SIZE =
ConfigBuilder("spark.executor.logs.rolling.maxSize")
.version("1.4.0")
.stringConf
.createWithDefault((1024 * 1024).toString)
private[spark] val EXECUTOR_LOGS_ROLLING_MAX_RETAINED_FILES =
ConfigBuilder("spark.executor.logs.rolling.maxRetainedFiles")
.version("1.1.0")
.intConf
.createWithDefault(-1)
private[spark] val EXECUTOR_LOGS_ROLLING_ENABLE_COMPRESSION =
ConfigBuilder("spark.executor.logs.rolling.enableCompression")
.version("2.0.2")
.booleanConf
.createWithDefault(false)
private[spark] val MASTER_REST_SERVER_ENABLED = ConfigBuilder("spark.master.rest.enabled")
.version("1.3.0")
.booleanConf
.createWithDefault(false)
private[spark] val MASTER_REST_SERVER_PORT = ConfigBuilder("spark.master.rest.port")
.version("1.3.0")
.intConf
.createWithDefault(6066)
private[spark] val MASTER_UI_PORT = ConfigBuilder("spark.master.ui.port")
.version("1.1.0")
.intConf
.createWithDefault(8080)
private[spark] val IO_COMPRESSION_SNAPPY_BLOCKSIZE =
ConfigBuilder("spark.io.compression.snappy.blockSize")
.doc("Block size in bytes used in Snappy compression, in the case when " +
"Snappy compression codec is used. Lowering this block size " +
"will also lower shuffle memory usage when Snappy is used")
.version("1.4.0")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("32k")
// Block size used by the LZ4 codec for internal data compression.
// Fix: missing space across the string concatenation previously rendered the
// user-visible doc as "LZ4 compressioncodec is used".
private[spark] val IO_COMPRESSION_LZ4_BLOCKSIZE =
  ConfigBuilder("spark.io.compression.lz4.blockSize")
    .doc("Block size in bytes used in LZ4 compression, in the case when LZ4 compression " +
      "codec is used. Lowering this block size will also lower shuffle memory " +
      "usage when LZ4 is used.")
    .version("1.4.0")
    .bytesConf(ByteUnit.BYTE)
    .createWithDefaultString("32k")
private[spark] val IO_COMPRESSION_CODEC =
ConfigBuilder("spark.io.compression.codec")
.doc("The codec used to compress internal data such as RDD partitions, event log, " +
"broadcast variables and shuffle outputs. By default, Spark provides four codecs: " +
"lz4, lzf, snappy, and zstd. You can also use fully qualified class names to specify " +
"the codec")
.version("0.8.0")
.stringConf
.createWithDefaultString("lz4")
private[spark] val IO_COMPRESSION_ZSTD_BUFFERSIZE =
ConfigBuilder("spark.io.compression.zstd.bufferSize")
.doc("Buffer size in bytes used in Zstd compression, in the case when Zstd " +
"compression codec is used. Lowering this size will lower the shuffle " +
"memory usage when Zstd is used, but it might increase the compression " +
"cost because of excessive JNI call overhead")
.version("2.3.0")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("32k")
private[spark] val IO_COMPRESSION_ZSTD_BUFFERPOOL_ENABLED =
ConfigBuilder("spark.io.compression.zstd.bufferPool.enabled")
.doc("If true, enable buffer pool of ZSTD JNI library.")
.version("3.2.0")
.booleanConf
.createWithDefault(true)
private[spark] val IO_COMPRESSION_ZSTD_LEVEL =
ConfigBuilder("spark.io.compression.zstd.level")
.doc("Compression level for Zstd compression codec. Increasing the compression " +
"level will result in better compression at the expense of more CPU and memory")
.version("2.3.0")
.intConf
.createWithDefault(1)
private[spark] val IO_WARNING_LARGEFILETHRESHOLD =
ConfigBuilder("spark.io.warning.largeFileThreshold")
.internal()
.doc("If the size in bytes of a file loaded by Spark exceeds this threshold, " +
"a warning is logged with the possible reasons.")
.version("3.0.0")
.bytesConf(ByteUnit.BYTE)
.createWithDefault(1024 * 1024 * 1024)
private[spark] val EVENT_LOG_COMPRESSION_CODEC =
ConfigBuilder("spark.eventLog.compression.codec")
.doc("The codec used to compress event log. By default, Spark provides four codecs: " +
"lz4, lzf, snappy, and zstd. You can also use fully qualified class names to specify " +
"the codec.")
.version("3.0.0")
.stringConf
.createWithDefault("zstd")
private[spark] val BUFFER_SIZE =
ConfigBuilder("spark.buffer.size")
.version("0.5.0")
.intConf
.checkValue(_ >= 0, "The buffer size must not be negative")
.createWithDefault(65536)
private[spark] val LOCALITY_WAIT_PROCESS = ConfigBuilder("spark.locality.wait.process")
.version("0.8.0")
.fallbackConf(LOCALITY_WAIT)
private[spark] val LOCALITY_WAIT_NODE = ConfigBuilder("spark.locality.wait.node")
.version("0.8.0")
.fallbackConf(LOCALITY_WAIT)
private[spark] val LOCALITY_WAIT_RACK = ConfigBuilder("spark.locality.wait.rack")
.version("0.8.0")
.fallbackConf(LOCALITY_WAIT)
private[spark] val REDUCER_MAX_SIZE_IN_FLIGHT = ConfigBuilder("spark.reducer.maxSizeInFlight")
.doc("Maximum size of map outputs to fetch simultaneously from each reduce task, " +
"in MiB unless otherwise specified. Since each output requires us to create a " +
"buffer to receive it, this represents a fixed memory overhead per reduce task, " +
"so keep it small unless you have a large amount of memory")
.version("1.4.0")
.bytesConf(ByteUnit.MiB)
.createWithDefaultString("48m")
private[spark] val REDUCER_MAX_REQS_IN_FLIGHT = ConfigBuilder("spark.reducer.maxReqsInFlight")
.doc("This configuration limits the number of remote requests to fetch blocks at " +
"any given point. When the number of hosts in the cluster increase, " +
"it might lead to very large number of inbound connections to one or more nodes, " +
"causing the workers to fail under load. By allowing it to limit the number of " +
"fetch requests, this scenario can be mitigated")
.version("2.0.0")
.intConf
.createWithDefault(Int.MaxValue)
private[spark] val BROADCAST_COMPRESS = ConfigBuilder("spark.broadcast.compress")
.doc("Whether to compress broadcast variables before sending them. " +
"Generally a good idea. Compression will use spark.io.compression.codec")
.version("0.6.0")
.booleanConf.createWithDefault(true)
private[spark] val BROADCAST_BLOCKSIZE = ConfigBuilder("spark.broadcast.blockSize")
.doc("Size of each piece of a block for TorrentBroadcastFactory, in " +
"KiB unless otherwise specified. Too large a value decreases " +
"parallelism during broadcast (makes it slower); however, " +
"if it is too small, BlockManager might take a performance hit")
.version("0.5.0")
.bytesConf(ByteUnit.KiB)
.createWithDefaultString("4m")
private[spark] val BROADCAST_CHECKSUM = ConfigBuilder("spark.broadcast.checksum")
.doc("Whether to enable checksum for broadcast. If enabled, " +
"broadcasts will include a checksum, which can help detect " +
"corrupted blocks, at the cost of computing and sending a little " +
"more data. It's possible to disable it if the network has other " +
"mechanisms to guarantee data won't be corrupted during broadcast")
.version("2.1.1")
.booleanConf
.createWithDefault(true)
private[spark] val BROADCAST_FOR_UDF_COMPRESSION_THRESHOLD =
ConfigBuilder("spark.broadcast.UDFCompressionThreshold")
.doc("The threshold at which user-defined functions (UDFs) and Python RDD commands " +
"are compressed by broadcast in bytes unless otherwise specified")
.version("3.0.0")
.bytesConf(ByteUnit.BYTE)
.checkValue(v => v >= 0, "The threshold should be non-negative.")
.createWithDefault(1L * 1024 * 1024)
private[spark] val RDD_COMPRESS = ConfigBuilder("spark.rdd.compress")
.doc("Whether to compress serialized RDD partitions " +
"(e.g. for StorageLevel.MEMORY_ONLY_SER in Scala " +
"or StorageLevel.MEMORY_ONLY in Python). Can save substantial " +
"space at the cost of some extra CPU time. " +
"Compression will use spark.io.compression.codec")
.version("0.6.0")
.booleanConf
.createWithDefault(false)
private[spark] val RDD_PARALLEL_LISTING_THRESHOLD =
ConfigBuilder("spark.rdd.parallelListingThreshold")
.version("2.0.0")
.intConf
.createWithDefault(10)
private[spark] val RDD_LIMIT_SCALE_UP_FACTOR =
ConfigBuilder("spark.rdd.limit.scaleUpFactor")
.version("2.1.0")
.intConf
.createWithDefault(4)
private[spark] val SERIALIZER = ConfigBuilder("spark.serializer")
.version("0.5.0")
.stringConf
.createWithDefault("org.apache.spark.serializer.JavaSerializer")
private[spark] val SERIALIZER_OBJECT_STREAM_RESET =
ConfigBuilder("spark.serializer.objectStreamReset")
.version("1.0.0")
.intConf
.createWithDefault(100)
private[spark] val SERIALIZER_EXTRA_DEBUG_INFO = ConfigBuilder("spark.serializer.extraDebugInfo")
.version("1.3.0")
.booleanConf
.createWithDefault(true)
private[spark] val JARS = ConfigBuilder("spark.jars")
.version("0.9.0")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val FILES = ConfigBuilder("spark.files")
.version("1.0.0")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val ARCHIVES = ConfigBuilder("spark.archives")
.version("3.1.0")
.doc("Comma-separated list of archives to be extracted into the working directory of each " +
"executor. .jar, .tar.gz, .tgz and .zip are supported. You can specify the directory " +
"name to unpack via adding '#' after the file name to unpack, for example, " +
"'file.zip#directory'. This configuration is experimental.")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val SUBMIT_DEPLOY_MODE = ConfigBuilder("spark.submit.deployMode")
.version("1.5.0")
.stringConf
.createWithDefault("client")
private[spark] val SUBMIT_PYTHON_FILES = ConfigBuilder("spark.submit.pyFiles")
.version("1.0.1")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val SCHEDULER_ALLOCATION_FILE =
ConfigBuilder("spark.scheduler.allocation.file")
.version("0.8.1")
.stringConf
.createOptional
private[spark] val SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO =
ConfigBuilder("spark.scheduler.minRegisteredResourcesRatio")
.version("1.1.1")
.doubleConf
.createOptional
private[spark] val SCHEDULER_MAX_REGISTERED_RESOURCE_WAITING_TIME =
ConfigBuilder("spark.scheduler.maxRegisteredResourcesWaitingTime")
.version("1.1.1")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("30s")
private[spark] val SCHEDULER_MODE =
ConfigBuilder("spark.scheduler.mode")
.version("0.8.0")
.stringConf
.transform(_.toUpperCase(Locale.ROOT))
.createWithDefault(SchedulingMode.FIFO.toString)
private[spark] val SCHEDULER_REVIVE_INTERVAL =
ConfigBuilder("spark.scheduler.revive.interval")
.version("0.8.1")
.timeConf(TimeUnit.MILLISECONDS)
.createOptional
private[spark] val SPECULATION_ENABLED =
ConfigBuilder("spark.speculation")
.version("0.6.0")
.booleanConf
.createWithDefault(false)
private[spark] val SPECULATION_INTERVAL =
ConfigBuilder("spark.speculation.interval")
.version("0.6.0")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefault(100)
private[spark] val SPECULATION_MULTIPLIER =
ConfigBuilder("spark.speculation.multiplier")
.version("0.6.0")
.doubleConf
.createWithDefault(1.5)
private[spark] val SPECULATION_QUANTILE =
ConfigBuilder("spark.speculation.quantile")
.version("0.6.0")
.doubleConf
.createWithDefault(0.75)
private[spark] val SPECULATION_MIN_THRESHOLD =
ConfigBuilder("spark.speculation.min.threshold")
.doc("Minimum amount of time a task runs before being considered for speculation. " +
"This can be used to avoid launching speculative copies of tasks that are very short.")
.version("3.2.0")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefault(100)
// Absolute-duration speculation trigger for stages with very few tasks.
// Fix: doc grammar "would try to speculative run the task" -> "speculatively run".
private[spark] val SPECULATION_TASK_DURATION_THRESHOLD =
  ConfigBuilder("spark.speculation.task.duration.threshold")
    .doc("Task duration after which scheduler would try to speculatively run the task. If " +
      "provided, tasks would be speculatively run if current stage contains less tasks " +
      "than or equal to the number of slots on a single executor and the task is taking " +
      "longer time than the threshold. This config helps speculate stage with very few " +
      "tasks. Regular speculation configs may also apply if the executor slots are " +
      "large enough. E.g. tasks might be re-launched if there are enough successful runs " +
      "even though the threshold hasn't been reached. The number of slots is computed based " +
      "on the conf values of spark.executor.cores and spark.task.cpus minimum 1.")
    .version("3.0.0")
    .timeConf(TimeUnit.MILLISECONDS)
    .createOptional
private[spark] val DECOMMISSION_ENABLED =
ConfigBuilder("spark.decommission.enabled")
.doc("When decommission enabled, Spark will try its best to shutdown the executor " +
s"gracefully. Spark will try to migrate all the RDD blocks (controlled by " +
s"${STORAGE_DECOMMISSION_RDD_BLOCKS_ENABLED.key}) and shuffle blocks (controlled by " +
s"${STORAGE_DECOMMISSION_SHUFFLE_BLOCKS_ENABLED.key}) from the decommissioning " +
s"executor to a remote executor when ${STORAGE_DECOMMISSION_ENABLED.key} is enabled. " +
s"With decommission enabled, Spark will also decommission an executor instead of " +
s"killing when ${DYN_ALLOCATION_ENABLED.key} enabled.")
.version("3.1.0")
.booleanConf
.createWithDefault(false)
private[spark] val EXECUTOR_DECOMMISSION_KILL_INTERVAL =
ConfigBuilder("spark.executor.decommission.killInterval")
.doc("Duration after which a decommissioned executor will be killed forcefully " +
"*by an outside* (e.g. non-spark) service. " +
"This config is useful for cloud environments where we know in advance when " +
"an executor is going to go down after decommissioning signal i.e. around 2 mins " +
"in aws spot nodes, 1/2 hrs in spot block nodes etc. This config is currently " +
"used to decide what tasks running on decommission executors to speculate.")
.version("3.1.0")
.timeConf(TimeUnit.SECONDS)
.createOptional
// Upper bound on how long a decommissioning executor is allowed to linger
// before Spark forces it to exit.
// Fix: doc wording "a Spark will force" and the broken sentence join
// "to exit. this should" are corrected.
private[spark] val EXECUTOR_DECOMMISSION_FORCE_KILL_TIMEOUT =
  ConfigBuilder("spark.executor.decommission.forceKillTimeout")
    .doc("Duration after which Spark will force a decommissioning executor to exit. " +
      "This should be set to a high value in most situations as low values will prevent " +
      "block migrations from having enough time to complete.")
    .version("3.2.0")
    .timeConf(TimeUnit.SECONDS)
    .createOptional
private[spark] val EXECUTOR_DECOMMISSION_SIGNAL =
ConfigBuilder("spark.executor.decommission.signal")
.doc("The signal that used to trigger the executor to start decommission.")
.version("3.2.0")
.stringConf
.createWithDefaultString("PWR")
private[spark] val STAGING_DIR = ConfigBuilder("spark.yarn.stagingDir")
.doc("Staging directory used while submitting applications.")
.version("2.0.0")
.stringConf
.createOptional
private[spark] val BUFFER_PAGESIZE = ConfigBuilder("spark.buffer.pageSize")
.doc("The amount of memory used per page in bytes")
.version("1.5.0")
.bytesConf(ByteUnit.BYTE)
.createOptional
private[spark] val RESOURCE_PROFILE_MERGE_CONFLICTS =
ConfigBuilder("spark.scheduler.resource.profileMergeConflicts")
.doc("If set to true, Spark will merge ResourceProfiles when different profiles " +
"are specified in RDDs that get combined into a single stage. When they are merged, " +
"Spark chooses the maximum of each resource and creates a new ResourceProfile. The " +
"default of false results in Spark throwing an exception if multiple different " +
"ResourceProfiles are found in RDDs going into the same stage.")
.version("3.1.0")
.booleanConf
.createWithDefault(false)
private[spark] val STANDALONE_SUBMIT_WAIT_APP_COMPLETION =
ConfigBuilder("spark.standalone.submit.waitAppCompletion")
.doc("In standalone cluster mode, controls whether the client waits to exit until the " +
"application completes. If set to true, the client process will stay alive polling " +
"the driver's status. Otherwise, the client process will exit after submission.")
.version("3.1.0")
.booleanConf
.createWithDefault(false)
private[spark] val EXECUTOR_ALLOW_SPARK_CONTEXT =
ConfigBuilder("spark.executor.allowSparkContext")
.doc("If set to true, SparkContext can be created in executors.")
.version("3.0.1")
.booleanConf
.createWithDefault(false)
private[spark] val EXECUTOR_KILL_ON_FATAL_ERROR_DEPTH =
ConfigBuilder("spark.executor.killOnFatalError.depth")
.doc("The max depth of the exception chain in a failed task Spark will search for a fatal " +
"error to check whether it should kill an executor. 0 means not checking any fatal " +
"error, 1 means checking only the exception but not the cause, and so on.")
.internal()
.version("3.1.0")
.intConf
.checkValue(_ >= 0, "needs to be a non-negative value")
.createWithDefault(5)
private[spark] val PUSH_BASED_SHUFFLE_ENABLED =
ConfigBuilder("spark.shuffle.push.enabled")
.doc("Set to 'true' to enable push-based shuffle on the client side and this works in " +
"conjunction with the server side flag spark.shuffle.server.mergedShuffleFileManagerImpl " +
"which needs to be set with the appropriate " +
"org.apache.spark.network.shuffle.MergedShuffleFileManager implementation for push-based " +
"shuffle to be enabled")
.version("3.1.0")
.booleanConf
.createWithDefault(false)
private[spark] val PUSH_BASED_SHUFFLE_MERGE_RESULTS_TIMEOUT =
ConfigBuilder("spark.shuffle.push.merge.results.timeout")
.doc("Specify the max amount of time DAGScheduler waits for the merge results from " +
"all remote shuffle services for a given shuffle. DAGScheduler will start to submit " +
"following stages if not all results are received within the timeout.")
.version("3.2.0")
.timeConf(TimeUnit.SECONDS)
.checkValue(_ >= 0L, "Timeout must be >= 0.")
.createWithDefaultString("10s")
private[spark] val PUSH_BASED_SHUFFLE_MERGE_FINALIZE_TIMEOUT =
ConfigBuilder("spark.shuffle.push.merge.finalize.timeout")
.doc("Specify the amount of time DAGScheduler waits after all mappers finish for " +
"a given shuffle map stage before it starts sending merge finalize requests to " +
"remote shuffle services. This allows the shuffle services some extra time to " +
"merge as many blocks as possible.")
.version("3.2.0")
.timeConf(TimeUnit.SECONDS)
.checkValue(_ >= 0L, "Timeout must be >= 0.")
.createWithDefaultString("10s")
private[spark] val SHUFFLE_MERGER_MAX_RETAINED_LOCATIONS =
ConfigBuilder("spark.shuffle.push.maxRetainedMergerLocations")
.doc("Maximum number of shuffle push merger locations cached for push based shuffle. " +
"Currently, shuffle push merger locations are nothing but external shuffle services " +
"which are responsible for handling pushed blocks and merging them and serving " +
"merged blocks for later shuffle fetch.")
.version("3.1.0")
.intConf
.createWithDefault(500)
private[spark] val SHUFFLE_MERGER_LOCATIONS_MIN_THRESHOLD_RATIO =
ConfigBuilder("spark.shuffle.push.mergersMinThresholdRatio")
.doc("The minimum number of shuffle merger locations required to enable push based " +
"shuffle for a stage. This is specified as a ratio of the number of partitions in " +
"the child stage. For example, a reduce stage which has 100 partitions and uses the " +
"default value 0.05 requires at least 5 unique merger locations to enable push based " +
"shuffle. Merger locations are currently defined as external shuffle services.")
.version("3.1.0")
.doubleConf
.createWithDefault(0.05)
private[spark] val SHUFFLE_MERGER_LOCATIONS_MIN_STATIC_THRESHOLD =
ConfigBuilder("spark.shuffle.push.mergersMinStaticThreshold")
.doc(s"The static threshold for number of shuffle push merger locations should be " +
"available in order to enable push based shuffle for a stage. Note this config " +
s"works in conjunction with ${SHUFFLE_MERGER_LOCATIONS_MIN_THRESHOLD_RATIO.key}. " +
"Maximum of spark.shuffle.push.mergersMinStaticThreshold and " +
s"${SHUFFLE_MERGER_LOCATIONS_MIN_THRESHOLD_RATIO.key} ratio number of mergers needed to " +
"enable push based shuffle for a stage. For eg: with 1000 partitions for the child " +
"stage with spark.shuffle.push.mergersMinStaticThreshold as 5 and " +
s"${SHUFFLE_MERGER_LOCATIONS_MIN_THRESHOLD_RATIO.key} set to 0.05, we would need " +
"at least 50 mergers to enable push based shuffle for that stage.")
.version("3.1.0")
.intConf
.createWithDefault(5)
private[spark] val SHUFFLE_NUM_PUSH_THREADS =
ConfigBuilder("spark.shuffle.push.numPushThreads")
.doc("Specify the number of threads in the block pusher pool. These threads assist " +
"in creating connections and pushing blocks to remote shuffle services. By default, the " +
"threadpool size is equal to the number of spark executor cores.")
.version("3.2.0")
.intConf
.createOptional
private[spark] val SHUFFLE_MAX_BLOCK_SIZE_TO_PUSH =
ConfigBuilder("spark.shuffle.push.maxBlockSizeToPush")
.doc("The max size of an individual block to push to the remote shuffle services. Blocks " +
"larger than this threshold are not pushed to be merged remotely. These shuffle blocks " +
"will be fetched by the executors in the original manner.")
.version("3.2.0")
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("1m")
private[spark] val SHUFFLE_MAX_BLOCK_BATCH_SIZE_FOR_PUSH =
ConfigBuilder("spark.shuffle.push.maxBlockBatchSize")
.doc("The max size of a batch of shuffle blocks to be grouped into a single push request.")
.version("3.2.0")
.bytesConf(ByteUnit.BYTE)
// Default is 3m because it is greater than 2m which is the default value for
// TransportConf#memoryMapBytes. If this defaults to 2m as well it is very likely that each
// batch of block will be loaded in memory with memory mapping, which has higher overhead
// with small MB sized chunk of data.
.createWithDefaultString("3m")
private[spark] val JAR_IVY_REPO_PATH =
ConfigBuilder("spark.jars.ivy")
.doc("Path to specify the Ivy user directory, used for the local Ivy cache and " +
"package files from spark.jars.packages. " +
"This will override the Ivy property ivy.default.ivy.user.dir " +
"which defaults to ~/.ivy2.")
.version("1.3.0")
.stringConf
.createOptional
private[spark] val JAR_IVY_SETTING_PATH =
ConfigBuilder("spark.jars.ivySettings")
.doc("Path to an Ivy settings file to customize resolution of jars specified " +
"using spark.jars.packages instead of the built-in defaults, such as maven central. " +
"Additional repositories given by the command-line option --repositories " +
"or spark.jars.repositories will also be included. " +
"Useful for allowing Spark to resolve artifacts from behind a firewall " +
"e.g. via an in-house artifact server like Artifactory. " +
"Details on the settings file format can be found at Settings Files")
.version("2.2.0")
.stringConf
.createOptional
private[spark] val JAR_PACKAGES =
ConfigBuilder("spark.jars.packages")
.doc("Comma-separated list of Maven coordinates of jars to include " +
"on the driver and executor classpaths. The coordinates should be " +
"groupId:artifactId:version. If spark.jars.ivySettings is given artifacts " +
"will be resolved according to the configuration in the file, otherwise artifacts " +
"will be searched for in the local maven repo, then maven central and finally " +
"any additional remote repositories given by the command-line option --repositories. " +
"For more details, see Advanced Dependency Management.")
.version("1.5.0")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val JAR_PACKAGES_EXCLUSIONS =
ConfigBuilder("spark.jars.excludes")
.doc("Comma-separated list of groupId:artifactId, " +
"to exclude while resolving the dependencies provided in spark.jars.packages " +
"to avoid dependency conflicts.")
.version("1.5.0")
.stringConf
.toSequence
.createWithDefault(Nil)
private[spark] val JAR_REPOSITORIES =
ConfigBuilder("spark.jars.repositories")
.doc("Comma-separated list of additional remote repositories to search " +
"for the maven coordinates given with --packages or spark.jars.packages.")
.version("2.3.0")
.stringConf
.toSequence
.createWithDefault(Nil)
}
| maropu/spark | core/src/main/scala/org/apache/spark/internal/config/package.scala | Scala | apache-2.0 | 96,384 |
package io.makana.hexwar.render
import io.makana.hexwar.engine.domain.game.GameState
import io.makana.hexwar.engine.domain.model._
import io.makana.hexwar.render.Ansi._
/**
 * Console renderer for the game: prints a textual header plus an ANSI-coloured
 * hex-map view to stdout.
 */
object Render {

  /** Prints the current turn number and the player roster. */
  def drawHeader(gameState: GameState): Unit = {
    println(s"Turn: ${gameState.gameTurn}")
    println("Players: ")
    // foreach, not map: this is a pure side effect and no result is collected.
    gameState.players.foreach(p => println(s"${p.number}: ${p.name} (${p.nationality})"))
  }

  /** Renders the terrain grid, overlays entity glyphs, and prints row by row. */
  def drawMap(mapBoard: MapBoard): Unit = {
    // One coloured glyph per hex, in the same order as mapBoard.hexes.
    val drawBuffer: Array[String] = mapBoard.hexes.map {
      case _: Plains => BgGreen(" ")
      case _: Road => "|"
      case _: Hill => Yellow("^")
      case _: Swamp => Cyan("~")
      case _: Beach => (BgWhite + Black)(".")
      case _: Ocean => (Black + BgBlue)("~")
      case _: River => (White + BgBlue)("~")
      case _: Stream => Blue("~")
      case _: Hilltop => (Black + BgYellow)("_")
      case _: Woods => (BgGreen + Black)("T")
      case _: Town => (White + BgBlack)(".")
      case _: TemporaryBridge => Yellow("-")
      case _: PermanentBridge => Black("-")
      case _ => "?" // unknown terrain type fallback
    }
    // Overlay each entity's glyph on top of the terrain it occupies.
    // NOTE(review): index uses stride size.y but rows are printed size.x wide;
    // this is only consistent for square boards or a (x-major, y-minor) layout
    // of `hexes` -- confirm against MapBoard's storage order.
    for (entity <- mapBoard.entities.values) {
      val p = mapBoard.size.y * entity.position.x + entity.position.y
      drawBuffer(p) = entity.glyph()
    }
    for (line <- drawBuffer.grouped(mapBoard.size.x)) {
      println(line.mkString(" "))
    }
  }

  /** Draws the complete game view: header first, then the map. */
  def draw(gameState: GameState): Unit = {
    drawHeader(gameState)
    drawMap(gameState.mapBoard)
  }
}
package pl.japila.spark.mf
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileStatus
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.datasources.{FileFormat, OutputWriterFactory, PartitionedFile}
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.unsafe.types.UTF8String
// Minimal read-only Spark FileFormat: ignores the underlying file contents and
// produces a single constant row per partitioned file. The println calls are
// debug traces; this looks like a demo/teaching data source.
class DefaultSource extends FileFormat {
  // Always reports a fixed one-column schema (nullable string "line"),
  // regardless of the files or options passed in.
  override def inferSchema(sparkSession: SparkSession, options: Map[String, String], files: Seq[FileStatus]): Option[StructType] = {
    println(s">>> inferSchema($files)")
    Some(StructType(
      StructField("line", StringType, nullable = true) :: Nil
    ))
  }
  // NOTE(review): returns null instead of an OutputWriterFactory, so any write
  // attempt through this format will NPE later. Read-only formats conventionally
  // throw UnsupportedOperationException here -- confirm intent before changing.
  override def prepareWrite(sparkSession: SparkSession, job: Job, options: Map[String, String], dataSchema: StructType): OutputWriterFactory = {
    println(">>> prepareWrite")
    null
  }
  // Every PartitionedFile yields exactly one row containing the UTF8 string
  // "hello"; schemas, filters, options and the file bytes are all ignored.
  override def buildReader(sparkSession: SparkSession, dataSchema: StructType, partitionSchema: StructType, requiredSchema: StructType, filters: Seq[Filter], options: Map[String, String], hadoopConf: Configuration): (PartitionedFile) => Iterator[InternalRow] = {
    pf => Iterator(InternalRow(UTF8String.fromString("hello")))
  }
}
| jaceklaskowski/spark-workshop | solutions/spark-mf-format/src/main/scala/pl/japila/spark/mf/DefaultSource.scala | Scala | apache-2.0 | 1,338 |
// Empty marker class with an explicit zero-argument constructor; carries no state.
class A()
// Empty marker class paired with an (empty) companion object below.
class B()
object B
| dotty-staging/dotty | tests/pos/creators/A_1.scala | Scala | apache-2.0 | 31 |
package com.dividezero.stubby.core.service
import com.dividezero.stubby.core.service.model._
import com.dividezero.stubby.core.model._
import scala.collection.mutable.ListBuffer
import com.dividezero.stubby.core.js.ScriptWorld
import com.dividezero.stubby.core.js.Script
import com.typesafe.scalalogging.log4j.Logging
import com.dividezero.stubby.core.util.JsonUtils
import com.dividezero.stubby.core.util.TimeLimit
case class NotFoundException(message: String) extends RuntimeException(message)
/**
 * In-memory registry of stubbed exchanges and a log of received requests.
 *
 * All public operations synchronize on `this`. `findMatch` calls `notifyAll`
 * after an unmatched request so that callers blocked inside
 * `findRequests(filter, timeout)` (which uses `wait` on the same monitor)
 * re-check for newly arrived requests.
 */
class StubService extends Logging {

  val LOGGER = logger // make logging stand out...

  // Received requests, most recent first (prepended in findMatch).
  val requests: ListBuffer[StubRequest] = new ListBuffer
  // Stubbed exchanges, most recently added first so newer stubs win on match.
  val responses: ListBuffer[StubServiceExchange] = new ListBuffer

  /** Registers a stubbed exchange, replacing any equivalent existing stub. */
  def addResponse(exchange: StubExchange): Unit = this.synchronized {
    LOGGER.trace("Adding stubbed exchange: " + JsonUtils.prettyPrint(exchange))
    val internal = new StubServiceExchange(exchange)
    responses -= internal // remove existing stubbed request (ie, will never match anymore)
    internal +=: responses // ensure most recent matched first
  }

  /**
   * Records the incoming request and matches it against the registered stubs,
   * collecting every match attempt for diagnostics. If the matched stub
   * carries a script, the script may rewrite the response and delay.
   */
  def findMatch(request: StubRequest): StubServiceResult = this.synchronized {
    try {
      LOGGER.trace("Got request: " + JsonUtils.prettyPrint(request))
      request +=: requests // prepend
      val attempts = new ListBuffer[MatchResult]
      for (response <- responses) {
        val matchResult = response.matches(request)
        attempts += matchResult
        if (matchResult.matches) {
          LOGGER.info("Matched: " + request.path.get)
          val exchange = response.exchange
          return exchange.script match {
            case Some(script) =>
              val world = new ScriptWorld(request, exchange.response, exchange.delay) // creates deep copies of objects
              new Script(script).execute(world)
              val (scriptResponse, scriptDelay) = world.result
              new StubServiceResult(attempts.toList, Some(scriptResponse), scriptDelay)
            case None =>
              new StubServiceResult(attempts.toList, Some(exchange.response), exchange.delay)
          }
        }
      }
      LOGGER.info("Didn't match: " + request.path.get)
      this.notifyAll // inform any waiting threads that a new request has come in
      new StubServiceResult(Nil) // no match (empty list)
    } catch {
      case e: Exception =>
        throw new RuntimeException("Error matching request", e)
    }
  }

  /** Returns the stubbed response at `index` (0 = most recently added). */
  @throws[NotFoundException]("if index does not exist")
  def getResponse(index: Int): StubServiceExchange = this.synchronized {
    try {
      responses(index)
    } catch {
      case e: IndexOutOfBoundsException =>
        throw new NotFoundException("Response does not exist: " + index)
    }
  }

  /** Deletes the stubbed response at `index`. */
  @throws[NotFoundException]("if index does not exist")
  def deleteResponse(index: Int) = this.synchronized {
    LOGGER.trace("Deleting response: " + index)
    try {
      responses.remove(index)
    } catch {
      case e: IndexOutOfBoundsException =>
        // Consistent with deleteRequest/getResponse; NotFoundException is
        // still a RuntimeException, so existing callers keep working.
        throw new NotFoundException("Response does not exist: " + index)
    }
  }

  /** Deletes every stubbed response whose pattern matches the exchange's request. */
  def deleteResponse(exchange: StubExchange) = this.synchronized {
    val toDelete = responses.filter(_.matches(exchange.request).matches)
    responses --= toDelete // single pass; avoids the previous O(n^2) indexOf-based removal
  }

  /** Removes all stubbed responses. */
  def deleteResponses() = this.synchronized {
    LOGGER.trace("Deleting all responses")
    responses.clear()
  }

  /** Returns the recorded request at `index` (0 = most recent). */
  @throws[NotFoundException]("if index does not exist")
  def getRequest(index: Int): StubRequest = this.synchronized {
    try {
      requests(index)
    } catch {
      case e: IndexOutOfBoundsException =>
        // Bug fix: this previously reported "Response does not exist".
        throw new NotFoundException("Request does not exist: " + index)
    }
  }

  /**
   * Blocking variant of [[findRequests]]: waits up to `timeout` ms for at
   * least one recorded request to match the filter; returns Nil on timeout.
   */
  def findRequests(filter: StubRequest, timeout: Long): Traversable[StubRequest] = this.synchronized { // blocking call
    TimeLimit.retry(timeout) { remaining =>
      val result = findRequests(filter)
      if (result.isEmpty) {
        try {
          this.wait(remaining) // wait for a request to come in, or time to expire
        } catch {
          case e: InterruptedException =>
            Thread.currentThread().interrupt() // restore interrupt status for callers up the stack
            throw new RuntimeException("Interrupted while waiting for request")
        }
        None // retry
      } else {
        Some(result) // found
      }
    }.getOrElse(Nil)
  }

  /** Returns all recorded requests matching the given filter. */
  def findRequests(filter: StubRequest): Traversable[StubRequest] = this.synchronized {
    val pattern = new RequestPattern(filter)
    requests.filter(r => pattern.matches(r).matches)
  }

  /** Deletes the recorded request at `index`. */
  @throws[NotFoundException]("if index does not exist")
  def deleteRequest(index: Int) = this.synchronized {
    LOGGER.trace("Deleting request: " + index)
    try {
      requests.remove(index)
    } catch {
      case e: IndexOutOfBoundsException =>
        throw new NotFoundException("Request does not exist: " + index)
    }
  }

  /** Removes all recorded requests. */
  def deleteRequests() = this.synchronized {
    LOGGER.trace("Deleting all requests")
    requests.clear()
  }
}
| headexplodes/http-stub-server-scala | core/src/main/scala/com/dividezero/stubby/core/service/StubService.scala | Scala | apache-2.0 | 5,156 |
package me.reminisce.stats.model
/** Messages for the statistics-computation component. */
object ComputationMessages {
  /** Request to compute statistics for the user identified by `userId`. */
  case class ComputeStatistics(userId: String)
}
| reminisceme/stats | src/main/scala/me/reminisce/stats/model/ComputationMessages.scala | Scala | apache-2.0 | 114 |
/**
* Copyright (c) 2013, The National Archives <digitalpreservation@nationalarchives.gov.uk>
* https://www.nationalarchives.gov.uk
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package uk.gov.nationalarchives.csv.validator.schema.v1_0
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import uk.gov.nationalarchives.csv.validator.metadata.{Cell, Row}
import uk.gov.nationalarchives.csv.validator.schema._
import scalaz.{Success, Failure, IList}
@RunWith(classOf[JUnitRunner])
class RangeRuleSpec extends Specification {

  "RangeRule" should {

    // A one-column schema whose single column is named "Country".
    val globalDirectives = List(TotalColumns(1))
    val schema = Schema(globalDirectives, List(ColumnDefinition(NamedColumnIdentifier("Country"))))

    "fail when non numeric number passed" in {
      val rule = new RangeRule(1, 2)
      rule.evaluate(0, Row(List(Cell("Germany")), 1), schema) must beLike {
        case Failure(messages) => messages.list mustEqual IList("""range(1,2) fails for line: 1, column: Country, value: "Germany"""")
      }
    }

    "pass when we test integer boundaries" in {
      val rule = new RangeRule(Int.MinValue, Int.MaxValue)
      rule.evaluate(0, Row(List(Cell(Int.MaxValue.toString)), 1), schema) mustEqual Success(true)
    }

    "fail when we test small decimal outside range" in {
      val rule = new RangeRule(0.01, 0.1)
      rule.evaluate(0, Row(List(Cell("0.00999999999999999999999999999999")), 1), schema) must beLike {
        case Failure(messages) => messages.list mustEqual IList("""range(0.01,0.1) fails for line: 1, column: Country, value: "0.00999999999999999999999999999999"""")
      }
    }
  }
}
| adamretter/csv-validator | csv-validator-core/src/test/scala/uk/gov/nationalarchives/csv/validator/schema/v1_0/RangeRuleSpec.scala | Scala | mpl-2.0 | 1,871 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.apollo.broker.store
import java.nio.channels.{WritableByteChannel, ReadableByteChannel}
import java.nio.ByteBuffer
import java.io._
import org.fusesource.hawtdispatch.Retained
/**
* <p>Allocates ZeroCopyBuffer objects</p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
trait DirectBufferAllocator {
  /** Allocates a buffer of `size` bytes. */
  def alloc(size:Int):DirectBuffer
  /** Releases resources held by this allocator. */
  def close
}
/**
* <p>
* A ZeroCopyBuffer is a reference counted buffer on
* temp storage.
*
* ON the
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
trait DirectBuffer extends Retained {
  /** Capacity of this buffer in bytes. */
  def size:Int
  /** Bytes between `from_position` and the end of the buffer — NOTE(review): assumed from signature; confirm against implementations. */
  def remaining(from_position: Int): Int
  /** Copies this buffer's contents to the given stream. */
  def read(target: OutputStream):Unit
  /** Transfers bytes starting at offset `src` to the channel; presumably returns the count transferred — verify. */
  def read(src: Int, target: WritableByteChannel): Int
  /** Copies the contents of `src` into this buffer. */
  def copy(src:DirectBuffer): Unit
  /** Fills this buffer from the channel at offset `target`; presumably returns bytes read — verify. */
  def write(src:ReadableByteChannel, target:Int): Int
  /** Fills this buffer from `src` at offset `target`; presumably returns bytes written — verify. */
  def write(src:ByteBuffer, target:Int):Int
  /** Fills this buffer from the given input stream. */
  def write(target:InputStream):Unit
}
| chirino/activemq-apollo | apollo-broker/src/main/scala/org/apache/activemq/apollo/broker/store/DirectBufferAllocator.scala | Scala | apache-2.0 | 1,746 |
package com.arcusys.learn.liferay.services
import com.arcusys.learn.liferay.LiferayClasses.LLayoutSetPrototype
import com.liferay.portal.kernel.dao.orm.QueryUtil
import com.liferay.portal.kernel.service.LayoutSetPrototypeLocalServiceUtil
import scala.collection.JavaConverters._
/**
* Created by amikhailov on 23.11.16.
*/
/** Thin Scala-friendly wrappers over Liferay's LayoutSetPrototypeLocalServiceUtil. */
object LayoutSetPrototypeServiceHelper {

  /** Lists a company's layout set prototypes filtered by the active flag. */
  def search(companyId: Long, active: Boolean): Seq[LLayoutSetPrototype] = {
    val found = LayoutSetPrototypeLocalServiceUtil.search(companyId, active, QueryUtil.ALL_POS, QueryUtil.ALL_POS, null)
    found.asScala
  }

  /** Delegates to Liferay's strict lookup by id. */
  def getLayoutSetPrototype(layoutSetPrototypeId: Long): LLayoutSetPrototype =
    LayoutSetPrototypeLocalServiceUtil.getLayoutSetPrototype(layoutSetPrototypeId)

  /** Lenient lookup by id; a null result from Liferay becomes None. */
  def fetchLayoutSetPrototype(layoutSetPrototypeId: Long): Option[LLayoutSetPrototype] = {
    val prototype = LayoutSetPrototypeLocalServiceUtil.fetchLayoutSetPrototype(layoutSetPrototypeId)
    Option(prototype)
  }
} | arcusys/Valamis | learn-liferay700-services/src/main/scala/com/arcusys/learn/liferay/services/LayoutSetPrototypeServiceHelper.scala | Scala | gpl-3.0 | 910 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.createTable
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
/**
* test functionality for alter table with datamap
*/
class TestRenameTableWithDataMap extends QueryTest with BeforeAndAfterAll {
  // Small CSV fixture loaded into the test table alongside literal inserts.
  val smallFile = s"$resourcesPath/sample.csv"
  // Drop any leftover tables from previous runs and enable query statistics
  // so that the `explain` output exercised below is produced.
  override def beforeAll {
    sql("DROP TABLE IF EXISTS carbon_table")
    sql("DROP TABLE IF EXISTS carbon_tb")
    sql("DROP TABLE IF EXISTS fact_table1")
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.ENABLE_QUERY_STATISTICS, "true")
  }
  // Creates a table with a bloomfilter datamap, loads data, renames the table
  // (carbon_table -> carbon_tb) and re-runs show datamap / select / explain
  // under the new name to verify the datamap survives the rename.
  test("Creating a bloomfilter datamap,then table rename") {
    sql(
      s"""
         | CREATE TABLE carbon_table(
         | id INT, name String, city String, age INT
         | )
         | STORED AS carbondata
      """.stripMargin)
    sql(
      s"""
         | CREATE DATAMAP dm_carbon_table_name ON TABLE carbon_table
         | USING 'bloomfilter'
         | DMProperties('INDEX_COLUMNS'='name,city', 'BLOOM_SIZE'='640000')
      """.stripMargin)
    // Two rounds of inserts + one CSV load each, to produce multiple segments.
    (1 to 2).foreach { i =>
      sql(
        s"""
           | insert into carbon_table select 5,'bb','beijing',21
           | """.stripMargin)
      sql(
        s"""
           | insert into carbon_table select 6,'cc','shanghai','29'
           | """.stripMargin)
      sql(
        s"""
           | LOAD DATA LOCAL INPATH '$smallFile' INTO TABLE carbon_table
           | OPTIONS('header'='false')
        """.stripMargin)
    }
    // Baseline checks under the original table name.
    sql(
      s"""
         | show datamap on table carbon_table
      """.stripMargin).show(false)
    sql(
      s"""
         | select * from carbon_table where name='eason'
      """.stripMargin).show(false)
    sql(
      s"""
         | explain select * from carbon_table where name='eason'
      """.stripMargin).show(false)
    // Rename and repeat the same checks under the new name.
    sql(
      s"""
         | alter TABLE carbon_table rename to carbon_tb
      """.stripMargin)
    sql(
      s"""
         | show datamap on table carbon_tb
      """.stripMargin).show(false)
    sql(
      s"""
         | select * from carbon_tb where name='eason'
      """.stripMargin).show(false)
    sql(
      s"""
         | explain select * from carbon_tb where name='eason'
      """.stripMargin).show(false)
  }
  /*
   * mv datamap does not support running here, now must run in mv project.
  test("Creating a mv datamap,then table rename") {
    sql(
      """
        | CREATE TABLE fact_table2 (empname String, designation String, doj Timestamp,
        |  workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
        |  projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
        |  utilization int,salary int)
        | STORED AS carbondata
      """.stripMargin)
    sql(s"""LOAD DATA local inpath '$resourcesPath/data_big.csv' INTO TABLE fact_table2 OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
    sql(s"""LOAD DATA local inpath '$resourcesPath/data_big.csv' INTO TABLE fact_table2 OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
    sql("drop datamap if exists datamap1")
    sql("create datamap datamap1 using 'mv' as select empname, designation from fact_table2")
    sql(s"rebuild datamap datamap1")
    sql(
      s"""
         | show datamap on table fact_table2
      """.stripMargin).show(false)
    val exception_tb_rename: Exception = intercept[Exception] {
      sql(
        s"""
           | alter TABLE fact_table2 rename to fact_tb2
         """.stripMargin)
    }
    assert(exception_tb_rename.getMessage
      .contains("alter rename is not supported for mv datamap"))
  } */
  // Clean up the test tables and restore the statistics property default.
  override def afterAll: Unit = {
    sql("DROP TABLE IF EXISTS carbon_table")
    sql("DROP TABLE IF EXISTS carbon_tb")
    sql("DROP TABLE IF EXISTS fact_table1")
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.ENABLE_QUERY_STATISTICS,
        CarbonCommonConstants.ENABLE_QUERY_STATISTICS_DEFAULT)
  }
}
| jackylk/incubator-carbondata | integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestRenameTableWithDataMap.scala | Scala | apache-2.0 | 4,898 |
package japgolly.scalajs.react.extra
import org.scalajs.dom.raw.EventTarget
import org.scalajs.dom.Event
import scala.scalajs.js
import scalaz.effect.IO
import japgolly.scalajs.react._
/**
 * Helpers for attaching DOM event listeners to React components: the listener
 * is added in `componentDidMount` and removed automatically via the
 * [[OnUnmount]] backend when the component unmounts.
 */
object EventListener {

  /** Fixes the event type, e.g. `EventListener[MouseEvent].install(...)`. */
  def apply[E <: Event] = new OfEventType[E](true)

  /** Default listen target: the component's own top-level DOM node. */
  def defaultTarget[P, S, B, N <: TopNode]: ComponentScopeM[P,S,B,N] => EventTarget =
    _.getDOMNode()

  /** Value-class wrapper carrying the event type `E`; the `_unused` field only
    * exists because a value class requires one constructor parameter. */
  final class OfEventType[E <: Event](val _unused: Boolean) extends AnyVal {
    /**
     * Install an event listener when a component is mounted.
     * Automatically uninstalls the event listener when the component is unmounted.
     *
     * @param eventType A string representing the
     *                  <a href="https://developer.mozilla.org/en-US/docs/DOM/event.type">event type</a> to listen for.
     * @param useCapture If true, useCapture indicates that the user wishes to initiate capture.
     *                   After initiating capture, all events of the specified type will be dispatched to the registered
     *                   listener before being dispatched to any EventTarget beneath it in the DOM tree.
     *                   Events which are bubbling upward through the tree will not trigger a listener designated to use
     *                   capture.
     */
    def install[P, S, B <: OnUnmount, N <: TopNode](eventType : String,
                                                    listener  : ComponentScopeM[P,S,B,N] => E => Unit,
                                                    target    : ComponentScopeM[P,S,B,N] => EventTarget = defaultTarget[P,S,B,N],
                                                    useCapture: Boolean = false) =
      OnUnmount.install[P,S,B,N] andThen (_.componentDidMount { $ =>
        val et = target($)
        val fe = listener($)
        // Keep a stable js.Function1 reference so the same function object is
        // passed to both addEventListener and removeEventListener.
        val f: js.Function1[E, Unit] = (e: E) => fe(e)
        et.addEventListener(eventType, f, useCapture)
        // Registers the removal with the OnUnmount backend.
        $.backend.onUnmount(et.removeEventListener(eventType, f, useCapture))
      })

    /** See [[install()]]. */
    def installIO[P, S, B <: OnUnmount, N <: TopNode](eventType : String,
                                                      listener  : ComponentScopeM[P,S,B,N] => E => IO[Unit],
                                                      target    : ComponentScopeM[P,S,B,N] => EventTarget = defaultTarget[P,S,B,N],
                                                      useCapture: Boolean = false) =
      install[P,S,B,N](
        eventType,
        $ => { val f = listener($); e => f(e).unsafePerformIO() },
        target, useCapture)
  }

  /** See [[OfEventType.install()]]. */
  def install[P, S, B <: OnUnmount, N <: TopNode](eventType : String,
                                                  listener  : ComponentScopeM[P,S,B,N] => () => Unit,
                                                  target    : ComponentScopeM[P,S,B,N] => EventTarget = defaultTarget[P,S,B,N],
                                                  useCapture: Boolean = false) =
    EventListener[Event].install[P,S,B,N](
      eventType,
      $ => { val f = listener($); _ => f() },
      target, useCapture)

  /** See [[OfEventType.install()]]. */
  def installIO[P, S, B <: OnUnmount, N <: TopNode](eventType : String,
                                                    listener  : ComponentScopeM[P,S,B,N] => IO[Unit],
                                                    target    : ComponentScopeM[P,S,B,N] => EventTarget = defaultTarget[P,S,B,N],
                                                    useCapture: Boolean = false) =
    EventListener[Event].installIO[P,S,B,N](
      eventType,
      Function const listener(_),
      target, useCapture)
}
| gshakhn/scalajs-react | extra/src/main/scala/japgolly/scalajs/react/extra/EventListener.scala | Scala | apache-2.0 | 3,653 |
package org.imdex.tractor.meta.union
import org.imdex.tractor.union
import org.imdex.tractor.union._
import scala.reflect.macros.whitebox
/**
* Created by a.tsukanov on 14.07.2016.
*/
/**
 * Whitebox macro bundle materializing compile-time evidence for union-type
 * relations (membership, subset, superset, equality). Each public method
 * either produces a `null`-valued evidence expression or aborts, which makes
 * the corresponding implicit search fail.
 */
class Traits(val traitsCtx: whitebox.Context) extends Common(traitsCtx) {
  import c.universe._

  // Aborts the macro expansion. NOTE(review): the abort message is empty —
  // presumably these materializers only fail during implicit search where the
  // message is not surfaced; confirm before calling them explicitly.
  private def stop(): Nothing = c.abort(c.enclosingPosition, "")

  // True when an implicit conversion `from => to` can be inferred here.
  private def hasImplicitView(from: Type, to: Type): Boolean = c.inferImplicitView(q"null", from, to) ne EmptyTree

  // Evidence values carry no runtime information, so `null` is materialized.
  private def result[T]: Expr[T] = c.Expr[T](q"null")

  // Every member type of T is a subtype of some member type of U.
  private def weakSubset[T <: Union : c.WeakTypeTag, U <: Union : c.WeakTypeTag]: Boolean = {
    val firstTypes = decay[T]
    val secondTypes = decay[U]
    firstTypes.forall(tpe => secondTypes.exists(tpe <:< _))
  }

  // Exact membership: T is literally one of U's member types.
  private def isMemberExists[T : c.WeakTypeTag, U <: Union : c.WeakTypeTag]: Boolean = {
    val tpe = weakTypeOf[T]
    val unionTypes = decay[U]
    unionTypes.contains(tpe)
  }

  // Weak membership: T is a subtype of, or implicitly convertible to, a member of U.
  private def isWeakMemberExists[T : c.WeakTypeTag, U <: Union : c.WeakTypeTag]: Boolean = {
    val tpe = weakTypeOf[T]
    val unionTypes = decay[U]
    unionTypes.exists(tpe <:< _) || unionTypes.exists(hasImplicitView(tpe, _))
  }

  /** Evidence that T is an exact member of union U. */
  def isMember[T : c.WeakTypeTag, U <: Union : c.WeakTypeTag]: c.Expr[T ∈ U] = {
    if (!isMemberExists[T, U]) stop()
    result[T ∈ U]
  }

  /** Evidence that T is a weak (subtype/convertible) member of union U. */
  def isWeakMember[T : c.WeakTypeTag, U <: Union : c.WeakTypeTag]: c.Expr[T weak_∈ U] = {
    if (!isWeakMemberExists[T, U]) stop()
    result[T weak_∈ U]
  }

  /** Evidence that T is NOT an exact member of union U. */
  def isNotAMember[T : c.WeakTypeTag, U <: Union : c.WeakTypeTag]: c.Expr[T ∉ U] = {
    if (isMemberExists[T, U]) stop()
    result[T ∉ U]
  }

  /** Evidence that T is not even a weak member of union U. */
  def isNotAWeakMember[T : c.WeakTypeTag, U <: Union : c.WeakTypeTag]: c.Expr[T weak_∉ U] = {
    if (isWeakMemberExists[T, U]) stop()
    result[T weak_∉ U]
  }

  /** Evidence that the two unions have identical member-type sets. */
  def equals[T <: Union : c.WeakTypeTag, U <: Union : c.WeakTypeTag]: c.Expr[union.=:=[T, U]] = {
    val firstTypes = decay[T]
    val secondTypes = decay[U]
    if (firstTypes != secondTypes) stop()
    result[union.=:=[T, U]]
  }

  /** Evidence that the two unions have different member-type sets. */
  def notEquals[T <: Union : c.WeakTypeTag, U <: Union : c.WeakTypeTag]: c.Expr[union.=!=[T, U]] = {
    val firstTypes = decay[T]
    val secondTypes = decay[U]
    if (firstTypes != secondTypes) result[union.=!=[T, U]]
    else stop()
  }

  /** Evidence that T's member set is an exact subset of U's. */
  def isSubset[T <: Union : c.WeakTypeTag, U <: Union : c.WeakTypeTag]: c.Expr[T ⊂ U] = {
    val firstTypes = decay[T]
    val secondTypes = decay[U]
    if (firstTypes.subsetOf(secondTypes)) result[T ⊂ U]
    else stop()
  }

  /** Evidence that T's members are all subtypes of some member of U. */
  def isWeakSubset[T <: Union : c.WeakTypeTag, U <: Union : c.WeakTypeTag]: c.Expr[T weak_⊂ U] = {
    if (weakSubset[T, U]) result[T weak_⊂ U]
    else stop()
  }

  /** Evidence that T's member set is an exact superset of U's. */
  def isSuperset[T <: Union : c.WeakTypeTag, U <: Union : c.WeakTypeTag]: c.Expr[T ⊃ U] = {
    val firstTypes = decay[T]
    val secondTypes = decay[U]
    if (secondTypes.subsetOf(firstTypes)) result[T ⊃ U]
    else stop()
  }

  /** Evidence that U's members are all subtypes of some member of T. */
  def isWeakSuperset[T <: Union : c.WeakTypeTag, U <: Union : c.WeakTypeTag]: c.Expr[T weak_⊃ U] = {
    if (weakSubset[U, T]) result[T weak_⊃ U]
    else stop()
  }
}
| Im-dex/trActor | tractor-macro/src/main/scala/org/imdex/tractor/meta/union/Traits.scala | Scala | mit | 3,410 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package collection
package immutable
import scala.annotation.tailrec
/** This class implements an immutable map that preserves order using
* a hash map for the key to value mapping to provide efficient lookup,
* and a tree for the ordering of the keys to provide efficient
* insertion/modification order traversal and destructuring.
*
* By default insertion order (`TreeSeqMap.OrderBy.Insertion`)
* is used, but modification order (`TreeSeqMap.OrderBy.Modification`)
* can be used instead if so specified at creation.
*
* The `orderingBy(orderBy: TreeSeqMap.OrderBy): TreeSeqMap[K, V]` method
* can be used to switch to the specified ordering for the returned map.
*
* A key can be manually refreshed (i.e. placed at the end) via the
* `refresh(key: K): TreeSeqMap[K, V]` method (regardless of the ordering in
* use).
*
* Internally, an ordinal counter is increased for each insertion/modification
* and then the current ordinal is used as key in the tree map. After 2^32^
* insertions/modifications the entire map is copied (thus resetting the ordinal
* counter).
*
* @tparam K the type of the keys contained in this map.
* @tparam V the type of the values associated with the keys in this map.
* @define coll immutable tree seq map
* @define Coll `immutable.TreeSeqMap`
*/
final class TreeSeqMap[K, +V] private (
    private val ordering: TreeSeqMap.Ordering[K],
    private val mapping: TreeSeqMap.Mapping[K, V],
    private val ordinal: Int,
    val orderedBy: TreeSeqMap.OrderBy)
  extends AbstractMap[K, V]
    with SeqMap[K, V]
    with MapOps[K, V, TreeSeqMap, TreeSeqMap[K, V]]
    with StrictOptimizedIterableOps[(K, V), Iterable, TreeSeqMap[K, V]]
    with StrictOptimizedMapOps[K, V, TreeSeqMap, TreeSeqMap[K, V]]
    with MapFactoryDefaults[K, V, TreeSeqMap, Iterable] {
  import TreeSeqMap._
  override protected[this] def className: String = "TreeSeqMap"
  override def mapFactory: MapFactory[TreeSeqMap] = TreeSeqMap
  // Size comes straight from the hash mapping; the ordering tree holds the same keys.
  override val size = mapping.size
  override def knownSize: Int = size
  override def isEmpty = size == 0
  /*
  // This should have been overridden in 2.13.0 but wasn't so it will have to wait since it is not forwards compatible
  // Now handled in inherited method from scala.collection.MapFactoryDefaults instead.
  override def empty = TreeSeqMap.empty[K, V](orderedBy)
  */
  /** Returns this map under the requested ordering, sharing structure when possible. */
  def orderingBy(orderBy: OrderBy): TreeSeqMap[K, V] = {
    if (orderBy == this.orderedBy) this
    else if (isEmpty) TreeSeqMap.empty(orderBy)
    else new TreeSeqMap(ordering, mapping, ordinal, orderBy)
  }
  /**
   * Adds or updates a binding. Under `OrderBy.Insertion` an existing key keeps
   * its position; under `OrderBy.Modification` it is moved to the end.
   */
  def updated[V1 >: V](key: K, value: V1): TreeSeqMap[K, V1] = {
    mapping.get(key) match {
      // ordinal == -1 means the counter has wrapped: rebuild from scratch.
      case e if ordinal == -1 && (orderedBy == OrderBy.Modification || e.isEmpty) =>
        // Reinsert into fresh instance to restart ordinal counting, expensive but only done after 2^32 updates.
        TreeSeqMap.empty[K, V1](orderedBy) ++ this + (key -> value)
      case Some((o, _)) if orderedBy == OrderBy.Insertion =>
        new TreeSeqMap(
          ordering.include(o, key),
          mapping.updated[(Int, V1)](key, (o, value)),
          ordinal, // Do not increment the ordinal since the key is already present, i.e. o <= ordinal.
          orderedBy)
      case Some((o, _)) =>
        // Modification order: drop the old ordinal and append with a fresh one.
        val o1 = increment(ordinal)
        new TreeSeqMap(
          ordering.exclude(o).append(o1, key),
          mapping.updated[(Int, V1)](key, (o1, value)),
          o1,
          orderedBy)
      case None =>
        val o1 = increment(ordinal)
        new TreeSeqMap(
          ordering.append(o1, key),
          mapping.updated[(Int, V1)](key, (o1, value)),
          o1,
          orderedBy)
    }
  }
  /** Removes a key from both the mapping and the ordering tree. */
  def removed(key: K): TreeSeqMap[K, V] = {
    mapping.get(key) match {
      case Some((o, _)) =>
        new TreeSeqMap(
          ordering.exclude(o),
          mapping.removed(key),
          ordinal,
          orderedBy)
      case None =>
        this
    }
  }
  /** Moves `key` to the end of the traversal order (regardless of `orderedBy`). */
  def refresh(key: K): TreeSeqMap[K, V] = {
    mapping.get(key) match {
      case Some((o, _)) =>
        val o1 = increment(ordinal)
        new TreeSeqMap(
          ordering.exclude(o).append(o1, key),
          mapping,
          o1,
          orderedBy)
      case None =>
        this
    }
  }
  def get(key: K): Option[V] = mapping.get(key).map(value)
  // Iteration order is the ordinal order kept by the ordering tree.
  def iterator: Iterator[(K, V)] = new AbstractIterator[(K, V)] {
    private[this] val iter = ordering.iterator
    override def hasNext: Boolean = iter.hasNext
    override def next(): (K, V) = binding(iter.next())
  }
  override def keysIterator: Iterator[K] = new AbstractIterator[K] {
    private[this] val iter = ordering.iterator
    override def hasNext: Boolean = iter.hasNext
    override def next(): K = iter.next()
  }
  override def valuesIterator: Iterator[V] = new AbstractIterator[V] {
    private[this] val iter = ordering.iterator
    override def hasNext: Boolean = iter.hasNext
    override def next(): V = value(binding(iter.next()))
  }
  override def contains(key: K): Boolean = mapping.contains(key)
  override def head: (K, V) = binding(ordering.head)
  override def headOption = ordering.headOption.map(binding)
  override def last: (K, V) = binding(ordering.last)
  override def lastOption: Option[(K, V)] = ordering.lastOption.map(binding)
  override def tail: TreeSeqMap[K, V] = {
    val (head, tail) = ordering.headTail
    new TreeSeqMap(tail, mapping.removed(head), ordinal, orderedBy)
  }
  override def init: TreeSeqMap[K, V] = {
    val (init, last) = ordering.initLast
    new TreeSeqMap(init, mapping.removed(last), ordinal, orderedBy)
  }
  /** Positional slice in traversal order; clamps the bounds like Seq.slice. */
  override def slice(from: Int, until: Int): TreeSeqMap[K, V] = {
    val sz = size
    if (sz == 0 || from >= until) TreeSeqMap.empty[K, V](orderedBy)
    else {
      val sz = size
      val f = if (from >= 0) from else 0
      val u = if (until <= sz) until else sz
      val l = u - f
      if (l <= 0) TreeSeqMap.empty[K, V](orderedBy)
      else if (l > sz / 2) {
        // Remove front and rear incrementally if majority of elements are to be kept
        val (front, rest) = ordering.splitAt(f)
        val (ong, rear) = rest.splitAt(l)
        var mng = this.mapping
        val frontIter = front.iterator
        while (frontIter.hasNext) {
          mng = mng - frontIter.next()
        }
        val rearIter = rear.iterator
        while (rearIter.hasNext) {
          mng = mng - rearIter.next()
        }
        new TreeSeqMap(ong, mng, ordinal, orderedBy)
      } else {
        // Populate with builder otherwise
        val bdr = newBuilder[K, V](orderedBy)
        val iter = ordering.iterator
        var i = 0
        while (i < f) {
          iter.next()
          i += 1
        }
        while (i < u) {
          val k = iter.next()
          bdr.addOne((k, mapping(k)._2))
          i += 1
        }
        bdr.result()
      }
    }
  }
  // map/flatMap/collect rebuild via a builder so the result keeps traversal order.
  override def map[K2, V2](f: ((K, V)) => (K2, V2)): TreeSeqMap[K2, V2] = {
    val bdr = newBuilder[K2, V2](orderedBy)
    val iter = ordering.iterator
    while (iter.hasNext) {
      val k = iter.next()
      val (_, v) = mapping(k)
      val (k2, v2) = f((k, v))
      bdr.addOne((k2, v2))
    }
    bdr.result()
  }
  override def flatMap[K2, V2](f: ((K, V)) => IterableOnce[(K2, V2)]): TreeSeqMap[K2, V2] = {
    val bdr = newBuilder[K2, V2](orderedBy)
    val iter = ordering.iterator
    while (iter.hasNext) {
      val k = iter.next()
      val (_, v) = mapping(k)
      val jter = f((k, v)).iterator
      while (jter.hasNext) {
        val (k2, v2) = jter.next()
        bdr.addOne((k2, v2))
      }
    }
    bdr.result()
  }
  override def collect[K2, V2](pf: PartialFunction[(K, V), (K2, V2)]): TreeSeqMap[K2, V2] = {
    val bdr = newBuilder[K2, V2](orderedBy)
    val iter = ordering.iterator
    while (iter.hasNext) {
      val k = iter.next()
      val (_, v) = mapping(k)
      pf.runWith({ case (k2, v2) => bdr.addOne((k2, v2)) })((k, v))
    }
    bdr.result()
  }
  /** Appends `suffix`, applying the same ordinal rules as repeated `updated` calls. */
  override def concat[V2 >: V](suffix: IterableOnce[(K, V2)]): TreeSeqMap[K, V2] = {
    var ong: Ordering[K] = ordering
    var mng: Mapping[K, V2] = mapping
    var ord = increment(ordinal)
    val iter = suffix.iterator
    while (iter.hasNext) {
      val (k, v2) = iter.next()
      mng.get(k) match {
        case Some((o, v)) =>
          if (orderedBy == OrderBy.Insertion && v != v2) mng = mng.updated(k, (o, v2))
          else if (orderedBy == OrderBy.Modification) {
            mng = mng.updated(k, (ord, v2))
            ong = ong.exclude(o).append(ord, k)
            ord = increment(ord)
          }
        case None =>
          mng = mng.updated(k, (ord, v2))
          ong = ong.append(ord, k)
          ord = increment(ord)
      }
    }
    new TreeSeqMap[K, V2](ong, mng, ord, orderedBy)
  }
  // Extracts the value from a stored (ordinal, value) pair.
  @`inline` private[this] def value(p: (_, V)) = p._2
  // Rebuilds the public (key, value) pair for a key from the mapping.
  @`inline` private[this] def binding(k: K) = mapping(k).copy(_1 = k)
}
object TreeSeqMap extends MapFactory[TreeSeqMap] {
  // Selects which event places a key at the end of the traversal order.
  sealed trait OrderBy
  object OrderBy {
    // Keys keep their original insertion position; updates do not move them.
    case object Insertion extends OrderBy
    // Every update moves the key to the end of the traversal order.
    case object Modification extends OrderBy
  }
  // Shared empty instances, one per ordering mode.
  private val EmptyByInsertion = new TreeSeqMap[Nothing, Nothing](Ordering.empty, HashMap.empty, 0, OrderBy.Insertion)
  private val EmptyByModification = new TreeSeqMap[Nothing, Nothing](Ordering.empty, HashMap.empty, 0, OrderBy.Modification)
  val Empty = EmptyByInsertion
  def empty[K, V]: TreeSeqMap[K, V] = empty(OrderBy.Insertion)
  def empty[K, V](orderBy: OrderBy): TreeSeqMap[K, V] = {
    if (orderBy == OrderBy.Modification) EmptyByModification
    else EmptyByInsertion
  }.asInstanceOf[TreeSeqMap[K, V]]
  def from[K, V](it: collection.IterableOnce[(K, V)]): TreeSeqMap[K, V] =
    it match {
      case om: TreeSeqMap[K, V] => om
      case _ => (newBuilder[K, V] ++= it).result()
    }
  // Ordinals wrap from Int.MaxValue to Int.MinValue and then climb toward -1,
  // at which point `updated` rebuilds the map to restart the counter.
  @inline private def increment(ord: Int) = if (ord == Int.MaxValue) Int.MinValue else ord + 1
  def newBuilder[K, V]: mutable.Builder[(K, V), TreeSeqMap[K, V]] = newBuilder(OrderBy.Insertion)
  def newBuilder[K, V](orderedBy: OrderBy): mutable.Builder[(K, V), TreeSeqMap[K, V]] = new Builder[K, V](orderedBy)
  final class Builder[K, V](orderedBy: OrderBy) extends mutable.Builder[(K, V), TreeSeqMap[K, V]] {
    private[this] val bdr = new MapBuilderImpl[K, (Int, V)]
    private[this] var ong = Ordering.empty[K]
    private[this] var ord = 0
    // Once result() has been called, further additions go through the
    // immutable map instead of mutating the (already shared) builder state.
    private[this] var aliased: TreeSeqMap[K, V] = _
    override def addOne(elem: (K, V)): this.type = addOne(elem._1, elem._2)
    def addOne(key: K, value: V): this.type = {
      if (aliased ne null) {
        aliased = aliased.updated(key, value)
      } else {
        bdr.getOrElse(key, null) match {
          case (o, v) =>
            // Existing key: same ordinal rules as TreeSeqMap.updated.
            if (orderedBy == OrderBy.Insertion && v != value) bdr.addOne(key, (o, value))
            else if (orderedBy == OrderBy.Modification) {
              bdr.addOne(key, (ord, value))
              ong = ong.exclude(o).appendInPlace(ord, key)
              ord = increment(ord)
            }
          case null =>
            bdr.addOne(key, (ord, value))
            ong = ong.appendInPlace(ord, key)
            ord = increment(ord)
        }
      }
      this
    }
    override def clear(): Unit = {
      ong = Ordering.empty
      ord = 0
      bdr.clear()
      aliased = null
    }
    override def result(): TreeSeqMap[K, V] = {
      if (aliased eq null) {
        aliased = new TreeSeqMap(ong, bdr.result(), ord, orderedBy)
      }
      aliased
    }
  }
  // Key -> (ordinal, value); the ordinal is the key into the Ordering tree.
  private type Mapping[K, +V] = Map[K, (Int, V)]
  @annotation.unused
  private val Mapping = Map
/* The ordering implementation below is an adapted version of immutable.IntMap. */
private[immutable] object Ordering {
import scala.collection.generic.BitOperations.Int._
@inline private[immutable] def toBinaryString(i: Int): String = s"$i/${i.toBinaryString}"
def empty[T] : Ordering[T] = Zero
def apply[T](elems: (Int, T)*): Ordering[T] =
elems.foldLeft(empty[T])((x, y) => x.include(y._1, y._2))
// Iterator over a non-empty Ordering.
final class Iterator[+V](it: Ordering[V]) {
// Basically this uses a simple stack to emulate conversion over the tree. However
// because we know that Ints are at least 32 bits we can have at most 32 Bins and
// one Tip sitting on the tree at any point. Therefore we know the maximum stack
// depth is 33
private[this] var index = 0
private[this] val buffer = new Array[AnyRef](33)
private[this] def pop = {
index -= 1
buffer(index).asInstanceOf[Ordering[V]]
}
private[this] def push[V2 >: V](x: Ordering[V2]): Unit = {
buffer(index) = x.asInstanceOf[AnyRef]
index += 1
}
if (it != Zero) push(it)
def hasNext = index != 0
@tailrec
def next(): V =
pop match {
case Bin(_,_, Tip(_, v), right) =>
push(right)
v
case Bin(_, _, left, right) =>
push(right)
push(left)
next()
case Tip(_, v) => v
// This should never happen. We don't allow Ordering.Zero in subtrees of the Ordering
// and don't return an Ordering.Iterator for Ordering.Zero.
case Zero => throw new IllegalStateException("empty subtree not allowed")
}
}
object Iterator {
val Empty = new Iterator[Nothing](Ordering.empty[Nothing])
def empty[V]: Iterator[V] = Empty.asInstanceOf[Iterator[V]]
}
case object Zero extends Ordering[Nothing] {
// Important! Without this equals method in place, an infinite
// loop from Map.equals => size => pattern-match-on-Nil => equals
// develops. Case objects and custom equality don't mix without
// careful handling.
override def equals(that : Any): Boolean = that match {
case _: this.type => true
case _: Ordering[_] => false // The only empty Orderings are eq Nil
case _ => super.equals(that)
}
protected def format(sb: StringBuilder, prefix: String, subPrefix: String): Unit = sb ++= s"${prefix}Ø"
}
// Leaf node holding a single value at ordinal `ord`.
final case class Tip[+T](ord: Int, value: T) extends Ordering[T] {
  // Replaces the value, reusing `this` when the new value is reference-equal
  // (preserves structural sharing).
  def withValue[S](s: S) =
    if (s.asInstanceOf[AnyRef] eq value.asInstanceOf[AnyRef]) this.asInstanceOf[Tip[S]]
    else Tip(ord, s)
  protected def format(sb: StringBuilder, prefix: String, subPrefix: String): Unit = sb ++= s"${prefix}Tip(${toBinaryString(ord)} -> $value)\\n"
}
// Inner node of the trie. Ordinals under `left` have a 0 at the `mask` bit,
// ordinals under `right` a 1 (see `include`); `prefix` holds the shared high bits.
// `right` is a var (with unchecked variance) so appendInPlace1 can mutate the
// right spine without rebuilding it.
final case class Bin[+T](prefix: Int, mask: Int, left: Ordering[T], var right: Ordering[T] @scala.annotation.unchecked.uncheckedVariance) extends Ordering[T] {
  // Rebuilds this node with new children, reusing `this` when both are unchanged.
  def bin[S](left: Ordering[S], right: Ordering[S]): Ordering[S] = {
    if ((this.left eq left) && (this.right eq right)) this.asInstanceOf[Bin[S]]
    else Bin[S](prefix, mask, left, right)
  }
  protected def format(sb: StringBuilder, prefix: String, subPrefix: String): Unit = {
    sb ++= s"${prefix}Bin(${toBinaryString(this.prefix)}:${toBinaryString(mask)})\\n"
    left.format(sb, subPrefix + "├── ", subPrefix + "│ ")
    right.format(sb, subPrefix + "└── ", subPrefix + " ")
  }
}
private def branchMask(i: Int, j: Int) = highestOneBit(i ^ j)
// Joins two subtrees with distinct prefixes under a new Bin whose mask is the
// highest differing bit; the subtree whose prefix has a 0 at that bit goes left.
private def join[T](p1: Int, t1: Ordering[T], p2: Int, t2: Ordering[T]): Ordering[T] = {
  val m = branchMask(p1, p2)
  val p = mask(p1, m)
  if (zero(p1, m)) Bin(p, m, t1, t2)
  else Bin(p, m, t2, t1)
}
// Smart constructor: collapses a Bin with an empty child into the other child,
// so Zero never appears inside a subtree (an invariant `Iterator.next` relies on).
private def bin[T](prefix: Int, mask: Int, left: Ordering[T], right: Ordering[T]): Ordering[T] = (left, right) match {
  case (l, Zero) => l
  case (Zero, r) => r
  case (l, r) => Bin(prefix, mask, l, r)
}
}
/**
 * Int-keyed binary trie used to keep values in "ordinal" (insertion) order.
 * Nodes are `Zero` (empty), `Tip` (single entry) and `Bin` (inner node keyed by
 * a shared bit `prefix` and a branching bit `mask`, manipulated via the helpers
 * from `BitOperations.Int`). Iteration visits entries in increasing ordinal order.
 */
sealed abstract class Ordering[+T] {
  import Ordering._
  import scala.annotation.tailrec
  import scala.collection.generic.BitOperations.Int._

  // Debug-friendly rendering: delegates to the tree-shaped `format`.
  override final def toString: String = format
  final def format: String = {
    val sb = new StringBuilder
    format(sb, "", "")
    sb.toString()
  }
  protected def format(sb: StringBuilder, prefix: String, subPrefix: String): Unit

  /** Leftmost (smallest-ordinal) value; throws `NoSuchElementException` when empty. */
  @tailrec
  final def head: T = this match {
    case Zero => throw new NoSuchElementException("head of empty map")
    case Tip(k, v) => v
    case Bin(_, _, l, _) => l.head
  }

  /** Leftmost value, or `None` when empty. */
  @tailrec
  final def headOption: Option[T] = this match {
    case Zero => None
    case Tip(_, v) => Some(v)
    case Bin(_, _, l, _) => l.headOption
  }

  /** Rightmost (largest-ordinal) value; throws `NoSuchElementException` when empty. */
  @tailrec
  final def last: T = this match {
    case Zero => throw new NoSuchElementException("last of empty map")
    case Tip(_, v) => v
    case Bin(_, _, _, r) => r.last
  }

  /** Rightmost value, or `None` when empty. */
  @tailrec
  final def lastOption: Option[T] = this match {
    case Zero => None
    case Tip(_, v) => Some(v)
    case Bin(_, _, _, r) => r.lastOption
  }

  /** Largest ordinal in the tree (rightmost Tip), or 0 when empty. */
  @tailrec
  final def ordinal: Int = this match {
    case Zero => 0
    case Tip(o, _) => o
    case Bin(_, _, _, r) => r.ordinal
  }

  /** Tree without its leftmost entry; throws when empty. */
  final def tail: Ordering[T] = this match {
    case Zero => throw new NoSuchElementException("tail of empty map")
    case Tip(_, _) => Zero
    case Bin(p, m, l, r) => bin(p, m, l.tail, r)
  }

  /** Leftmost value paired with the remaining tree; throws when empty. */
  // NOTE(review): message says "init of empty map" — looks copy-pasted from
  // init/initLast; "headTail of empty map" seems intended. Confirm before fixing.
  final def headTail: (T, Ordering[T]) = this match {
    case Zero => throw new NoSuchElementException("init of empty map")
    case Tip(_, v) => (v, Zero)
    case Bin(p, m, l, r) =>
      val (head, tail) = l.headTail
      (head, bin(p, m, tail, r))
  }

  /** Tree without its rightmost entry; throws when empty. */
  final def init: Ordering[T] = this match {
    case Zero => throw new NoSuchElementException("init of empty map")
    case Tip(_, _) => Zero
    case Bin(p, m, l, r) =>
      bin(p, m, l, r.init)
  }

  /** Tree without its rightmost entry, paired with that entry's value; throws when empty. */
  final def initLast: (Ordering[T], T) = this match {
    case Zero => throw new NoSuchElementException("init of empty map")
    case Tip(_, v) => (Zero, v)
    case Bin(p, m, l, r) =>
      val (init, last) = r.initLast
      (bin(p, m, l, init), last)
  }

  /** In-order iterator over the values; empty iterator for the empty tree. */
  final def iterator: Iterator[T] = this match {
    case Zero => Iterator.empty
    case _ => new Iterator(this)
  }

  /** Inserts (or replaces) the value at `ordinal`. Standard trie insert:
   *  descend while the ordinal matches the Bin prefix, otherwise join a new Tip
   *  next to the mismatching subtree. */
  final def include[S >: T](ordinal: Int, value: S): Ordering[S] = this match {
    case Zero =>
      Tip(ordinal, value)
    case Tip(o, _) =>
      if (ordinal == o) Tip(ordinal, value)
      else join(ordinal, Tip(ordinal, value), o, this)
    case Bin(p, m, l, r) =>
      if (!hasMatch(ordinal, p, m)) join(ordinal, Tip(ordinal, value), p, this)
      else if (zero(ordinal, m)) Bin(p, m, l.include(ordinal, value), r)
      else Bin(p, m, l, r.include(ordinal, value))
  }

  /** Like `include`, but only descends the right spine: `ordinal` must be
   *  greater than the current maximum ordinal, else IllegalArgumentException. */
  final def append[S >: T](ordinal: Int, value: S): Ordering[S] = this match {
    case Zero =>
      Tip(ordinal, value)
    case Tip(o, _) =>
      if (ordinal == o) Tip(ordinal, value)
      else join(ordinal, Tip(ordinal, value), o, this)
    case Bin(p, m, l, r) =>
      if (!hasMatch(ordinal, p, m)) join(ordinal, Tip(ordinal, value), p, this)
      else if (zero(ordinal, m)) throw new IllegalArgumentException(s"Append called with ordinal out of range: $ordinal is not greater than current max ordinal ${this.ordinal}")
      else Bin(p, m, l, r.append(ordinal, value))
  }

  // Destructive variant of `append`: mutates Bin.right along the right spine
  // instead of rebuilding it. Only safe because callers keep the tree private.
  @inline private[collection] final def appendInPlace[S >: T](ordinal: Int, value: S): Ordering[S] = appendInPlace1(null, ordinal, value)

  // `parent` is the Bin whose `right` field should be re-pointed when this
  // subtree is replaced; null at the root.
  private[collection] final def appendInPlace1[S >: T](parent: Bin[S], ordinal: Int, value: S): Ordering[S] = this match {
    case Zero =>
      Tip(ordinal, value)
    case Tip(o, _) if o >= ordinal =>
      throw new IllegalArgumentException(s"Append called with ordinal out of range: $o is not greater than current max ordinal ${this.ordinal}")
    case Tip(o, _) if parent == null =>
      join(ordinal, Tip(ordinal, value), o, this)
    case Tip(o, _) =>
      parent.right = join(ordinal, Tip(ordinal, value), o, this)
      parent
    case b @ Bin(p, m, _, r) =>
      if (!hasMatch(ordinal, p, m)) {
        val b2 = join(ordinal, Tip(ordinal, value), p, this)
        if (parent != null) {
          parent.right = b2
          parent
        } else b2
      } else if (zero(ordinal, m)) throw new IllegalArgumentException(s"Append called with ordinal out of range: $ordinal is not greater than current max ordinal ${this.ordinal}")
      else {
        r.appendInPlace1(b, ordinal, value)
        this
      }
  }

  /** Removes the entry at `ordinal`, if present; returns `this` unchanged otherwise. */
  final def exclude(ordinal: Int): Ordering[T] = this match {
    case Zero =>
      Zero
    case Tip(o, _) =>
      if (ordinal == o) Zero
      else this
    case Bin(p, m, l, r) =>
      if (!hasMatch(ordinal, p, m)) this
      else if (zero(ordinal, m)) bin(p, m, l.exclude(ordinal), r)
      else bin(p, m, l, r.exclude(ordinal))
  }

  /** Splits into the first `n` entries (iteration order) and the rest.
   *  Implemented as one traversal: keep the first `n` via `modifyOrRemove`,
   *  funnel the remainder into `rear` with the destructive append. */
  final def splitAt(n: Int): (Ordering[T], Ordering[T]) = {
    var rear = Ordering.empty[T]
    var i = n
    (modifyOrRemove { (o, v) =>
      i -= 1
      if (i >= 0) Some(v)
      else {
        rear = rear.appendInPlace(o, v)
        None
      }
    }, rear)
  }

  /**
   * A combined transform and filter function. Returns an `Ordering` such that
   * for each `(key, value)` mapping in this map, if `f(key, value) == None`
   * the map contains no mapping for key, and if `f(key, value) == Some(x)` the
   * map contains `(key, x)`.
   *
   * @tparam S The type of the values in the resulting `Ordering`.
   * @param f The transforming function.
   * @return The modified map.
   */
  final def modifyOrRemove[S](f: (Int, T) => Option[S]): Ordering[S] = this match {
    case Zero => Zero
    case Tip(key, value) =>
      f(key, value) match {
        case None => Zero
        case Some(value2) =>
          // hack to preserve sharing
          if (value.asInstanceOf[AnyRef] eq value2.asInstanceOf[AnyRef]) this.asInstanceOf[Ordering[S]]
          else Tip(key, value2)
      }
    case Bin(prefix, mask, left, right) =>
      val l = left.modifyOrRemove(f)
      val r = right.modifyOrRemove(f)
      if ((left eq l) && (right eq r)) this.asInstanceOf[Ordering[S]]
      else bin(prefix, mask, l, r)
  }
}
}
| lrytz/scala | src/library/scala/collection/immutable/TreeSeqMap.scala | Scala | apache-2.0 | 22,494 |
package keystoneml.nodes.learning
import breeze.linalg._
import edu.berkeley.cs.amplab.mlmatrix.{RowPartition, NormalEquations, BlockCoordinateDescent, RowPartitionedMatrix}
import keystoneml.nodes.stats.{StandardScalerModel, StandardScaler}
import org.apache.spark.rdd.RDD
import keystoneml.nodes.util.{VectorSplitter, Identity}
import keystoneml.utils.{MatrixUtils, Stats}
import keystoneml.workflow.{WeightedNode, Transformer, LabelEstimator}
/**
 * Transformer that applies a linear model to an input.
 * Different from [[LinearMapper]] in that the matrix representing the transformation
 * is split into a seq of blocks, one per chunk of the (split) feature vector.
 *
 * @param xs The chunks of the matrix representing the linear model
 * @param blockSize blockSize to split data before applying transformations
 * @param bOpt optional intercept term to be added
 * @param featureScalersOpt optional seq of transformers to be applied before transformation
 */
class BlockLinearMapper(
    val xs: Seq[DenseMatrix[Double]],
    val blockSize: Int,
    val bOpt: Option[DenseVector[Double]] = None,
    val featureScalersOpt: Option[Seq[Transformer[DenseVector[Double], DenseVector[Double]]]] = None)
  extends Transformer[DenseVector[Double], DenseVector[Double]] {

  // Use identity nodes if we don't need to do scaling
  val featureScalers = featureScalersOpt.getOrElse(
    Seq.fill(xs.length)(new Identity[DenseVector[Double]]))
  val vectorSplitter = new VectorSplitter(blockSize)

  /**
   * Applies the linear model to feature vectors large enough to have been split into several RDDs.
   *
   * @param in RDD of vectors to apply the model to
   * @return the output vectors
   */
  override def apply(in: RDD[DenseVector[Double]]): RDD[DenseVector[Double]] = {
    apply(vectorSplitter(in))
  }

  /**
   * Applies the linear model to feature vectors large enough to have been split into several RDDs.
   *
   * @param in RDD of vectors to apply the model to, split into same size as model blocks
   * @return the output vectors
   */
  def apply(in: Seq[RDD[DenseVector[Double]]]): RDD[DenseVector[Double]] = {
    // Multiply each (scaled) feature block by its model block, partition-wise.
    val res = in.zip(xs.zip(featureScalers)).map {
      case (rdd, (x, scaler)) =>
        // Broadcast each model block so it is shipped to executors once.
        val modelBroadcast = rdd.context.broadcast(x)
        scaler(rdd).mapPartitions(rows => {
          MatrixUtils.rowsToMatrixIter(rows).map(_ * modelBroadcast.value)
        })
    }

    // Sum the per-block partial products into the full prediction matrix.
    val matOut = res.reduceLeft((sum, next) => sum.zip(next).map(c => c._1 + c._2))

    // Add the intercept here. Fix: the broadcast was previously created but the
    // closure captured `bOpt` directly, leaving the broadcast unused; read it
    // through `bBroadcast.value` so the executors use the broadcast copy.
    val bBroadcast = matOut.context.broadcast(bOpt)
    val matOutWithIntercept = matOut.map { mat =>
      bBroadcast.value.map { b =>
        mat(*, ::) :+= b
        mat
      }.getOrElse(mat)
    }

    matOutWithIntercept.flatMap(x => MatrixUtils.matrixToRowArray(x))
  }

  /**
   * Applies the linear model to a single feature vector (split locally into blocks).
   *
   * @param in the vector to apply the model to
   * @return the output vector (with intercept added when present)
   */
  override def apply(in: DenseVector[Double]): DenseVector[Double] = {
    // Per-block products x_i^T * scale_i(v_i), summed into one output vector.
    val res = vectorSplitter.splitVector(in).zip(xs.zip(featureScalers)).map {
      case (block, (x, scaler)) =>
        x.t * scaler(block)
    }
    val out = res.reduceLeft((sum, next) => sum + next)
    bOpt.map { b =>
      out += b
      out
    }.getOrElse(out)
  }

  /**
   * Applies the linear model to feature vectors. After processing chunk i of every vector, applies
   *
   * @param evaluator to the intermediate output vector.
   * @param in input RDD
   */
  def applyAndEvaluate(in: RDD[DenseVector[Double]], evaluator: (RDD[DenseVector[Double]]) => Unit): Unit = {
    applyAndEvaluate(vectorSplitter(in), evaluator)
  }

  /**
   * Applies the linear model to feature vectors. After processing chunk i of every vector, applies
   *
   * @param evaluator to the intermediate output vector.
   * @param in sequence of input RDD chunks
   */
  def applyAndEvaluate(
      in: Seq[RDD[DenseVector[Double]]],
      evaluator: (RDD[DenseVector[Double]]) => Unit): Unit = {
    val res = in.zip(xs.zip(featureScalers)).map {
      case (rdd, (x, scaler)) =>
        val modelBroadcast = rdd.context.broadcast(x)
        scaler(rdd).mapPartitions(rows => {
          MatrixUtils.rowsToMatrixIter(rows).map(_ * modelBroadcast.value)
        })
    }

    // Running sum of partial products; evaluator sees the cumulative result
    // after each block is folded in.
    var prev: Option[RDD[DenseMatrix[Double]]] = None
    for (next <- res) {
      val sum = prev match {
        case Some(prevVal) => prevVal.zip(next).map(c => c._1 + c._2).cache()
        case None => next.cache()
      }

      // NOTE: We should only add the intercept once. So do it right before
      // we call the evaluator but don't cache this
      val sumAndIntercept = sum.map { mat =>
        bOpt.map { b =>
          mat(*, ::) :+= b
          mat
        }.getOrElse(mat)
      }
      evaluator.apply(sumAndIntercept.flatMap(x => MatrixUtils.matrixToRowArray(x)))
      prev.map(_.unpersist())
      prev = Some(sum)
    }
    prev.map(_.unpersist())
  }
}
object BlockLeastSquaresEstimator {
  /**
   * Computes the least-squares cost 1/(2n) * ||A x - y||^2, plus the L2 penalty
   * lambda/2 * ||x||^2 when `lambda` is non-zero.
   *
   * @param trainingFeatures feature blocks, one RDD per model block
   * @param trainingLabels labels, aligned row-for-row with the features
   * @param lambda L2 regularization strength (0 disables the penalty term)
   * @param xs the model blocks
   * @param bOpt optional intercept added to every prediction
   * @return the (regularized) mean squared error
   */
  def computeCost(
      trainingFeatures: Seq[RDD[DenseVector[Double]]],
      trainingLabels: RDD[DenseVector[Double]],
      lambda: Double,
      xs: Seq[DenseMatrix[Double]],
      bOpt: Option[DenseVector[Double]]): Double = {
    val nTrain = trainingLabels.count

    // Multiply each feature block by its model block, partition-wise.
    val res = trainingFeatures.zip(xs).map {
      case (rdd, x) =>
        val modelBroadcast = rdd.context.broadcast(x)
        rdd.mapPartitions(rows => {
          MatrixUtils.rowsToMatrixIter(rows).map(_ * modelBroadcast.value)
        })
    }

    // Sum the per-block partial products into the full prediction matrix.
    val matOut = res.reduceLeft((sum, next) => sum.zip(next).map(c => c._1 + c._2))

    // Add the intercept here. Fix: the broadcast was previously created but the
    // closure captured `bOpt` directly, leaving the broadcast unused; read it
    // through `bBroadcast.value` so executors use the broadcast copy.
    val bBroadcast = matOut.context.broadcast(bOpt)
    val matOutWithIntercept = matOut.map { mat =>
      bBroadcast.value.map { b =>
        mat(*, ::) :+= b
        mat
      }.getOrElse(mat)
    }
    val axb = matOutWithIntercept.flatMap(x => MatrixUtils.matrixToRowArray(x))

    // Sum of squared residuals over all rows.
    val cost = axb.zip(trainingLabels).map { part =>
      val axb = part._1
      val labels = part._2
      val out = axb - labels
      math.pow(norm(out), 2)
    }.reduce(_ + _)

    if (lambda == 0) {
      cost/(2.0*nTrain.toDouble)
    } else {
      // Squared Frobenius norm of the model, accumulated block by block.
      val wNorm = xs.map(part => math.pow(norm(part.toDenseVector), 2)).reduce(_+_)
      cost/(2.0*nTrain.toDouble) + lambda/2.0 * wNorm
    }
  }
}
/**
 * Fits a least squares model using block coordinate descent with provided
 * training features and labels
 *
 * @param blockSize size of block to use in the solver
 * @param numIter number of iterations of solver to run
 * @param lambda L2-regularization to use
 * @param numFeaturesOpt optional total number of features, forwarded to the
 *                       VectorSplitter used to block input vectors
 */
class BlockLeastSquaresEstimator(blockSize: Int, numIter: Int, lambda: Double = 0.0, numFeaturesOpt: Option[Int] = None)
  extends LabelEstimator[DenseVector[Double], DenseVector[Double], DenseVector[Double]]
  with WeightedNode
  with CostModel {

  // Workflow weight used by the scheduler; scales with solver iterations.
  // NOTE(review): the 3x-per-iteration factor is assumed — confirm against the
  // BlockCoordinateDescent solver's pass count.
  override val weight = (3*numIter)+1

  /**
   * Fit a model using blocks of features and labels provided.
   *
   * @param trainingFeatures feature blocks to use in RDDs.
   * @param trainingLabels RDD of labels to use.
   */
  def fit(
      trainingFeatures: Seq[RDD[DenseVector[Double]]],
      trainingLabels: RDD[DenseVector[Double]]): BlockLinearMapper = {
    // Center (but don't rescale) the labels; the mean becomes the intercept.
    val labelScaler = new StandardScaler(normalizeStdDev = false).fit(trainingLabels)
    // Find out numRows, numCols once
    val b = RowPartitionedMatrix.fromArray(
      labelScaler.apply(trainingLabels).map(_.toArray)).cache()
    val numRows = Some(b.numRows())
    // NOTE(review): assumes every block has exactly `blockSize` columns — the
    // last block may be narrower if the feature count isn't a multiple; confirm.
    val numCols = Some(blockSize.toLong)

    // NOTE: This will cause trainingFeatures to be evaluated twice
    // which might not be optimal if its not cached ?
    val featureScalers = trainingFeatures.map { rdd =>
      new StandardScaler(normalizeStdDev = false).fit(rdd)
    }

    // Wrap each centered feature block as a row-partitioned matrix for the solver.
    val A = trainingFeatures.zip(featureScalers).map { case (rdd, scaler) =>
      new RowPartitionedMatrix(scaler.apply(rdd).mapPartitions { rows =>
        MatrixUtils.rowsToMatrixIter(rows)
      }.map(RowPartition), numRows, numCols)
    }

    val bcd = new BlockCoordinateDescent()
    // Multi-iteration BCD, or a cheaper single-pass solve when numIter == 1.
    val models = if (numIter > 1) {
      bcd.solveLeastSquaresWithL2(
        A, b, Array(lambda), numIter, new NormalEquations()).transpose
    } else {
      bcd.solveOnePassL2(A.iterator, b, Array(lambda), new NormalEquations()).toSeq.transpose
    }
    // models.head: solutions for the single lambda; label mean is the intercept.
    new BlockLinearMapper(models.head, blockSize, Some(labelScaler.mean), Some(featureScalers))
  }

  /**
   * Fit a model after splitting training data into appropriate blocks.
   *
   * @param trainingFeatures training data to use in one RDD.
   * @param trainingLabels labels for training data in a RDD.
   */
  override def fit(
      trainingFeatures: RDD[DenseVector[Double]],
      trainingLabels: RDD[DenseVector[Double]]): BlockLinearMapper = {
    val vectorSplitter = new VectorSplitter(blockSize, numFeaturesOpt)
    val featureBlocks = vectorSplitter.apply(trainingFeatures)
    fit(featureBlocks, trainingLabels)
  }

  // Variant that overrides the constructor-supplied feature count.
  def fit(
      trainingFeatures: RDD[DenseVector[Double]],
      trainingLabels: RDD[DenseVector[Double]],
      numFeaturesOpt: Option[Int]): BlockLinearMapper = {
    val vectorSplitter = new VectorSplitter(blockSize, numFeaturesOpt)
    val featureBlocks = vectorSplitter.apply(trainingFeatures)
    fit(featureBlocks, trainingLabels)
  }

  // CostModel estimate: per-iteration max of compute/memory cost plus an
  // all-reduce-style network term, scaled by numIter.
  override def cost(
      n: Long,
      d: Int,
      k: Int,
      sparsity: Double,
      numMachines: Int,
      cpuWeight: Double,
      memWeight: Double,
      networkWeight: Double)
    : Double = {
    val flops = n.toDouble * d * (blockSize + k) / numMachines
    val bytesScanned = n.toDouble * d / numMachines + (d.toDouble * k)
    val network = 2.0 * (d.toDouble * (blockSize + k)) * math.log(numMachines) / math.log(2.0)
    numIter * (math.max(cpuWeight * flops, memWeight * bytesScanned) + networkWeight * network)
  }
}
| amplab/keystone | src/main/scala/keystoneml/nodes/learning/BlockLinearMapper.scala | Scala | apache-2.0 | 9,648 |
/**
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.datasource.mongodb.partitioner
import com.mongodb.DBObject
import com.mongodb.util.JSON
import com.stratio.datasource.MongodbTestConstants
import com.stratio.datasource.mongodb._
import com.stratio.datasource.mongodb.client.MongodbClientFactory
import com.stratio.datasource.mongodb.config.{MongodbConfig, MongodbConfigBuilder}
import com.stratio.datasource.mongodb.config.MongodbConfig._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfter, Matchers, FlatSpec}
@RunWith(classOf[JUnitRunner])
class MongodbPartitionerIT extends FlatSpec
with BeforeAndAfter
with Matchers
with MongoClusterEmbedDatabase
with MongoEmbedDatabase
with TestBsonData
with MongodbTestConstants
with BeforeAndAfterAll {
// Topology for the embedded sharded-cluster fixture; all ports are offsets
// from `mongoPort` so parallel runs don't collide.
val configServerPorts = List(mongoPort+10)
val database = "database-1"
val collection = "collection-1"
val shardKey = "_id"
val shardMaxSize = 1
val chunkSize = 1
val currentHost = "localhost"
// Two replica sets of three members each.
val replicaSets = Map(
  "replicaSet1" -> List(mongoPort+1, mongoPort+2, mongoPort+3),
  "replicaSet2" -> List(mongoPort+4, mongoPort+5, mongoPort+6))
behavior of "MongodbPartitioner"
it should "get proper partition ranges when connecting" + " to a sharded cluster" + scalaBinaryVersion in {
val testConfig = MongodbConfigBuilder()
.set(MongodbConfig.Host, replicaSets.values.flatMap(
ports => ports.take(1).map(
p => s"$currentHost:$p")))
.set(MongodbConfig.Database, database)
.set(MongodbConfig.Collection, collection)
.set(MongodbConfig.SamplingRatio, 1.0)
.build()
withCluster { system =>
val partitioner1 = new MongodbPartitioner(testConfig)
val (partition :: Nil) = partitioner1.computePartitions().toList
partition.index should equal(0)
partition.partitionRange.minKey should equal(None)
partition.partitionRange.maxKey should equal(None)
//TODO: Check what happens when shard is enable due to get over max chunk size
}
}
// Produces `amount + 1` sample documents (indices 0..amount): every field is
// fixed except `long`, which carries the index.
def objectSample(amount: Long): Stream[DBObject] = {
  // Lazy, unbounded stream of increasing Longs starting at 0.
  def longs: Stream[Long] = 0 #:: longs.map(_ + 1)
  longs.map { n =>
    n -> JSON.parse(
      s"""{"string":"this is a simple string.",
"integer":10,
"long":$n,
"double":1.7976931348623157E308,
"boolean":true,
"null":null
}""").asInstanceOf[DBObject]
  }.takeWhile {
    case (idx, _) => idx <= amount
  }.map(_._2)
}
// Release all cached Mongo client connections once the suite finishes.
override def afterAll {
  MongodbClientFactory.closeAll(false)
}
it should "get proper partition ranges using splitVector with bounds" + scalaBinaryVersion in {
import com.mongodb.casbah.Imports.MongoDBObject
val dataSet = (1 to 15000).map(n=> MongoDBObject("name" -> s"name$n" , "id" -> n)).toList
withEmbedMongoFixture(dataSet) { mongoProc =>
val mongoClient = com.mongodb.casbah.MongoClient("localhost", mongoPort)
val coll = mongoClient(db)("testCol"
)
// to run splitVector index by the splitKey field is needed
coll.createIndex(MongoDBObject("id" ->1))
val testConfig = MongodbConfigBuilder(Map(
Host -> List(s"localhost:$mongoPort"),
Database -> db,
Collection -> "testCol",
SamplingRatio -> 1.0,
SplitSize -> 1,
SplitKey -> "id",
SplitSize -> "1",
SplitKeyType -> "int",
SplitKeyMin -> "500",
SplitKeyMax -> "14000")
).build()
val partitioner = new MongodbPartitioner(testConfig)
val partitions = partitioner.computePartitions().toList.size
//With the dataSet for this test and this splitVector config, 3 partitions would be created
partitions should equal(3)
}
}
} | darroyocazorla/spark-mongodb | spark-mongodb/src/test/scala/com/stratio/datasource/mongodb/partitioner/MongodbPartitionerIT.scala | Scala | apache-2.0 | 4,363 |
/** Wraps an integer and exposes addition (`bar`) and multiplication (`apply`). */
class Foo(x:Int) {
  /** Returns the wrapped value plus `y`. */
  def bar(y:Int) = y + x

  /** Returns the wrapped value times `z`; enables `foo(z)` call syntax. */
  def apply(z:Int) = z * x
}
/**
 * Demo entry point showing that `foo(20)` desugars to `foo.apply(20)`.
 * Uses an explicit `main` instead of `extends App`: the App/DelayedInit
 * pattern has initialization-order pitfalls and is deprecated in Scala 3.
 */
object MagicApply {
  def main(args: Array[String]): Unit = {
    val foo = new Foo(10)
    println(foo.bar(20)) // prints 30
    println(foo(20))     // sugar for foo.apply(20); prints 200
  }
}
| chrisheckler/Scala_scripts | MagicApply.scala | Scala | mit | 176 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import java.sql.Timestamp
import org.apache.spark.sql.catalyst.analysis.TypeCoercion._
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.{Rule, RuleExecutor}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.CalendarInterval
class TypeCoercionSuite extends AnalysisTest {
// scalastyle:off line.size.limit
// The following table shows all implicit data type conversions that are not visible to the user.
// +----------------------+----------+-----------+-------------+----------+------------+-----------+------------+------------+-------------+------------+----------+---------------+------------+----------+-------------+----------+----------------------+---------------------+-------------+--------------+
// | Source Type\\CAST TO | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | BinaryType | BooleanType | StringType | DateType | TimestampType | ArrayType | MapType | StructType | NullType | CalendarIntervalType | DecimalType | NumericType | IntegralType |
// +----------------------+----------+-----------+-------------+----------+------------+-----------+------------+------------+-------------+------------+----------+---------------+------------+----------+-------------+----------+----------------------+---------------------+-------------+--------------+
// | ByteType | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | X | X | StringType | X | X | X | X | X | X | X | DecimalType(3, 0) | ByteType | ByteType |
// | ShortType | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | X | X | StringType | X | X | X | X | X | X | X | DecimalType(5, 0) | ShortType | ShortType |
// | IntegerType | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | X | X | StringType | X | X | X | X | X | X | X | DecimalType(10, 0) | IntegerType | IntegerType |
// | LongType | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | X | X | StringType | X | X | X | X | X | X | X | DecimalType(20, 0) | LongType | LongType |
// | DoubleType | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | X | X | StringType | X | X | X | X | X | X | X | DecimalType(30, 15) | DoubleType | IntegerType |
// | FloatType | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | X | X | StringType | X | X | X | X | X | X | X | DecimalType(14, 7) | FloatType | IntegerType |
// | Dec(10, 2) | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | X | X | StringType | X | X | X | X | X | X | X | DecimalType(10, 2) | Dec(10, 2) | IntegerType |
// | BinaryType | X | X | X | X | X | X | X | BinaryType | X | StringType | X | X | X | X | X | X | X | X | X | X |
// | BooleanType | X | X | X | X | X | X | X | X | BooleanType | StringType | X | X | X | X | X | X | X | X | X | X |
// | StringType | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | BinaryType | X | StringType | DateType | TimestampType | X | X | X | X | X | DecimalType(38, 18) | DoubleType | X |
// | DateType | X | X | X | X | X | X | X | X | X | StringType | DateType | TimestampType | X | X | X | X | X | X | X | X |
// | TimestampType | X | X | X | X | X | X | X | X | X | StringType | DateType | TimestampType | X | X | X | X | X | X | X | X |
// | ArrayType | X | X | X | X | X | X | X | X | X | X | X | X | ArrayType* | X | X | X | X | X | X | X |
// | MapType | X | X | X | X | X | X | X | X | X | X | X | X | X | MapType* | X | X | X | X | X | X |
// | StructType | X | X | X | X | X | X | X | X | X | X | X | X | X | X | StructType* | X | X | X | X | X |
// | NullType | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | BinaryType | BooleanType | StringType | DateType | TimestampType | ArrayType | MapType | StructType | NullType | CalendarIntervalType | DecimalType(38, 18) | DoubleType | IntegerType |
// | CalendarIntervalType | X | X | X | X | X | X | X | X | X | X | X | X | X | X | X | X | CalendarIntervalType | X | X | X |
// +----------------------+----------+-----------+-------------+----------+------------+-----------+------------+------------+-------------+------------+----------+---------------+------------+----------+-------------+----------+----------------------+---------------------+-------------+--------------+
// Note: StructType* is castable when all the internal child types are castable according to the table.
// Note: ArrayType* is castable when the element type is castable according to the table.
// Note: MapType* is castable when both the key type and the value type are castable according to the table.
// scalastyle:on line.size.limit
// Asserts that `from` implicitly casts to the (possibly abstract) target `to`
// with result type `expected`, for both a default value and a null literal.
private def shouldCast(from: DataType, to: AbstractDataType, expected: DataType): Unit = {
  // Check default value
  val castDefault = TypeCoercion.ImplicitTypeCasts.implicitCast(default(from), to)
  assert(DataType.equalsIgnoreCompatibleNullability(
    castDefault.map(_.dataType).getOrElse(null), expected),
    s"Failed to cast $from to $to")

  // Check null value
  val castNull = TypeCoercion.ImplicitTypeCasts.implicitCast(createNull(from), to)
  assert(DataType.equalsIgnoreCaseAndNullability(
    castNull.map(_.dataType).getOrElse(null), expected),
    s"Failed to cast $from to $to")
}
// Asserts that no implicit cast exists from `from` to `to`, for both a default
// value and a null literal.
private def shouldNotCast(from: DataType, to: AbstractDataType): Unit = {
  // Check default value
  val castDefault = TypeCoercion.ImplicitTypeCasts.implicitCast(default(from), to)
  assert(castDefault.isEmpty, s"Should not be able to cast $from to $to, but got $castDefault")

  // Check null value
  val castNull = TypeCoercion.ImplicitTypeCasts.implicitCast(createNull(from), to)
  assert(castNull.isEmpty, s"Should not be able to cast $from to $to, but got $castNull")
}
// Builds a non-null literal expression of the given type; arrays and maps are
// wrapped in Create* so their element/key/value types are concrete.
private def default(dataType: DataType): Expression = dataType match {
  case ArrayType(internalType: DataType, _) =>
    CreateArray(Seq(Literal.default(internalType)))
  case MapType(keyDataType: DataType, valueDataType: DataType, _) =>
    CreateMap(Seq(Literal.default(keyDataType), Literal.default(valueDataType)))
  case _ => Literal.default(dataType)
}
// Builds a null-valued literal expression of the given type (null elements for
// arrays, null key/value for maps).
private def createNull(dataType: DataType): Expression = dataType match {
  case ArrayType(internalType: DataType, _) =>
    CreateArray(Seq(Literal.create(null, internalType)))
  case MapType(keyDataType: DataType, valueDataType: DataType, _) =>
    CreateMap(Seq(Literal.create(null, keyDataType), Literal.create(null, valueDataType)))
  case _ => Literal.create(null, dataType)
}
// Type inventories used below to enumerate castable vs non-castable targets.
val integralTypes: Seq[DataType] =
  Seq(ByteType, ShortType, IntegerType, LongType)
val fractionalTypes: Seq[DataType] =
  Seq(DoubleType, FloatType, DecimalType.SYSTEM_DEFAULT, DecimalType(10, 2))
val numericTypes: Seq[DataType] = integralTypes ++ fractionalTypes
val atomicTypes: Seq[DataType] =
  numericTypes ++ Seq(BinaryType, BooleanType, StringType, DateType, TimestampType)
val complexTypes: Seq[DataType] =
  Seq(ArrayType(IntegerType),
    ArrayType(StringType),
    MapType(StringType, StringType),
    new StructType().add("a1", StringType),
    new StructType().add("a1", StringType).add("a2", IntegerType))
// Full universe of types exercised by the cast-compatibility checks.
val allTypes: Seq[DataType] =
  atomicTypes ++ complexTypes ++ Seq(NullType, CalendarIntervalType)
// Check whether the type `checkedType` can be cast to all the types in `castableTypes`,
// but cannot be cast to the other types in `allTypes`.
// Checks that `checkedType` casts to exactly the types in `castableTypes`:
// castable targets must succeed, everything else in `allTypes` must fail.
private def checkTypeCasting(checkedType: DataType, castableTypes: Seq[DataType]): Unit = {
  val nonCastableTypes = allTypes.filterNot(castableTypes.contains)

  castableTypes.foreach { tpe =>
    shouldCast(checkedType, tpe, tpe)
  }
  nonCastableTypes.foreach { tpe =>
    shouldNotCast(checkedType, tpe)
  }
}
// Checks that `widenFunc(t1, t2)` equals `expected`; when `isSymmetric`, also
// verifies the reversed argument order gives the same result.
private def checkWidenType(
    widenFunc: (DataType, DataType) => Option[DataType],
    t1: DataType,
    t2: DataType,
    expected: Option[DataType],
    isSymmetric: Boolean = true): Unit = {
  var found = widenFunc(t1, t2)
  assert(found == expected,
    s"Expected $expected as wider common type for $t1 and $t2, found $found")
  // Test both directions to make sure the widening is symmetric.
  if (isSymmetric) {
    found = widenFunc(t2, t1)
    assert(found == expected,
      s"Expected $expected as wider common type for $t2 and $t1, found $found")
  }
}
test("implicit type cast - ByteType") {
val checkedType = ByteType
checkTypeCasting(checkedType, castableTypes = numericTypes ++ Seq(StringType))
shouldCast(checkedType, DecimalType, DecimalType.ByteDecimal)
shouldCast(checkedType, NumericType, checkedType)
shouldCast(checkedType, IntegralType, checkedType)
}
test("implicit type cast - ShortType") {
val checkedType = ShortType
checkTypeCasting(checkedType, castableTypes = numericTypes ++ Seq(StringType))
shouldCast(checkedType, DecimalType, DecimalType.ShortDecimal)
shouldCast(checkedType, NumericType, checkedType)
shouldCast(checkedType, IntegralType, checkedType)
}
test("implicit type cast - IntegerType") {
val checkedType = IntegerType
checkTypeCasting(checkedType, castableTypes = numericTypes ++ Seq(StringType))
shouldCast(IntegerType, DecimalType, DecimalType.IntDecimal)
shouldCast(checkedType, NumericType, checkedType)
shouldCast(checkedType, IntegralType, checkedType)
}
test("implicit type cast - LongType") {
val checkedType = LongType
checkTypeCasting(checkedType, castableTypes = numericTypes ++ Seq(StringType))
shouldCast(checkedType, DecimalType, DecimalType.LongDecimal)
shouldCast(checkedType, NumericType, checkedType)
shouldCast(checkedType, IntegralType, checkedType)
}
test("implicit type cast - FloatType") {
val checkedType = FloatType
checkTypeCasting(checkedType, castableTypes = numericTypes ++ Seq(StringType))
shouldCast(checkedType, DecimalType, DecimalType.FloatDecimal)
shouldCast(checkedType, NumericType, checkedType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - DoubleType") {
val checkedType = DoubleType
checkTypeCasting(checkedType, castableTypes = numericTypes ++ Seq(StringType))
shouldCast(checkedType, DecimalType, DecimalType.DoubleDecimal)
shouldCast(checkedType, NumericType, checkedType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - DecimalType(10, 2)") {
val checkedType = DecimalType(10, 2)
checkTypeCasting(checkedType, castableTypes = numericTypes ++ Seq(StringType))
shouldCast(checkedType, DecimalType, checkedType)
shouldCast(checkedType, NumericType, checkedType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - BinaryType") {
val checkedType = BinaryType
checkTypeCasting(checkedType, castableTypes = Seq(checkedType, StringType))
shouldNotCast(checkedType, DecimalType)
shouldNotCast(checkedType, NumericType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - BooleanType") {
val checkedType = BooleanType
checkTypeCasting(checkedType, castableTypes = Seq(checkedType, StringType))
shouldNotCast(checkedType, DecimalType)
shouldNotCast(checkedType, NumericType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - StringType") {
val checkedType = StringType
val nonCastableTypes =
complexTypes ++ Seq(BooleanType, NullType, CalendarIntervalType)
checkTypeCasting(checkedType, castableTypes = allTypes.filterNot(nonCastableTypes.contains))
shouldCast(checkedType, DecimalType, DecimalType.SYSTEM_DEFAULT)
shouldCast(checkedType, NumericType, NumericType.defaultConcreteType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - DateType") {
val checkedType = DateType
checkTypeCasting(checkedType, castableTypes = Seq(checkedType, StringType, TimestampType))
shouldNotCast(checkedType, DecimalType)
shouldNotCast(checkedType, NumericType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - TimestampType") {
val checkedType = TimestampType
checkTypeCasting(checkedType, castableTypes = Seq(checkedType, StringType, DateType))
shouldNotCast(checkedType, DecimalType)
shouldNotCast(checkedType, NumericType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - ArrayType(StringType)") {
val checkedType = ArrayType(StringType)
val nonCastableTypes =
complexTypes ++ Seq(BooleanType, NullType, CalendarIntervalType)
checkTypeCasting(checkedType,
castableTypes = allTypes.filterNot(nonCastableTypes.contains).map(ArrayType(_)))
nonCastableTypes.map(ArrayType(_)).foreach(shouldNotCast(checkedType, _))
shouldNotCast(ArrayType(DoubleType, containsNull = false),
ArrayType(LongType, containsNull = false))
shouldNotCast(checkedType, DecimalType)
shouldNotCast(checkedType, NumericType)
shouldNotCast(checkedType, IntegralType)
}
// Verifies implicit casts between MapTypes: key-type widening is allowed only when
// the cast does not force nullability, and a nullable-valued map can never be cast
// to a map whose values are declared non-nullable.
test("implicit type cast between two Map types") {
val sourceType = MapType(IntegerType, IntegerType, true)
// Key types reachable from IntegerType: the numeric types, plus StringType when
// casting Integer -> String does not force nullability.
val castableTypes = numericTypes ++ Seq(StringType).filter(!Cast.forceNullable(IntegerType, _))
val targetTypes = numericTypes.filter(!Cast.forceNullable(IntegerType, _)).map { t =>
MapType(t, sourceType.valueType, valueContainsNull = true)
}
val nonCastableTargetTypes = allTypes.filterNot(castableTypes.contains(_)).map {t =>
MapType(t, sourceType.valueType, valueContainsNull = true)
}
// Tests that it's possible to set up implicit casts between two map types when
// the source map's key type is Integer and the target map's key type is a numeric
// type for which the cast from Integer does not force nullability.
targetTypes.foreach { targetType =>
shouldCast(sourceType, targetType, targetType)
}
// Tests that it's not possible to set up implicit casts between two map types when
// the source map's key type is Integer and the target map's key type is one of the
// remaining types (e.g. Binary, Boolean, Date, Timestamp, Array, Struct,
// CalendarIntervalType or NullType).
nonCastableTargetTypes.foreach { targetType =>
shouldNotCast(sourceType, targetType)
}
// Tests that it's not possible to cast from a nullable map type to a
// non-nullable map type.
val targetNotNullableTypes = allTypes.filterNot(_ == IntegerType).map { t =>
MapType(t, sourceType.valueType, valueContainsNull = false)
}
// A map whose value is a null literal, so valueContainsNull is genuinely true.
val sourceMapExprWithValueNull =
CreateMap(Seq(Literal.default(sourceType.keyType),
Literal.create(null, sourceType.valueType)))
targetNotNullableTypes.foreach { targetType =>
val castDefault =
TypeCoercion.ImplicitTypeCasts.implicitCast(sourceMapExprWithValueNull, targetType)
assert(castDefault.isEmpty,
s"Should not be able to cast $sourceType to $targetType, but got $castDefault")
}
}
// A single-field struct implicitly casts only to itself; never to any
// abstract numeric family.
// Fix: the test name used double-escaped quotes (\\") which terminates the
// string literal early and makes the line unparseable; use \" instead.
test("implicit type cast - StructType().add(\"a1\", StringType)") {
val checkedType = new StructType().add("a1", StringType)
checkTypeCasting(checkedType, castableTypes = Seq(checkedType))
shouldNotCast(checkedType, DecimalType)
shouldNotCast(checkedType, NumericType)
shouldNotCast(checkedType, IntegralType)
}
// NullType is castable to every type, including the abstract families, each of
// which resolves to its default concrete type.
test("implicit type cast - NullType") {
val checkedType = NullType
checkTypeCasting(checkedType, castableTypes = allTypes)
shouldCast(checkedType, DecimalType, DecimalType.SYSTEM_DEFAULT)
shouldCast(checkedType, NumericType, NumericType.defaultConcreteType)
shouldCast(checkedType, IntegralType, IntegralType.defaultConcreteType)
}
// CalendarIntervalType casts only to itself.
test("implicit type cast - CalendarIntervalType") {
val checkedType = CalendarIntervalType
checkTypeCasting(checkedType, castableTypes = Seq(checkedType))
shouldNotCast(checkedType, DecimalType)
shouldNotCast(checkedType, NumericType)
shouldNotCast(checkedType, IntegralType)
}
// When the expected type is a TypeCollection, an exact match wins regardless of
// position; otherwise the first member of the collection that the input can be
// implicitly cast to is chosen.
test("eligible implicit type cast - TypeCollection") {
shouldCast(NullType, TypeCollection(StringType, BinaryType), StringType)
shouldCast(StringType, TypeCollection(StringType, BinaryType), StringType)
shouldCast(BinaryType, TypeCollection(StringType, BinaryType), BinaryType)
shouldCast(StringType, TypeCollection(BinaryType, StringType), StringType)
shouldCast(IntegerType, TypeCollection(IntegerType, BinaryType), IntegerType)
shouldCast(IntegerType, TypeCollection(BinaryType, IntegerType), IntegerType)
shouldCast(BinaryType, TypeCollection(BinaryType, IntegerType), BinaryType)
shouldCast(BinaryType, TypeCollection(IntegerType, BinaryType), BinaryType)
// No exact match: Integer casts to the first castable member (StringType),
// whichever position it occupies.
shouldCast(IntegerType, TypeCollection(StringType, BinaryType), StringType)
shouldCast(IntegerType, TypeCollection(BinaryType, StringType), StringType)
shouldCast(DecimalType.SYSTEM_DEFAULT,
TypeCollection(IntegerType, DecimalType), DecimalType.SYSTEM_DEFAULT)
shouldCast(DecimalType(10, 2), TypeCollection(IntegerType, DecimalType), DecimalType(10, 2))
shouldCast(DecimalType(10, 2), TypeCollection(DecimalType, IntegerType), DecimalType(10, 2))
shouldCast(IntegerType, TypeCollection(DecimalType(10, 2), StringType), DecimalType(10, 2))
shouldCast(StringType, TypeCollection(NumericType, BinaryType), DoubleType)
// containsNull of the input array is preserved on a match.
shouldCast(
ArrayType(StringType, false),
TypeCollection(ArrayType(StringType), StringType),
ArrayType(StringType, false))
shouldCast(
ArrayType(StringType, true),
TypeCollection(ArrayType(StringType), StringType),
ArrayType(StringType, true))
}
// No member of the collection is reachable from IntegerType, so no cast exists.
test("ineligible implicit type cast - TypeCollection") {
shouldNotCast(IntegerType, TypeCollection(DateType, TimestampType))
}
// Exercises TypeCoercion.findTightestCommonType: the narrowest type both inputs
// can be promoted to without loss (no string promotion, no decimal widening).
// Fix: removed a byte-identical duplicate of
// `widenTest(IntegerType, DoubleType, Some(DoubleType))` that appeared twice.
test("tightest common bound for types") {
// Checks the widening both ways round (symmetric unless stated otherwise).
def widenTest(t1: DataType, t2: DataType, expected: Option[DataType]): Unit =
checkWidenType(TypeCoercion.findTightestCommonType, t1, t2, expected)
// Null
widenTest(NullType, NullType, Some(NullType))
// Boolean
widenTest(NullType, BooleanType, Some(BooleanType))
widenTest(BooleanType, BooleanType, Some(BooleanType))
widenTest(IntegerType, BooleanType, None)
widenTest(LongType, BooleanType, None)
// Integral
widenTest(NullType, ByteType, Some(ByteType))
widenTest(NullType, IntegerType, Some(IntegerType))
widenTest(NullType, LongType, Some(LongType))
widenTest(ShortType, IntegerType, Some(IntegerType))
widenTest(ShortType, LongType, Some(LongType))
widenTest(IntegerType, LongType, Some(LongType))
widenTest(LongType, LongType, Some(LongType))
// Floating point
widenTest(NullType, FloatType, Some(FloatType))
widenTest(NullType, DoubleType, Some(DoubleType))
widenTest(FloatType, DoubleType, Some(DoubleType))
widenTest(FloatType, FloatType, Some(FloatType))
widenTest(DoubleType, DoubleType, Some(DoubleType))
// Integral mixed with floating point.
widenTest(IntegerType, FloatType, Some(FloatType))
widenTest(IntegerType, DoubleType, Some(DoubleType))
widenTest(LongType, FloatType, Some(FloatType))
widenTest(LongType, DoubleType, Some(DoubleType))
// No up-casting for fixed-precision decimal (this is handled by arithmetic rules)
widenTest(DecimalType(2, 1), DecimalType(3, 2), None)
widenTest(DecimalType(2, 1), DoubleType, None)
widenTest(DecimalType(2, 1), IntegerType, None)
widenTest(DoubleType, DecimalType(2, 1), None)
// StringType
widenTest(NullType, StringType, Some(StringType))
widenTest(StringType, StringType, Some(StringType))
widenTest(IntegerType, StringType, None)
widenTest(LongType, StringType, None)
// TimestampType
widenTest(NullType, TimestampType, Some(TimestampType))
widenTest(TimestampType, TimestampType, Some(TimestampType))
widenTest(DateType, TimestampType, Some(TimestampType))
widenTest(IntegerType, TimestampType, None)
widenTest(StringType, TimestampType, None)
// ComplexType
widenTest(NullType,
MapType(IntegerType, StringType, false),
Some(MapType(IntegerType, StringType, false)))
widenTest(NullType, StructType(Seq()), Some(StructType(Seq())))
widenTest(StringType, MapType(IntegerType, StringType, true), None)
widenTest(ArrayType(IntegerType), StructType(Seq()), None)
// Structs only widen when field names match.
widenTest(
StructType(Seq(StructField("a", IntegerType))),
StructType(Seq(StructField("b", IntegerType))),
None)
// Field types widen recursively; nullability takes the union of both sides.
widenTest(
StructType(Seq(StructField("a", IntegerType, nullable = false))),
StructType(Seq(StructField("a", DoubleType, nullable = false))),
Some(StructType(Seq(StructField("a", DoubleType, nullable = false)))))
widenTest(
StructType(Seq(StructField("a", IntegerType, nullable = false))),
StructType(Seq(StructField("a", IntegerType, nullable = false))),
Some(StructType(Seq(StructField("a", IntegerType, nullable = false)))))
widenTest(
StructType(Seq(StructField("a", IntegerType, nullable = false))),
StructType(Seq(StructField("a", IntegerType, nullable = true))),
Some(StructType(Seq(StructField("a", IntegerType, nullable = true)))))
widenTest(
StructType(Seq(StructField("a", IntegerType, nullable = true))),
StructType(Seq(StructField("a", IntegerType, nullable = false))),
Some(StructType(Seq(StructField("a", IntegerType, nullable = true)))))
widenTest(
StructType(Seq(StructField("a", IntegerType, nullable = true))),
StructType(Seq(StructField("a", IntegerType, nullable = true))),
Some(StructType(Seq(StructField("a", IntegerType, nullable = true)))))
// Field-name matching honours spark.sql.caseSensitive.
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
widenTest(
StructType(Seq(StructField("a", IntegerType))),
StructType(Seq(StructField("A", IntegerType))),
None)
}
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
// Case-insensitive match keeps the left side's field names, so the result is
// not symmetric.
checkWidenType(
TypeCoercion.findTightestCommonType,
StructType(Seq(StructField("a", IntegerType), StructField("B", IntegerType))),
StructType(Seq(StructField("A", IntegerType), StructField("b", IntegerType))),
Some(StructType(Seq(StructField("a", IntegerType), StructField("B", IntegerType)))),
isSymmetric = false)
}
// containsNull / valueContainsNull take the union of both sides.
widenTest(
ArrayType(IntegerType, containsNull = true),
ArrayType(IntegerType, containsNull = false),
Some(ArrayType(IntegerType, containsNull = true)))
widenTest(
MapType(IntegerType, StringType, valueContainsNull = true),
MapType(IntegerType, StringType, valueContainsNull = false),
Some(MapType(IntegerType, StringType, valueContainsNull = true)))
widenTest(
new StructType()
.add("arr", ArrayType(IntegerType, containsNull = true), nullable = false),
new StructType()
.add("arr", ArrayType(IntegerType, containsNull = false), nullable = true),
Some(new StructType()
.add("arr", ArrayType(IntegerType, containsNull = true), nullable = true)))
}
// Exercises the "wider" common-type search, which unlike findTightestCommonType
// also widens decimals (possibly losing precision and forcing nullability) and,
// in the string-promotion variant, promotes incompatible types to StringType.
test("wider common type for decimal and array") {
// Variant that allows promotion to StringType.
def widenTestWithStringPromotion(
t1: DataType,
t2: DataType,
expected: Option[DataType],
isSymmetric: Boolean = true): Unit = {
checkWidenType(TypeCoercion.findWiderTypeForTwo, t1, t2, expected, isSymmetric)
}
// Variant that never promotes to StringType.
def widenTestWithoutStringPromotion(
t1: DataType,
t2: DataType,
expected: Option[DataType],
isSymmetric: Boolean = true): Unit = {
checkWidenType(
TypeCoercion.findWiderTypeWithoutStringPromotionForTwo, t1, t2, expected, isSymmetric)
}
// Decimal
widenTestWithStringPromotion(
DecimalType(2, 1), DecimalType(3, 2), Some(DecimalType(3, 2)))
widenTestWithStringPromotion(
DecimalType(2, 1), DoubleType, Some(DoubleType))
widenTestWithStringPromotion(
DecimalType(2, 1), IntegerType, Some(DecimalType(11, 1)))
widenTestWithStringPromotion(
DecimalType(2, 1), LongType, Some(DecimalType(21, 1)))
// ArrayType: element types widen recursively; containsNull is the union.
widenTestWithStringPromotion(
ArrayType(ShortType, containsNull = true),
ArrayType(DoubleType, containsNull = false),
Some(ArrayType(DoubleType, containsNull = true)))
widenTestWithStringPromotion(
ArrayType(TimestampType, containsNull = false),
ArrayType(StringType, containsNull = true),
Some(ArrayType(StringType, containsNull = true)))
widenTestWithStringPromotion(
ArrayType(ArrayType(IntegerType), containsNull = false),
ArrayType(ArrayType(LongType), containsNull = false),
Some(ArrayType(ArrayType(LongType), containsNull = false)))
widenTestWithStringPromotion(
ArrayType(MapType(IntegerType, FloatType), containsNull = false),
ArrayType(MapType(LongType, DoubleType), containsNull = false),
Some(ArrayType(MapType(LongType, DoubleType), containsNull = false)))
widenTestWithStringPromotion(
ArrayType(new StructType().add("num", ShortType), containsNull = false),
ArrayType(new StructType().add("num", LongType), containsNull = false),
Some(ArrayType(new StructType().add("num", LongType), containsNull = false)))
widenTestWithStringPromotion(
ArrayType(IntegerType, containsNull = false),
ArrayType(DecimalType.IntDecimal, containsNull = false),
Some(ArrayType(DecimalType.IntDecimal, containsNull = false)))
// Widening that overflows decimal precision truncates to (38, 35) and
// forces containsNull = true because values may no longer fit.
widenTestWithStringPromotion(
ArrayType(DecimalType(36, 0), containsNull = false),
ArrayType(DecimalType(36, 35), containsNull = false),
Some(ArrayType(DecimalType(38, 35), containsNull = true)))
// MapType: key and value types widen independently; valueContainsNull is the union.
widenTestWithStringPromotion(
MapType(ShortType, TimestampType, valueContainsNull = true),
MapType(DoubleType, StringType, valueContainsNull = false),
Some(MapType(DoubleType, StringType, valueContainsNull = true)))
widenTestWithStringPromotion(
MapType(IntegerType, ArrayType(TimestampType), valueContainsNull = false),
MapType(LongType, ArrayType(StringType), valueContainsNull = true),
Some(MapType(LongType, ArrayType(StringType), valueContainsNull = true)))
widenTestWithStringPromotion(
MapType(IntegerType, MapType(ShortType, TimestampType), valueContainsNull = false),
MapType(LongType, MapType(DoubleType, StringType), valueContainsNull = false),
Some(MapType(LongType, MapType(DoubleType, StringType), valueContainsNull = false)))
widenTestWithStringPromotion(
MapType(IntegerType, new StructType().add("num", ShortType), valueContainsNull = false),
MapType(LongType, new StructType().add("num", LongType), valueContainsNull = false),
Some(MapType(LongType, new StructType().add("num", LongType), valueContainsNull = false)))
widenTestWithStringPromotion(
MapType(StringType, IntegerType, valueContainsNull = false),
MapType(StringType, DecimalType.IntDecimal, valueContainsNull = false),
Some(MapType(StringType, DecimalType.IntDecimal, valueContainsNull = false)))
widenTestWithStringPromotion(
MapType(StringType, DecimalType(36, 0), valueContainsNull = false),
MapType(StringType, DecimalType(36, 35), valueContainsNull = false),
Some(MapType(StringType, DecimalType(38, 35), valueContainsNull = true)))
widenTestWithStringPromotion(
MapType(IntegerType, StringType, valueContainsNull = false),
MapType(DecimalType.IntDecimal, StringType, valueContainsNull = false),
Some(MapType(DecimalType.IntDecimal, StringType, valueContainsNull = false)))
// Widening the KEY type cannot force nullability, so a lossy decimal
// widening on the key makes the maps incompatible.
widenTestWithStringPromotion(
MapType(DecimalType(36, 0), StringType, valueContainsNull = false),
MapType(DecimalType(36, 35), StringType, valueContainsNull = false),
None)
// StructType: fields widen pairwise; field nullability is the union.
widenTestWithStringPromotion(
new StructType()
.add("num", ShortType, nullable = true).add("ts", StringType, nullable = false),
new StructType()
.add("num", DoubleType, nullable = false).add("ts", TimestampType, nullable = true),
Some(new StructType()
.add("num", DoubleType, nullable = true).add("ts", StringType, nullable = true)))
widenTestWithStringPromotion(
new StructType()
.add("arr", ArrayType(ShortType, containsNull = false), nullable = false),
new StructType()
.add("arr", ArrayType(DoubleType, containsNull = true), nullable = false),
Some(new StructType()
.add("arr", ArrayType(DoubleType, containsNull = true), nullable = false)))
widenTestWithStringPromotion(
new StructType()
.add("map", MapType(ShortType, TimestampType, valueContainsNull = true), nullable = false),
new StructType()
.add("map", MapType(DoubleType, StringType, valueContainsNull = false), nullable = false),
Some(new StructType()
.add("map", MapType(DoubleType, StringType, valueContainsNull = true), nullable = false)))
widenTestWithStringPromotion(
new StructType().add("num", IntegerType, nullable = false),
new StructType().add("num", DecimalType.IntDecimal, nullable = false),
Some(new StructType().add("num", DecimalType.IntDecimal, nullable = false)))
widenTestWithStringPromotion(
new StructType().add("num", DecimalType(36, 0), nullable = false),
new StructType().add("num", DecimalType(36, 35), nullable = false),
Some(new StructType().add("num", DecimalType(38, 35), nullable = true)))
// Structs with different field counts never widen, in either variant.
widenTestWithStringPromotion(
new StructType().add("num", IntegerType),
new StructType().add("num", LongType).add("str", StringType),
None)
widenTestWithoutStringPromotion(
new StructType().add("num", IntegerType),
new StructType().add("num", LongType).add("str", StringType),
None)
// Field-name matching honours spark.sql.caseSensitive; the case-insensitive
// result keeps the left side's field names, so it is not symmetric.
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
widenTestWithStringPromotion(
new StructType().add("a", IntegerType),
new StructType().add("A", LongType),
None)
widenTestWithoutStringPromotion(
new StructType().add("a", IntegerType),
new StructType().add("A", LongType),
None)
}
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
widenTestWithStringPromotion(
new StructType().add("a", IntegerType),
new StructType().add("A", LongType),
Some(new StructType().add("a", LongType)),
isSymmetric = false)
widenTestWithoutStringPromotion(
new StructType().add("a", IntegerType),
new StructType().add("A", LongType),
Some(new StructType().add("a", LongType)),
isSymmetric = false)
}
// Without string promotion
widenTestWithoutStringPromotion(IntegerType, StringType, None)
widenTestWithoutStringPromotion(StringType, TimestampType, None)
widenTestWithoutStringPromotion(ArrayType(LongType), ArrayType(StringType), None)
widenTestWithoutStringPromotion(ArrayType(StringType), ArrayType(TimestampType), None)
widenTestWithoutStringPromotion(
MapType(LongType, IntegerType), MapType(StringType, IntegerType), None)
widenTestWithoutStringPromotion(
MapType(IntegerType, LongType), MapType(IntegerType, StringType), None)
widenTestWithoutStringPromotion(
MapType(StringType, IntegerType), MapType(TimestampType, IntegerType), None)
widenTestWithoutStringPromotion(
MapType(IntegerType, StringType), MapType(IntegerType, TimestampType), None)
widenTestWithoutStringPromotion(
new StructType().add("a", IntegerType),
new StructType().add("a", StringType),
None)
widenTestWithoutStringPromotion(
new StructType().add("a", StringType),
new StructType().add("a", IntegerType),
None)
// String promotion
widenTestWithStringPromotion(IntegerType, StringType, Some(StringType))
widenTestWithStringPromotion(StringType, TimestampType, Some(StringType))
widenTestWithStringPromotion(
ArrayType(LongType), ArrayType(StringType), Some(ArrayType(StringType)))
widenTestWithStringPromotion(
ArrayType(StringType), ArrayType(TimestampType), Some(ArrayType(StringType)))
widenTestWithStringPromotion(
MapType(LongType, IntegerType),
MapType(StringType, IntegerType),
Some(MapType(StringType, IntegerType)))
widenTestWithStringPromotion(
MapType(IntegerType, LongType),
MapType(IntegerType, StringType),
Some(MapType(IntegerType, StringType)))
widenTestWithStringPromotion(
MapType(StringType, IntegerType),
MapType(TimestampType, IntegerType),
Some(MapType(StringType, IntegerType)))
widenTestWithStringPromotion(
MapType(IntegerType, StringType),
MapType(IntegerType, TimestampType),
Some(MapType(IntegerType, StringType)))
widenTestWithStringPromotion(
new StructType().add("a", IntegerType),
new StructType().add("a", StringType),
Some(new StructType().add("a", StringType)))
widenTestWithStringPromotion(
new StructType().add("a", StringType),
new StructType().add("a", IntegerType),
Some(new StructType().add("a", StringType)))
}
/**
 * Convenience overload: applies a single rule by delegating to the
 * `Seq`-based `ruleTest`.
 *
 * Fix: replaced deprecated Scala procedure syntax (`def f(...) { }`)
 * with an explicit `: Unit =` result type; behavior is unchanged.
 */
private def ruleTest(rule: Rule[LogicalPlan], initial: Expression, transformed: Expression): Unit = {
ruleTest(Seq(rule), initial, transformed)
}
/**
 * Runs the given analysis rules to a fixed point over a single-column
 * projection of `initial` and asserts the result matches the same
 * projection of `transformed`.
 */
private def ruleTest(
rules: Seq[Rule[LogicalPlan]],
initial: Expression,
transformed: Expression): Unit = {
// One int-attribute relation shared by both sides of the comparison.
val relation = LocalRelation(AttributeReference("a", IntegerType)())
// Wraps an expression in a Project over the shared relation.
def projected(expr: Expression): LogicalPlan =
Project(Seq(Alias(expr, "a")()), relation)
// Execute the rules to a fixed point, bounded at 3 iterations.
val executor = new RuleExecutor[LogicalPlan] {
override val batches = Seq(Batch("Resolution", FixedPoint(3), rules: _*))
}
comparePlans(executor.execute(projected(initial)), projected(transformed))
}
// NullType arguments stay NullType when the expression accepts any type, but are
// cast to the expected default (DoubleType for NumericType) when the expression
// declares a numeric input type.
test("cast NullType for expressions that implement ExpectsInputTypes") {
import TypeCoercionSuite._
ruleTest(TypeCoercion.ImplicitTypeCasts,
AnyTypeUnaryExpression(Literal.create(null, NullType)),
AnyTypeUnaryExpression(Literal.create(null, NullType)))
ruleTest(TypeCoercion.ImplicitTypeCasts,
NumericTypeUnaryExpression(Literal.create(null, NullType)),
NumericTypeUnaryExpression(Literal.create(null, DoubleType)))
}
// Same contract for binary operators: both NullType operands are rewritten only
// when the operator requires numeric inputs.
test("cast NullType for binary operators") {
import TypeCoercionSuite._
ruleTest(TypeCoercion.ImplicitTypeCasts,
AnyTypeBinaryOperator(Literal.create(null, NullType), Literal.create(null, NullType)),
AnyTypeBinaryOperator(Literal.create(null, NullType), Literal.create(null, NullType)))
ruleTest(TypeCoercion.ImplicitTypeCasts,
NumericTypeBinaryOperator(Literal.create(null, NullType), Literal.create(null, NullType)),
NumericTypeBinaryOperator(Literal.create(null, DoubleType), Literal.create(null, DoubleType)))
}
// FunctionArgumentConversion rewrites every Coalesce argument to the widest
// common type of all arguments (decimals widen, strings win over timestamps,
// arrays widen element-wise).
test("coalesce casts") {
val rule = TypeCoercion.FunctionArgumentConversion
val intLit = Literal(1)
val longLit = Literal.create(1L)
val doubleLit = Literal(1.0)
val stringLit = Literal.create("c", StringType)
val nullLit = Literal.create(null, NullType)
val floatNullLit = Literal.create(null, FloatType)
val floatLit = Literal.create(1.0f, FloatType)
val timestampLit = Literal.create(Timestamp.valueOf("2017-04-12 00:00:00"), TimestampType)
// 22 digits: exceeds Long, so mixing with integral types widens to DecimalType(22, 0).
val decimalLit = Literal(new java.math.BigDecimal("1000000000000000000000"))
val tsArrayLit = Literal(Array(new Timestamp(System.currentTimeMillis())))
val strArrayLit = Literal(Array("c"))
val intArrayLit = Literal(Array(1))
ruleTest(rule,
Coalesce(Seq(doubleLit, intLit, floatLit)),
Coalesce(Seq(doubleLit, Cast(intLit, DoubleType), Cast(floatLit, DoubleType))))
ruleTest(rule,
Coalesce(Seq(longLit, intLit, decimalLit)),
Coalesce(Seq(Cast(longLit, DecimalType(22, 0)),
Cast(intLit, DecimalType(22, 0)), decimalLit)))
ruleTest(rule,
Coalesce(Seq(nullLit, intLit)),
Coalesce(Seq(Cast(nullLit, IntegerType), intLit)))
ruleTest(rule,
Coalesce(Seq(timestampLit, stringLit)),
Coalesce(Seq(Cast(timestampLit, StringType), stringLit)))
ruleTest(rule,
Coalesce(Seq(nullLit, floatNullLit, intLit)),
Coalesce(Seq(Cast(nullLit, FloatType), floatNullLit, Cast(intLit, FloatType))))
// Decimal mixed with double widens everything to DoubleType.
ruleTest(rule,
Coalesce(Seq(nullLit, intLit, decimalLit, doubleLit)),
Coalesce(Seq(Cast(nullLit, DoubleType), Cast(intLit, DoubleType),
Cast(decimalLit, DoubleType), doubleLit)))
// Any string argument promotes the whole list to StringType.
ruleTest(rule,
Coalesce(Seq(nullLit, floatNullLit, doubleLit, stringLit)),
Coalesce(Seq(Cast(nullLit, StringType), Cast(floatNullLit, StringType),
Cast(doubleLit, StringType), stringLit)))
ruleTest(rule,
Coalesce(Seq(timestampLit, intLit, stringLit)),
Coalesce(Seq(Cast(timestampLit, StringType), Cast(intLit, StringType), stringLit)))
ruleTest(rule,
Coalesce(Seq(tsArrayLit, intArrayLit, strArrayLit)),
Coalesce(Seq(Cast(tsArrayLit, ArrayType(StringType)),
Cast(intArrayLit, ArrayType(StringType)), strArrayLit)))
}
// CreateArray elements are coerced to a single widest type: doubles win over
// ints/floats, strings win over numerics, decimals widen to a common
// precision/scale.
test("CreateArray casts") {
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateArray(Literal(1.0)
:: Literal(1)
:: Literal.create(1.0f, FloatType)
:: Nil),
CreateArray(Literal(1.0)
:: Cast(Literal(1), DoubleType)
:: Cast(Literal.create(1.0f, FloatType), DoubleType)
:: Nil))
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateArray(Literal(1.0)
:: Literal(1)
:: Literal("a")
:: Nil),
CreateArray(Cast(Literal(1.0), StringType)
:: Cast(Literal(1), StringType)
:: Literal("a")
:: Nil))
// Decimal(5, 3) with an Int widens both to Decimal(13, 3).
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateArray(Literal.create(null, DecimalType(5, 3))
:: Literal(1)
:: Nil),
CreateArray(Literal.create(null, DecimalType(5, 3)).cast(DecimalType(13, 3))
:: Literal(1).cast(DecimalType(13, 3))
:: Nil))
// Mixed decimals widen to the max precision/scale seen, here Decimal(38, 38).
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateArray(Literal.create(null, DecimalType(5, 3))
:: Literal.create(null, DecimalType(22, 10))
:: Literal.create(null, DecimalType(38, 38))
:: Nil),
CreateArray(Literal.create(null, DecimalType(5, 3)).cast(DecimalType(38, 38))
:: Literal.create(null, DecimalType(22, 10)).cast(DecimalType(38, 38))
:: Literal.create(null, DecimalType(38, 38))
:: Nil))
}
// CreateMap arguments alternate key, value, key, value, ...; keys are coerced to
// one common type and values to another, independently.
test("CreateMap casts") {
// type coercion for map keys
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateMap(Literal(1)
:: Literal("a")
:: Literal.create(2.0f, FloatType)
:: Literal("b")
:: Nil),
CreateMap(Cast(Literal(1), FloatType)
:: Literal("a")
:: Literal.create(2.0f, FloatType)
:: Literal("b")
:: Nil))
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateMap(Literal.create(null, DecimalType(5, 3))
:: Literal("a")
:: Literal.create(2.0f, FloatType)
:: Literal("b")
:: Nil),
CreateMap(Literal.create(null, DecimalType(5, 3)).cast(DoubleType)
:: Literal("a")
:: Literal.create(2.0f, FloatType).cast(DoubleType)
:: Literal("b")
:: Nil))
// type coercion for map values
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateMap(Literal(1)
:: Literal("a")
:: Literal(2)
:: Literal(3.0)
:: Nil),
CreateMap(Literal(1)
:: Literal("a")
:: Literal(2)
:: Cast(Literal(3.0), StringType)
:: Nil))
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateMap(Literal(1)
:: Literal.create(null, DecimalType(38, 0))
:: Literal(2)
:: Literal.create(null, DecimalType(38, 38))
:: Nil),
CreateMap(Literal(1)
:: Literal.create(null, DecimalType(38, 0)).cast(DecimalType(38, 38))
:: Literal(2)
:: Literal.create(null, DecimalType(38, 38))
:: Nil))
// type coercion for both map keys and values
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateMap(Literal(1)
:: Literal("a")
:: Literal(2.0)
:: Literal(3.0)
:: Nil),
CreateMap(Cast(Literal(1), DoubleType)
:: Literal("a")
:: Literal(2.0)
:: Cast(Literal(3.0), StringType)
:: Nil))
}
// Greatest and Least share the same argument-widening behavior, so both are
// exercised with the same inputs via the `operator` constructor.
test("greatest/least cast") {
for (operator <- Seq[(Seq[Expression] => Expression)](Greatest, Least)) {
ruleTest(TypeCoercion.FunctionArgumentConversion,
operator(Literal(1.0)
:: Literal(1)
:: Literal.create(1.0f, FloatType)
:: Nil),
operator(Literal(1.0)
:: Cast(Literal(1), DoubleType)
:: Cast(Literal.create(1.0f, FloatType), DoubleType)
:: Nil))
// A 22-digit BigDecimal widens integral arguments to DecimalType(22, 0).
ruleTest(TypeCoercion.FunctionArgumentConversion,
operator(Literal(1L)
:: Literal(1)
:: Literal(new java.math.BigDecimal("1000000000000000000000"))
:: Nil),
operator(Cast(Literal(1L), DecimalType(22, 0))
:: Cast(Literal(1), DecimalType(22, 0))
:: Literal(new java.math.BigDecimal("1000000000000000000000"))
:: Nil))
// Decimal mixed with Double widens everything to DoubleType.
ruleTest(TypeCoercion.FunctionArgumentConversion,
operator(Literal(1.0)
:: Literal.create(null, DecimalType(10, 5))
:: Literal(1)
:: Nil),
operator(Literal(1.0)
:: Literal.create(null, DecimalType(10, 5)).cast(DoubleType)
:: Literal(1).cast(DoubleType)
:: Nil))
// Two decimals plus an Int widen to a common DecimalType(20, 5).
ruleTest(TypeCoercion.FunctionArgumentConversion,
operator(Literal.create(null, DecimalType(15, 0))
:: Literal.create(null, DecimalType(10, 5))
:: Literal(1)
:: Nil),
operator(Literal.create(null, DecimalType(15, 0)).cast(DecimalType(20, 5))
:: Literal.create(null, DecimalType(10, 5)).cast(DecimalType(20, 5))
:: Literal(1).cast(DecimalType(20, 5))
:: Nil))
// Long plus decimal widens to DecimalType(25, 5) to keep all Long digits.
ruleTest(TypeCoercion.FunctionArgumentConversion,
operator(Literal.create(2L, LongType)
:: Literal(1)
:: Literal.create(null, DecimalType(10, 5))
:: Nil),
operator(Literal.create(2L, LongType).cast(DecimalType(25, 5))
:: Literal(1).cast(DecimalType(25, 5))
:: Literal.create(null, DecimalType(10, 5)).cast(DecimalType(25, 5))
:: Nil))
}
}
// NaNvl requires both operands to share a floating-point type: float widens to
// double when mixed, and NullType adopts the other operand's type.
test("nanvl casts") {
ruleTest(TypeCoercion.FunctionArgumentConversion,
NaNvl(Literal.create(1.0f, FloatType), Literal.create(1.0, DoubleType)),
NaNvl(Cast(Literal.create(1.0f, FloatType), DoubleType), Literal.create(1.0, DoubleType)))
ruleTest(TypeCoercion.FunctionArgumentConversion,
NaNvl(Literal.create(1.0, DoubleType), Literal.create(1.0f, FloatType)),
NaNvl(Literal.create(1.0, DoubleType), Cast(Literal.create(1.0f, FloatType), DoubleType)))
// Already-matching operand types are left untouched.
ruleTest(TypeCoercion.FunctionArgumentConversion,
NaNvl(Literal.create(1.0, DoubleType), Literal.create(1.0, DoubleType)),
NaNvl(Literal.create(1.0, DoubleType), Literal.create(1.0, DoubleType)))
ruleTest(TypeCoercion.FunctionArgumentConversion,
NaNvl(Literal.create(1.0f, FloatType), Literal.create(null, NullType)),
NaNvl(Literal.create(1.0f, FloatType), Cast(Literal.create(null, NullType), FloatType)))
ruleTest(TypeCoercion.FunctionArgumentConversion,
NaNvl(Literal.create(1.0, DoubleType), Literal.create(null, NullType)),
NaNvl(Literal.create(1.0, DoubleType), Cast(Literal.create(null, NullType), DoubleType)))
}
// IfCoercion widens the two value branches to a common type, and coerces a
// NullType (or non-boolean) predicate to BooleanType.
test("type coercion for If") {
val rule = TypeCoercion.IfCoercion
val intLit = Literal(1)
val doubleLit = Literal(1.0)
val trueLit = Literal.create(true, BooleanType)
val falseLit = Literal.create(false, BooleanType)
val stringLit = Literal.create("c", StringType)
val floatLit = Literal.create(1.0f, FloatType)
val timestampLit = Literal.create(Timestamp.valueOf("2017-04-12 00:00:00"), TimestampType)
val decimalLit = Literal(new java.math.BigDecimal("1000000000000000000000"))
ruleTest(rule,
If(Literal(true), Literal(1), Literal(1L)),
If(Literal(true), Cast(Literal(1), LongType), Literal(1L)))
// A NullType predicate is re-typed to BooleanType (no Cast node needed for null).
ruleTest(rule,
If(Literal.create(null, NullType), Literal(1), Literal(1)),
If(Literal.create(null, BooleanType), Literal(1), Literal(1)))
// AssertTrue returns NullType, so the predicate gets an explicit Cast to Boolean.
ruleTest(rule,
If(AssertTrue(trueLit), Literal(1), Literal(2)),
If(Cast(AssertTrue(trueLit), BooleanType), Literal(1), Literal(2)))
ruleTest(rule,
If(AssertTrue(falseLit), Literal(1), Literal(2)),
If(Cast(AssertTrue(falseLit), BooleanType), Literal(1), Literal(2)))
ruleTest(rule,
If(trueLit, intLit, doubleLit),
If(trueLit, Cast(intLit, DoubleType), doubleLit))
ruleTest(rule,
If(trueLit, floatLit, doubleLit),
If(trueLit, Cast(floatLit, DoubleType), doubleLit))
ruleTest(rule,
If(trueLit, floatLit, decimalLit),
If(trueLit, Cast(floatLit, DoubleType), Cast(decimalLit, DoubleType)))
// A string branch promotes the other branch to StringType.
ruleTest(rule,
If(falseLit, stringLit, doubleLit),
If(falseLit, stringLit, Cast(doubleLit, StringType)))
ruleTest(rule,
If(trueLit, timestampLit, stringLit),
If(trueLit, Cast(timestampLit, StringType), stringLit))
}
// CaseKeyWhen: the key is implicitly cast to match the branch-condition type;
// CaseWhenCoercion widens all branch values (and the else value) to one type.
test("type coercion for CaseKeyWhen") {
ruleTest(TypeCoercion.ImplicitTypeCasts,
CaseKeyWhen(Literal(1.toShort), Seq(Literal(1), Literal("a"))),
CaseKeyWhen(Cast(Literal(1.toShort), IntegerType), Seq(Literal(1), Literal("a")))
)
// A Boolean key with an Integer condition has no common type; left unchanged.
ruleTest(TypeCoercion.CaseWhenCoercion,
CaseKeyWhen(Literal(true), Seq(Literal(1), Literal("a"))),
CaseKeyWhen(Literal(true), Seq(Literal(1), Literal("a")))
)
// Double branch value with a Decimal(7, 2) else: both become DoubleType.
ruleTest(TypeCoercion.CaseWhenCoercion,
CaseWhen(Seq((Literal(true), Literal(1.2))),
Literal.create(BigDecimal.valueOf(1), DecimalType(7, 2))),
CaseWhen(Seq((Literal(true), Literal(1.2))),
Cast(Literal.create(BigDecimal.valueOf(1), DecimalType(7, 2)), DoubleType))
)
// Long branch value with a Decimal(7, 2) else widens both to Decimal(22, 2).
ruleTest(TypeCoercion.CaseWhenCoercion,
CaseWhen(Seq((Literal(true), Literal(100L))),
Literal.create(BigDecimal.valueOf(1), DecimalType(7, 2))),
CaseWhen(Seq((Literal(true), Cast(Literal(100L), DecimalType(22, 2)))),
Cast(Literal.create(BigDecimal.valueOf(1), DecimalType(7, 2)), DecimalType(22, 2)))
)
}
// StackCoercion re-types untyped null literals in Stack's value positions to the
// column type implied by the other values in the same output column. The first
// argument (the row count) is never touched.
test("type coercion for Stack") {
val rule = TypeCoercion.StackCoercion
ruleTest(rule,
Stack(Seq(Literal(3), Literal(1), Literal(2), Literal(null))),
Stack(Seq(Literal(3), Literal(1), Literal(2), Literal.create(null, IntegerType))))
ruleTest(rule,
Stack(Seq(Literal(3), Literal(1.0), Literal(null), Literal(3.0))),
Stack(Seq(Literal(3), Literal(1.0), Literal.create(null, DoubleType), Literal(3.0))))
ruleTest(rule,
Stack(Seq(Literal(3), Literal(null), Literal("2"), Literal("3"))),
Stack(Seq(Literal(3), Literal.create(null, StringType), Literal("2"), Literal("3"))))
// All-null column: there is no type to infer, so the nulls stay untyped.
ruleTest(rule,
Stack(Seq(Literal(3), Literal(null), Literal(null), Literal(null))),
Stack(Seq(Literal(3), Literal(null), Literal(null), Literal(null))))
// Two-column cases: each null adopts the type of its own column, regardless of
// where in the argument list the typed value appears.
ruleTest(rule,
Stack(Seq(Literal(2),
Literal(1), Literal("2"),
Literal(null), Literal(null))),
Stack(Seq(Literal(2),
Literal(1), Literal("2"),
Literal.create(null, IntegerType), Literal.create(null, StringType))))
ruleTest(rule,
Stack(Seq(Literal(2),
Literal(1), Literal(null),
Literal(null), Literal("2"))),
Stack(Seq(Literal(2),
Literal(1), Literal.create(null, StringType),
Literal.create(null, IntegerType), Literal("2"))))
ruleTest(rule,
Stack(Seq(Literal(2),
Literal(null), Literal(1),
Literal("2"), Literal(null))),
Stack(Seq(Literal(2),
Literal.create(null, StringType), Literal(1),
Literal("2"), Literal.create(null, IntegerType))))
ruleTest(rule,
Stack(Seq(Literal(2),
Literal(null), Literal(null),
Literal(1), Literal("2"))),
Stack(Seq(Literal(2),
Literal.create(null, IntegerType), Literal.create(null, StringType),
Literal(1), Literal("2"))))
// The row count may be any foldable expression, not just a literal.
ruleTest(rule,
Stack(Seq(Subtract(Literal(3), Literal(1)),
Literal(1), Literal("2"),
Literal(null), Literal(null))),
Stack(Seq(Subtract(Literal(3), Literal(1)),
Literal(1), Literal("2"),
Literal.create(null, IntegerType), Literal.create(null, StringType))))
}
// ConcatCoercion casts every non-string Concat argument to StringType; how
// binary-only inputs are handled is controlled by
// spark.sql.function.concatBinaryAsString.
test("type coercion for Concat") {
val rule = TypeCoercion.ConcatCoercion(conf)
// All-string input needs no rewrite.
ruleTest(rule,
Concat(Seq(Literal("ab"), Literal("cde"))),
Concat(Seq(Literal("ab"), Literal("cde"))))
ruleTest(rule,
Concat(Seq(Literal(null), Literal("abc"))),
Concat(Seq(Cast(Literal(null), StringType), Literal("abc"))))
ruleTest(rule,
Concat(Seq(Literal(1), Literal("234"))),
Concat(Seq(Cast(Literal(1), StringType), Literal("234"))))
// A binary argument mixed with a string is cast to StringType.
ruleTest(rule,
Concat(Seq(Literal("1"), Literal("234".getBytes()))),
Concat(Seq(Literal("1"), Cast(Literal("234".getBytes()), StringType))))
ruleTest(rule,
Concat(Seq(Literal(1L), Literal(2.toByte), Literal(0.1))),
Concat(Seq(Cast(Literal(1L), StringType), Cast(Literal(2.toByte), StringType),
Cast(Literal(0.1), StringType))))
ruleTest(rule,
Concat(Seq(Literal(true), Literal(0.1f), Literal(3.toShort))),
Concat(Seq(Cast(Literal(true), StringType), Cast(Literal(0.1f), StringType),
Cast(Literal(3.toShort), StringType))))
ruleTest(rule,
Concat(Seq(Literal(1L), Literal(0.1))),
Concat(Seq(Cast(Literal(1L), StringType), Cast(Literal(0.1), StringType))))
ruleTest(rule,
Concat(Seq(Literal(Decimal(10)))),
Concat(Seq(Cast(Literal(Decimal(10)), StringType))))
ruleTest(rule,
Concat(Seq(Literal(BigDecimal.valueOf(10)))),
Concat(Seq(Cast(Literal(BigDecimal.valueOf(10)), StringType))))
ruleTest(rule,
Concat(Seq(Literal(java.math.BigDecimal.valueOf(10)))),
Concat(Seq(Cast(Literal(java.math.BigDecimal.valueOf(10)), StringType))))
ruleTest(rule,
Concat(Seq(Literal(new java.sql.Date(0)), Literal(new Timestamp(0)))),
Concat(Seq(Cast(Literal(new java.sql.Date(0)), StringType),
Cast(Literal(new Timestamp(0)), StringType))))
// concatBinaryAsString=true: all-binary inputs are still cast to strings.
withSQLConf(SQLConf.CONCAT_BINARY_AS_STRING.key -> "true") {
ruleTest(rule,
Concat(Seq(Literal("123".getBytes), Literal("456".getBytes))),
Concat(Seq(Cast(Literal("123".getBytes), StringType),
Cast(Literal("456".getBytes), StringType))))
}
// concatBinaryAsString=false: all-binary inputs stay binary (no rewrite).
withSQLConf(SQLConf.CONCAT_BINARY_AS_STRING.key -> "false") {
ruleTest(rule,
Concat(Seq(Literal("123".getBytes), Literal("456".getBytes))),
Concat(Seq(Literal("123".getBytes), Literal("456".getBytes))))
}
}
// EltCoercion: the first argument (the index) is cast to IntegerType; the
// remaining value arguments are cast to StringType, with all-binary inputs
// controlled by spark.sql.function.eltOutputAsString.
test("type coercion for Elt") {
val rule = TypeCoercion.EltCoercion(conf)
// Already well-typed: no rewrite.
ruleTest(rule,
Elt(Seq(Literal(1), Literal("ab"), Literal("cde"))),
Elt(Seq(Literal(1), Literal("ab"), Literal("cde"))))
// A Short index is widened to IntegerType.
ruleTest(rule,
Elt(Seq(Literal(1.toShort), Literal("ab"), Literal("cde"))),
Elt(Seq(Cast(Literal(1.toShort), IntegerType), Literal("ab"), Literal("cde"))))
ruleTest(rule,
Elt(Seq(Literal(2), Literal(null), Literal("abc"))),
Elt(Seq(Literal(2), Cast(Literal(null), StringType), Literal("abc"))))
ruleTest(rule,
Elt(Seq(Literal(2), Literal(1), Literal("234"))),
Elt(Seq(Literal(2), Cast(Literal(1), StringType), Literal("234"))))
ruleTest(rule,
Elt(Seq(Literal(3), Literal(1L), Literal(2.toByte), Literal(0.1))),
Elt(Seq(Literal(3), Cast(Literal(1L), StringType), Cast(Literal(2.toByte), StringType),
Cast(Literal(0.1), StringType))))
ruleTest(rule,
Elt(Seq(Literal(2), Literal(true), Literal(0.1f), Literal(3.toShort))),
Elt(Seq(Literal(2), Cast(Literal(true), StringType), Cast(Literal(0.1f), StringType),
Cast(Literal(3.toShort), StringType))))
ruleTest(rule,
Elt(Seq(Literal(1), Literal(1L), Literal(0.1))),
Elt(Seq(Literal(1), Cast(Literal(1L), StringType), Cast(Literal(0.1), StringType))))
ruleTest(rule,
Elt(Seq(Literal(1), Literal(Decimal(10)))),
Elt(Seq(Literal(1), Cast(Literal(Decimal(10)), StringType))))
ruleTest(rule,
Elt(Seq(Literal(1), Literal(BigDecimal.valueOf(10)))),
Elt(Seq(Literal(1), Cast(Literal(BigDecimal.valueOf(10)), StringType))))
ruleTest(rule,
Elt(Seq(Literal(1), Literal(java.math.BigDecimal.valueOf(10)))),
Elt(Seq(Literal(1), Cast(Literal(java.math.BigDecimal.valueOf(10)), StringType))))
ruleTest(rule,
Elt(Seq(Literal(2), Literal(new java.sql.Date(0)), Literal(new Timestamp(0)))),
Elt(Seq(Literal(2), Cast(Literal(new java.sql.Date(0)), StringType),
Cast(Literal(new Timestamp(0)), StringType))))
// eltOutputAsString=true: binary values are cast to strings.
withSQLConf(SQLConf.ELT_OUTPUT_AS_STRING.key -> "true") {
ruleTest(rule,
Elt(Seq(Literal(1), Literal("123".getBytes), Literal("456".getBytes))),
Elt(Seq(Literal(1), Cast(Literal("123".getBytes), StringType),
Cast(Literal("456".getBytes), StringType))))
}
// eltOutputAsString=false: all-binary values are left as binary.
withSQLConf(SQLConf.ELT_OUTPUT_AS_STRING.key -> "false") {
ruleTest(rule,
Elt(Seq(Literal(1), Literal("123".getBytes), Literal("456".getBytes))),
Elt(Seq(Literal(1), Literal("123".getBytes), Literal("456".getBytes))))
}
}
// BooleanEquality must insert a cast from the boolean literal to the other
// operand's type for EqualTo and EqualNullSafe, in both operand orders.
test("BooleanEquality type cast") {
val be = TypeCoercion.BooleanEquality
// Use something more than a literal to avoid triggering the simplification rules.
val one = Add(Literal(Decimal(1)), Literal(Decimal(0)))
ruleTest(be,
EqualTo(Literal(true), one),
EqualTo(Cast(Literal(true), one.dataType), one)
)
ruleTest(be,
EqualTo(one, Literal(true)),
EqualTo(one, Cast(Literal(true), one.dataType))
)
ruleTest(be,
EqualNullSafe(Literal(true), one),
EqualNullSafe(Cast(Literal(true), one.dataType), one)
)
ruleTest(be,
EqualNullSafe(one, Literal(true)),
EqualNullSafe(one, Cast(Literal(true), one.dataType))
)
}
// When one side is a boolean literal and the other a numeric literal of 1 or 0,
// BooleanEquality simplifies the comparison to the boolean itself (for 1) or its
// negation (for 0) instead of inserting casts; EqualNullSafe additionally wraps
// the result with an IsNotNull check.
test("BooleanEquality simplification") {
val be = TypeCoercion.BooleanEquality
ruleTest(be,
EqualTo(Literal(true), Literal(1)),
Literal(true)
)
ruleTest(be,
EqualTo(Literal(true), Literal(0)),
Not(Literal(true))
)
ruleTest(be,
EqualNullSafe(Literal(true), Literal(1)),
And(IsNotNull(Literal(true)), Literal(true))
)
ruleTest(be,
EqualNullSafe(Literal(true), Literal(0)),
And(IsNotNull(Literal(true)), Not(Literal(true)))
)
ruleTest(be,
EqualTo(Literal(true), Literal(1L)),
Literal(true)
)
ruleTest(be,
EqualTo(Literal(new java.math.BigDecimal(1)), Literal(true)),
Literal(true)
)
ruleTest(be,
EqualTo(Literal(BigDecimal(0)), Literal(true)),
Not(Literal(true))
)
ruleTest(be,
EqualTo(Literal(Decimal(1)), Literal(true)),
Literal(true)
)
ruleTest(be,
EqualTo(Literal.create(Decimal(1), DecimalType(8, 0)), Literal(true)),
Literal(true)
)
}
/** Asserts that each output attribute of `logical` has the matching expected data type. */
private def checkOutput(logical: LogicalPlan, expectTypes: Seq[DataType]): Unit = {
  for ((attr, expected) <- logical.output.zip(expectTypes)) {
    assert(attr.dataType === expected)
  }
}
// Applies WidenSetOperationTypes and then runs ResolveTimeZone over the result so
// the produced plan is directly comparable in the assertions below.
private val timeZoneResolver = ResolveTimeZone(new SQLConf)
private def widenSetOperationTypes(plan: LogicalPlan): LogicalPlan = {
timeZoneResolver(TypeCoercion.WidenSetOperationTypes(plan))
}
// Both branches of Except/Intersect must be widened to a common schema
// (string/decimal/float/double here) and each branch wrapped in a Project.
test("WidenSetOperationTypes for except and intersect") {
val firstTable = LocalRelation(
AttributeReference("i", IntegerType)(),
AttributeReference("u", DecimalType.SYSTEM_DEFAULT)(),
AttributeReference("b", ByteType)(),
AttributeReference("d", DoubleType)())
val secondTable = LocalRelation(
AttributeReference("s", StringType)(),
AttributeReference("d", DecimalType(2, 1))(),
AttributeReference("f", FloatType)(),
AttributeReference("l", LongType)())
val expectedTypes = Seq(StringType, DecimalType.SYSTEM_DEFAULT, FloatType, DoubleType)
val r1 = widenSetOperationTypes(
Except(firstTable, secondTable, isAll = false)).asInstanceOf[Except]
val r2 = widenSetOperationTypes(
Intersect(firstTable, secondTable, isAll = false)).asInstanceOf[Intersect]
checkOutput(r1.left, expectedTypes)
checkOutput(r1.right, expectedTypes)
checkOutput(r2.left, expectedTypes)
checkOutput(r2.right, expectedTypes)
// Check if a Project is added
assert(r1.left.isInstanceOf[Project])
assert(r1.right.isInstanceOf[Project])
assert(r2.left.isInstanceOf[Project])
assert(r2.right.isInstanceOf[Project])
}
// A 4-way Union: every child must be widened to the common schema and wrapped in
// a Project, and the Union keeps all four children.
test("WidenSetOperationTypes for union") {
val firstTable = LocalRelation(
AttributeReference("i", IntegerType)(),
AttributeReference("u", DecimalType.SYSTEM_DEFAULT)(),
AttributeReference("b", ByteType)(),
AttributeReference("d", DoubleType)())
val secondTable = LocalRelation(
AttributeReference("s", StringType)(),
AttributeReference("d", DecimalType(2, 1))(),
AttributeReference("f", FloatType)(),
AttributeReference("l", LongType)())
val thirdTable = LocalRelation(
AttributeReference("m", StringType)(),
AttributeReference("n", DecimalType.SYSTEM_DEFAULT)(),
AttributeReference("p", FloatType)(),
AttributeReference("q", DoubleType)())
val forthTable = LocalRelation(
AttributeReference("m", StringType)(),
AttributeReference("n", DecimalType.SYSTEM_DEFAULT)(),
AttributeReference("p", ByteType)(),
AttributeReference("q", DoubleType)())
val expectedTypes = Seq(StringType, DecimalType.SYSTEM_DEFAULT, FloatType, DoubleType)
val unionRelation = widenSetOperationTypes(
Union(firstTable :: secondTable :: thirdTable :: forthTable :: Nil)).asInstanceOf[Union]
assert(unionRelation.children.length == 4)
checkOutput(unionRelation.children.head, expectedTypes)
checkOutput(unionRelation.children(1), expectedTypes)
checkOutput(unionRelation.children(2), expectedTypes)
checkOutput(unionRelation.children(3), expectedTypes)
assert(unionRelation.children.head.isInstanceOf[Project])
assert(unionRelation.children(1).isInstanceOf[Project])
assert(unionRelation.children(2).isInstanceOf[Project])
assert(unionRelation.children(3).isInstanceOf[Project])
}
// Decimal columns of set operations must be widened to a precision/scale that can
// hold both sides; the table of rightTypes/expectedTypes below checks widening
// against each integral and floating-point type in both operand orders.
// NOTE: a local `checkOutput` identical to the suite-level helper used to be
// defined here, shadowing it; the redundant duplicate has been removed and the
// suite-level helper is used instead.
test("Transform Decimal precision/scale for union except and intersect") {
val left1 = LocalRelation(
AttributeReference("l", DecimalType(10, 8))())
val right1 = LocalRelation(
AttributeReference("r", DecimalType(5, 5))())
val expectedType1 = Seq(DecimalType(10, 8))
val r1 = widenSetOperationTypes(Union(left1, right1)).asInstanceOf[Union]
val r2 = widenSetOperationTypes(
Except(left1, right1, isAll = false)).asInstanceOf[Except]
val r3 = widenSetOperationTypes(
Intersect(left1, right1, isAll = false)).asInstanceOf[Intersect]
checkOutput(r1.children.head, expectedType1)
checkOutput(r1.children.last, expectedType1)
checkOutput(r2.left, expectedType1)
checkOutput(r2.right, expectedType1)
checkOutput(r3.left, expectedType1)
checkOutput(r3.right, expectedType1)
val plan1 = LocalRelation(AttributeReference("l", DecimalType(10, 5))())
val rightTypes = Seq(ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType)
val expectedTypes = Seq(DecimalType(10, 5), DecimalType(10, 5), DecimalType(15, 5),
DecimalType(25, 5), DoubleType, DoubleType)
rightTypes.zip(expectedTypes).foreach { case (rType, expectedType) =>
val plan2 = LocalRelation(
AttributeReference("r", rType)())
val r1 = widenSetOperationTypes(Union(plan1, plan2)).asInstanceOf[Union]
val r2 = widenSetOperationTypes(
Except(plan1, plan2, isAll = false)).asInstanceOf[Except]
val r3 = widenSetOperationTypes(
Intersect(plan1, plan2, isAll = false)).asInstanceOf[Intersect]
checkOutput(r1.children.last, Seq(expectedType))
checkOutput(r2.right, Seq(expectedType))
checkOutput(r3.right, Seq(expectedType))
val r4 = widenSetOperationTypes(Union(plan2, plan1)).asInstanceOf[Union]
val r5 = widenSetOperationTypes(
Except(plan2, plan1, isAll = false)).asInstanceOf[Except]
val r6 = widenSetOperationTypes(
Intersect(plan2, plan1, isAll = false)).asInstanceOf[Intersect]
checkOutput(r4.children.last, Seq(expectedType))
checkOutput(r5.left, Seq(expectedType))
checkOutput(r6.left, Seq(expectedType))
}
}
// DateTimeOperations rewrites Add/Subtract involving an interval into
// TimeAdd/TimeSub cast back to the non-interval operand's type, and rewrites
// date +/- int into DateAdd/DateSub (DateDiff for date - date).
test("rule for date/timestamp operations") {
val dateTimeOperations = TypeCoercion.DateTimeOperations
val date = Literal(new java.sql.Date(0L))
val timestamp = Literal(new Timestamp(0L))
val interval = Literal(new CalendarInterval(0, 0))
val str = Literal("2015-01-01")
val intValue = Literal(0, IntegerType)
ruleTest(dateTimeOperations, Add(date, interval), Cast(TimeAdd(date, interval), DateType))
ruleTest(dateTimeOperations, Add(interval, date), Cast(TimeAdd(date, interval), DateType))
ruleTest(dateTimeOperations, Add(timestamp, interval),
Cast(TimeAdd(timestamp, interval), TimestampType))
ruleTest(dateTimeOperations, Add(interval, timestamp),
Cast(TimeAdd(timestamp, interval), TimestampType))
ruleTest(dateTimeOperations, Add(str, interval), Cast(TimeAdd(str, interval), StringType))
ruleTest(dateTimeOperations, Add(interval, str), Cast(TimeAdd(str, interval), StringType))
ruleTest(dateTimeOperations, Subtract(date, interval), Cast(TimeSub(date, interval), DateType))
ruleTest(dateTimeOperations, Subtract(timestamp, interval),
Cast(TimeSub(timestamp, interval), TimestampType))
ruleTest(dateTimeOperations, Subtract(str, interval), Cast(TimeSub(str, interval), StringType))
// interval operations should not be affected
ruleTest(dateTimeOperations, Add(interval, interval), Add(interval, interval))
ruleTest(dateTimeOperations, Subtract(interval, interval), Subtract(interval, interval))
ruleTest(dateTimeOperations, Add(date, intValue), DateAdd(date, intValue))
ruleTest(dateTimeOperations, Add(intValue, date), DateAdd(date, intValue))
ruleTest(dateTimeOperations, Subtract(date, intValue), DateSub(date, intValue))
ruleTest(dateTimeOperations, Subtract(date, date), DateDiff(date, date))
}
/**
* There are rules that need to not fire before child expressions get resolved.
* We use this test to make sure those rules do not fire early.
*/
test("make sure rules do not fire early") {
// InConversion
val inConversion = TypeCoercion.InConversion(conf)
// Unresolved value expression: the rule must leave the In untouched.
ruleTest(inConversion,
In(UnresolvedAttribute("a"), Seq(Literal(1))),
In(UnresolvedAttribute("a"), Seq(Literal(1)))
)
// Unresolved element in the list: still untouched.
ruleTest(inConversion,
In(Literal("test"), Seq(UnresolvedAttribute("a"), Literal(1))),
In(Literal("test"), Seq(UnresolvedAttribute("a"), Literal(1)))
)
// Fully resolved: all operands are cast to a common (string) type.
ruleTest(inConversion,
In(Literal("a"), Seq(Literal(1), Literal("b"))),
In(Cast(Literal("a"), StringType),
Seq(Cast(Literal(1), StringType), Cast(Literal("b"), StringType)))
)
}
// Divide operands inside aggregates must be promoted to fractional types so the
// division is not performed with integer semantics.
test("SPARK-15776 Divide expression's dataType should be casted to Double or Decimal " +
"in aggregation function like sum") {
val rules = Seq(FunctionArgumentConversion, Division(conf))
// Casts Integer to Double
ruleTest(rules, sum(Divide(4, 3)), sum(Divide(Cast(4, DoubleType), Cast(3, DoubleType))))
// Left expression is Double, right expression is Int. Another rule ImplicitTypeCasts will
// cast the right expression to Double.
ruleTest(rules, sum(Divide(4.0, 3)), sum(Divide(4.0, 3)))
// Left expression is Int, right expression is Double
ruleTest(rules, sum(Divide(4, 3.0)), sum(Divide(Cast(4, DoubleType), Cast(3.0, DoubleType))))
// Casts Float to Double
ruleTest(
rules,
sum(Divide(4.0f, 3)),
sum(Divide(Cast(4.0f, DoubleType), Cast(3, DoubleType))))
// Left expression is Decimal, right expression is Int. Another rule DecimalPrecision will cast
// the right expression to Decimal.
ruleTest(rules, sum(Divide(Decimal(4.0), 3)), sum(Divide(Decimal(4.0), 3)))
}
// A NullType operand of Divide must be coerced to Double alongside the other side.
test("SPARK-17117 null type coercion in divide") {
val rules = Seq(FunctionArgumentConversion, Division(conf), ImplicitTypeCasts)
val nullLit = Literal.create(null, NullType)
ruleTest(rules, Divide(1L, nullLit), Divide(Cast(1L, DoubleType), Cast(nullLit, DoubleType)))
ruleTest(rules, Divide(nullLit, 1L), Divide(Cast(nullLit, DoubleType), Cast(1L, DoubleType)))
}
// With PREFER_INTEGRAL_DIVISION enabled, dividing two integral operands becomes
// IntegralDivide; otherwise both sides are cast to Double for a fractional Divide.
// Mixed integral/fractional and decimal operands are unaffected by the flag.
test("SPARK-28395 Division operator support integral division") {
val rules = Seq(FunctionArgumentConversion, Division(conf))
Seq(true, false).foreach { preferIntegralDivision =>
withSQLConf(SQLConf.PREFER_INTEGRAL_DIVISION.key -> s"$preferIntegralDivision") {
val result1 = if (preferIntegralDivision) {
IntegralDivide(1L, 1L)
} else {
Divide(Cast(1L, DoubleType), Cast(1L, DoubleType))
}
ruleTest(rules, Divide(1L, 1L), result1)
val result2 = if (preferIntegralDivision) {
IntegralDivide(1, Cast(1, ShortType))
} else {
Divide(Cast(1, DoubleType), Cast(Cast(1, ShortType), DoubleType))
}
ruleTest(rules, Divide(1, Cast(1, ShortType)), result2)
ruleTest(rules, Divide(1L, 1D), Divide(Cast(1L, DoubleType), Cast(1D, DoubleType)))
ruleTest(rules, Divide(Decimal(1.1), 1L), Divide(Decimal(1.1), 1L))
}
}
}
// PromoteStrings casts a string operand of a binary comparison to the other
// side's type (int/boolean), to Double when compared with decimals, and leaves
// incomparable pairs (array vs string) untouched. Date/timestamp comparisons
// depend on COMPARE_DATE_TIMESTAMP_IN_TIMESTAMP (see SPARK-23549).
test("binary comparison with string promotion") {
val rule = TypeCoercion.PromoteStrings(conf)
ruleTest(rule,
GreaterThan(Literal("123"), Literal(1)),
GreaterThan(Cast(Literal("123"), IntegerType), Literal(1)))
ruleTest(rule,
LessThan(Literal(true), Literal("123")),
LessThan(Literal(true), Cast(Literal("123"), BooleanType)))
ruleTest(rule,
EqualTo(Literal(Array(1, 2)), Literal("123")),
EqualTo(Literal(Array(1, 2)), Literal("123")))
ruleTest(rule,
GreaterThan(Literal("1.5"), Literal(BigDecimal("0.5"))),
GreaterThan(Cast(Literal("1.5"), DoubleType), Cast(Literal(BigDecimal("0.5")),
DoubleType)))
Seq(true, false).foreach { convertToTS =>
withSQLConf(
SQLConf.COMPARE_DATE_TIMESTAMP_IN_TIMESTAMP.key -> convertToTS.toString) {
val date0301 = Literal(java.sql.Date.valueOf("2017-03-01"))
val timestamp0301000000 = Literal(Timestamp.valueOf("2017-03-01 00:00:00"))
val timestamp0301000001 = Literal(Timestamp.valueOf("2017-03-01 00:00:01"))
if (convertToTS) {
// `Date` should be treated as timestamp at 00:00:00 See SPARK-23549
ruleTest(rule, EqualTo(date0301, timestamp0301000000),
EqualTo(Cast(date0301, TimestampType), timestamp0301000000))
ruleTest(rule, LessThan(date0301, timestamp0301000001),
LessThan(Cast(date0301, TimestampType), timestamp0301000001))
} else {
// Otherwise both sides are compared as strings.
ruleTest(rule, LessThan(date0301, timestamp0301000000),
LessThan(Cast(date0301, StringType), Cast(timestamp0301000000, StringType)))
ruleTest(rule, LessThan(date0301, timestamp0301000001),
LessThan(Cast(date0301, StringType), Cast(timestamp0301000001, StringType)))
}
}
}
}
// WindowFrameCoercion casts a RangeFrame boundary to the order expression's type
// when a cast exists, leaves it alone when it cannot be cast, and never touches
// special boundaries (CurrentRow / UnboundedFollowing).
test("cast WindowFrame boundaries to the type they operate upon") {
// Can cast frame boundaries to order dataType.
ruleTest(WindowFrameCoercion,
windowSpec(
Seq(UnresolvedAttribute("a")),
Seq(SortOrder(Literal(1L), Ascending)),
SpecifiedWindowFrame(RangeFrame, Literal(3), Literal(2147483648L))),
windowSpec(
Seq(UnresolvedAttribute("a")),
Seq(SortOrder(Literal(1L), Ascending)),
SpecifiedWindowFrame(RangeFrame, Cast(3, LongType), Literal(2147483648L)))
)
// Cannot cast frame boundaries to order dataType.
ruleTest(WindowFrameCoercion,
windowSpec(
Seq(UnresolvedAttribute("a")),
Seq(SortOrder(Literal.default(DateType), Ascending)),
SpecifiedWindowFrame(RangeFrame, Literal(10.0), Literal(2147483648L))),
windowSpec(
Seq(UnresolvedAttribute("a")),
Seq(SortOrder(Literal.default(DateType), Ascending)),
SpecifiedWindowFrame(RangeFrame, Literal(10.0), Literal(2147483648L)))
)
// Should not cast SpecialFrameBoundary.
ruleTest(WindowFrameCoercion,
windowSpec(
Seq(UnresolvedAttribute("a")),
Seq(SortOrder(Literal(1L), Ascending)),
SpecifiedWindowFrame(RangeFrame, CurrentRow, UnboundedFollowing)),
windowSpec(
Seq(UnresolvedAttribute("a")),
Seq(SortOrder(Literal(1L), Ascending)),
SpecifiedWindowFrame(RangeFrame, CurrentRow, UnboundedFollowing))
)
}
}
/**
 * Unevaluable expression fixtures used by the suite to exercise input-type
 * checking: one unary/binary pair accepting any type and one pair requiring
 * NumericType inputs.
 */
object TypeCoercionSuite {
// Unary expression that accepts any input type.
case class AnyTypeUnaryExpression(child: Expression)
extends UnaryExpression with ExpectsInputTypes with Unevaluable {
override def inputTypes: Seq[AbstractDataType] = Seq(AnyDataType)
override def dataType: DataType = NullType
}
// Unary expression that only accepts numeric inputs.
case class NumericTypeUnaryExpression(child: Expression)
extends UnaryExpression with ExpectsInputTypes with Unevaluable {
override def inputTypes: Seq[AbstractDataType] = Seq(NumericType)
override def dataType: DataType = NullType
}
// Binary operator that accepts any input type.
case class AnyTypeBinaryOperator(left: Expression, right: Expression)
extends BinaryOperator with Unevaluable {
override def dataType: DataType = NullType
override def inputType: AbstractDataType = AnyDataType
override def symbol: String = "anytype"
}
// Binary operator that only accepts numeric inputs.
case class NumericTypeBinaryOperator(left: Expression, right: Expression)
extends BinaryOperator with Unevaluable {
override def dataType: DataType = NullType
override def inputType: AbstractDataType = NumericType
override def symbol: String = "numerictype"
}
}
| kiszk/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercionSuite.scala | Scala | apache-2.0 | 74,039 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import org.apache.hadoop.fs.Path
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.types._
object DataSourceUtils {

  /**
   * The key to use for storing partitionBy columns as options.
   */
  val PARTITIONING_COLUMNS_KEY = "__partition_columns"

  /**
   * Utility methods for converting partitionBy columns to options and back.
   */
  private implicit val formats = Serialization.formats(NoTypeHints)

  /** Serializes the given partition column names to a JSON string. */
  def encodePartitioningColumns(columns: Seq[String]): String =
    Serialization.write(columns)

  /** Parses a JSON string produced by `encodePartitioningColumns` back into column names. */
  def decodePartitioningColumns(str: String): Seq[String] =
    Serialization.read[Seq[String]](str)

  /**
   * Verify if the schema is supported in datasource. This verification should be done
   * in a driver side.
   */
  def verifySchema(format: FileFormat, schema: StructType): Unit = {
    for (field <- schema if !format.supportDataType(field.dataType)) {
      throw new AnalysisException(
        s"$format data source does not support ${field.dataType.catalogString} data type.")
    }
  }

  // SPARK-24626: Metadata files and temporary files should not be
  // counted as data files, so that they shouldn't participate in tasks like
  // location size calculation.
  private[sql] def isDataPath(path: Path): Boolean = {
    val name = path.getName
    !name.startsWith("_") && !name.startsWith(".")
  }
}
| aosagie/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceUtils.scala | Scala | apache-2.0 | 2,310 |
package com.codexica.s3crate.filetree.history
import org.specs2.mutable.Specification
import play.api.libs.json.Json
/**
* @author Josh Albrecht (joshalbrecht@gmail.com)
*/
// Round-trip test: writing a CompressionMethod to JSON and parsing it back must
// yield an equal value.
class CompressionMethodSpec extends Specification {
"Serialization" should {
"deserialize as exactly the same value" in {
val method = NoCompression()
Json.parse(Json.stringify(Json.toJson(method))).as[CompressionMethod] must be equalTo method
}
}
}
| joshalbrecht/s3crate | src/test/scala/com/codexica/s3crate/filetree/history/CompressionMethodSpec.scala | Scala | mit | 451 |
package com.twitter.finagle.example.memcache
import com.twitter.app.Flag
import com.twitter.app.App
import com.twitter.concurrent.NamedPoolThreadFactory
import com.twitter.finagle.builder.ClientBuilder
import com.twitter.finagle.memcached
import com.twitter.finagle.memcached.protocol.text.Memcached
import com.twitter.finagle.stats.OstrichStatsReceiver
import com.twitter.finagle.{Service, ServiceFactory}
import com.twitter.io.Buf
import com.twitter.ostrich.admin.{RuntimeEnvironment, AdminHttpService}
import com.twitter.util.{Future, Stopwatch}
import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicLong
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
/**
 * A Service that forwards requests to a service obtained from `factory` and, on
 * any request failure, replaces the cached service with a freshly created one.
 *
 * NOTE(review): the read/replace of `currentService` is not atomic, so concurrent
 * failures may trigger several factory() calls; presumably acceptable for this
 * stress tool — confirm before reusing elsewhere.
 */
class PersistentService[Req, Rep](factory: ServiceFactory[Req, Rep]) extends Service[Req, Rep] {
@volatile private[this] var currentService: Future[Service[Req, Rep]] = factory()
def apply(req: Req) =
currentService flatMap { service =>
service(req) onFailure { _ =>
// Discard the failed service; subsequent requests use the replacement.
currentService = factory()
}
}
}
/**
 * Command-line memcached stress tool: opens `concurrency` persistent clients,
 * each looping `set(key, value)` forever, and prints aggregate QPS every 5s.
 */
object MemcacheStress extends App {
// Command-line flags controlling load shape and instrumentation.
private[this] val config = new {
val concurrency: Flag[Int] = flag("concurrency", 400, "concurrency")
val hosts: Flag[String] = flag("hosts", "localhost:11211", "hosts")
val keysize: Flag[Int] = flag("keysize", 55, "keysize")
val valuesize: Flag[Int] = flag("valuesize", 1, "valuesize")
val nworkers: Flag[Int] = flag("nworkers", -1, "nworkers")
val stats: Flag[Boolean] = flag("stats", true, "stats")
val tracing: Flag[Boolean] = flag("tracing", true, "tracing")
}
// Total number of completed set() calls across all clients.
val count = new AtomicLong
// Issues one set() and re-schedules itself when the call completes (success or
// failure), forming an infinite request loop per client.
def proc(client: memcached.Client, key: String, value: Buf) {
client.set(key, value) ensure {
count.incrementAndGet()
proc(client, key, value)
}
}
def main() {
var builder = ClientBuilder()
.name("mc")
.codec(Memcached())
.hostConnectionLimit(config.concurrency())
.hosts(config.hosts())
// Optional custom Netty worker pool size.
if (config.nworkers() > 0)
builder = builder.channelFactory(
new NioClientSocketChannelFactory(
Executors.newCachedThreadPool(new NamedPoolThreadFactory("memcacheboss")),
Executors.newCachedThreadPool(new NamedPoolThreadFactory("memcacheIO")),
config.nworkers()
)
)
if (config.stats()) builder = builder.reportTo(new OstrichStatsReceiver)
if (config.tracing()) com.twitter.finagle.tracing.Trace.enable()
else com.twitter.finagle.tracing.Trace.disable()
val key = "x" * config.keysize()
val value = Buf.Utf8("y" * config.valuesize())
val runtime = RuntimeEnvironment(this, Array()/*no args for you*/)
val adminService = new AdminHttpService(2000, 100/*backlog*/, runtime)
adminService.start()
println(builder)
val factory = builder.buildFactory()
val elapsed = Stopwatch.start()
// Start one independent request loop per unit of concurrency.
for (_ <- 0 until config.concurrency()) {
val svc = new PersistentService(factory)
val client = memcached.Client(svc)
proc(client, key, value)
}
// Report throughput forever; QPS is averaged over the whole run.
while (true) {
Thread.sleep(5000)
val howlong = elapsed()
val howmuch = count.get()
assert(howmuch > 0)
printf("%d QPS\\n", howmuch / howlong.inSeconds)
}
}
}
| suls/finagle | finagle-example/src/main/scala/com/twitter/finagle/example/memcache/MemcacheStress.scala | Scala | apache-2.0 | 3,242 |
/*
* Copyright 2019 ABSA Group Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package za.co.absa.spline.consumer.rest
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import org.springframework.context.annotation.{Bean, ComponentScan, Configuration}
import org.springframework.format.FormatterRegistry
import org.springframework.web.method.support.HandlerMethodReturnValueHandler
import org.springframework.web.servlet.config.annotation.{EnableWebMvc, WebMvcConfigurer}
import za.co.absa.commons.config.ConfTyped
import za.co.absa.spline.common
import za.co.absa.spline.common.config.DefaultConfigurationStack
import za.co.absa.spline.common.webmvc.jackson.ObjectMapperBeanPostProcessor
import za.co.absa.spline.common.webmvc.{ScalaFutureMethodReturnValueHandler, UnitMethodReturnValueHandler}
import java.util
@EnableWebMvc
@Configuration
@ComponentScan(basePackageClasses = Array(
classOf[common.webmvc.controller._package],
classOf[controller._package]
))
/**
 * Spring WebMVC configuration for the consumer REST API: registers return-value
 * handlers for Unit and Scala Future controller results, configures Jackson for
 * Scala types, and removes the default String -> Array[Object] converter.
 */
class ConsumerRESTConfig extends WebMvcConfigurer {
import scala.concurrent.ExecutionContext.Implicits.global
override def addReturnValueHandlers(returnValueHandlers: util.List[HandlerMethodReturnValueHandler]): Unit = {
returnValueHandlers.add(new UnitMethodReturnValueHandler)
returnValueHandlers.add(new ScalaFutureMethodReturnValueHandler)
}
// Post-processes the Jackson ObjectMapper: Scala module + consumer type resolver.
@Bean def jacksonConfigurer = new ObjectMapperBeanPostProcessor(_
.registerModule(DefaultScalaModule)
.setDefaultTyping(new ConsumerTypeResolver)
)
override def addFormatters(registry: FormatterRegistry): Unit = {
// Prevent Spring from splitting single String params into Object arrays.
registry.removeConvertible(classOf[String], classOf[Array[Object]])
}
}
// Configuration root for consumer REST settings, prefixed with "spline".
object ConsumerRESTConfig extends DefaultConfigurationStack with ConfTyped {
override val rootPrefix: String = "spline"
}
| AbsaOSS/spline | consumer-rest-core/src/main/scala/za/co/absa/spline/consumer/rest/ConsumerRESTConfig.scala | Scala | apache-2.0 | 2,300 |
package org.apache.mesos.chronos.scheduler.jobs
import org.joda.time._
import org.specs2.mutable._
// Verifies that ScheduleStream decrements the ISO-8601 repetition count (Rn) and
// advances the start date on each tail, terminating after R0, and that a schedule
// without a repetition count ("R/") repeats indefinitely.
class ScheduleStreamSpec extends SpecificationWithJUnit {
val fakeCurrentTime = DateTime.parse("2012-01-01T00:00:00Z")
"ScheduleStream" should {
"return a properly clipped schedule" in {
val orgSchedule = "R3/2012-01-01T00:00:00.000Z/P1D"
val stream = new ScheduleStream(orgSchedule, null)
stream.head must_==(orgSchedule, null, "")
stream.tail.get.head must_==("R2/2012-01-02T00:00:00.000Z/P1D", null, "")
stream.tail.get.tail.get.head must_==("R1/2012-01-03T00:00:00.000Z/P1D", null, "")
stream.tail.get.tail.get.tail.get.head must_==("R0/2012-01-04T00:00:00.000Z/P1D", null, "")
// After the last repetition the stream ends.
stream.tail.get.tail.get.tail.get.tail must_== None
}
"return a infinite schedule when no repetition is specified" in {
val orgSchedule = "R/2012-01-01T00:00:00.000Z/P1D"
val stream = new ScheduleStream(orgSchedule, null)
stream.head must_==(orgSchedule, null, "")
stream.tail.get.head must_==("R/2012-01-02T00:00:00.000Z/P1D", null, "")
}
}
}
| pekermert/chronos | src/test/scala/org/apache/mesos/chronos/scheduler/jobs/ScheduleStreamSpec.scala | Scala | apache-2.0 | 1,119 |
// Compiler test fixture: a nested class may expose a singleton type (x.type)
// referring to a private member of its enclosing class.
class Outer1 {
private val x: Int = 1
private class Inner {
def foo: x.type = x // OK
}
}
// Compiler test fixture: a class's nested class may reference a private member
// of the companion object via a singleton type.
object Outer2 {
private val x: Int = 1
}
class Outer2 {
private class Inner {
def foo: Outer2.x.type = Outer2.x // OK
}
}
// Compiler test fixture: a class local to a method may expose a singleton type
// referring to a private member of the enclosing class.
class Outer3 {
private val x: Int = 1
def meth: Unit = {
class Inner {
def foo: x.type = x // OK
}
}
}
| som-snytt/dotty | tests/pos/leaks.scala | Scala | apache-2.0 | 360 |
package jp.co.septeni_original.sbt.dao.generator.model
/** Describes a database table: its name, primary-key descriptors and column descriptors. */
case class TableDesc(tableName: String, primaryDescs: Seq[PrimaryKeyDesc], columnDescs: Seq[ColumnDesc])
| septeni-original/sbt-dao-generator | src/main/scala/jp/co/septeni_original/sbt/dao/generator/model/TableDesc.scala | Scala | mit | 161 |
package com.avsystem.commons
package hocon
import com.avsystem.commons.misc.{AbstractValueEnum, AbstractValueEnumCompanion, EnumCtx}
import com.avsystem.commons.serialization.json.JsonStringOutput
import scala.annotation.tailrec
/**
 * Base class for nodes of the HOCON syntax tree. Every node records the token
 * range it was parsed from; `pos` derives the source position from that range.
 */
sealed abstract class HTree extends Product {
def tokens: HTokenRange
def pos: SourcePos = tokens.pos
// Child nodes are collected reflectively from the case-class fields:
// plain HTree fields, Opt-wrapped children and List-of-children fields.
lazy val children: List[HTree] = productIterator.flatMap {
case child: HTree => Iterator(child)
case optChild: Opt[HTree@unchecked] => optChild.iterator
case children: List[HTree@unchecked] => children.iterator
case _ => Iterator.empty
}.toList
}
object HTree {
// Root of a parsed HOCON document.
final case class HSource(toplevel: HToplevel)(val tokens: HTokenRange) extends HTree
// --- values ---------------------------------------------------------------
sealed abstract class HValue extends HTree
final case class HNull()(val tokens: HTokenRange) extends HValue
final case class HBoolean(value: Boolean)(val tokens: HTokenRange) extends HValue
final case class HNumber(value: BigDecimal)(val tokens: HTokenRange) extends HValue
// A string literal; also usable as an include target (e.g. include "file.conf").
final case class HString(value: String)(val syntax: HStringSyntax, val tokens: HTokenRange)
extends HValue with HRegularIncludeTarget
// A ${path} (or optional ${?path}) substitution.
final case class HSubst(path: HPath)(val optional: Boolean, val tokens: HTokenRange) extends HValue
// Adjacent values concatenated into one (HOCON value concatenation).
final case class HConcat(values: List[HValue])(val tokens: HTokenRange) extends HValue
sealed abstract class HToplevel extends HValue
final case class HArray(elements: List[HValue])(val tokens: HTokenRange) extends HToplevel
final case class HObject(stats: List[HStat])(val tokens: HTokenRange) extends HToplevel
// A dotted path, stored as a linked prefix + final key.
final case class HPath(prefix: Opt[HPath], key: HKey)(val tokens: HTokenRange) extends HTree {
@tailrec private def collectValue(acc: List[String]): List[String] = prefix match {
case Opt(prefix) => prefix.collectValue(key.value :: acc)
case Opt.Empty => key.value :: acc
}
// The path segments in source order, e.g. List("a", "b", "c") for a.b.c.
lazy val value: List[String] = collectValue(Nil)
}
// A single path segment, possibly built from several adjacent string parts.
final case class HKey(parts: List[HString])(val tokens: HTokenRange) extends HTree {
val value: String = parts.iterator.map(_.value).mkString
}
// --- object statements ----------------------------------------------------
sealed abstract class HStat extends HTree
final case class HInclude(target: HIncludeTarget)(val tokens: HTokenRange) extends HStat
// `path = value` (or `path += value` when `append` is set).
final case class HField(path: HPath, value: HValue)(val append: Boolean, val tokens: HTokenRange) extends HStat
sealed trait HIncludeTarget extends HTree
sealed trait HRegularIncludeTarget extends HIncludeTarget
// include required(...)
final case class HRequiredInclude(target: HRegularIncludeTarget)(val tokens: HTokenRange)
extends HIncludeTarget
// include classpath(...)/file(...)/url(...)
final case class HQualifiedInclude(qualifier: HIncludeQualifier, target: HString)(val tokens: HTokenRange)
extends HRegularIncludeTarget
final class HIncludeQualifier(implicit enumCtx: EnumCtx) extends AbstractValueEnum
object HIncludeQualifier extends AbstractValueEnumCompanion[HIncludeQualifier] {
final val Classpath, File, Url: Value = new HIncludeQualifier
}
final class HStringSyntax(implicit enumCtx: EnumCtx) extends AbstractValueEnum
object HStringSyntax extends AbstractValueEnumCompanion[HStringSyntax] {
final val Whitespace, Unquoted, Quoted, Multiline: Value = new HStringSyntax
}
// Renders a tree as an indented debug string with node attributes and, where a
// node's position differs from its single child, source positions.
def repr(tree: HTree): String = {
val sb = new JStringBuilder
def reprIn(tree: HTree, indent: Int, withPos: Boolean): Unit = {
// Extra per-node attributes shown in square brackets after the node name.
val attrs = tree match {
case s: HString => s"[${s.syntax}]"
case s: HSubst if s.optional => s"[optional]"
case qi: HQualifiedInclude => s"[${qi.qualifier.name.toLowerCase}]"
case f: HField if f.append => s"[append]"
case _ => ""
}
if (withPos) {
sb.append(s"<${tree.pos.startLine + 1}:${tree.pos.startColumn + 1}:")
}
val prefix = s"${tree.productPrefix}$attrs"
sb.append(prefix).append("(")
tree match {
case HTree.HBoolean(value) => sb.append(value)
case HTree.HNumber(value) => sb.append(value)
case HTree.HString(value) => sb.append(JsonStringOutput.write(value))
case _ => tree.children match {
case Nil =>
case single :: Nil =>
// Inline a single child; print its position only if it differs.
reprIn(single, indent, single.pos != tree.pos)
case multiple =>
multiple.foreach { child =>
sb.append("\\n").append(" " * (indent + 1))
reprIn(child, indent + 1, withPos = true)
}
sb.append("\\n").append(" " * indent)
}
}
sb.append(")")
if (withPos) {
sb.append(s":${tree.pos.endLine + 1}:${tree.pos.endColumn + 1}>")
}
}
reprIn(tree, 0, withPos = true)
sb.toString
}
}
| AVSystem/scala-commons | commons-hocon/src/main/scala/com/avsystem/commons/hocon/HTree.scala | Scala | mit | 4,588 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.libs.openid
// Abstraction over a URL-like value exposing its combined host and path.
trait RichUrl[A] {
def hostAndPath: String
}
| wsargent/playframework | framework/src/play-openid/src/test/scala/play/api/libs/openid/RichUrl.scala | Scala | apache-2.0 | 154 |
/*
* Part of GDL book_api.
* Copyright (C) 2017 Global Digital Library
*
* See LICENSE
*/
package io.digitallibrary.bookapi.controller
import io.digitallibrary.bookapi.{TestEnvironment, UnitSuite}
import org.scalatra.test.scalatest.ScalatraFunSuite
// Smoke test: the health endpoint mounted at "/" must answer 200 OK.
class HealthControllerTest extends UnitSuite with TestEnvironment with ScalatraFunSuite {
lazy val controller = new HealthController
addServlet(controller, "/")
test("That /health returns 200 ok") {
get("/") {
status should equal (200)
}
}
}
| GlobalDigitalLibraryio/book-api | src/test/scala/io/digitallibrary/bookapi/controller/HealthControllerTest.scala | Scala | apache-2.0 | 524 |
/*
* Copyright (c) 2014.
*
* This file is part of picture-classifier.
*
* picture-classifier is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* picture-classifier is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Foobar. If not, see <http://www.gnu.org/licenses/>.
*/
package edu.osu.cse.ai.photoclassifier
import org.apache.log4j.Logger
import java.nio.file._
import java.nio.charset.StandardCharsets
import scala.collection.convert.Wrappers.JListWrapper
import java.io.File
/**
* Created by fathi on 4/17/14.
*/
object PhotoMetadataExtractor extends Logging with Serializable{
// Log4j logger for this object, fulfilling the Logging trait's contract.
override def logger: Logger = Logger.getLogger(PhotoMetadataExtractor.getClass)
/**
 * Lists the metadata text files directly under `parentDir`.
 *
 * `File.listFiles` returns null when the directory does not exist or cannot be
 * read; that case is treated as "no files" instead of throwing an NPE.
 * NOTE: the original suffix check is preserved: any name ending in "txt"
 * matches (not only ".txt").
 */
def traverseAllFiles(parentDir: File): Array[File] = {
  val entries = Option(parentDir.listFiles).getOrElse(Array.empty[File])
  entries.filter(_.getName.endsWith("txt"))
}
/** Parses every metadata file under `parentDir`; unparseable files yield None. */
def getAllMetadata(parentDir: File): Array[Option[PhotoRawMetadata]] = {
  traverseAllFiles(parentDir).map(getMetadata)
}
/** Parses all metadata files under `parentDir` and converts the valid ones. */
def getAllValidMetadata(parentDir: File): Array[PhotoMetadata] = {
  // Keep only the entries that parsed successfully.
  val rawEntries = getAllMetadata(parentDir).collect { case Some(raw) => raw }
  logger.debug("#All raw metadata files=%d".format(rawEntries.length))
  val mds = rawEntries.map(PhotoMetadata.formRawMetadata)
  logger.debug("#All nice metadata files=%d".format(mds.length))
  mds
}
def getMetadata(file: File): Option[PhotoRawMetadata] = {
logger.debug("Reading file " + file)
logger.debug("Type of this scala object is %s".format(PhotoMetadataExtractor.getClass))
val photoId = file.getName
logger.debug(" Match: photoID is %s".format(photoId))
val path = FileSystems.getDefault.provider().getPath(file.toURI)
val lines: JListWrapper[String] = JListWrapper(Files.readAllLines(path, StandardCharsets.UTF_8))
val groupId = extractByKey("GroupKeyword", lines)
val url = extractByKey("URL", lines)
val iso = {
val rawValue = extractByKey("ISO", lines)
logger.debug("ISO WAS DETERMINED TO BE %s".format(rawValue))
if (rawValue.getOrElse("A").forall(Character.isDigit(_))) {
logger.debug("ISO WAS DETERMINED TO BE all digits %s".format(rawValue))
if (rawValue.getOrElse("0") != "0") {
logger.debug("ISO WAS DETERMINED TO BE non zero %s".format(rawValue))
rawValue
} else {
Option.empty[String]
}
} else {
Option.empty[String]
}
}
val focalLength = {
val rawValue = extractByKey("FocalLength", lines)
if (rawValue.getOrElse("0.0 mm").startsWith("0.0 mm")) Option.empty[String] else rawValue
}
val exposureTime = extractByKey("ExposureTime", lines)
val aperture = {
val rawValue = extractByKey("MaxApertureValue", lines)
if (rawValue.getOrElse("inf") == "inf") Option.empty[String] else rawValue
}
val flash = extractByKey("Flash", lines)
val dateTimeOriginal = extractByKey("DateTimeOriginal", lines)
if (groupId.isDefined && url.isDefined && iso.isDefined && focalLength.isDefined && exposureTime.isDefined && aperture.isDefined && flash.isDefined && dateTimeOriginal.isDefined) {
logger.debug("All features have valid values.")
Option(
new PhotoRawMetadata(photoId, groupId.get, url.get, iso.get, focalLength.get, exposureTime.get, aperture.get, flash.get, dateTimeOriginal.get))
} else {
logger.debug("At least one of the features is not valid. Check %s".format(
(photoId, groupId, url, iso, focalLength, exposureTime, aperture, flash, dateTimeOriginal).toString()))
Option.empty[PhotoRawMetadata]
}
}
def extractByKey(key: String, lines: JListWrapper[String], separator: String = "="): Option[String] = {
val extendedKey = key + separator
val targetLines = lines.filter(_.startsWith(extendedKey))
if (targetLines.isEmpty || targetLines.length > 1) {
logger.debug(" NO NO NO MATCH: %s is %s".format(key, ""))
Option.empty[String]
}
else {
val value = targetLines(0).substring(extendedKey.length)
logger.debug(" MATCH: %s is %s".format(key, value))
Option(value)
}
}
def getMetadataStats(photoDbDir: File, validGroups: Seq[String]): Seq[(String, Int)] = {
val metadataFiles = PhotoMetadataExtractor.getAllMetadata(photoDbDir)
val validMetadataFiles = metadataFiles.filter(md => md.isDefined).map(_.get)
validGroups.map(group => (group, validMetadataFiles.filter(_.groupId == group).length))
}
}
/**
 * Untyped (all-string) photo metadata record, produced directly from the
 * "key=value" metadata files before any numeric conversion happens.
 */
class PhotoRawMetadata(val photoId: String,
                       val groupId: String,
                       val url: String,
                       val iso: String,
                       val focalLength: String,
                       val exposureTime: String,
                       val aperture: String,
                       val flash: String,
                       val dateTimeOriginal: String) extends Serializable {

  /** Human-readable dump of every field, mainly for debug logging. */
  override def toString: String =
    s"{photoId=$photoId, groupId=$groupId, url=$url, iso=$iso, focalLength=$focalLength, " +
      s"exposureTime=$exposureTime, aperture=$aperture, flash=$flash, dateTimeOriginal=$dateTimeOriginal}"
}
/**
 * Typed photo metadata record: the numeric features derived from a
 * [[PhotoRawMetadata]] (see `PhotoMetadata.formRawMetadata`).
 */
class PhotoMetadata(val photoId: String,
                    val groupId: String,
                    val url: String,
                    val iso: Double,
                    val focalLength: Double,
                    val exposureTime: Double,
                    val aperture: Double,
                    val flash: Double,
                    val dateTimeOriginal: Double) extends Serializable {

  /** Human-readable dump of every field, mainly for debug logging. */
  override def toString: String =
    s"{photoId=$photoId, groupId=$groupId, url=$url, iso=$iso, focalLength=$focalLength, " +
      s"exposureTime=$exposureTime, aperture=$aperture, flash=$flash, dateTimeOriginal=$dateTimeOriginal}"
}
/** Factory converting raw string metadata into typed numeric features. */
object PhotoMetadata extends Logging with Serializable {

  override def logger: Logger = Logger.getLogger(PhotoMetadata.getClass)

  /**
   * Converts a [[PhotoRawMetadata]] into a typed [[PhotoMetadata]].
   *
   * @throws IllegalArgumentException if the flash description is not recognised
   * @throws NumberFormatException    if a numeric field cannot be parsed
   */
  def formRawMetadata(rawMetadata: PhotoRawMetadata): PhotoMetadata = {
    logger.debug("Trying to find values for %s".format(rawMetadata))
    val photoId = rawMetadata.photoId
    val groupId = rawMetadata.groupId
    val url = rawMetadata.url
    // The extractor already guaranteed ISO is all digits.
    val iso: Int = rawMetadata.iso.toInt
    // Focal length strings look like "50.0 mm": keep the numeric part.
    val focalLength: Double = rawMetadata.focalLength.split(" ")(0).toDouble
    // Exposure times are either fractions ("1/60", numerator assumed to be 1)
    // or plain decimal seconds.
    val exposureTime: Double = if (rawMetadata.exposureTime.contains("/")) {
      1.0 / rawMetadata.exposureTime.split("/")(1).toDouble
    } else {
      rawMetadata.exposureTime.toDouble
    }
    val aperture = rawMetadata.aperture.toDouble
    // Flash becomes a binary feature: 0.0 for "off"/"no", 1.0 for "fired"/"on".
    // The "off"/"no" test runs first so strings like "On, fired" vs "Off, did
    // not fire" keep the original classification order.
    val flashDescription = rawMetadata.flash.toLowerCase
    val flash =
      if (flashDescription.contains("off") || flashDescription.contains("no")) {
        0.0
      } else if (flashDescription.contains("fired") || flashDescription.contains("on")) {
        1.0
      } else {
        throw new IllegalArgumentException("Value %s is not for flash settings".format(rawMetadata.flash))
      }
    // EXIF timestamps look like "YYYY:MM:DD HH:MM:SS"; splitting on whitespace
    // or ':' makes token 3 the hour of day. The previous pattern "[\\\\s:]"
    // escaped the backslash itself (regex class = backslash, 's', ':'), so it
    // never split on whitespace and token 3 was the minutes instead.
    val dateTimeOriginal = rawMetadata.dateTimeOriginal.split("[\\s:]")(3).toDouble
    new PhotoMetadata(photoId, groupId, url, iso, focalLength, exposureTime, aperture, flash, dateTimeOriginal)
  }
}
package org.powlab.jeye.decode.pattern.stream
import org.powlab.jeye.core.Opcodes.OPCODE_DUP
import org.powlab.jeye.core.Opcodes.OPCODE_DUP2
import org.powlab.jeye.decode.graph.OpcodeDetails._
import org.powlab.jeye.decode.graph.OpcodeNode
import org.powlab.jeye.decode.graph.OpcodeTree
import org.powlab.jeye.decode.processor.stack.StackInformator._
import org.powlab.jeye.decode.processor.store.StoreInformator.isBaseStoreNode
import org.powlab.jeye.decode.processor.reference.ReferenceInformator.{isPutFieldNode, isPutStaticNode}
/**
 * Stream pattern recognising "dup(s) + store" opcode groups.
 *
 * A DUP/DUP2 before a local store, a DUP_X1 before a putfield, or a DUP
 * before a putstatic tells us the stored expression must also be left on
 * the operand stack (e.g. chained assignments). The resolved group is the
 * store node itself.
 */
class StoreDupsStreamPattern extends StreamPattern {

  def details(resolvedNode: OpcodeNode, tree: OpcodeTree): OpcodeDetail = new OpcodeDetail(DETAIL_STORE_DUPS)

  /** Returns the matched store/putfield/putstatic node, or null when no dup+store shape applies. */
  def resolve(node: OpcodeNode, tree: OpcodeTree): OpcodeNode = {
    var pNode = processLocal(node, tree)
    if (pNode == null) {
      pNode = processField(node, tree)
      if (pNode == null) {
        pNode = processStaticField(node, tree)
      }
    }
    pNode
  }

  // Local variable store preceded by exactly one DUP or DUP2.
  private def processLocal(storeNode: OpcodeNode, tree: OpcodeTree): OpcodeNode = {
    if (!isBaseStoreNode(storeNode) || tree.previewCount(storeNode) != 1) {
      return null
    }
    val dupNode = tree.preview(storeNode)
    if (!(isDupNode(dupNode) || isDup2Node(dupNode))) {
      return null
    }
    storeNode
  }

  // Instance field write preceded by exactly one DUP_X1.
  private def processField(putFieldNode: OpcodeNode, tree: OpcodeTree): OpcodeNode = {
    if (!isPutFieldNode(putFieldNode) || tree.previewCount(putFieldNode) != 1) {
      return null
    }
    val dupNode = tree.preview(putFieldNode)
    if (!isDupX1Node(dupNode)) {
      return null
    }
    putFieldNode
  }

  // Static field write preceded by exactly one DUP.
  private def processStaticField(putStaticFieldNode: OpcodeNode, tree: OpcodeTree): OpcodeNode = {
    if (!isPutStaticNode(putStaticFieldNode) || tree.previewCount(putStaticFieldNode) != 1) {
      return null
    }
    val dupNode = tree.preview(putStaticFieldNode)
    if (!isDupNode(dupNode)) {
      return null
    }
    putStaticFieldNode
  }
}
| powlab/jeye | src/main/scala/org/powlab/jeye/decode/pattern/stream/StoreDupsStreamPattern.scala | Scala | apache-2.0 | 2,424 |
package com.hilverd.simcallstack.unit
import org.scalatest.{Matchers, FunSpec}
import com.hilverd.simcallstack.CeilingOfHalf
/**
 * Checks that the naive recursive implementation `f` blows the stack on a
 * large input while `g` still computes ceil(n / 2) correctly.
 */
class CeilingOfHalfTest extends FunSpec with Matchers {
  it("should count") {
    val input = 50000
    // ceil(input / 2) without branching on parity.
    val ceilingOfHalf = (input + 1) / 2
    intercept[java.lang.StackOverflowError] {
      CeilingOfHalf.f(input)
    }
    CeilingOfHalf.g(input) should be(ceilingOfHalf)
  }
}
| hilverd/simulated-call-stack | src/test/scala/com/hilverd/simcallstack/unit/CeilingOfHalfTest.scala | Scala | mit | 420 |
/*
* Copyright 2015 Databricks Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark.sql.perf
import org.apache.spark.sql.Encoders
import org.apache.spark.sql.expressions.Aggregator
/**
 * Typed Dataset aggregator computing the average of Long values through a
 * mutable (sum, count) buffer.
 */
object TypedAverage extends Aggregator[Long, SumAndCount, Double] {

  // Empty buffer: nothing summed, nothing counted.
  override def zero: SumAndCount = SumAndCount(0L, 0)

  // Fold one input value into the buffer (mutated in place).
  override def reduce(b: SumAndCount, a: Long): SumAndCount = {
    b.sum += a
    b.count += 1
    b
  }

  // Combine two partial buffers (first one mutated in place).
  override def merge(b1: SumAndCount, b2: SumAndCount): SumAndCount = {
    b1.sum += b2.sum
    b1.count += b2.count
    b1
  }

  // sum.toDouble forces floating-point division, so an empty buffer yields NaN.
  override def finish(reduction: SumAndCount): Double = reduction.sum.toDouble / reduction.count

  override def bufferEncoder = Encoders.product
  override def outputEncoder = Encoders.scalaDouble
}
// Single-column record used to lift Long ranges into typed Datasets.
case class Data(id: Long)
// Mutable aggregation buffer (running sum and element count) for TypedAverage.
case class SumAndCount(var sum: Long, var count: Int)
/**
 * Benchmarks comparing the same workloads expressed as Datasets (DS),
 * DataFrames (DF) and plain RDDs: range materialisation, chained filters,
 * chained maps and computing an average.
 */
class DatasetPerformance extends Benchmark {

  import sqlContext.implicits._

  val numLongs = 100000000
  val ds = sqlContext.range(1, numLongs)
  val rdd = sparkContext.range(1, numLongs)

  // The average benchmark collects its result, so it uses a smaller input.
  val smallNumLongs = 1000000
  val smallds = sqlContext.range(1, smallNumLongs).as[Long]
  val smallrdd = sparkContext.range(1, smallNumLongs)

  def allBenchmarks = range ++ backToBackFilters ++ backToBackMaps ++ computeAverage

  val range = Seq(
    new Query(
      "DS: range",
      ds.as[Data].toDF(),
      executionMode = ExecutionMode.ForeachResults),
    new Query(
      "DF: range",
      ds.toDF(),
      executionMode = ExecutionMode.ForeachResults),
    RDDCount(
      "RDD: range",
      rdd.map(Data(_)))
  )

  val backToBackFilters = Seq(
    new Query(
      "DS: back-to-back filters",
      ds.as[Data]
        .filter(_.id % 100 != 0)
        .filter(_.id % 101 != 0)
        .filter(_.id % 102 != 0)
        .filter(_.id % 103 != 0).toDF()),
    new Query(
      "DF: back-to-back filters",
      ds.toDF()
        .filter("id % 100 != 0")
        .filter("id % 101 != 0")
        .filter("id % 102 != 0")
        .filter("id % 103 != 0")),
    RDDCount(
      "RDD: back-to-back filters",
      rdd.map(Data(_))
        .filter(_.id % 100 != 0)
        .filter(_.id % 101 != 0)
        .filter(_.id % 102 != 0)
        .filter(_.id % 103 != 0))
  )

  val backToBackMaps = Seq(
    new Query(
      "DS: back-to-back maps",
      ds.as[Data]
        .map(d => Data(d.id + 1L))
        .map(d => Data(d.id + 1L))
        .map(d => Data(d.id + 1L))
        .map(d => Data(d.id + 1L)).toDF()),
    new Query(
      "DF: back-to-back maps",
      ds.toDF()
        .select($"id" + 1 as 'id)
        .select($"id" + 1 as 'id)
        .select($"id" + 1 as 'id)
        .select($"id" + 1 as 'id)),
    RDDCount(
      "RDD: back-to-back maps",
      // Data(_) for consistency with the other RDD benchmarks above.
      rdd.map(Data(_))
        .map(d => Data(d.id + 1L))
        .map(d => Data(d.id + 1L))
        .map(d => Data(d.id + 1L))
        .map(d => Data(d.id + 1L)))
  )

  val computeAverage = Seq(
    new Query(
      "DS: average",
      smallds.select(TypedAverage.toColumn).toDF(),
      executionMode = ExecutionMode.CollectResults),
    new Query(
      "DF: average",
      smallds.toDF().selectExpr("avg(id)"),
      executionMode = ExecutionMode.CollectResults),
    new SparkPerfExecution(
      "RDD: average",
      Map.empty,
      // () => () is the no-op thunk; the previous "() => Unit" returned the
      // Unit companion object and only compiled via value discarding.
      prepare = () => (),
      run = () => {
        val sumAndCount =
          smallrdd.map(i => (i, 1)).reduce((a, b) => (a._1 + b._1, a._2 + b._2))
        sumAndCount._1.toDouble / sumAndCount._2
      })
  )
}
package com.danielasfregola.twitter4s.http.clients.rest.statuses
import akka.http.scaladsl.model.{HttpEntity, HttpMethods}
import com.danielasfregola.twitter4s.entities._
import com.danielasfregola.twitter4s.entities.enums.TweetMode
import com.danielasfregola.twitter4s.helpers.ClientSpec
/**
 * Specification for the statuses (tweets) REST client. Each example asserts
 * the HTTP method, endpoint and encoded query string or form body that a
 * client call produces, then replays a canned JSON fixture and checks the
 * decoded result (including rate-limit data for the GET endpoints).
 */
class TwitterStatusClientSpec extends ClientSpec {

  // Fresh client + stubbed transport per example.
  class TwitterStatusClientSpecContext extends RestClientSpecContext with TwitterStatusClient

  "Twitter Status Client" should {

    "perform a mentions timeline request" in new TwitterStatusClientSpecContext {
      val result: RatedData[Seq[Tweet]] = when(mentionsTimeline(count = 10))
        .expectRequest { request =>
          request.method === HttpMethods.GET
          request.uri.endpoint === "https://api.twitter.com/1.1/statuses/mentions_timeline.json"
          request.uri.rawQueryString === Some(
            "contributor_details=false&count=10&include_entities=true&trim_user=false")
        }
        .respondWithRated("/twitter/rest/statuses/mentions_timeline.json")
        .await
      result.rate_limit === rateLimit
      result.data === loadJsonAs[Seq[Tweet]]("/fixtures/rest/statuses/mentions_timeline.json")
    }

    "perform a user timeline request by screen name" in new TwitterStatusClientSpecContext {
      val result: RatedData[Seq[Tweet]] = when(userTimelineForUser(screen_name = "DanielaSfregola", count = 10))
        .expectRequest { request =>
          request.method === HttpMethods.GET
          request.uri.endpoint === "https://api.twitter.com/1.1/statuses/user_timeline.json"
          request.uri.rawQueryString === Some(
            "contributor_details=false&count=10&exclude_replies=false&include_rts=true&screen_name=DanielaSfregola&trim_user=false")
        }
        .respondWithRated("/twitter/rest/statuses/user_timeline.json")
        .await
      result.rate_limit === rateLimit
      result.data === loadJsonAs[Seq[Tweet]]("/fixtures/rest/statuses/user_timeline.json")
    }

    "perform a user timeline request by user id" in new TwitterStatusClientSpecContext {
      val result: RatedData[Seq[Tweet]] = when(userTimelineForUserId(user_id = 123456L, count = 10))
        .expectRequest { request =>
          request.method === HttpMethods.GET
          request.uri.endpoint === "https://api.twitter.com/1.1/statuses/user_timeline.json"
          request.uri.rawQueryString === Some(
            "contributor_details=false&count=10&exclude_replies=false&include_rts=true&trim_user=false&user_id=123456")
        }
        .respondWithRated("/twitter/rest/statuses/user_timeline.json")
        .await
      result.rate_limit === rateLimit
      result.data === loadJsonAs[Seq[Tweet]]("/fixtures/rest/statuses/user_timeline.json")
    }

    "perform a home timeline request" in new TwitterStatusClientSpecContext {
      val result: RatedData[Seq[Tweet]] = when(homeTimeline(count = 10))
        .expectRequest { request =>
          request.method === HttpMethods.GET
          request.uri.endpoint === "https://api.twitter.com/1.1/statuses/home_timeline.json"
          request.uri.rawQueryString === Some(
            "contributor_details=false&count=10&exclude_replies=false&include_entities=true&trim_user=false")
        }
        .respondWithRated("/twitter/rest/statuses/home_timeline.json")
        .await
      result.rate_limit === rateLimit
      result.data === loadJsonAs[Seq[Tweet]]("/fixtures/rest/statuses/home_timeline.json")
    }

    "perform a retweets of me request" in new TwitterStatusClientSpecContext {
      val result: RatedData[Seq[Tweet]] = when(retweetsOfMe(count = 10))
        .expectRequest { request =>
          request.method === HttpMethods.GET
          request.uri.endpoint === "https://api.twitter.com/1.1/statuses/retweets_of_me.json"
          request.uri.rawQueryString === Some(
            "contributor_details=false&count=10&exclude_replies=false&include_entities=true&trim_user=false")
        }
        .respondWithRated("/twitter/rest/statuses/retweets_of_me.json")
        .await
      result.rate_limit === rateLimit
      result.data === loadJsonAs[Seq[Tweet]]("/fixtures/rest/statuses/retweets_of_me.json")
    }

    "perform a retweets request" in new TwitterStatusClientSpecContext {
      val id = 648866645855879168L
      val result: RatedData[Seq[Tweet]] = when(retweets(id, count = 10))
        .expectRequest { request =>
          request.method === HttpMethods.GET
          request.uri.endpoint === s"https://api.twitter.com/1.1/statuses/retweets/$id.json"
          request.uri.rawQueryString === Some("count=10&trim_user=false")
        }
        .respondWithRated("/twitter/rest/statuses/retweets.json")
        .await
      result.rate_limit === rateLimit
      result.data === loadJsonAs[Seq[Tweet]]("/fixtures/rest/statuses/retweets.json")
    }

    "perform a show tweet request" in new TwitterStatusClientSpecContext {
      val result: RatedData[Tweet] = when(getTweet(648866645855879168L))
        .expectRequest { request =>
          request.method === HttpMethods.GET
          request.uri.endpoint === "https://api.twitter.com/1.1/statuses/show.json"
          request.uri.rawQueryString === Some(
            "id=648866645855879168&include_entities=true&include_my_retweet=false&trim_user=false")
        }
        .respondWithRated("/twitter/rest/statuses/show.json")
        .await
      result.rate_limit === rateLimit
      result.data === loadJsonAs[Tweet]("/fixtures/rest/statuses/show.json")
    }

    "send a status update" in new TwitterStatusClientSpecContext {
      val result: Tweet = when(createTweet("This is a test"))
        .expectRequest { request =>
          request.method === HttpMethods.POST
          request.uri.endpoint === "https://api.twitter.com/1.1/statuses/update.json"
          request.entity === HttpEntity(
            `application/x-www-form-urlencoded`,
            "display_coordinates=false&possibly_sensitive=false&status=This%20is%20a%20test&trim_user=false")
        }
        .respondWith("/twitter/rest/statuses/update.json")
        .await
      result === loadJsonAs[Tweet]("/fixtures/rest/statuses/update.json")
    }

    "send a status update with some media" in new TwitterStatusClientSpecContext {
      val result: Tweet = when(createTweet("This is a test", media_ids = Seq(1L, 2L)))
        .expectRequest { request =>
          request.method === HttpMethods.POST
          request.uri.endpoint === "https://api.twitter.com/1.1/statuses/update.json"
          request.entity === HttpEntity(
            `application/x-www-form-urlencoded`,
            "display_coordinates=false&media_ids=1%2C2&possibly_sensitive=false&status=This%20is%20a%20test&trim_user=false")
        }
        .respondWith("/twitter/rest/statuses/update.json")
        .await
      result === loadJsonAs[Tweet]("/fixtures/rest/statuses/update.json")
    }

    "send direct message as tweet" in new TwitterStatusClientSpecContext {
      val result: Tweet = when(createDirectMessageAsTweet("This is a test for a direct message", "DanielaSfregola"))
        .expectRequest { request =>
          request.method === HttpMethods.POST
          request.uri.endpoint === "https://api.twitter.com/1.1/statuses/update.json"
          request.entity === HttpEntity(
            `application/x-www-form-urlencoded`,
            "display_coordinates=false&possibly_sensitive=false&status=D%20DanielaSfregola%20This%20is%20a%20test%20for%20a%20direct%20message&trim_user=false"
          )
        }
        .respondWith("/twitter/rest/statuses/direct_message.json")
        .await
      result === loadJsonAs[Tweet]("/fixtures/rest/statuses/direct_message.json")
    }

    "delete an existing tweet" in new TwitterStatusClientSpecContext {
      val id = 648866645855879168L
      val result: Tweet = when(deleteTweet(id))
        .expectRequest { request =>
          request.method === HttpMethods.POST
          request.uri.endpoint === s"https://api.twitter.com/1.1/statuses/destroy/$id.json"
          request.uri.rawQueryString === Some("trim_user=false")
        }
        .respondWith("/twitter/rest/statuses/destroy.json")
        .await
      result === loadJsonAs[Tweet]("/fixtures/rest/statuses/destroy.json")
    }

    "retweet a tweet" in new TwitterStatusClientSpecContext {
      val id = 648866645855879168L
      val result: Tweet = when(retweet(id))
        .expectRequest { request =>
          request.method === HttpMethods.POST
          request.uri.endpoint === s"https://api.twitter.com/1.1/statuses/retweet/$id.json"
          request.uri.rawQueryString === Some("trim_user=false")
        }
        .respondWith("/twitter/rest/statuses/retweet.json")
        .await
      result === loadJsonAs[Tweet]("/fixtures/rest/statuses/retweet.json")
    }

    "get a tweet by id in oembed format " in new TwitterStatusClientSpecContext {
      val result: RatedData[OEmbedTweet] = when(oembedTweetById(648866645855879168L))
        .expectRequest { request =>
          request.method === HttpMethods.GET
          request.uri.endpoint === "https://api.twitter.com/1.1/statuses/oembed.json"
          request.uri.rawQueryString === Some(
            "align=none&hide_media=false&hide_thread=false&hide_tweet=false&id=648866645855879168&lang=en&omit_script=false")
        }
        .respondWithRated("/twitter/rest/statuses/oembed.json")
        .await
      result.rate_limit === rateLimit
      result.data === loadJsonAs[OEmbedTweet]("/fixtures/rest/statuses/oembed.json")
    }

    "get a tweet by url in oembed format " in new TwitterStatusClientSpecContext {
      val url = s"https://twitter.com/Interior/status/648866645855879168"
      val result: RatedData[OEmbedTweet] = when(oembedTweetByUrl(url))
        .expectRequest { request =>
          request.method === HttpMethods.GET
          request.uri.endpoint === "https://api.twitter.com/1.1/statuses/oembed.json"
          request.uri.rawQueryString === Some(
            "align=none&hide_media=false&hide_thread=false&hide_tweet=false&lang=en&omit_script=false&url=https%3A%2F%2Ftwitter.com%2FInterior%2Fstatus%2F648866645855879168")
        }
        .respondWithRated("/twitter/rest/statuses/oembed.json")
        .await
      result.rate_limit === rateLimit
      result.data === loadJsonAs[OEmbedTweet]("/fixtures/rest/statuses/oembed.json")
    }

    "get retweeters ids" in new TwitterStatusClientSpecContext {
      val result: RatedData[UserIds] = when(retweeterIds(327473909412814850L))
        .expectRequest { request =>
          request.method === HttpMethods.GET
          request.uri.endpoint === "https://api.twitter.com/1.1/statuses/retweeters/ids.json"
          request.uri.rawQueryString === Some("count=100&cursor=-1&id=327473909412814850&stringify_ids=false")
        }
        .respondWithRated("/twitter/rest/statuses/retweeters_ids.json")
        .await
      result.rate_limit === rateLimit
      result.data === loadJsonAs[UserIds]("/fixtures/rest/statuses/retweeters_ids.json")
    }

    "get retweeters ids stringified" in new TwitterStatusClientSpecContext {
      val result: RatedData[UserStringifiedIds] = when(retweeterStringifiedIds(327473909412814850L))
        .expectRequest { request =>
          request.method === HttpMethods.GET
          request.uri.endpoint === "https://api.twitter.com/1.1/statuses/retweeters/ids.json"
          request.uri.rawQueryString === Some("count=100&cursor=-1&id=327473909412814850&stringify_ids=true")
        }
        .respondWithRated("/twitter/rest/statuses/retweeters_ids_stringified.json")
        .await
      result.rate_limit === rateLimit
      result.data === loadJsonAs[UserStringifiedIds]("/fixtures/rest/statuses/retweeters_ids_stringified.json")
    }

    "perform a lookup" in new TwitterStatusClientSpecContext {
      val result: RatedData[Seq[Tweet]] = when(tweetLookup(327473909412814850L, 327473909412814851L))
        .expectRequest { request =>
          request.method === HttpMethods.GET
          request.uri.endpoint === "https://api.twitter.com/1.1/statuses/lookup.json"
          request.uri.rawQueryString === Some(
            "id=327473909412814850%2C327473909412814851&include_entities=true&map=false&trim_user=false")
        }
        .respondWithRated("/twitter/rest/statuses/lookup.json")
        .await
      result.rate_limit === rateLimit
      result.data === loadJsonAs[Seq[Tweet]]("/fixtures/rest/statuses/lookup.json")
    }

    "perform a lookup extended" in new TwitterStatusClientSpecContext {
      val result: RatedData[Seq[Tweet]] = when(
        tweetLookup(Seq(963141440695078912L, 956111334898270209L), tweet_mode = TweetMode.Extended)
      ).expectRequest { request =>
          request.method === HttpMethods.GET
          request.uri.endpoint === "https://api.twitter.com/1.1/statuses/lookup.json"
          request.uri.queryString() === Some(
            "id=963141440695078912,956111334898270209&include_entities=true&map=false&trim_user=false&tweet_mode=extended")
        }
        .respondWithRated("/twitter/rest/statuses/lookup_extended.json")
        .await
      result.rate_limit === rateLimit
      result.data === loadJsonAs[Seq[Tweet]]("/fixtures/rest/statuses/lookup_extended.json")
    }

    "reject request if no ids have been provided for the lookup" in new TwitterStatusClientSpecContext {
      tweetLookup() must throwA[IllegalArgumentException](
        "requirement failed: please, provide at least one status id to lookup")
    }

    "perform a mapped lookup" in new TwitterStatusClientSpecContext {
      val result: RatedData[LookupMapped] = when(tweetLookupMapped(327473909412814850L, 327473909412814851L))
        .expectRequest { request =>
          request.method === HttpMethods.GET
          request.uri.endpoint === "https://api.twitter.com/1.1/statuses/lookup.json"
          request.uri.rawQueryString === Some(
            "id=327473909412814850%2C327473909412814851&include_entities=true&map=true&trim_user=false")
        }
        .respondWithRated("/twitter/rest/statuses/lookup_mapped.json")
        .await
      result.rate_limit === rateLimit
      result.data === loadJsonAs[LookupMapped]("/fixtures/rest/statuses/lookup_mapped.json")
    }

    "perform a mapped lookup extended" in new TwitterStatusClientSpecContext {
      val result: RatedData[LookupMapped] = when(
        tweetLookupMapped(Seq(963141440695078912L, 956111334898270209L), tweet_mode = TweetMode.Extended)
      ).expectRequest { request =>
          request.method === HttpMethods.GET
          request.uri.endpoint === "https://api.twitter.com/1.1/statuses/lookup.json"
          request.uri.queryString() === Some(
            "id=963141440695078912,956111334898270209&include_entities=true&map=true&trim_user=false&tweet_mode=extended")
        }
        .respondWithRated("/twitter/rest/statuses/lookup_mapped_extended.json")
        .await
      result.rate_limit === rateLimit
      result.data === loadJsonAs[LookupMapped]("/fixtures/rest/statuses/lookup_mapped_extended.json")
    }
  }
}
| DanielaSfregola/twitter4s | src/test/scala/com/danielasfregola/twitter4s/http/clients/rest/statuses/TwitterStatusClientSpec.scala | Scala | apache-2.0 | 15,043 |
// Copyright (C) 2015 Sebastian Bøe, Joakim Andersson
// License: BSD 2-Clause (see LICENSE for details)
package TurboRav
import Chisel._
// Purely combinatorial Hazard Detection Unit (HDU)
// The HDU collects information from the different stages that could
// lead to a bubble and then distributes stall signals
// accordingly. When stalling, a pipeline stage should not modify it's
// pipeline register, and it should send a bubble to the next stage.
// In addition to stall signals the HDU generates flush signals. When
// a pipeline stage is flushed it should clear it's pipeline register,
// but not send a bubble to the next stage.
class HazardDetectionUnit extends Module {
  val io = new HazardDetectionUnitIO()

  // RPOTIP: If you find this on the critical path then you might be
  // able to pre-compute this in decode.
  // Load-use hazard: a load in MEM writes a non-zero register that the
  // instruction in EXE reads (rs1 or rs2).
  val load_use = All(
    io.hdu_mem.mem_read,
    io.hdu_mem.rd_addr =/= UInt(0),
    io.hdu_mem.rd_addr === io.hdu_exe.rs2_addr ||
    io.hdu_mem.rd_addr === io.hdu_exe.rs1_addr
  )
  val mult_busy = io.hdu_exe.mult_busy
  val mem_busy = io.hdu_mem.mem_busy
  val branch_taken = io.hdu_exe.branch_taken

  // It is the responsibility of the pipeline stage to insert bubbles
  // when it is stalling.
  // Stalls propagate backwards: each earlier stage stalls whenever any
  // later stage stalls, plus its own stall causes.
  val stall_wrb = Bool(false)
  val stall_mem = stall_wrb
  val stall_exe = stall_mem || load_use || mem_busy
  val stall_dec = stall_exe || mult_busy
  val stall_fch = stall_dec

  // NOTE(review): stall_mem is computed but io.hdu_mem.stall is never driven
  // here — presumably the MEM stage takes no stall input; confirm against
  // HazardDetectionUnitIO.
  io.hdu_wrb.stall := stall_wrb
  io.hdu_exe.stall := stall_exe
  io.hdu_dec.stall := stall_dec
  io.hdu_fch.stall := stall_fch

  // Don't flush mem or wrb because we don't speculatively issue
  // instructions there (At least I hope we don't). Also don't flush
  // fch, because it uses the branch_taken signal to know when to
  // flush.
  io.hdu_exe.flush := branch_taken
  io.hdu_dec.flush := branch_taken
}
| SebastianBoe/turborav | hw/src/main/rtl/hdu.scala | Scala | bsd-2-clause | 1,843 |
package it.unibo.drescue.controller
import java.util
import java.util.concurrent.{ExecutorService, Executors, Future}
import com.rabbitmq.client.BuiltinExchangeType
import it.unibo.drescue.communication.GsonUtils
import it.unibo.drescue.communication.builder.ReqRescueTeamConditionMessageBuilderImpl
import it.unibo.drescue.communication.messages._
import it.unibo.drescue.communication.messages.response.AlertsMessageImpl
import it.unibo.drescue.connection._
import it.unibo.drescue.localModel.{AlertEntry, CivilProtectionData, EnrolledTeamInfo}
import it.unibo.drescue.model.{AlertImpl, RescueTeamImpl}
import it.unibo.drescue.view.{CustomDialog, MainView}
import org.slf4j.{Logger, LoggerFactory}
/**
* A class representing the application main controller
*
* @param model the application local model
* @param rabbitMQ the channel used to handle requests and responses
*/
class MainControllerImpl(var model: CivilProtectionData, val rabbitMQ: RabbitMQImpl) {
val ExchangeName = "rt_exchange"
val pool: ExecutorService = Executors.newFixedThreadPool(Runtime.getRuntime.availableProcessors() + 1)
private val Logger: Logger = LoggerFactory getLogger classOf[MainControllerImpl]
var view = new MainView(null, null, null, null, null, null)
var queueName: String = _
private var _sendOrStop: String = "Stop"
private var _alertInManage: AlertEntry = _
/**
* @return the button text that has to be active
*/
def sendOrStop = _sendOrStop
/**
* Sets to value the button text that has to be active
*
* @param value
*/
def sendOrStop_=(value: String): Unit = {
_sendOrStop = value
}
/**
* @return the AlertEntry corresponding to the selected alert in home view
*/
def alertInManage = _alertInManage
/**
* Sets to value the AlertEntry corresponding to the selected alert in home view
*
* @param value
*/
def alertInManage_=(value: AlertEntry): Unit = {
_alertInManage = value
}
/**
* Adds the main view to the controller
*
* @param viewValue the main view to be added
*/
def addView(viewValue: MainView): Unit = {
view = viewValue
}
/**
* Creates consumers to retrieve alerts and rescue teams after login
*
* @param username the civil protection ID
* @param rescueTeams the rescue teams list returned from the login
*/
def userLogged(username: String, rescueTeams: java.util.List[RescueTeamImpl]): Unit = {
model.cpID = username
model.enrolledRescueTeams = rescueTeams
initializeEnrolledTeamInfoList(rescueTeams)
Logger info ("Enrolled team info list: " + model.enrolledTeamInfoList)
rabbitMQ declareExchange(ExchangeName, BuiltinExchangeType.DIRECT)
queueName = rabbitMQ addReplyQueue()
rabbitMQ bindQueueToExchange(queueName, ExchangeName, rescueTeams)
val cpConsumer: RescueTeamConsumer = RescueTeamConsumer(rabbitMQ, this)
rabbitMQ addConsumer(cpConsumer, queueName)
rescueTeams forEach (rescueTeam => {
val rescueTeamConditionMessage = new ReqRescueTeamConditionMessageBuilderImpl()
.setRescueTeamID(rescueTeam.getRescueTeamID)
.setFrom(username)
.build()
rabbitMQ.sendMessage(ExchangeName, rescueTeam.getRescueTeamID, null, rescueTeamConditionMessage)
})
val message: Message = RequestCpAlertsMessageImpl(model.cpID)
val task: Future[String] = pool.submit(new RequestHandler(rabbitMQ, message, QueueType.ALERTS_QUEUE))
val response: String = task.get()
Logger info ("Main controller - alerts: " + response)
val messageName: MessageType = MessageUtils.getMessageNameByJson(response)
messageName match {
case MessageType.ALERTS_MESSAGE =>
val alertsMessage = GsonUtils.fromGson(response, classOf[AlertsMessageImpl])
initializeAlerts(alertsMessage.getAlerts)
case _ => startErrorDialog()
}
rabbitMQ.addQueue(model.cpID)
rabbitMQ addConsumer(AlertConsumer(rabbitMQ, this), model.cpID)
}
/**
* Initializes the local model alerts list
*
* @param list the alerts list local model has to be initialized to
*/
def initializeAlerts(list: java.util.List[AlertImpl]): Unit = {
model.lastAlerts = fromAlertImplListToAlertEntryList(list)
}
/**
* Converts a list of AlertImpl to a list of AlertEntry
*
* @param list the list to be converted
* @return the converted list
*/
def fromAlertImplListToAlertEntryList(list: java.util.List[AlertImpl]): java.util.List[AlertEntry] = {
val entryList: java.util.List[AlertEntry] = new util.ArrayList[AlertEntry]()
list.forEach((alert: AlertImpl) => {
entryList.add(fromAlertImplToAlertEntry(alert))
})
entryList
}
/**
* Converts an AlertImpl to an AlertEntry
*
* @param alert the AlertImpl to be converted
* @return the converted AlertEntry
*/
def fromAlertImplToAlertEntry(alert: AlertImpl): AlertEntry = {
new AlertEntry(
alert.getAlertID,
alert.getTimestamp,
alert.getLatitude,
alert.getLongitude,
alert.getUserID,
alert.getEventName,
alert.getDistrictID,
alert.getUpvotes)
}
/**
* Initializes the local model EnrolledTeamInfo list
*
* @param list the list local model has to be initialized to
*/
def initializeEnrolledTeamInfoList(list: java.util.List[RescueTeamImpl]): Unit = {
model.enrolledTeamInfoList = fromRescueTeamListToEnrolledTeamInfoList(list)
}
/**
* Converts a RescueTeam list to a EnrolledTeamInfo list
*
* @param list the list to be converted
* @return the converted list
*/
def fromRescueTeamListToEnrolledTeamInfoList(list: java.util.List[RescueTeamImpl]): java.util.List[EnrolledTeamInfo] = {
val entryList: java.util.List[EnrolledTeamInfo] = new util.ArrayList[EnrolledTeamInfo]()
list.forEach((rescueTeam: RescueTeamImpl) => {
entryList.add(fromRescueTeamToEnrolledTeamInfoEntry(rescueTeam))
})
entryList
}
/**
* Converts a RescueTeam to a EnrollTeamInfo
*
* @param rescueTeam the RescueTeam to be converted
* @return the converted EnrolledTeamInfo
*/
def fromRescueTeamToEnrolledTeamInfoEntry(rescueTeam: RescueTeamImpl): EnrolledTeamInfo = {
new EnrolledTeamInfo(
rescueTeam.getRescueTeamID,
rescueTeam.getName,
rescueTeam.getPhoneNumber,
true,
"",
"")
}
  /**
    * Initializes the local model not enrolled teams list with a DB query result message.
    */
  def initializeNotEnrolled(): Unit = {
    // Request the rescue teams this civil protection (model.cpID) has not enrolled.
    val message: Message = GetRescueTeamsNotEnrolledMessageImpl(model.cpID)
    val task: Future[String] = pool.submit(new RequestHandler(rabbitMQ, message, QueueType.CIVIL_PROTECTION_QUEUE))
    // NOTE(review): task.get() blocks the calling thread until the reply arrives,
    // with no timeout — confirm this is never called on the UI thread.
    val response: String = task.get()
    Logger info ("Main controller - initialize not enrolled rescue team: " + response)
    val messageName: MessageType = MessageUtils.getMessageNameByJson(response)
    messageName match {
      case MessageType.RESCUE_TEAMS_MESSAGE =>
        val teamsMessage = GsonUtils.fromGson(response, classOf[RescueTeamsMessageImpl])
        initializeNotEnrolledModel(teamsMessage.rescueTeamsList)
      // Any unexpected reply type is surfaced to the user as a generic error dialog.
      case _ => startErrorDialog()
    }
  }
/**
* Starts a custom error dialog
*/
def startErrorDialog() = {
val dialog = new CustomDialog(this).createDialog(CustomDialog.Error)
dialog.showAndWait()
}
  /**
    * Initializes the local model not enrolled teams.
    *
    * @param list the list the local model has to be initialized to
    */
  def initializeNotEnrolledModel(list: java.util.List[RescueTeamImpl]) = {
    model.notEnrolledRescueTeams = list
  }
  /**
    * Changes the view to the next view (pure delegation to the main view).
    *
    * @param nextView the identifier of the next view
    */
  def changeView(nextView: String) = {
    view.changeView(nextView)
  }
  /**
    * @return the main view backing this controller
    */
  def _view: MainView = view
} | SofiaRosetti/S3-16-d-rescue | civilprotection/src/main/scala/it/unibo/drescue/controller/MainControllerImpl.scala | Scala | gpl-3.0 | 7,904 |
package mesosphere.marathon
package api.v2
import java.net.URI
import java.time.Clock
import akka.event.EventStream
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import com.wix.accord.Validator
import javax.inject.Inject
import javax.servlet.http.HttpServletRequest
import javax.ws.rs._
import javax.ws.rs.container.{AsyncResponse, Suspended}
import javax.ws.rs.core.Response.Status
import javax.ws.rs.core.{Context, MediaType, Response}
import mesosphere.marathon.Normalization._
import mesosphere.marathon.api.RestResource.RestStreamingBody
import mesosphere.marathon.api.v2.Validation.validateOrThrow
import mesosphere.marathon.api.v2.validation.PodsValidation
import mesosphere.marathon.api.{AuthResource, RestResource, TaskKiller}
import mesosphere.marathon.core.appinfo.{PodSelector, PodStatusService, Selector}
import mesosphere.marathon.core.event._
import mesosphere.marathon.core.group.GroupManager
import mesosphere.marathon.core.instance.Instance
import mesosphere.marathon.core.plugin.PluginManager
import mesosphere.marathon.core.pod.{PodDefinition, PodManager}
import mesosphere.marathon.plugin.auth._
import mesosphere.marathon.raml.{Pod, Raml}
import mesosphere.marathon.state.{PathId, Timestamp, VersionInfo}
import mesosphere.marathon.util.RoleSettings
import play.api.libs.json.Json
import scala.async.Async._
import scala.concurrent.ExecutionContext
@Path("v2/pods")
@Consumes(Array(MediaType.APPLICATION_JSON))
@Produces(Array(MediaType.APPLICATION_JSON))
class PodsResource @Inject() (
    val config: MarathonConf,
    clock: Clock,
    taskKiller: TaskKiller,
    podSystem: PodManager,
    podStatusService: PodStatusService,
    groupManager: GroupManager,
    scheduler: MarathonScheduler,
    pluginManager: PluginManager
)(implicit
    val authenticator: Authenticator,
    val authorizer: Authorizer,
    eventBus: EventStream,
    mat: Materializer,
    val executionContext: ExecutionContext
) extends RestResource
    with AuthResource {

  import PodsResource._

  // If we can normalize using the internal model, do that instead.
  // The version of the pod is changed here to make sure, the user has not send a version.
  private def normalize(pod: PodDefinition): PodDefinition = pod.copy(versionInfo = VersionInfo.OnlyVersion(clock.now()))

  // Parses the raw request body into the RAML Pod model.
  private def unmarshal(bytes: Array[Byte]): Pod = {
    // no normalization or validation here, that happens elsewhere and in a precise order
    Json.parse(bytes).as[Pod]
  }

  /**
    * HEAD is used to determine whether some Marathon variant supports pods.
    *
    * Performs basic authentication checks, but none for authorization: there's
    * no sensitive data being returned here anyway.
    *
    * @return HTTP OK if pods are supported
    */
  @HEAD
  def capability(@Context req: HttpServletRequest, @Suspended asyncResponse: AsyncResponse): Unit =
    sendResponse(asyncResponse) {
      async {
        implicit val identity = await(authenticatedAsync(req))
        ok()
      }
    }

  // Creates a new pod. Order matters: unmarshal -> validate RAML -> normalize ->
  // convert to internal model -> validate internal model -> authorize -> deploy.
  @POST
  def create(
      body: Array[Byte],
      @DefaultValue("false") @QueryParam("force") force: Boolean,
      @Context req: HttpServletRequest,
      @Suspended asyncResponse: AsyncResponse
  ): Unit =
    sendResponse(asyncResponse) {
      async {
        implicit val identity = await(authenticatedAsync(req))
        val podRaml = unmarshal(body)
        val roleSettings = RoleSettings.forService(config, PathId(podRaml.id).canonicalPath(PathId.root), groupManager.rootGroup(), false)
        implicit val normalizer: Normalization[Pod] = PodNormalization(PodNormalization.Configuration(config, roleSettings))
        implicit val podValidator: Validator[Pod] = PodsValidation.podValidator(config, scheduler.mesosMasterVersion())
        implicit val podDefValidator: Validator[PodDefinition] = PodsValidation.podDefValidator(pluginManager, roleSettings)
        validateOrThrow(podRaml)
        val podDef = normalize(Raml.fromRaml(podRaml.normalize))
        validateOrThrow(podDef)(podDefValidator)
        checkAuthorization(CreateRunSpec, podDef)
        val deployment = await(podSystem.create(podDef, force))
        eventBus.publish(PodEvent(req.getRemoteAddr, req.getRequestURI, PodEvent.Created))
        Response
          .created(new URI(podDef.id.toString))
          .header(RestResource.DeploymentHeader, deployment.id)
          .entity(new RestStreamingBody[raml.Pod](Raml.toRaml(podDef)))
          .build()
      }
    }

  // Replaces an existing pod. The path id must match the id in the body.
  @PUT
  @Path("""{id:.+}""")
  def update(
      @PathParam("id") id: String,
      body: Array[Byte],
      @DefaultValue("false") @QueryParam("force") force: Boolean,
      @Context req: HttpServletRequest,
      @Suspended asyncResponse: AsyncResponse
  ): Unit =
    sendResponse(asyncResponse) {
      async {
        implicit val identity = await(authenticatedAsync(req))
        import PathId._
        val podId = id.toAbsolutePath
        val podRaml = unmarshal(body)
        val roleSettings = RoleSettings.forService(config, podId, groupManager.rootGroup(), force)
        implicit val normalizer: Normalization[Pod] = PodNormalization(PodNormalization.Configuration(config, roleSettings))
        implicit val podValidator: Validator[Pod] = PodsValidation.podValidator(config, scheduler.mesosMasterVersion())
        implicit val podDefValidator: Validator[PodDefinition] = PodsValidation.podDefValidator(pluginManager, roleSettings)
        validateOrThrow(podRaml)
        if (podId != podRaml.id.toAbsolutePath) {
          // Reject a body whose id disagrees with the URL path.
          Response
            .status(Status.BAD_REQUEST)
            .entity(
              s"""
                 |{"message": "'$podId' does not match definition's id ('${podRaml.id}')" }
              """.stripMargin
            )
            .build()
        } else {
          val podDef = normalize(Raml.fromRaml(podRaml.normalize))
          validateOrThrow(podDef)
          checkAuthorization(UpdateRunSpec, podDef)
          val deployment = await(podSystem.update(podDef, force))
          eventBus.publish(PodEvent(req.getRemoteAddr, req.getRequestURI, PodEvent.Updated))
          val builder = Response
            .ok(new URI(podDef.id.toString))
            .entity(new RestStreamingBody(Raml.toRaml(podDef)))
            .header(RestResource.DeploymentHeader, deployment.id)
          builder.build()
        }
      }
    }

  // Lists all pods the caller is authorized to view.
  @GET
  def findAll(@Context req: HttpServletRequest, @Suspended asyncResponse: AsyncResponse): Unit =
    sendResponse(asyncResponse) {
      async {
        implicit val identity = await(authenticatedAsync(req))
        val pods = podSystem.findAll(isAuthorized(ViewRunSpec, _))
        ok(pods.map(Raml.toRaml(_)))
      }
    }

  // Returns a single pod by id, or 404 when it does not exist.
  @GET
  @Path("""{id:.+}""")
  def find(@PathParam("id") id: String, @Context req: HttpServletRequest, @Suspended asyncResponse: AsyncResponse): Unit =
    sendResponse(asyncResponse) {
      async {
        implicit val identity = await(authenticatedAsync(req))
        import PathId._
        withValid(id.toAbsolutePath) { id =>
          podSystem.find(id).fold(notFound(s"""{"message": "pod with $id does not exist"}""")) { pod =>
            withAuthorization(ViewRunSpec, pod) {
              ok(Raml.toRaml(pod))
            }
          }
        }
      }
    }

  // Deletes a pod; returns 202 with the deployment that performs the deletion.
  @DELETE
  @Path("""{id:.+}""")
  def remove(
      @PathParam("id") idOrig: String,
      @DefaultValue("false") @QueryParam("force") force: Boolean,
      @Context req: HttpServletRequest,
      @Suspended asyncResponse: AsyncResponse
  ): Unit =
    sendResponse(asyncResponse) {
      async {
        implicit val identity = await(authenticatedAsync(req))
        import PathId._
        val id = idOrig.toAbsolutePath
        validateOrThrow(id)
        podSystem.find(id) match {
          case Some(pod) =>
            checkAuthorization(DeleteRunSpec, pod)
            val deployment = await(podSystem.delete(id, force))
            eventBus.publish(PodEvent(req.getRemoteAddr, req.getRequestURI, PodEvent.Deleted))
            Response
              .status(Status.ACCEPTED)
              .location(new URI(deployment.id)) // TODO(jdef) probably want a different header here since deployment != pod
              .header(RestResource.DeploymentHeader, deployment.id)
              .build()
          case None =>
            unknownPod(id)
        }
      }
    }

  // Returns the status of a single pod (subject to view authorization).
  @GET
  @Path("""{id:.+}::status""")
  def status(@PathParam("id") id: String, @Context req: HttpServletRequest, @Suspended asyncResponse: AsyncResponse): Unit =
    sendResponse(asyncResponse) {
      async {
        implicit val identity = await(authenticatedAsync(req))
        import PathId._
        await(withValidF(id.toAbsolutePath) { id =>
          podStatusService.selectPodStatus(id, authzSelector).map {
            case None => notFound(id)
            case Some(status) => ok(Json.stringify(Json.toJson(status)))
          }
        })
      }
    }

  // Lists all stored versions (timestamps) of a pod.
  @GET
  @Path("""{id:.+}::versions""")
  def versions(@PathParam("id") id: String, @Context req: HttpServletRequest, @Suspended asyncResponse: AsyncResponse): Unit =
    sendResponse(asyncResponse) {
      async {
        implicit val identity = await(authenticatedAsync(req))
        import PathId._
        import mesosphere.marathon.api.v2.json.Formats.TimestampFormat
        await(withValidF(id.toAbsolutePath) { id =>
          async {
            val versions = await(podSystem.versions(id).runWith(Sink.seq))
            podSystem.find(id).fold(notFound(id)) { pod =>
              withAuthorization(ViewRunSpec, pod) {
                ok(Json.stringify(Json.toJson(versions)))
              }
            }
          }
        })
      }
    }

  // Returns one specific historical version of a pod.
  @GET
  @Path("""{id:.+}::versions/{version}""")
  def version(
      @PathParam("id") id: String,
      @PathParam("version") versionString: String,
      @Context req: HttpServletRequest,
      @Suspended asyncResponse: AsyncResponse
  ): Unit =
    sendResponse(asyncResponse) {
      async {
        implicit val identity = await(authenticatedAsync(req))
        import PathId._
        val version = Timestamp(versionString)
        await(withValidF(id.toAbsolutePath) { id =>
          async {
            await(podSystem.version(id, version)).fold(notFound(id)) { pod =>
              withAuthorization(ViewRunSpec, pod) {
                ok(Raml.toRaml(pod))
              }
            }
          }
        })
      }
    }

  // Returns statuses of all pods the caller is authorized to view.
  @GET
  @Path("::status")
  def allStatus(@Context req: HttpServletRequest, @Suspended asyncResponse: AsyncResponse): Unit =
    sendResponse(asyncResponse) {
      async {
        implicit val identity = await(authenticatedAsync(req))
        val ids = podSystem.ids()
        val future = podStatusService.selectPodStatuses(ids, authzSelector)
        ok(Json.stringify(Json.toJson(await(future))))
      }
    }

  // Kills (and optionally wipes) a single instance of a pod.
  @DELETE
  @Path("""{id:.+}::instances/{instanceId}""")
  def killInstance(
      @PathParam("id") idOrig: String,
      @PathParam("instanceId") instanceId: String,
      @DefaultValue("false") @QueryParam("wipe") wipe: Boolean,
      @Context req: HttpServletRequest,
      @Suspended asyncResponse: AsyncResponse
  ): Unit =
    sendResponse(asyncResponse) {
      async {
        implicit val identity = await(authenticatedAsync(req))
        import PathId._
        import com.wix.accord.dsl._
        implicit val validId: Validator[String] = validator[String] { ids =>
          ids should matchRegexFully(Instance.Id.InstanceIdRegex)
        }
        // don't need to authorize as taskKiller will do so.
        val id = idOrig.toAbsolutePath
        validateOrThrow(id)
        validateOrThrow(instanceId)
        val parsedInstanceId = Instance.Id.fromIdString(instanceId)
        val instances = await(taskKiller.kill(id, _.filter(_.instanceId == parsedInstanceId), wipe))
        instances.headOption match {
          case None => (unknownTask(instanceId))
          case Some(instance) =>
            val raml = Raml.toRaml(instance)
            ok(Json.stringify(Json.toJson(raml)))
        }
      }
    }

  // Kills (and optionally wipes) the instances whose ids are listed in the body.
  @DELETE
  @Path("""{id:.+}::instances""")
  def killInstances(
      @PathParam("id") idOrig: String,
      @DefaultValue("false") @QueryParam("wipe") wipe: Boolean,
      body: Array[Byte],
      @Context req: HttpServletRequest,
      @Suspended asyncResponse: AsyncResponse
  ): Unit =
    sendResponse(asyncResponse) {
      async {
        implicit val identity = await(authenticatedAsync(req))
        import PathId._
        import Validation._
        import com.wix.accord.dsl._
        implicit val validIds: Validator[Set[String]] = validator[Set[String]] { ids =>
          ids is every(matchRegexFully(Instance.Id.InstanceIdRegex))
        }
        // don't need to authorize as taskKiller will do so.
        val id = idOrig.toAbsolutePath
        validateOrThrow(id)
        val instancesToKill = Json.parse(body).as[Set[String]]
        validateOrThrow(instancesToKill)
        val instancesDesired = instancesToKill.map(Instance.Id.fromIdString(_))
        // Keep only the running instances the caller actually asked for.
        def toKill(instances: Seq[Instance]): Seq[Instance] = {
          instances.filter(instance => instancesDesired.contains(instance.instanceId))
        }
        val instances = await(taskKiller.kill(id, toKill, wipe)).map { instance => Raml.toRaml(instance) }
        ok(instances)
      }
    }

  // Uniform 404 response for a missing pod id.
  private def notFound(id: PathId): Response = unknownPod(id)
}
object PodsResource {

  /** Selector that keeps only the pods the given identity is authorized to view. */
  def authzSelector(implicit authz: Authorizer, identity: Identity): PodSelector =
    Selector[PodDefinition] { pod =>
      authz.isAuthorized(identity, ViewRunSpec, pod)
    }
}
| mesosphere/marathon | src/main/scala/mesosphere/marathon/api/v2/PodsResource.scala | Scala | apache-2.0 | 13,575 |
package com.eevolution.context.dictionary.infrastructure.repository
import com.eevolution.context.dictionary.domain.model.PackageImportProcess
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/EmerisScala
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 26/10/17.
*/
/**
  * Package Import Process Mapping.
  *
  * Maps the [[PackageImportProcess]] entity onto the AD_Package_Imp_Proc table,
  * one line per field-to-column pair.
  */
trait PackageImportProcessMapping {
  // Quill schema mapping: entity field -> database column name.
  val queryPackageImportProcess = quote {
    querySchema[PackageImportProcess]("AD_Package_Imp_Proc",
      _.packageImportProcessId-> "AD_Package_Imp_Proc_ID",
      _.tenantId-> "AD_Client_ID",
      _.organizationId-> "AD_Org_ID",
      _.isActive-> "IsActive",
      _.created-> "Created",
      _.createdBy-> "CreatedBy",
      _.updated-> "Updated",
      _.updatedBy-> "UpdatedBy",
      _.overrideDict-> "AD_Override_Dict",
      _.packageDir-> "AD_Package_Dir",
      _.packageSource-> "AD_Package_Source",
      _.packageSourceType-> "AD_Package_Source_Type",
      _.processing-> "Processing",
      _.uuid-> "UUID")
  }
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/PackageImportProcessMapping.scala | Scala | gpl-3.0 | 1,866 |
/**
* Copyright (c) 2015 Orbeon, Inc. http://orbeon.com
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package org.orbeon.oxf.json
import org.orbeon.oxf.util.ScalaUtils._
import org.orbeon.oxf.xml.SaxonUtils.makeNCName
import spray.json._
import scala.language.postfixOps
//
// Functions to convert JSON to XML following the XForms 2.0 specification.
//
protected trait JsonToXmlAlgorithm {
  // Abstract members to keep the XML APIs completely separate.
  // Implementations plug in a concrete XML event sink.
  type XmlStream
  /** Opens an element with the given local name. */
  def startElem   (rcv: XmlStream, name: String): Unit
  /** Closes the element with the given local name. */
  def endElem     (rcv: XmlStream, name: String): Unit
  /** Adds an attribute to the most recently opened element (before any content). */
  def addAttribute(rcv: XmlStream, name: String, value: String): Unit
  /** Emits character content. */
  def text        (rcv: XmlStream, value: String): Unit
  // Convert a JSON AST to a stream of XML events
  def jsonToXmlImpl(ast: JsValue, rcv: XmlStream): Unit = {
    // Shift control characters (U+0000..U+001F and U+007F) into the private use
    // area (offset U+E000) so the result is always legal XML character content.
    def escapeString(s: String) =
      s.iterateCodePoints map { cp ⇒
        if (cp <= 0x1F || cp == 0x7F)
          cp + 0xE000
        else
          cp
      } codePointsToString
    // Emit startElem + attributes, run `body`, then emit the matching endElem.
    def withElement[T](localName: String, atts: Seq[(String, String)] = Nil)(body: ⇒ T): T = {
      startElem(rcv, localName)
      atts foreach { case (name, value) ⇒ addAttribute(rcv, name, value) }
      val result = body
      endElem(rcv, localName)
      result
    }
    // Recursively serialize one JSON value as the content of the current element.
    def processValue(jsValue: JsValue): Unit =
      jsValue match {
        case JsString(v) ⇒
          // Don't add `type="string"` since it's the default
          text(rcv, escapeString(v))
        case JsNumber(v) ⇒
          addAttribute(rcv, Symbols.Type, Symbols.Number)
          text(rcv, v.toString)
        case JsBoolean(v) ⇒
          addAttribute(rcv, Symbols.Type, Symbols.Boolean)
          text(rcv, v.toString)
        case JsNull ⇒
          // Null carries no text content, only the type attribute.
          addAttribute(rcv, Symbols.Type, Symbols.Null)
        case JsObject(fields) ⇒
          addAttribute(rcv, Symbols.Type, Symbols.Object)
          fields foreach { case (name, value) ⇒
            // Element names must be NCNames; when the key had to be mangled, the
            // original key is preserved in a `name` attribute.
            val ncName = makeNCName(name, keepFirstIfPossible = true)
            val nameAtt = ncName != name list (Symbols.Name → escapeString(name))
            withElement(ncName, nameAtt) {
              processValue(value)
            }
          }
        case JsArray(arrayValues) ⇒
          addAttribute(rcv, Symbols.Type, Symbols.Array)
          // Array items are wrapped in anonymous elements to preserve order.
          arrayValues foreach { arrayValue ⇒
            withElement(Symbols.Anonymous) {
              processValue(arrayValue)
            }
          }
      }
    // The whole document is wrapped in a single root element.
    withElement(Symbols.JSON) {
      processValue(ast)
    }
  }
} | joansmith/orbeon-forms | src/main/scala/org/orbeon/oxf/json/JsonToXmlAlgorithm.scala | Scala | lgpl-2.1 | 3,574 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs.tools
import java.io.File
import java.util
import java.util.ServiceLoader
import com.beust.jcommander.{IValueValidator, Parameter, ParameterException}
import org.locationtech.geomesa.fs.data.FileSystemDataStore
import org.locationtech.geomesa.fs.data.FileSystemDataStoreFactory.FileSystemDataStoreParams
import org.locationtech.geomesa.fs.storage.api.FileSystemStorageFactory
import org.locationtech.geomesa.fs.tools.FsDataStoreCommand.FsParams
import org.locationtech.geomesa.tools.utils.ParameterConverters.KeyValueConverter
import org.locationtech.geomesa.tools.{DataStoreCommand, DistributedCommand}
import org.locationtech.geomesa.utils.classpath.ClassPathUtils
import org.locationtech.geomesa.utils.io.PathUtils
/**
  * Mixin trait for FSDS (file system data store) commands: turns the parsed
  * command-line parameters into data store connection parameters.
  */
trait FsDataStoreCommand extends DataStoreCommand[FileSystemDataStore] {

  import scala.collection.JavaConverters._

  override def params: FsParams

  override def connection: Map[String, String] = {
    // Resolve the root path parameter into a full URL for the data store.
    val url = PathUtils.getUrl(params.path)
    val builder = Map.newBuilder[String, String]
    builder += (FileSystemDataStoreParams.PathParam.getName -> url.toString)
    if (params.configuration != null && !params.configuration.isEmpty) {
      // Extra configuration entries are joined into one string and converted to
      // the XML format expected by the data store factory.
      val xml = FileSystemDataStoreParams.convertPropsToXml(params.configuration.asScala.mkString("\\n"))
      builder += (FileSystemDataStoreParams.ConfigsParam.getName -> xml)
    }
    builder.result()
  }
}
object FsDataStoreCommand {

  import scala.collection.JavaConverters._

  /** FSDS command that also ships the FSDS jars for distributed execution. */
  trait FsDistributedCommand extends FsDataStoreCommand with DistributedCommand {
    abstract override def libjarsFiles: Seq[String] =
      Seq("org/locationtech/geomesa/fs/tools/fs-libjars.list") ++ super.libjarsFiles

    abstract override def libjarsPaths: Iterator[() => Seq[File]] = Iterator(
      () => ClassPathUtils.getJarsFromEnvironment("GEOMESA_FS_HOME", "lib"),
      () => ClassPathUtils.getJarsFromClasspath(classOf[FileSystemDataStore])
    ) ++ super.libjarsPaths
  }

  /** Parameters common to every FSDS command (JCommander-populated). */
  trait FsParams {
    @Parameter(names = Array("--path", "-p"), description = "Path to root of filesystem datastore", required = true)
    var path: String = _

    @Parameter(names = Array("--config"), description = "Configuration properties, in the form k=v", variableArity = true)
    var configuration: java.util.List[String] = _
  }

  /** Optional partition selection; empty means "all partitions". */
  trait PartitionParam {
    @Parameter(names = Array("--partitions"), description = "Partitions to operate on (if empty all partitions will be used)", variableArity = true)
    var partitions: java.util.List[String] = new util.ArrayList[String]()
  }

  /** Optional file encoding, validated against the available storage factories. */
  trait OptionalEncodingParam {
    @Parameter(names = Array("--encoding", "-e"), description = "Encoding (parquet, orc, converter, etc)", validateValueWith = classOf[EncodingValidator])
    var encoding: String = _
  }

  /** Optional partition-scheme configuration for commands that create storage. */
  trait OptionalSchemeParams {
    @Parameter(names = Array("--partition-scheme"), description = "PartitionScheme typesafe config string or file")
    var scheme: java.lang.String = _

    @Parameter(names = Array("--leaf-storage"), description = "Use Leaf Storage for Partition Scheme", arity = 1)
    var leafStorage: java.lang.Boolean = true

    @Parameter(names = Array("--storage-opt"), variableArity = true, description = "Additional storage opts (k=v)", converter = classOf[KeyValueConverter])
    var storageOpts: java.util.List[(String, String)] = new java.util.ArrayList[(String, String)]()
  }

  /** Rejects encodings for which no FileSystemStorageFactory is registered. */
  class EncodingValidator extends IValueValidator[String] {
    override def validate(name: String, value: String): Unit = {
      // Encodings are discovered at runtime via SPI, matched case-insensitively.
      val encodings = ServiceLoader.load(classOf[FileSystemStorageFactory]).asScala.map(_.encoding).toList
      if (!encodings.exists(_.equalsIgnoreCase(value))) {
        // NOTE(review): the concatenated message lacks a space between the two
        // sentences ("...name.Available encodings...") — confirm intended.
        throw new ParameterException(s"$value is not a valid encoding for parameter $name." +
            s"Available encodings are: ${encodings.mkString(", ")}")
      }
    }
  }
}
| elahrvivaz/geomesa | geomesa-fs/geomesa-fs-tools/src/main/scala/org/locationtech/geomesa/fs/tools/FsDataStoreCommand.scala | Scala | apache-2.0 | 4,379 |
package fi.veikkaus.dcontext
import java.io.Closeable
/**
  * Read-only view over a named, typed value context. Contexts can be stacked
  * with `++`, where the right-hand operand masks (overrides) this context.
  *
  * Created by arau on 28.6.2016.
  */
trait DContext extends java.io.Closeable {
  /** The system object bound under [[DContext.systemId]], if present. */
  def system = get[DSystem](DContext.systemId)
  /** Looks up the value bound to `name`, if any. */
  def get[T](name:String) : Option[T]
  /** Looks up the runtime class of the value bound to `name`, if any. */
  def getType(name:String) : Option[Class[_]]
  /** All names bound in this context. */
  def keySet : scala.collection.Set[String]
  /** Unsafe lookup: throws NoSuchElementException when `name` is unbound. */
  def apply[T](name:String) = get[T](name).get
  /** Stacks `other` on top of this context; bindings in `other` win. */
  def ++(other:DContext) = DContext.mask(this, other)
  /** Stacks a mutable context on top; lookups fall back to this one. */
  def ++(other:MutableDContext) = DContext.mask(this, other)
}
object DContext {

  /** Reserved key under which the [[DSystem]] instance is bound. */
  def systemId = ".system"

  /** An empty, immutable context: every lookup misses and `close()` is a no-op. */
  def empty = new DContext {
    override def getType(name: String) = None
    override def get[T](name: String) = None
    override def keySet: Set[String] = Set.empty
    override def apply[T](name: String): T = throw new NoSuchElementException(name)
    // `()` is the unit value; the previous `= Unit` returned the Unit companion
    // object and relied on value discarding.
    override def close(): Unit = ()
  }

  /** Creates an immutable context from the given bindings. */
  def apply(ctx: Map[String, Any]) = new ImmutableDContext(ctx)

  /** Creates an immutable context from the given bindings. */
  def apply(ctx: (String, Any)*) = new ImmutableDContext(ctx.toMap)

  /**
    * Stacks `mask` on top of `masked`: lookups try `mask` first and fall back
    * to `masked`; `close()` closes both layers.
    */
  def mask(masked: DContext, mask: DContext): DContext = new DContext {
    // Option.orElse replaces the previous hand-rolled helper with identical semantics.
    override def getType(name: String): Option[Class[_]] = mask.getType(name) orElse masked.getType(name)
    override def get[T](name: String): Option[T] = mask.get(name) orElse masked.get(name)
    override def keySet: Set[String] = mask.keySet.toSet ++ masked.keySet
    override def close(): Unit = {
      mask.close()
      masked.close()
    }
  }

  /**
    * Stacks a mutable `mask` on top of `masked`. Lookups fall back from `mask`
    * to `masked`; writes and removals always target the mutable `mask` layer.
    */
  def mask(masked: DContext, mask: MutableDContext): MutableDContext = new MutableDContext {
    override def getType(name: String): Option[Class[_]] = mask.getType(name) orElse masked.getType(name)
    override def get[T](name: String): Option[T] = mask.get(name) orElse masked.get(name)
    override def keySet: Set[String] = mask.keySet.toSet ++ masked.keySet
    override def close(): Unit = {
      mask.close()
      masked.close()
    }
    override def remove(key: String): Unit = mask.remove(key)
    override def put(key: String, value: Any, closer: Option[Closeable]): Unit = mask.put(key, value, closer)
  }
}
| VeikkausOy/dcontext | dcontext/src/main/scala/fi/veikkaus/dcontext/DContext.scala | Scala | bsd-3-clause | 2,329 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.compat.java8.issue247
import scala.compat.java8.FunctionConverters._
import java.util.function.IntFunction
object Main {

  /** Calls the given Java `IntFunction` with the sample argument 2. */
  def invoke(jfun: IntFunction[String]): String = jfun.apply(2)

  /** Converts a Scala function to a Java `IntFunction` and checks the round trip. */
  def main(args: Array[String]): Unit = {
    val scalaFun: Int => String = i => s"ret: $i"
    val result = invoke(scalaFun.asJava)
    assert(result == "ret: 2")
    println(s"OK. $result")
  }
}
| scala/scala-java8-compat | src/test/scala/scala/compat/java8/issue247/Main.scala | Scala | apache-2.0 | 659 |
package no.vestein.webapp.eventhandler
trait EventListener[T <: Event] {
  /** Called when an event of type `T` is dispatched to this listener. */
  def invoke(event: T)
}
| WoodStone/PurpleRain-ScalaJS | src/main/scala/no/vestein/webapp/eventhandler/EventListener.scala | Scala | gpl-3.0 | 101 |
package net.sansa_stack.rdf.spark.qualityassessment.metrics.licensing
import net.sansa_stack.rdf.common.qualityassessment.utils.NodeUtils._
import org.apache.jena.graph.Triple
import org.apache.spark.rdd.RDD
/**
* @author Gezim Sejdiu
*/
object HumanReadableLicense {

  /**
    * Human-readable indication of a license.
    *
    * Checks whether the dataset provides a human-readable licensing statement:
    * a triple whose subject is a URI, whose predicate carries a licence
    * indication and whose object is a literal containing licensing-related text.
    *
    * @param dataset the RDF triples to assess
    * @return 1.0 if at least one such statement exists, 0.0 otherwise
    */
  def assessHumanReadableLicense(dataset: RDD[Triple]): Double = {
    val humanReadableStatements = dataset.filter { triple =>
      triple.getSubject.isURI() &&
        hasLicenceIndications(triple.getPredicate) &&
        triple.getObject.isLiteral() &&
        isLicenseStatement(triple.getObject)
    }
    if (humanReadableStatements.count() > 0) 1.0 else 0.0
  }
}
| SANSA-Stack/SANSA-RDF | sansa-rdf/sansa-rdf-spark/src/main/scala/net/sansa_stack/rdf/spark/qualityassessment/metrics/licensing/HumanReadableLicense.scala | Scala | apache-2.0 | 927 |
package com.nabijaczleweli.minecrasmer.item
import java.util
import com.nabijaczleweli.minecrasmer.creativetab.CreativeTabMineCrASMer
import com.nabijaczleweli.minecrasmer.entity.{EntityItemCleaner, EntityItemShredder}
import com.nabijaczleweli.minecrasmer.reference.{Container, Reference}
import com.nabijaczleweli.minecrasmer.resource.{MineCrASMerLocation, ReloadableString, ReloadableStrings, ResourcesReloadedEvent}
import com.nabijaczleweli.minecrasmer.util.{IMultiModelItem, IOreDictRegisterable}
import net.minecraft.client.Minecraft
import net.minecraft.client.resources.model.{ModelBakery, ModelResourceLocation}
import net.minecraft.creativetab.CreativeTabs
import net.minecraft.entity.Entity
import net.minecraft.entity.item.EntityItem
import net.minecraft.item.{Item, ItemStack}
import net.minecraft.util.MathHelper
import net.minecraft.world.World
import net.minecraftforge.fml.common.eventhandler.SubscribeEvent
import net.minecraftforge.fml.relauncher.{Side, SideOnly}
import net.minecraftforge.oredict.OreDictionary
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
object ItemQuartz extends Item with IMultiModelItem with IOreDictRegisterable {
  // Subscribe to the mod event bus so cached localized names can be reloaded.
  Container.eventBus register this

  // Per-damage-value data: icon name patterns, unlocalized-name suffixes and
  // ore-dictionary names (null = no ore dictionary entry for that variant).
  private val subIconNames = Array("plate%s", "shards%s", "cleanshards%s")
  private val subNameNames = Array("plate", "shards", "cleanshards")
  private val subOreDictNames = Array("plateQuartz", "shardsQuartz", null)

  // Reloadable display names, one per variant; built lazily on the client only.
  @SideOnly(Side.CLIENT)
  private lazy val localizedNames =
    new ReloadableStrings(Future({subIconNames.indices map {idx => new ReloadableString(s"${super.getUnlocalizedName}.${subNameNames(idx)}.name")}}.toList))

  // Damage values double as variant discriminators for this item.
  val plateDamage = 0
  val shardsDamage = 1
  val cleanShardsDamage = 2

  setUnlocalizedName(Reference.NAMESPACED_PREFIX + "quartz")
  setCreativeTab(CreativeTabMineCrASMer)
  setHasSubtypes(true)

  // Variants never take durability damage.
  override def getMaxDamage =
    0

  override def getItemStackDisplayName(is: ItemStack) =
    localizedNames(MathHelper.clamp_int(is.getItemDamage, 0, localizedNames.length))

  // Lists one creative-tab entry per variant.
  @SideOnly(Side.CLIENT)
  override def getSubItems(item: Item, tab: CreativeTabs, list: util.List[ItemStack]) =
    if(item.isInstanceOf[this.type])
      for(i <- 0 until localizedNames.length)
        list.asInstanceOf[util.List[ItemStack]] add new ItemStack(item, 1, i)

  // Dropped plates become shredder entities, dropped shards become cleaner entities.
  override def createEntity(world: World, location: Entity, itemstack: ItemStack) = {
    val source = location.asInstanceOf[EntityItem]
    var ent: EntityItem = null
    source.getEntityItem.getItemDamage match {
      case `plateDamage` =>
        ent = new EntityItemShredder(world, itemstack)
      case `shardsDamage` =>
        ent = new EntityItemCleaner(world, itemstack)
    }
    ent copyDataFromOld source
    ent
  }

  // Only the plate and shards variants get a custom dropped-item entity.
  override def hasCustomEntity(stack: ItemStack) =
    stack.getItemDamage == plateDamage || stack.getItemDamage == shardsDamage

  override def getUnlocalizedName(stack: ItemStack) =
    "item." + Reference.NAMESPACED_PREFIX + '.' + subNameNames(stack.getMetadata)

  // Refresh cached localized names when resources are reloaded (client side).
  @SubscribeEvent
  @SideOnly(Side.CLIENT)
  def onResourcesReloaded(event: ResourcesReloadedEvent) {
    localizedNames.reload()
  }

  // Register the plate and shards variants with the ore dictionary; the clean
  // shards variant intentionally has no entry (null in subOreDictNames).
  override def registerOreDict() {
    val is = new ItemStack(this)
    for(i <- plateDamage to shardsDamage) {
      is setItemDamage i
      OreDictionary.registerOre(oreDictName(i), is)
    }
  }

  def oreDictName(dmg: Int) =
    subOreDictNames(MathHelper.clamp_int(dmg, 0, subOreDictNames.length - 1))

  // Tier = index of the stack's first matching ore-dict name + 1; 0 when none match.
  def tier(is: ItemStack) = {
    var temp = -1
    for(name <- OreDictionary getOreIDs is map {OreDictionary.getOreName} if temp == -1)
      temp = subOreDictNames indexOf name
    temp + 1
  }

  // Register one inventory model and one model variant per damage value.
  @SideOnly(Side.CLIENT)
  override def registerModels() {
    for(i <- plateDamage to cleanShardsDamage) {
      Minecraft.getMinecraft.getRenderItem.getItemModelMesher.register(this, i, new ModelResourceLocation(Reference.NAMESPACED_PREFIX + subIconNames(i).format("_quartz"),
        "inventory"))
      ModelBakery.registerItemVariants(this, MineCrASMerLocation(subIconNames(i).format("_quartz")))
    }
  }
}
| nabijaczleweli/ASMifier | src/main/scala/com/nabijaczleweli/minecrasmer/item/ItemQuartz.scala | Scala | mit | 4,080 |
import org.scalatest.junit.AssertionsForJUnit
import scala.collection.mutable.ListBuffer
import org.junit.Assert._
import org.junit.Test
import org.junit.Before
import Chisel._
/** Bundle with the fields of a single-precision float: sign, exponent, significand. */
class MyFloat extends Bundle {
  val sign = Bool()
  val exponent = Bits(width = 8)
  val significand = Bits(width = 23)
}
/** Minimal component whose IO is a MyFloat bundle; exists only to be elaborated. */
class MyFPUnit extends Component {
  val io = new MyFloat()
}
/** Smoke test that elaborates MyFPUnit through chiselMain (coverage only). */
class ExampleSuite extends AssertionsForJUnit {

  @Test def verifyEasy() {
    // Uses JUnit-style assertions, we are only interested
    // in generating code coverage for now.
    chiselMain(Array[String](), () => new MyFPUnit())
    assertTrue(true)
  }
}
| seyedmaysamlavasani/GorillaPP | chisel/chisel/src/test/scala/Example.scala | Scala | bsd-3-clause | 621 |
import backend.flowTypes.{WordObject, TwitterMessage, Sentiment, FlowObject}
import org.specs2.mutable._
import org.specs2.runner._
import org.junit.runner._
/** Fixture FlowObject exposing fixed values through the field-lookup API. */
class FlowTest extends FlowObject {

  // Fixed payload values served by `content`.
  val foo: Int = 5
  val bar: String = "20"
  val buz: Double = 4.2
  val biz: String = "biz"

  override val uid: Long = 0
  override val originUid: Long = 1

  /** Retrieves the content of the given field; None for an unknown field. */
  override def content(field: String): Option[Any] = field match {
    case "foo"     => Some(foo)
    case "default" => Some(foo)
    case "bar"     => Some(bar)
    case "buz"     => Some(buz)
    case "biz"     => Some(biz)
    case _         => None
  }

  /** The names `content` answers for, including the "default" alias of foo. */
  override def fields(): List[String] = List("foo", "bar", "buz", "biz", "default")
}
/** specs2 specification covering the [[FlowObject]] conversion helpers and
  * the concrete flow types `TwitterMessage`, `WordObject` and `Sentiment`.
  */
@RunWith(classOf[JUnitRunner])
class MessageSpec extends Specification {

  "FlowObject test class FlowTest" should {
    val o = new FlowTest

    // Int content should widen/stringify losslessly.
    "have proper conversions for int" in {
      o.contentAsInt() must beSome(5)
      o.contentAsDouble() must beSome(5.0)
      o.contentAsString() must beSome("5")
    }

    // Numeric strings should parse to Int/Double.
    "have proper conversions for string" in {
      o.contentAsInt("bar") must beSome(20)
      o.contentAsDouble("bar") must beSome(20.0)
      o.contentAsString("bar") must beSome("20")
    }

    // Doubles truncate to Int, round-trip as Double/String.
    "have proper conversions for double" in {
      o.contentAsInt("buz") must beSome(4)
      o.contentAsDouble("buz") must beSome(4.2)
      o.contentAsString("buz") must beSome("4.2")
    }

    // Non-numeric strings and unknown fields must yield None, not throw.
    "have failure for invalid conversions" in {
      o.contentAsInt("biz") must beNone
      o.contentAsDouble("biz") must beNone
      o.contentAsString("doesnotexist") must beNone
    }
  }

  "TwitterMessage" should {
    val msg = TwitterMessage(0, "Bernd")

    "have 'message' field" in {
      msg.fields() must contain("message")
      msg.contentAsString("message") must beSome(msg.message)
    }

    "fail arbitrary conversions" in {
      msg.contentAsInt("message") must beNone
    }

    "not have random field" in {
      msg.contentAsString("doesNotExist") must beNone
    }

    "default to 'message'" in {
      msg.contentAsString() must beSome(msg.message)
      msg.fields() must contain("default")
    }
  }

  "Word" should {
    val msg = WordObject(0, 1, "Foo")

    "have 'word' field" in {
      msg.fields() must contain("word")
      msg.contentAsString("word") must beSome(msg.word)
    }

    // originUid can be given directly or derived from a predecessor object.
    "have predecessor" in {
      msg.originUid must beEqualTo(1)
      WordObject(10, msg, "Blub").originUid must beEqualTo(msg.uid)
    }
  }

  "Sentiment" should {
    val msg = Sentiment(0, 1, 5,4)

    "have 'sentiment' default field" in {
      msg.fields() must contain("sentiment")
      msg.content() must beEqualTo(msg.content("sentiment"))
      // NOTE(review): expects 1 for inputs (5, 4) — presumably an integer
      // ratio or similar; confirm against Sentiment's implementation.
      msg.contentAsDouble() must beSome(1)
    }

    "have predecessor" in {
      msg.originUid must beEqualTo(1)
      Sentiment(10, msg, 23, 22).originUid must beEqualTo(msg.uid)
    }
  }
}
| hacst/reactiveStreamPlay | test/MessageSpec.scala | Scala | bsd-3-clause | 2,905 |
package test;
import java.lang.{System => S}
// Exercises renaming imports: `print` is aliased to `p`, and `println`
// is rebound to the name `print`, shadowing Predef.print inside this object.
object test {
  import S.out.{print => p, println => print}
  val foo = 1;
  p("hello"); print("world"); S.out.println("!");
  S.out.flush();
}
// Wildcard-imports the members of `test`, referencing `foo` to force
// its initialization (and to check the import resolves).
object test1 {
  import test._;
  foo
}
| yusuke2255/dotty | tests/pending/pos/imports-pos.scala | Scala | bsd-3-clause | 233 |
package controllers
import dao.{AuthorityDao, ReportCardEntryTypeDao}
import database.{ReportCardEntryTypeDb, ReportCardEntryTypeTable}
import models.{ReportCardEntryType, ReportCardEntryTypeProtocol}
import play.api.libs.json.{Json, Reads, Writes}
import play.api.mvc.ControllerComponents
import security.LWMRole.{CourseAssistant, CourseEmployee, CourseManager, God}
import security.SecurityActionChain
import java.util.UUID
import javax.inject.{Inject, Singleton}
import scala.concurrent.{ExecutionContext, Future}
/** CRUD controller for report-card entry types, plus two field-update
  * endpoints: a single-entry update (`updateFrom`) and a batch update over
  * many users of one assignment entry (`batchUpdate`). Both are restricted
  * to course-level roles via `restrictedContext`.
  */
@Singleton
class ReportCardEntryTypeController @Inject()(
  cc: ControllerComponents,
  val authorityDao: AuthorityDao,
  val abstractDao: ReportCardEntryTypeDao,
  val securedAction: SecurityActionChain,
  implicit val ctx: ExecutionContext
) extends AbstractCRUDController[ReportCardEntryTypeProtocol, ReportCardEntryTypeTable, ReportCardEntryTypeDb, ReportCardEntryType](cc) {

  import logger.AccessLoggingAction.log

  // Payload for batchUpdate: exactly one of `bool` or `int` is expected to be set.
  case class BatchUpdateRequest(users: List[UUID], assignmentEntry: UUID, entryType: String, bool: Option[Boolean], int: Option[Int])

  private def batchReads: Reads[BatchUpdateRequest] = Json.reads[BatchUpdateRequest]

  override protected implicit val writes: Writes[ReportCardEntryType] = ReportCardEntryType.writes

  override protected implicit val reads: Reads[ReportCardEntryTypeProtocol] = ReportCardEntryTypeProtocol.reads

  // Updates the bool/int fields of a single entry type identified by `id`.
  // The response body echoes the protocol values, not a re-read from the DB.
  def updateFrom(course: String, id: String) = restrictedContext(course)(Update) asyncAction log { request =>
    (for {
      uuid <- id.uuidF
      protocol <- Future.fromTry(parseJson(request))
      _ <- abstractDao.updateFields(uuid, protocol.bool, protocol.int)
    } yield ReportCardEntryType(
      protocol.entryType,
      protocol.bool,
      protocol.int,
      uuid
    )).jsonResult
  }

  // Batch-updates one entry type for many users within a labwork. `bool`
  // takes precedence when both fields are present; fails when neither is set.
  def batchUpdate(course: String, labwork: String) = restrictedContext(course)(Update) asyncAction { request =>
    for {
      lid <- labwork.uuidF
      p <- Future.fromTry(parseJson(request)(batchReads))
      res <- if (p.bool.isDefined)
        abstractDao.updateFields(p.users, p.assignmentEntry, lid, p.entryType, p.bool.get)
      else if (p.int.isDefined)
        abstractDao.updateFields(p.users, p.assignmentEntry, lid, p.entryType, p.int.get)
      else
        Future.failed(new Throwable("either bool or int must be set"))
    } yield Created(Json.toJson(res))
  }

  // NOTE(review): unimplemented — any inherited CRUD route that converts a
  // protocol to a DB model will throw NotImplementedError. Confirm this is
  // intentional (i.e. creation is disabled for this resource).
  override protected def toDbModel(protocol: ReportCardEntryTypeProtocol, existingId: Option[UUID]): ReportCardEntryTypeDb = ???

  override protected def restrictedContext(restrictionId: String): PartialFunction[Rule, SecureContext] = {
    case Update => SecureBlock(restrictionId, List(CourseManager, CourseEmployee, CourseAssistant))
    case _ => PartialSecureBlock(List(God))
  }

  override protected def contextFrom: PartialFunction[Rule, SecureContext] =
    forbiddenAction()
}
| THK-ADV/lwm-reloaded | app/controllers/ReportCardEntryTypeController.scala | Scala | mit | 2,846 |
// Copyright (c) 2014 David Miguel Antunes <davidmiguel {at} antunes.net>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package com.github.david04.liftutils.modtbl
import scala.xml.NodeSeq
import net.liftweb.util.Helpers
import net.liftweb.util.Helpers._
import net.liftweb.http.js.JsCmds.Run
/** Mixin that adds a purely client-side text search to a [[Table]]:
  * rows are tagged with the `modtbl-searchable` class and a keyup handler
  * on the search input shows/hides rows by substring match on their text.
  */
trait ClientSideSearchableTable extends Table {

  override def keepClasses = "modtbl-search-around" :: super.keepClasses

  override protected def rowTransforms(row: R, rowId: String, rowIdx: Int): NodeSeq => NodeSeq =
    super.rowTransforms(row, rowId, rowIdx) andThen
      "tr [class+]" #> "modtbl-searchable"

  override protected def pageTransforms(): NodeSeq => NodeSeq =
    super.pageTransforms() andThen
      ".modtbl-search" #> {
        val inputId = Helpers.nextFuncName
        ".modtbl-search [id]" #> inputId &
          ".modtbl-search [onkeyup]" #>
            Run("" +
              "(function(){" +
              // BUGFIX: lowercase the query as well — the row text below is
              // lowercased, so an uppercase query would never match anything.
              "  var query = $('#" + inputId + "').val().toLowerCase();" +
              "  $('#" + id('table) + " tbody tr')" +
              "    .each(function(){" +
              "      if($(this).text().toLowerCase().indexOf(query) == -1) $(this).hide();" +
              "      else $(this).show();" +
              "    });" +
              "})()")
      }
}
| david04/liftutils | src/main/scala/com/github/david04/liftutils/modtbl/ClientSideSearchableTable.scala | Scala | mit | 2,316 |
package org.jetbrains.plugins.scala
package codeInspection.collections
import org.jetbrains.plugins.scala.codeInspection.InspectionBundle
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScExpression
/**
* Nikolay.Tropin
* 2014-05-05
*/
/** Inspection that offers to simplify `coll.filter(p).headOption`
  * into `coll.find(p)`; the actual rewrite lives in [[FilterHeadOption]].
  */
class FilterHeadOptionInspection extends OperationOnCollectionInspection {
  override def possibleSimplificationTypes: Array[SimplificationType] =
    Array(FilterHeadOption)
}
/** Rewrites `qual.filter(cond).headOption` to `qual.find(cond)`.
  * Skipped when `cond` has side effects, since `find` may evaluate the
  * predicate fewer times than `filter` would.
  */
object FilterHeadOption extends SimplificationType {
  def hint = InspectionBundle.message("filter.headOption.hint")

  override def getSimplification(expr: ScExpression): Option[Simplification] = {
    expr match {
      // Backticked identifiers are extractor objects matching ".filter"/".headOption"
      // method-call shapes on the PSI tree.
      case qual`.filter`(cond)`.headOption`() if !hasSideEffects(cond) =>
        Some(replace(expr).withText(invocationText(qual, "find", cond)).highlightFrom(qual))
      case _ => None
    }
  }
}
| triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/collections/FilterHeadOptionInspection.scala | Scala | apache-2.0 | 841 |
package controllers
import java.util.concurrent.TimeUnit
import org.specs2.mutable._
import play.api.libs.json._
import play.api.test.Helpers._
import play.api.test._
import scala.concurrent._
import scala.concurrent.duration._
/**
* You can mock out a whole application including requests, plugins etc.
* For more information, consult the wiki.
*/
/** Integration test for the Tasks API, driving routes through a
  * FakeApplication and asserting on HTTP status codes only.
  */
class TasksIT extends Specification {

  val timeout: FiniteDuration = FiniteDuration(5, TimeUnit.SECONDS)

  // Fixture payload: a taskset with two (s, p, o) triples.
  val tasksetJson: JsValue = Json.parse(
    """
      {
        "uuid": "testuuid",
        "tasks": [
          {
            "s": "task1s",
            "p": "task1p",
            "o": "task1o"
          },
          {
            "s": "task2s",
            "p": "task2p",
            "o": "task2o"
          }
        ]
      }
    """)

  "Tasks API" should {
    // NOTE(review): this test's description duplicates the next one's, and it
    // passes a JsValue to withTextBody (which normally takes a String) —
    // confirm this compiles via an implicit and sends the intended body.
    "Create Tasks from a JSON-LD body" in {
      running(FakeApplication()) {
        val request =
          FakeRequest.apply(PUT, "/tasks").withTextBody(tasksetJson)
        val response = route(request)
        response.isDefined mustEqual true
        val result = Await.result(response.get, timeout)
        result.header.status must equalTo(CREATED)
      }
    }

    "Create a Tasks from a JSON-LD body" in {
      running(FakeApplication()) {
        val request =
          FakeRequest.apply(POST, "/tasksets").withJsonBody(tasksetJson)
        val response = route(request)
        response.isDefined mustEqual true
        val result = Await.result(response.get, timeout)
        result.header.status must equalTo(CREATED)
      }
    }

//    "fail inserting a non valid json" in {
//      running(FakeApplication()) {
//        val request = FakeRequest.apply(POST, "/user").withJsonBody(Json.obj(
//          "firstName" -> 98,
//          "lastName" -> "London",
//          "age" -> 27))
//        val response = route(request)
//        response.isDefined mustEqual true
//        val result = Await.result(response.get, timeout)
//        contentAsString(response.get) mustEqual "invalid json"
//        result.header.status mustEqual BAD_REQUEST
//      }
//    }
//
//    "update a valid json" in {
//      running(FakeApplication()) {
//        val request = FakeRequest.apply(PUT, "/user/Jack/London").withJsonBody(Json.obj(
//          "firstName" -> "Jack",
//          "lastName" -> "London",
//          "age" -> 27,
//          "active" -> true))
//        val response = route(request)
//        response.isDefined mustEqual true
//        val result = Await.result(response.get, timeout)
//        result.header.status must equalTo(CREATED)
//      }
//    }
//
//    "fail updating a non valid json" in {
//      running(FakeApplication()) {
//        val request = FakeRequest.apply(PUT, "/user/Jack/London").withJsonBody(Json.obj(
//          "firstName" -> "Jack",
//          "lastName" -> "London",
//          "age" -> 27))
//        val response = route(request)
//        response.isDefined mustEqual true
//        val result = Await.result(response.get, timeout)
//        contentAsString(response.get) mustEqual "invalid json"
//        result.header.status mustEqual BAD_REQUEST
//      }
//    }
  }
}
| BonarBeavis/veritask | test/controllers/TasksIT.scala | Scala | apache-2.0 | 3,374 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.qscript.qsu
import quasar.Planner.PlannerError
import quasar.{Qspec, TreeMatchers}
import quasar.ejson.EJson
import quasar.fp._
import quasar.qscript.{construction, Hole, ExcludeId, OnUndefined, SrcHole}
import quasar.qscript.qsu.{QScriptUniform => QSU}
import slamdata.Predef.{Map => _, _}
import matryoshka._
import matryoshka.data._
import org.specs2.matcher.{Expectable, MatchResult, Matcher}
import pathy.Path._
import scalaz.{\/, EitherT, Need, StateT}
import scalaz.syntax.applicative._
import scalaz.syntax.either._
import scalaz.syntax.show._
import Fix._
import QSU.Rotation
import QSUGraph.Extractors._
/** Spec for the ExpandShifts phase: a `multiLeftShift` node must be expanded
  * into a chain of ordinary `LeftShift`s whose repairs thread the original
  * value plus each shift's output through a static map, guarded by identity
  * equality so mismatched shift cardinalities produce Undefined.
  *
  * NOTE(review): object name `eshSpec` does not follow the usual
  * `<Phase>Spec` convention — confirm it is intended.
  */
object eshSpec extends Qspec with QSUTTypes[Fix] with TreeMatchers {
  type QSU[A] = QScriptUniform[A]
  val qsu = QScriptUniform.DslT[Fix]
  val func = construction.Func[Fix]
  val recFunc = construction.RecFunc[Fix]
  type F[A] = EitherT[StateT[Need, Long, ?], PlannerError, A]
  val hole: Hole = SrcHole
  // Builds a mapping-function leaf referring to the i-th multi-shift output.
  def index(i: Int): FreeMapA[QAccess[Hole] \/ Int] =
    i.right[QAccess[Hole]].pure[FreeMapA]
  "convert singly nested LeftShift/ThetaJoin" in {
    val dataset = qsu.leftShift(
      qsu.read(rootDir </> file("dataset")),
      recFunc.Hole,
      ExcludeId,
      OnUndefined.Omit,
      func.RightTarget,
      Rotation.ShiftArray)
    // Two simultaneous shifts over "foo" and "bar", combined by addition.
    val multiShift = QSUGraph.fromTree(qsu.multiLeftShift(
      dataset,
      List(
        (func.ProjectKeyS(func.Hole, "foo"), ExcludeId, Rotation.ShiftArray),
        (func.ProjectKeyS(func.Hole, "bar"), ExcludeId, Rotation.ShiftArray)
      ),
      OnUndefined.Omit,
      func.Add(index(0), index(1))
    ))
    multiShift must expandTo {
      case qg@Map(
        LeftShift(
          LeftShift(
            LeftShift(
              Read(afile),
              shiftedReadStruct,
              ExcludeId,
              OnUndefined.Omit,
              shiftedReadRepair,
              Rotation.ShiftArray
            ),
            projectFoo,
            ExcludeId,
            OnUndefined.Emit,
            innerRepair,
            Rotation.ShiftArray
          ),
          projectBar,
          ExcludeId,
          OnUndefined.Emit,
          outerRepair,
          Rotation.ShiftArray
        ),
        fm
      ) =>
        fm must beTreeEqual(
          func.Add(
            func.ProjectKeyS(func.Hole, "0"),
            func.ProjectKeyS(func.Hole, "1")))
        projectBar.linearize must beTreeEqual(
          func.ProjectKeyS(func.ProjectKeyS(func.Hole, "original"), "bar")
        )
        projectFoo.linearize must beTreeEqual(
          func.ProjectKeyS(func.Hole, "foo")
        )
        shiftedReadStruct.linearize must beTreeEqual(
          func.Hole
        )
        shiftedReadRepair must beTreeEqual(
          func.RightTarget
        )
        innerRepair must beTreeEqual(
          func.StaticMapS(
            "original" -> func.AccessLeftTarget(Access.valueHole(_)),
            "0" -> func.RightTarget)
        )
        // Outer repair only emits when both shifts' identities agree.
        outerRepair must beTreeEqual(
          func.Cond(
            func.Eq(
              func.AccessLeftTarget(Access.id(IdAccess.identity[Fix[EJson]]('esh0), _)),
              func.AccessLeftTarget(Access.id(IdAccess.identity[Fix[EJson]]('esh1), _))),
            func.StaticMapS(
              "original" ->
                func.ProjectKeyS(func.AccessLeftTarget(Access.valueHole(_)), "original"),
              "0" ->
                func.ProjectKeyS(func.AccessLeftTarget(Access.valueHole(_)), "0"),
              "1" -> func.RightTarget),
            func.Undefined
          )
        )
        ok
    }
    ok
  }
  // Same shape as above, but only the resulting structure is checked.
  "convert singly nested LeftShift/ThetaJoin with onUndefined = OnUndefined.Emit" in {
    val dataset = qsu.leftShift(
      qsu.read(rootDir </> file("dataset")),
      recFunc.Hole,
      ExcludeId,
      OnUndefined.Omit,
      func.RightTarget,
      Rotation.ShiftArray)
    val multiShift = QSUGraph.fromTree(qsu.multiLeftShift(
      dataset,
      List(
        (func.ProjectKeyS(func.Hole, "foo"), ExcludeId, Rotation.ShiftArray),
        (func.ProjectKeyS(func.Hole, "bar"), ExcludeId, Rotation.ShiftArray)
      ),
      OnUndefined.Emit,
      func.Add(index(0), index(1))
    ))
    multiShift must expandTo {
      case qg@Map(
        LeftShift(
          LeftShift(
            LeftShift(
              Read(afile),
              shiftedReadStruct,
              ExcludeId,
              OnUndefined.Omit,
              shiftedReadRepair,
              Rotation.ShiftArray
            ),
            projectFoo,
            ExcludeId,
            OnUndefined.Emit,
            innerRepair,
            Rotation.ShiftArray
          ),
          projectBar,
          ExcludeId,
          OnUndefined.Emit,
          outerRepair,
          Rotation.ShiftArray
        ),
        fm
      ) => ok
    }
    ok
  }
  // Three simultaneous shifts expand into three nested LeftShifts with
  // progressively wider static-map repairs.
  "convert doubly nested LeftShift/ThetaJoin" in {
    val dataset = qsu.leftShift(
      qsu.read(rootDir </> file("dataset")),
      recFunc.Hole,
      ExcludeId,
      OnUndefined.Omit,
      func.RightTarget,
      Rotation.ShiftArray)
    val multiShift = QSUGraph.fromTree(qsu.multiLeftShift(
      dataset,
      List(
        (func.ProjectKeyS(func.Hole, "foo"), ExcludeId, Rotation.ShiftArray),
        (func.ProjectKeyS(func.Hole, "bar"), ExcludeId, Rotation.ShiftArray),
        (func.ProjectKeyS(func.Hole, "baz"), ExcludeId, Rotation.ShiftArray)
      ),
      OnUndefined.Omit,
      func.Subtract(func.Add(index(0), index(1)), index(2))
    ))
    multiShift must expandTo {
      case qg @ Map(
        LeftShift(
          LeftShift(
            LeftShift(
              LeftShift(
                Read(afile),
                shiftedReadStruct,
                ExcludeId,
                OnUndefined.Omit,
                shiftedReadRepair,
                Rotation.ShiftArray
              ),
              projectFoo,
              ExcludeId,
              OnUndefined.Emit,
              innermostRepair,
              Rotation.ShiftArray
            ),
            projectBar,
            ExcludeId,
            OnUndefined.Emit,
            innerRepair,
            Rotation.ShiftArray
          ),
          projectBaz,
          ExcludeId,
          OnUndefined.Emit,
          outerRepair,
          Rotation.ShiftArray
        ),
        fm
      ) =>
        fm must beTreeEqual(
          func.Subtract(
            func.Add(
              func.ProjectKeyS(func.Hole, "0"),
              func.ProjectKeyS(func.Hole, "1")),
            func.ProjectKeyS(func.Hole, "2")))
        projectBaz.linearize must beTreeEqual(
          func.ProjectKeyS(func.ProjectKeyS(func.Hole, "original"), "baz")
        )
        projectBar.linearize must beTreeEqual(
          func.ProjectKeyS(func.ProjectKeyS(func.Hole, "original"), "bar")
        )
        projectFoo.linearize must beTreeEqual(
          func.ProjectKeyS(func.Hole, "foo")
        )
        shiftedReadStruct.linearize must beTreeEqual(
          func.Hole
        )
        shiftedReadRepair must beTreeEqual(
          func.RightTarget
        )
        innermostRepair must beTreeEqual(
          func.StaticMapS(
            "original" -> func.AccessLeftTarget(Access.valueHole(_)),
            "0" -> func.RightTarget)
        )
        innerRepair must beTreeEqual(
          func.Cond(
            func.Eq(
              func.AccessLeftTarget(Access.id(IdAccess.identity[Fix[EJson]]('esh0), _)),
              func.AccessLeftTarget(Access.id(IdAccess.identity[Fix[EJson]]('esh1), _))),
            func.StaticMapS(
              "original" ->
                func.ProjectKeyS(func.AccessLeftTarget(Access.valueHole(_)), "original"),
              "0" ->
                func.ProjectKeyS(func.AccessLeftTarget(Access.valueHole(_)), "0"),
              "1" -> func.RightTarget),
            func.Undefined))
        outerRepair must beTreeEqual(
          func.Cond(
            func.Eq(
              func.AccessLeftTarget(Access.id(IdAccess.identity[Fix[EJson]]('esh1), _)),
              func.AccessLeftTarget(Access.id(IdAccess.identity[Fix[EJson]]('esh2), _))),
            func.StaticMapS(
              "original" ->
                func.ProjectKeyS(func.AccessLeftTarget(Access.valueHole(_)), "original"),
              "0" ->
                func.ProjectKeyS(func.AccessLeftTarget(Access.valueHole(_)), "0"),
              "1" ->
                func.ProjectKeyS(func.AccessLeftTarget(Access.valueHole(_)), "1"),
              "2" -> func.RightTarget),
            func.Undefined
          ))
        ok
    }
    ok
  }
  // Custom matcher: runs ApplyProvenance then ExpandShifts on the graph and
  // applies `pf` to the result; a non-matching graph becomes a failure with
  // the graph's Show output.
  def expandTo(pf: PartialFunction[QSUGraph, MatchResult[_]]): Matcher[QSUGraph] =
    new Matcher[QSUGraph] {
      def apply[S <: QSUGraph](s: Expectable[S]): MatchResult[S] = {
        val actual =
          ApplyProvenance[Fix, F](s.value).flatMap(
            ExpandShifts[Fix, F]
          ).run.eval(0).value
        val mapped = actual.toOption.flatMap(aq => pf.lift(aq.graph)) map { r =>
          result(
            r.isSuccess,
            s.description + " is correct: " + r.message,
            s.description + " is incorrect: " + r.message,
            s)
        }
        // TODO Show[QSUGraph[Fix]]
        mapped.getOrElse(
          failure(s"${actual.shows} did not match expected pattern", s))
      }
    }
}
| jedesah/Quasar | connector/src/test/scala/quasar/qscript/qsu/ExpandShiftsSpec.scala | Scala | apache-2.0 | 9,871 |
package generator.graphql
import generator.graphql.helpers.TestHelpers
import io.apibuilder.builders.ApiBuilderServiceBuilders
import io.apibuilder.generator.v0.models.InvocationForm
import org.scalatest.matchers.must.Matchers
import org.scalatest.wordspec.AnyWordSpec
/** Smoke test: generating GraphQL for an empty ApiBuilder service must
  * still succeed and produce exactly one file, "schema.graphql".
  */
class GraphQLSchemaGeneratorSpec extends AnyWordSpec with Matchers
  with ApiBuilderServiceBuilders
  with TestHelpers
{
  "Services with no types" in {
    val s = makeService()
    rightOrErrors(GraphQLSchemaGenerator.invoke(InvocationForm(s))).map(_.name) must equal(
      Seq("schema.graphql")
    )
  }
}
| mbryzek/apidoc-generator | graphql-generator/src/test/scala/generator/graphql/GraphQLSchemaGeneratorSpec.scala | Scala | mit | 583 |
package de.dfki.cps.specific.sysml
import scala.util.parsing.input.Positional
/** Marker for SysML model elements that carry a [[Multiplicity]]. */
trait MultiplicityElement {
  val multiplicity: Multiplicity
}
/** A UML/SysML multiplicity with a lower bound and a possibly-unlimited
  * upper bound; renders as e.g. "[*]", "[0..3]", "[2..*]" or "[5]".
  */
case class Multiplicity(
  lower: BigInt,
  upper: UnlimitedNatural
) extends Positional {
  /** @see UML Spec 15-03-01 - Table 7.1 *
  def collectionType =
    ( isOrdered, isUnique) match {
      case ( false    , true   ) => de.dfki.cps.specific.ocl.Types.SetType
      case ( true     , true   ) => de.dfki.cps.specific.ocl.Types.OrderedSetType
      case ( false    , false  ) => de.dfki.cps.specific.ocl.Types.BagType
      case ( true     , false  ) => de.dfki.cps.specific.ocl.Types.SequenceType
    }*/
  // Canonical textual form of the bounds; always Some so it composes with
  // mkString in toString below.
  def boundsString = (lower,upper) match {
    case (x,UnlimitedNatural.Infinity) if x == 0 => Some("*")
    case (n,UnlimitedNatural.Infinity) => Some(s"$n..*")
    case (n,UnlimitedNatural.Finite(m)) if (n == m) => Some(s"$n")
    case (n,m) => Some(s"$n..$m")
  }
  /*def designatorString = {
    val ordered = if (isOrdered) Some("ordered") else None
    val nonunique = if (!isUnique) Some("nonunique") else None
    ordered.orElse(nonunique).map { _ =>
      (ordered ++ nonunique).mkString(", ")
    }.map(i => s" { $i }")
  }*/
  override def toString = boundsString.map(x => s"[$x]").mkString
} | DFKI-CPS/specific-sysml | src/main/scala/de/dfki/cps/specific/sysml/Multiplicity.scala | Scala | mit | 1,261 |
package com.outr.arango.api.model
import io.circe.Json
/** ArangoDB REST response model for the 404 case of the list-vertex graph
  * endpoint; mirrors the `error`/`code`/`errorMessage`/`errorNum` envelope.
  */
case class GeneralGraphListVertexHttpExamplesRc404(error: Boolean,
                                                   code: Option[Int] = None,
                                                   errorMessage: Option[String] = None,
                                                   errorNum: Option[Int] = None) | outr/arangodb-scala | api/src/main/scala/com/outr/arango/api/model/GeneralGraphListVertexHttpExamplesRc404.scala | Scala | mit | 370 |
package models.storage
import models.storage.event.move.MoveObject
import no.uio.musit.formatters.WithDateTimeFormatters
import no.uio.musit.models.ObjectTypes.ObjectType
import no.uio.musit.models.{ActorId, NamedPathElement, NodePath, ObjectUUID}
import org.joda.time.DateTime
import play.api.libs.json.{Json, Writes}
/** One move of an object between storage facilities: who registered/performed
  * it, when, which object (by UUID and type), and the from/to locations.
  */
case class LocationHistory(
    registeredBy: ActorId,
    registeredDate: DateTime,
    doneBy: Option[ActorId],
    doneDate: Option[DateTime],
    id: ObjectUUID,
    objectType: ObjectType,
    from: FacilityLocation,
    to: FacilityLocation
)
object LocationHistory extends WithDateTimeFormatters {

  /** Maps a [[MoveObject]] event plus resolved from/to locations into a
    * [[LocationHistory]] row. Assumes the event has registeredBy/Date and
    * an affected thing set (see comment below).
    */
  def fromMoveObject(
      moveObject: MoveObject,
      from: FacilityLocation,
      to: FacilityLocation
  ): LocationHistory = {
    LocationHistory(
      // registered by and date is required on Event, so they must be there.
      registeredBy = moveObject.registeredBy.get,
      registeredDate = moveObject.registeredDate.get,
      doneBy = moveObject.doneBy,
      doneDate = moveObject.doneDate,
      // NOTE(review): affectedThing.get will throw if the event lacks a
      // target object — presumably guaranteed upstream; confirm.
      id = moveObject.affectedThing.get,
      objectType = moveObject.objectType,
      from = from,
      to = to
    )
  }

  implicit val writes: Writes[LocationHistory] = Json.writes[LocationHistory]
}
// TODO: DELETE ME when Migration is performed in production
// TODO: DELETE ME when Migration is performed in production
// Legacy shape: numeric id and a mandatory doneDate, kept only until the
// production migration replaces it with LocationHistory above.
case class LocationHistory_Old(
    registeredBy: ActorId,
    registeredDate: DateTime,
    doneBy: Option[ActorId],
    doneDate: DateTime,
    id: Long,
    objectType: ObjectType,
    from: FacilityLocation,
    to: FacilityLocation
)
// JSON serialization for the legacy shape; remove together with the case class.
object LocationHistory_Old extends WithDateTimeFormatters {
  implicit val writes: Writes[LocationHistory_Old] = Json.writes[LocationHistory_Old]
}
/** A position in the storage-node tree: the node path plus the named
  * elements along it.
  */
case class FacilityLocation(
    path: NodePath,
    pathNames: Seq[NamedPathElement]
)
object FacilityLocation {

  /** Builds a [[FacilityLocation]] from a (path, pathNames) pair. */
  def fromTuple(t: (NodePath, Seq[NamedPathElement])): FacilityLocation = {
    val (path, names) = t
    FacilityLocation(path, names)
  }

  implicit val writes: Writes[FacilityLocation] = Json.writes[FacilityLocation]
}
| MUSIT-Norway/musit | service_backend/app/models/storage/LocationHistory.scala | Scala | gpl-2.0 | 1,978 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset for code snippets matching specific criteria and returns a sample of them, giving a basic overview of the dataset's contents without deeper analysis.