code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/** Licensed to Gravity.com under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Gravity.com licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gravity.hbase.schema
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.util._
import scala.Predef
import scala.collection.JavaConversions._
import org.apache.hadoop.conf.Configuration
import java.io._
import org.apache.hadoop.io.{BytesWritable, Writable}
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
import org.apache.hadoop.hbase.filter.{Filter, FilterList, SingleColumnValueFilter}
import scala.collection._
import org.joda.time.DateTime
import scala.collection.mutable.{ListBuffer, Buffer}
import java.util.{HashMap, NavigableSet}
/* )\\._.,--....,'``.
.b--. /; _.. \\ _\\ (`._ ,.
`=,-,-'~~~ `----(,_..'--(,_..'`-.;.' */
/** Expresses an input stream that can read ordered primitives from a binary input, and can also use the ByteConverter[T] interface to read serializable objects.
*
*/
class PrimitiveInputStream(input: InputStream) extends DataInputStream(input) {
  /**
   * Read an object, assuming the existence of a ComplexByteConverter[T] implementation
   * The byte converter is stateless and should be therefore defined somewhere as an implicit object
   */
  def readObj[T](implicit c: ComplexByteConverter[T]): T = {
    c.read(this)
  }

  /** Skip over a serialized Long (8 bytes) without materializing it. */
  def skipLong() { this.skipBytes(8) }

  //WORK IN PROGRESS
  /**
   * Deserialize a row previously written by PrimitiveOutputStream.writeRow.
   *
   * Wire format: [rowKeyLen][rowKeyBytes][familyCount] then, per family,
   * [kvCount] followed by [isTypedColumn][columnIdx?][keyLen][keyBytes][valLen][valBytes]
   * for each key/value pair.
   */
  def readRow[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R]](table: HbaseTable[T, R, RR]): RR = {
    val rowBytesLength = readInt()
    val rowBytes = new Array[Byte](rowBytesLength)
    // readFully() blocks until the whole buffer is populated; a bare read() may
    // return fewer bytes than requested and silently desynchronize the stream.
    readFully(rowBytes)
    val rowId = table.rowKeyConverter.fromBytes(rowBytes)
    val ds = DeserializedResult(rowId.asInstanceOf[AnyRef], table.families.length)

    val famCount = readInt()
    for (i <- 0 until famCount) {
      val fam = table.familyByIndex(i)
      val kvLength = readInt()
      for (ii <- 0 until kvLength) {
        // A typed column was serialized with its strongly typed column converter,
        // identified by index; otherwise fall back to the family-level converter.
        val isTypedColumn = readBoolean()
        val converter = if (isTypedColumn) {
          val colIdx = readInt()
          table.columnByIndex(colIdx)
        } else {
          fam
        }

        val keyLength = readInt()
        val keyBytes = new Array[Byte](keyLength)
        readFully(keyBytes)
        val valueLength = readInt()
        val valueBytes = new Array[Byte](valueLength)
        readFully(valueBytes)

        val key = converter.keyFromBytesUnsafe(keyBytes)
        val value = converter.valueFromBytesUnsafe(valueBytes)
        ds.add(fam, key, value, 0L)
      }
    }
    table.rowBuilder(ds)
  }
}
/** Expresses an output stream that can write ordered primitives into a binary output, and can also use the ByteConverter[T] interface to write serializable objects.
*/
class PrimitiveOutputStream(output: OutputStream) extends DataOutputStream(output) {

  //WORK IN PROGRESS
  // Serializes a full row in the format consumed by PrimitiveInputStream.readRow:
  // [rowKeyLen][rowKeyBytes][familyCount] then, per family, [kvCount] followed by
  // [isTypedColumn][columnIdx?][keyLen][keyBytes][valLen][valBytes] per key/value pair.
  def writeRow[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R]](table: HbaseTable[T,R,RR],row: RR) {
    //Serialize row id
    val rowIdBytes = row.table.rowKeyConverter.toBytes(row.rowid)
    writeInt(rowIdBytes.length)
    write(rowIdBytes)

    //Write number of families
    writeInt(row.result.values.length)

    var idx = 0
    while (idx < row.result.values.length) {
      val family = row.result.values(idx)
      val colFam = row.table.familyByIndex(idx)
      if(family == null) {
        // Family absent on this row: record zero key/value pairs so the reader stays aligned.
        writeInt(0)
      }else {
        writeInt(family.size())
        family.foreach {
          case (colKey: AnyRef, colVal: AnyRef) =>
            //See if it's a strongly typed column
            val converters: KeyValueConvertible[_, _, _] = row.table.columnsByName.get(colKey) match {
              case Some(col) if col.family.index == colFam.index =>
                // Strongly typed column: flag it and record its index so the reader
                // can resolve the identical converter on the way back in.
                writeBoolean(true)
                writeInt(col.columnIndex)
                col
              case _ =>
                // No matching typed column: fall back to the family-level converter.
                writeBoolean(false)
                colFam
            }
            // Key and value are each written length-prefixed.
            val keyBytes = converters.keyToBytesUnsafe(colKey)
            writeInt(keyBytes.length)
            write(keyBytes)
            val valBytes = converters.valueToBytesUnsafe(colVal)
            writeInt(valBytes.length)
            write(valBytes)
        }
      }
      idx += 1
    }
  }

  /**
   * Write an object, assuming the existence of a ComplexByteConverter[T] implementation.
   * The byte converter is stateless and should be therefore defined somewhere as an implicit object
   */
  def writeObj[T](obj: T)(implicit c: ComplexByteConverter[T]) {
    c.write(obj, this)
  }
}
/**
* Class to be implemented by custom converters
*/
abstract class ByteConverter[T] {
  /** Serialize a value to its binary representation. */
  def toBytes(t: T): Array[Byte]

  /** Deserialize from an entire byte array. */
  def fromBytes(bytes: Array[Byte]): T = fromBytes(bytes, 0, bytes.length)

  /** Deserialize from a slice of a byte array. */
  def fromBytes(bytes: Array[Byte], offset: Int, length: Int): T

  /** Deserialize from HBase's binary-escaped string representation. */
  def fromByteString(str: String): T = {
    fromBytes(Bytes.toBytesBinary(str))
  }

  /** Serialize to HBase's binary-escaped string representation. */
  def toByteString(item: T): String = {
    Bytes.toStringBinary(toBytes(item))
  }

  /** Wrap the serialized value for Hadoop Writable-based APIs. */
  def toBytesWritable(t: T): BytesWritable = {
    new BytesWritable(toBytes(t))
  }

  def fromBytesWritable(bytes: BytesWritable): T = {
    // BytesWritable.getBytes returns the backing buffer, which may be larger than
    // the valid payload; honor getLength to avoid deserializing trailing garbage.
    fromBytes(bytes.getBytes, 0, bytes.getLength)
  }
}
/**
* Simple high performance conversions from complex types to bytes
*/
abstract class ComplexByteConverter[T] extends ByteConverter[T] {

  /** Serialize by streaming the value through write() into an in-memory buffer. */
  override def toBytes(t: T): Array[Byte] = {
    val buffer = new ByteArrayOutputStream()
    write(t, new PrimitiveOutputStream(buffer))
    buffer.toByteArray
  }

  /** Stream the value's fields onto the output; implemented by subclasses. */
  def write(data: T, output: PrimitiveOutputStream)

  /** Deserialize from a slice of a byte array by streaming it through read(). */
  override def fromBytes(bytes: Array[Byte], offset: Int, length: Int): T =
    read(new PrimitiveInputStream(new ByteArrayInputStream(bytes, offset, length)))

  /** Deserialize from an entire byte array by streaming it through read(). */
  override def fromBytes(bytes: Array[Byte]): T =
    read(new PrimitiveInputStream(new ByteArrayInputStream(bytes)))

  /** Rebuild the value from the input stream; implemented by subclasses. */
  def read(input: PrimitiveInputStream): T

  /** Read one field, falling back to `valueOnFail` when the stream is exhausted
    * or the read raises an IOException (useful for schema-evolved trailing fields).
    */
  def safeReadField[A](input: PrimitiveInputStream)(readField: (PrimitiveInputStream) => A, valueOnFail: A): A = {
    if (input.available() < 1) {
      valueOnFail
    } else {
      try readField(input)
      catch { case _: IOException => valueOnFail }
    }
  }
}
/** Shared wire format for the map converters: [entryCount] then, per entry,
  * [keyLen][keyBytes][valLen][valBytes].
  */
trait MapStream[K, V] {
  /** Converter for keys. */
  val c: ByteConverter[K]
  /** Converter for values. */
  val d: ByteConverter[V]

  /** Serialize every entry of `map` to `output` using the length-prefixed format above. */
  def writeMap(map: Map[K, V], output: PrimitiveOutputStream) {
    output.writeInt(map.size)
    for ((k, v) <- map) {
      val keyBytes = c.toBytes(k)
      val valBytes = d.toBytes(v)
      output.writeInt(keyBytes.length)
      output.write(keyBytes)
      output.writeInt(valBytes.length)
      output.write(valBytes)
    }
  }

  /** Deserialize the entries written by writeMap, preserving their on-wire order. */
  def readMap(input: PrimitiveInputStream): Array[(K, V)] = {
    val length = input.readInt()
    val kvarr = Array.ofDim[(K, V)](length)
    var i = 0
    while (i < length) {
      val keyLength = input.readInt()
      val keyArr = new Array[Byte](keyLength)
      // readFully() guarantees the buffer fills completely; a bare read() may stop
      // short and corrupt every subsequent length prefix.
      input.readFully(keyArr)
      val key = c.fromBytes(keyArr)
      val valLength = input.readInt()
      val valArr = new Array[Byte](valLength)
      input.readFully(valArr)
      val value = d.fromBytes(valArr)
      kvarr(i) = (key, value)
      i += 1
    }
    kvarr
  }
}
/** Serializes/deserializes immutable maps via the shared MapStream wire format. */
class ImmutableMapConverter[K, V](implicit val c: ByteConverter[K],val d: ByteConverter[V]) extends ComplexByteConverter[scala.collection.immutable.Map[K, V]] with MapStream[K,V] {
  /** Streams every entry using the MapStream format. */
  override def write(map: scala.collection.immutable.Map[K, V], output: PrimitiveOutputStream) {
    writeMap(map, output)
  }
  /** Rebuilds an immutable map from the serialized key/value pairs. */
  override def read(input: PrimitiveInputStream): Predef.Map[K, V] =
    scala.collection.immutable.Map[K, V](readMap(input): _*)
}
/** Serializes/deserializes mutable maps via the shared MapStream wire format. */
class MutableMapConverter[K, V](implicit val c: ByteConverter[K],val d: ByteConverter[V]) extends ComplexByteConverter[scala.collection.mutable.Map[K, V]] with MapStream[K,V] {
  /** Streams every entry using the MapStream format. */
  override def write(map: scala.collection.mutable.Map[K, V], output: PrimitiveOutputStream) {
    writeMap(map, output)
  }
  /** Rebuilds a fresh mutable map from the serialized key/value pairs. */
  override def read(input: PrimitiveInputStream): mutable.Map[K, V] =
    scala.collection.mutable.Map[K, V](readMap(input): _*)
}
/** Serializes/deserializes generic collection.Map instances via the MapStream wire format. */
class MapConverter[K, V](implicit val c: ByteConverter[K],val d: ByteConverter[V]) extends ComplexByteConverter[Map[K, V]] with MapStream[K,V] {
  /** Streams every entry using the MapStream format. */
  override def write(map: Map[K, V], output: PrimitiveOutputStream) {
    writeMap(map, output)
  }
  /** Rebuilds a map from the serialized key/value pairs. */
  override def read(input: PrimitiveInputStream): Map[K, V] =
    Map[K, V](readMap(input): _*)
}
/** Serializes/deserializes mutable sets via the shared CollStream wire format. */
class MutableSetConverter[T](implicit c: ByteConverter[T]) extends ComplexByteConverter[scala.collection.mutable.Set[T]] with CollStream[T] {
  /** Writes the set size followed by each serialized element. */
  override def write(set: scala.collection.mutable.Set[T], output: PrimitiveOutputStream) {
    writeColl(set, set.size, output, c)
  }
  /** Materializes the serialized elements into a fresh mutable set. */
  override def read(input: PrimitiveInputStream): scala.collection.mutable.Set[T] = {
    val elements = readColl(input, c)
    scala.collection.mutable.Set(elements: _*)
  }
}
/** Serializes/deserializes immutable sets via the shared CollStream wire format. */
class ImmutableSetConverter[T](implicit c: ByteConverter[T]) extends ComplexByteConverter[scala.collection.immutable.Set[T]] with CollStream[T] {
  /** Writes the set size followed by each serialized element. */
  override def write(set: scala.collection.immutable.Set[T], output: PrimitiveOutputStream) {
    writeColl(set, set.size, output, c)
  }
  /** Materializes the serialized elements into an immutable set. */
  override def read(input: PrimitiveInputStream): scala.collection.immutable.Set[T] = {
    val elements = readColl(input, c)
    elements.toSet
  }
}
/** Serializes/deserializes generic collection.Set instances via the CollStream wire format. */
class SetConverter[T](implicit c: ByteConverter[T]) extends ComplexByteConverter[Set[T]] with CollStream[T] {
  /** Writes the set size followed by each serialized element. */
  override def write(set: Set[T], output: PrimitiveOutputStream) {
    writeColl(set, set.size, output, c)
  }
  /** Materializes the serialized elements into a set. */
  override def read(input: PrimitiveInputStream): Set[T] = {
    val elements = readColl(input, c)
    elements.toSet
  }
}
/** Serializes/deserializes sequences via the shared CollStream wire format. */
class SeqConverter[T](implicit c: ByteConverter[T]) extends ComplexByteConverter[Seq[T]] with CollStream[T] {
  /** Writes the sequence length followed by each serialized element, preserving order. */
  override def write(seq: Seq[T], output: PrimitiveOutputStream) {
    writeColl(seq, seq.length, output, c)
  }
  /** Reads the elements back in their original order. */
  override def read(input: PrimitiveInputStream): scala.Seq[T] = {
    val elements = readColl(input, c)
    elements.toSeq
  }
}
/** Serializes/deserializes mutable buffers via the shared CollStream wire format. */
class BufferConverter[T](implicit c: ByteConverter[T]) extends ComplexByteConverter[Buffer[T]] with CollStream[T] {
  /** Delegates to writeBuf, which remains directly callable as well. */
  override def write(buf: Buffer[T], output: PrimitiveOutputStream) {
    writeBuf(buf, output)
  }
  /** Writes the buffer length followed by each serialized element, preserving order. */
  def writeBuf(buf: Buffer[T], output: PrimitiveOutputStream) {
    writeColl(buf, buf.length, output, c)
  }
  /** Reads the elements into a fresh mutable buffer, preserving order. */
  override def read(input: PrimitiveInputStream): mutable.Buffer[T] =
    readColl(input, c)
}
/** Shared wire format for collection converters: [elementCount] then, per element,
  * [byteLen][bytes].
  */
trait CollStream[T] {
  /** Write `length` then each element, length-prefixed, in iteration order. */
  def writeColl(items: Iterable[T], length: Int, output: PrimitiveOutputStream, c: ByteConverter[T]) {
    output.writeInt(length)
    val iter = items.iterator
    while (iter.hasNext) {
      val bytes = c.toBytes(iter.next())
      output.writeInt(bytes.length)
      output.write(bytes)
    }
  }

  /** Read the elements written by writeColl into a mutable buffer, in order. */
  def readColl(input: PrimitiveInputStream, c: ByteConverter[T]): Buffer[T] = {
    val length = input.readInt()
    val buff = Buffer[T]()
    var i = 0
    // Pattern match instead of isInstanceOf/asInstanceOf; the type argument is
    // erased, but the original cast carried the same caveat.
    c match {
      case cpx: ComplexByteConverter[T] =>
        while (i < length) {
          // Consume the per-element byte length; a complex converter reads its
          // fields straight off the stream instead of from the extracted array.
          input.readInt()
          buff += cpx.read(input)
          i += 1
        }
      case _ =>
        while (i < length) {
          val arrLength = input.readInt()
          val arr = new Array[Byte](arrLength)
          // readFully() guarantees the whole element is read; read() may stop
          // short and desynchronize the remaining length prefixes.
          input.readFully(arr)
          buff += c.fromBytes(arr)
          i += 1
        }
    }
    buff
  }
}
| GravityLabs/HPaste | src/main/scala/com/gravity/hbase/schema/Serialization.scala | Scala | apache-2.0 | 11,890 |
package org.apache.spark.mllib.api.ruby
import java.util.ArrayList
import org.apache.spark.mllib.util.LinearDataGenerator
import org.apache.spark.mllib.regression.LabeledPoint
object RubyMLLibUtilAPI {

  // Ruby callers cannot easily construct an Array[Double], so the weights arrive
  // as a java.util.ArrayList[String] and are converted to doubles here.
  def generateLinearInput(
      intercept: Double,
      weights: ArrayList[String],
      nPoints: Int,
      seed: Int,
      eps: Double = 0.1): Seq[LabeledPoint] = {
    val weightValues = weights.toArray.map(w => w.toString.toDouble)
    LinearDataGenerator.generateLinearInput(intercept, weightValues, nPoints, seed, eps)
  }
}
| ondra-m/ruby-spark | ext/spark/src/main/scala/RubyMLLibUtilAPI.scala | Scala | mit | 560 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.raster.data
import org.geotools.coverage.grid.GridGeometry2D
import org.geotools.coverage.grid.io.AbstractGridFormat
import org.geotools.parameter.Parameter
import org.locationtech.geomesa.raster.util.RasterUtils
import org.locationtech.geomesa.utils.geohash.{BoundingBox, Bounds}
import org.opengis.parameter.GeneralParameterValue
/**
* Takes the Array[GeneralParameterValue] from the read() function of GeoMesaCoverageReader and pulls
* out the gridGeometry, envelope, height and width, resolution, and bounding box from the query
* parameters. These are then used to query Accumulo and retrieve out the correct raster information.
* @param parameters the Array of GeneralParameterValues from the GeoMesaCoverageReader read() function.
*/
class GeoMesaCoverageQueryParams(parameters: Array[GeneralParameterValue]) {
  // Index the parameters by their descriptor name for keyed lookup below.
  val paramsMap = parameters.map { gpv => (gpv.getDescriptor.getName.getCode, gpv) }.toMap
  // The requested 2D grid geometry; throws if the reader did not supply it.
  val gridGeometry = paramsMap(AbstractGridFormat.READ_GRIDGEOMETRY2D.getName.toString)
    .asInstanceOf[Parameter[GridGeometry2D]].getValue
  val envelope = gridGeometry.getEnvelope
  val dim = gridGeometry.getGridRange2D.getBounds
  // Derived width/height/resolution shared with the rest of the raster pipeline.
  val rasterParams = RasterUtils.sharedRasterParams(gridGeometry, envelope)
  val width = rasterParams.width
  val height = rasterParams.height
  val resX = rasterParams.resX
  val resY = rasterParams.resY
  val suggestedQueryResolution = rasterParams.suggestedQueryResolution
  // Envelope corners clamped to valid ranges; each array is (longitude, latitude).
  val min = Array(correctedMinLongitude, correctedMinLatitude)
  val max = Array(correctedMaxLongitude, correctedMaxLatitude)
  val bbox = BoundingBox(Bounds(min(0), max(0)), Bounds(min(1), max(1)))

  /** Build the raster query for this request; no time bounds or auths are supplied. */
  def toRasterQuery: RasterQuery = RasterQuery(bbox, suggestedQueryResolution, None, None)

  // Clamp the envelope to valid geographic ranges ([-180, 180] lon, [-90, 90] lat).
  def correctedMaxLongitude: Double = Math.max(Math.min(envelope.getMaximum(0), 180), -180.0)
  def correctedMinLongitude: Double = Math.min(Math.max(envelope.getMinimum(0), -180), 180.0)
  def correctedMaxLatitude: Double = Math.max(Math.min(envelope.getMaximum(1), 90), -90.0)
  def correctedMinLatitude: Double = Math.min(Math.max(envelope.getMinimum(1), -90), 90.0)
}
package com.treode.cps.stub.scheduler
import java.util.concurrent.atomic.AtomicReference
import java.util.concurrent.{ForkJoinPool, ScheduledThreadPoolExecutor}
import scala.util.Random
import scala.util.continuations.reset
import com.treode.cps.thunk
import com.treode.cps.scheduler.{Scheduler, SchedulerConfig}
/** Strategy interface: each test-scheduler flavor (sequential, random, multithreaded)
  * decides how await/run/shutdown behave.
  */
private trait TestSchedulerConfig extends SchedulerConfig {

  /** Implement Scheduler.await appropriately for this test scheduler. */
  def await [A] (s: TestScheduler, k: => A @thunk): A

  /** Run until the condition is false or until there are no more tasks; include scheduled tasks
    * if `timers` is true.
    */
  def run (cond: => Boolean, timers: Boolean)

  /** Shutdown the scheduler and cleanup threads. */
  def shutdown()
}
class TestScheduler private [scheduler] (cfg: TestSchedulerConfig) extends Scheduler (cfg) {

  // Delegate await to the config so each flavor can drive the CPS continuation its
  // own way (single-threaded configs pump their task queue on the calling thread).
  override def await [A] (k: => A @thunk): A = cfg.await (this, k)

  // Escape hatch exposing the real Scheduler.await to configs that need it
  // (the multithreaded config delegates back here).
  def _await [A] (k: => A @thunk): A = super.await (k)

  /** Run until the condition is false or until there are no more tasks. New tasks may be enqueued
    * while running, and this may be called multiple times. Include timers in the run if
    * indicated, otherwise ignore any scheduled tasks.
    */
  def run (cond: => Boolean = true, timers: Boolean = true): Unit = cfg.run (cond, timers)

  /** Shutdown the scheduler and cleanup threads, if any. */
  def shutdown() = cfg.shutdown()
}
/** Run one task at a time in one thread, choosing the task first in first out. */
private class SequentialConfig extends TestSchedulerConfig {

  // Captures the first uncaught exception so run() can rethrow it on the test thread.
  private [this] var exception = None: Option [Throwable]

  // One stub serves as both the executor and the timer.
  private val stub = new SequentialStub
  val timer = stub
  val executor = stub

  def handleUncaughtException (e: Throwable) = exception = Some (e)

  def makeThump (s: Scheduler, k: () => Any) =
    SchedulerConfig.makeSafeThump (s, k)

  def makeThunk [A] (s: Scheduler, k: Either [Throwable, A] => Any) =
    SchedulerConfig.makeSafeThunk (s, k)

  // Kick off the continuation, then pump queued tasks (ignoring timers) until it
  // has produced a value; `v` is written by the reset block when `k` completes.
  def await [A] (s: TestScheduler, k: => A @thunk): A = {
    var v: A = null .asInstanceOf [A]
    reset {
      val _t1 = k
      v = _t1
    }
    run (v == null, false)
    v
  }

  // Execute one task at a time (FIFO order) until quiescent, the condition fails,
  // or a task raised; a captured exception is cleared and rethrown to the caller.
  def run (cond: => Boolean, timers: Boolean) {
    while (exception == None && !executor.isQuiet (timers) && cond)
      executor.executeOne (timers)
    exception match {
      case None => ()
      case Some (e) => exception = None; throw e
    }}

  // Nothing to clean up: every task ran on the caller's thread.
  def shutdown() = ()
}
/** Run one task at a time in one thread, choosing the task randomly. */
private class RandomConfig (r: Random) extends TestSchedulerConfig {

  // Captures the first uncaught exception so run() can rethrow it on the test thread.
  private [this] var exception = None: Option [Throwable]

  val random = r

  // One stub (seeded with `random` for reproducible orderings) serves as both
  // executor and timer.
  private val stub = new RandomStub (random)
  val timer = stub
  val executor = stub

  def handleUncaughtException (e: Throwable) = exception = Some (e)

  def makeThump (s: Scheduler, k: () => Any) =
    SchedulerConfig.makeSafeThump (s, k)

  def makeThunk [A] (s: Scheduler, k: Either [Throwable, A] => Any) =
    SchedulerConfig.makeSafeThunk (s, k)

  // NOTE(review): await/run duplicate SequentialConfig verbatim; only the stub's
  // task-selection order differs. Candidate for factoring into a shared base.
  // Kick off the continuation, then pump queued tasks (ignoring timers) until it
  // has produced a value.
  def await [A] (s: TestScheduler, k: => A @thunk): A = {
    var v: A = null .asInstanceOf [A]
    reset {
      val _t1 = k
      v = _t1
    }
    run (v == null, false)
    v
  }

  // Execute randomly chosen tasks until quiescent, the condition fails, or a task
  // raised; a captured exception is cleared and rethrown to the caller.
  def run (cond: => Boolean, timers: Boolean) {
    while (exception == None && !executor.isQuiet (timers) && cond)
      executor.executeOne (timers)
    exception match {
      case None => ()
      case Some (e) => exception = None; throw e
    }}

  def shutdown() = ()
}
/** Runs tasks on real thread pools; run() polls until the pools are quiescent. */
private class MultithreadedConfig extends TestSchedulerConfig {

  // First uncaught exception from any worker thread; rethrown by run().
  val exception = new AtomicReference (None: Option [Throwable])

  // Async-mode fork/join pool sized to the machine's processors.
  val executor = new ForkJoinPool (
    Runtime.getRuntime ().availableProcessors (),
    ForkJoinPool.defaultForkJoinWorkerThreadFactory,
    null,
    true)

  val timer = new ScheduledThreadPoolExecutor (1)

  // Quiet when the pool has no running/queued work and every scheduled timer task
  // has completed (completed count has caught up with the total task count).
  private def isQuiet =
    executor.isQuiescent && (timer.getCompletedTaskCount - timer.getTaskCount == 0)

  // compareAndSet keeps only the first exception when workers fail concurrently.
  def handleUncaughtException (e: Throwable) =
    exception.compareAndSet (None, Some (e))

  def makeThump (s: Scheduler, k: () => Any) =
    SchedulerConfig.makeSafeThump (s, k)

  def makeThunk [A] (s: Scheduler, k: Either [Throwable, A] => Any) =
    SchedulerConfig.makeSafeThunk (s, k)

  // Real threads are running, so delegate to the genuine blocking await.
  def await [A] (s: TestScheduler, k: => A @thunk): A = s._await (k)

  // Poll every 100ms until the pools go quiet, the condition fails, or a worker
  // raised; a captured exception is cleared and rethrown on the test thread.
  def run (cond: => Boolean, timers: Boolean) {
    Thread.sleep (100)
    while (exception.get () == None && !isQuiet && cond)
      Thread.sleep (100)
    exception.get () match {
      case None => ()
      case Some (e) => exception.set (None); throw e
    }}

  def shutdown() {
    executor.shutdownNow()
    timer.shutdownNow()
  }}
object TestScheduler {

  /** A single-threaded scheduler that selects tasks in FIFO order. */
  def sequential(): TestScheduler =
    new TestScheduler (new SequentialConfig)

  /** A single-threaded scheduler that selects tasks in pseudo-random order,
    * seeded for reproducible runs. */
  def random (seed: Long): TestScheduler =
    new TestScheduler (new RandomConfig (new Random (seed)))

  /** A single-threaded scheduler that selects tasks in pseudo-random order,
    * drawing from the given (or global default) random source. */
  def random (random: Random = Random): TestScheduler =
    new TestScheduler (new RandomConfig (random))

  /** A scheduler backed by real thread pools; its run() polls until quiescent. */
  def multithreaded(): TestScheduler =
    new TestScheduler (new MultithreadedConfig)
}
| Treode/cps | src/stub/scala/com/treode/cps/stub/scheduler/schedulers.scala | Scala | apache-2.0 | 5,451 |
package peregin.gpv.gui
import java.awt.Image
import java.io.File
import javax.swing.ImageIcon
import org.jdesktop.swingx.mapviewer.DefaultTileFactory
import peregin.gpv.Setup
import peregin.gpv.gui.map.{MicrosoftTileFactory, MapQuestTileFactory, AltitudePanel, MapPanel}
import peregin.gpv.model.{Mode, Telemetry}
import peregin.gpv.util.{Io, Logging, Timed}
import scala.swing._
import scala.swing.event.{SelectionChanged, ButtonClicked, MouseClicked}
/** Panel that loads a GPS (telemetry) file, shows the track on a map and as an
  * elevation profile, and lets the user tune the telemetry-to-video time shift.
  * `openGpsData` is invoked with the file chosen in the file chooser.
  */
class TelemetryPanel(openGpsData: File => Unit) extends MigPanel("ins 2", "", "[fill]") with Logging with Timed {

  // Currently loaded telemetry; replaced wholesale by refresh().
  var telemetry = Telemetry.empty()

  // file chooser widget
  val fileChooser = new FileChooserPanel("Load GPS data file:", openGpsData, ExtensionFilters.gps)
  add(fileChooser, "pushx, growx")

  // tile chooser dropdown
  // Pairs a tile factory with a human-readable name for display in the combo box.
  case class TileOption(name: String, factory: DefaultTileFactory) {
    override def toString: String = name
  }
  val mapChooser = new ComboBox(Seq(
    TileOption("Aerial (Microsoft)", new MicrosoftTileFactory),
    TileOption("Open Street Map", new MapQuestTileFactory)
  ))
  private val mapType = new MigPanel("ins 0", "", "[grow, fill]") {
    add(new Label("Map Type"), "wrap")
    add(mapChooser, "")
  }
  add(mapType, "wrap")

  val mapKit = new MapPanel
  // scala.swing wrapper so mouse clicks on the map can be matched in `reactions`.
  private val mapKitWrapper = Component.wrap(mapKit)
  add(mapKit, "span 2, growx, wrap")

  val altitude = new AltitudePanel
  add(altitude, "span 2, pushy, grow, gaptop 10, wrap")

  // Controls for shifting telemetry relative to the video timeline.
  val direction = new ComboBox(Seq("Forward", "Backward"))
  val spinner = new DurationSpinner

  // Elevation-profile x-axis mode: distance-based or time-based.
  private val elevationMode = new ButtonGroup() {
    buttons += new RadioButton("Distance") {
      selected = true
      icon = new ImageIcon(Io.loadImage("images/distance.png").getScaledInstance(16, 16, Image.SCALE_SMOOTH))
      tooltip = "Distance"
    }
    buttons += new RadioButton("Time") {
      selected = false
      icon = new ImageIcon(Io.loadImage("images/time.png").getScaledInstance(16, 16, Image.SCALE_SMOOTH))
      tooltip = "Time"
    }
  }
  private val controlPanel = new MigPanel("ins 0 5 0 5", "", "") {
    add(new Label("Shift"), "")
    add(direction, "")
    add(spinner, "align left")
    add(new BoxPanel(Orientation.Horizontal) {contents ++= elevationMode.buttons}, "pushx, align right")
  }
  add(controlPanel, "growx")

  listenTo(altitude.mouse.clicks, mapKit, mapChooser.selection)
  elevationMode.buttons.foreach(ab => listenTo(ab))

  reactions += {
    // Click on the elevation profile: locate the track point and highlight it on both views.
    case MouseClicked(`altitude`, pt, _, 1, false) => timed(s"time/elevation for x=${pt.x}") {
      val sonda = altitude.sondaForPoint(pt)
      altitude.refreshPoi(sonda)
      mapKit.refreshPoi(sonda.map(_.location))
    }
    // Click on the map: convert the pixel to a geo position, find the closest
    // track point, and highlight it on both views.
    case MouseClicked(`mapKitWrapper`, pt, _, 1, false) => timed(s"geo/map for x=${pt.x}, y=${pt.y}") {
      val gp = mapKit.getMainMap.convertPointToGeoPosition(pt)
      log.info(s"geo location $gp")
      val sonda = telemetry.sondaForPosition(gp)
      altitude.refreshPoi(sonda)
      mapKit.refreshPoi(sonda.map(_.location))
    }
    // Toggle the elevation profile between distance- and time-based modes.
    case ButtonClicked(_: RadioButton) =>
      val mode = elevationMode.selected.map(_.text).getOrElse(elevationMode.buttons.head.text) match {
        case "Distance" => Mode.DistanceBased
        case "Time" => Mode.TimeBased
      }
      altitude.refresh(mode)
    // Switch tile provider while preserving the current map center.
    case SelectionChanged(`mapChooser`) =>
      val item = mapChooser.selection.item
      log.info(s"switching to $item")
      val center = mapKit.getCenterPosition
      mapKit.setTileFactory(item.factory)
      mapKit.setCenterPosition(center)
  }

  // Repopulates every widget from a freshly loaded setup/telemetry pair and
  // clears any previous highlight/progress markers.
  def refresh(setup: Setup, telemetry: Telemetry): Unit = {
    fileChooser.fileInput.text = setup.gpsPath.getOrElse("")
    this.telemetry = telemetry
    mapKit.refresh(telemetry)
    mapKit.setAddressLocation(telemetry.centerGeoPosition)
    mapKit.refreshPoi(None)
    mapKit.refreshProgress(None)
    altitude.refresh(telemetry)
    altitude.refreshPoi(None)
    altitude.refreshProgress(None)
    spinner.duration = setup.shift.abs
    direction.selection.index = if (setup.shift < 0) 1 else 0
  }

  // Signed shift: positive when "Forward" is selected, negative for "Backward".
  def getShift: Long = spinner.duration * (if (direction.selection.index == 0) 1 else -1)

  // dispatched by the video controller, invoked from EDT
  def updateVideoProgress(videoTimeInMillis: Long): Unit = {
    val sonda = telemetry.sondaForRelativeTime(videoTimeInMillis + getShift)
    altitude.refreshProgress(sonda)
    mapKit.refreshProgress(sonda.map(_.location))
  }
}
| peregin/gps-overlay-on-video | src/main/scala/peregin/gpv/gui/TelemetryPanel.scala | Scala | mit | 4,401 |
// NOTE(review): B is defined elsewhere; assumes B declares foo() and bar()
// returning Int — confirm against the sibling source of this test.
class C extends B {
  override def bar(): Int = 1
  override def foo(): Int = {
    // Eta-expand the super call into a function value first; invoking f() must
    // still reach the superclass implementation, not recurse into this override.
    val f: () => Int = super.foo
    f()
  }
}
object Test {
  /** Entry point: exercises C.foo (a super call captured as a function value) plus C.bar. */
  def main(args: Array[String]): Unit = {
    val c = new C()
    val total = c.foo() + c.bar()
    assert(total == 42)
  }
}
| scala/scala | test/files/neg/t12523/Test.scala | Scala | apache-2.0 | 245 |
package teststate.core
import teststate.data._
import teststate.typeclass.Profunctor
object Types {
  // A Sack whose leaves are either a named error or a value of B.
  type SackE[-A, +B, +E] = Sack[A, NamedError[Failure[E]] Or B]
  // Partially applies a two-param check type C so it ranges over an OS[O, S] input.
  type CheckShape1[C[-_, _]] = ({ type T[-O, -S, E] = C[OS[O, S], E] })
  // A sack of checks over A, where each leaf may instead be a named error.
  type CheckShapeA[C[-_, _], -A, E] = SackE[A, C[A, E], E]
  type CheckShape [C[-_, _], -O, -S, E] = CheckShapeA[C, OS[O, S], E]
  // Concrete check-sack aliases for the three check kinds.
  type Points [-O, -S, E] = CheckShape[Point , O, S, E]
  type Arounds [-O, -S, E] = CheckShape[Around , O, S, E]
  type Invariants[-O, -S, E] = CheckShape[Invariant, O, S, E]
  // OS →ˢ (NamedError E | OS →ᶜ E)
  // Implicitly equips check sacks with Profunctor operations (dimap etc.).
  implicit def checkShapeProfunctorOps[C[-_, _], O, S, E](a: CheckShape[C, O, S, E]): Profunctor.Ops[Sack, OS[O, S], NamedError[Failure[E]] Or C[OS[O, S], E]] =
    new Profunctor.Ops[Sack, OS[O, S], NamedError[Failure[E]] Or C[OS[O, S], E]](a)
  implicit def checkShapeAProfunctorOps[C[-_, _], A, E](a: CheckShapeA[C, A, E]): Profunctor.Ops[Sack, A, NamedError[Failure[E]] Or C[A, E]] =
    new Profunctor.Ops[Sack, A, NamedError[Failure[E]] Or C[A, E]](a)
}
| japgolly/test-state | core/shared/src/main/scala/teststate/core/Types.scala | Scala | apache-2.0 | 1,069 |
package ch.bsisa.hyperbird.patman.simulations.messages
/** Parameterless message requesting hospital states — presumably exchanged between
  * the patman simulation actors; confirm against the senders/receivers.
  */
case class HospitalStatesRequest()
/*
* Copyright 2014 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.storehaus
import com.twitter.concurrent.AsyncMutex
import com.twitter.util.{ Future, Return }
/**
* Provides read-through caching on a readable store fronted by a cache.
*
* Keys are fetched from backing store on cache miss and cache read failures.
*
* All cache operations are best effort i.e. 'get' will return the key from
* backing store even if adding/updating the cached copy fails.
*
* On the other hand, any failure while reading from backing store
* is propagated to the client.
*
* Thread-safety is achieved using a mutex.
*
* @author Ruban Monu
*/
class ReadThroughStore[K, V](backingStore: ReadableStore[K, V], cache: Store[K, V])
  extends ReadableStore[K, V] {

  // Serializes cache writes so concurrent fetches do not interleave their puts.
  protected [this] lazy val mutex = new AsyncMutex

  /** Fetch from the backing store and, best effort, write the result to the cache
    * under the mutex; cache failures never fail the read.
    */
  private [this] def getFromBackingStore(k: K) : Future[Option[V]] = {
    backingStore.get(k).flatMap { storeValue =>
      mutex.acquire.flatMap { p =>
        cache.put((k, storeValue))
          .map { u : Unit => storeValue }
          .rescue { case x: Exception => Future.value(storeValue) }
          .ensure { p.release }
      }
    }
  }

  // Serve cache hits directly; on a miss or a cache read failure, fall through
  // to the backing store (which also repopulates the cache).
  override def get(k: K): Future[Option[V]] = cache.get(k) transform {
    case Return(v @ Some(_)) => Future.value(v)
    case _ => getFromBackingStore(k)
  }

  override def multiGet[K1 <: K](ks: Set[K1]): Map[K1, Future[Option[V]]] = {
    // attempt to read from cache first; Left = cache answered, Right = cache read failed
    val cacheResults : Map[K1, Future[Either[Option[V], Exception]]] =
      cache.multiGet(ks).map { case (k, f) =>
        (k, f.map { optv => Left(optv) } rescue { case x: Exception => Future.value(Right(x)) })
      }
    // fetch failed keys and cache misses from the backing store
    val f: Future[Map[K1, Option[V]]] =
      FutureOps.mapCollect(cacheResults).flatMap { cacheResult =>
        // keys whose cache read failed outright — must be retried against the store
        val failedKeys = cacheResult.collect { case (k, Right(_)) => k }.toSet
        // keys the cache answered, split into hits and misses
        val responses = cacheResult.collect { case (k, Left(optv)) => (k, optv) }
        val hits = responses.filter { _._2.nonEmpty }
        val missedKeys = responses.filter { _._2.isEmpty }.keySet
        FutureOps.mapCollect(backingStore.multiGet(missedKeys ++ failedKeys)).flatMap { storeResult =>
          // write fetched keys to cache, best effort
          mutex.acquire.flatMap { p =>
            FutureOps.mapCollect(cache.multiPut(storeResult))(FutureCollector.bestEffort[(K1, Unit)])
              .map { u => hits ++ storeResult }
              .ensure { p.release }
          }
        }
      }
    // every requested key gets a future, defaulting to None
    FutureOps.liftValues(ks, f, { (k: K1) => Future.None })
  }
}
| AndreasPetter/storehaus | storehaus-core/src/main/scala/com/twitter/storehaus/ReadThroughStore.scala | Scala | apache-2.0 | 3,257 |
package com.github.kickshare.indexer.es.scroll
import org.elasticsearch.action.search.SearchResponse
/**
* API for reading/scrolling documents from index.
*/
trait ScrollSearch {
  /**
   * Provides an iterator over paged results.
   *
   * @return Iterator of SearchResponses representing scroll-based results —
   *         presumably one batch of hits per scroll request; confirm against
   *         the implementations.
   */
  def getResponses(): Iterator[SearchResponse]
}
| kucera-jan-cz/kickshare | kickshare-tools/data-indexer/src/main/scala/com/github/kickshare/indexer/es/scroll/ScrollSearch.scala | Scala | apache-2.0 | 362 |
package dpla.ingestion3.harvesters.file.tar
import java.io._
import java.net.URI
import java.util.zip.GZIPInputStream
import dpla.ingestion3.harvesters.file.tar.DefaultSource.{SerializableConfiguration, _}
import org.apache.commons.io.IOUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.datasources.{FileFormat, OutputWriterFactory, PartitionedFile}
import org.apache.spark.sql.sources.{DataSourceRegister, Filter}
import org.apache.spark.sql.types.{BinaryType, StringType, StructField, StructType}
import org.apache.spark.unsafe.types.UTF8String
import org.apache.tools.bzip2.CBZip2InputStream
import org.apache.tools.tar.TarInputStream
import scala.util.Try
/**
* This is a DataSource that can handle Tar files in a number of formats: tar.gz, tar.bz2, or just .tar.
*
* It figures out which type it is based on the extension (.tgz and .tbz2 are also valid).
*
* Use it like: val df = spark.read.format("dpla.ingestion3.harvesters.file.tar").load(infile)
*
*/
class DefaultSource extends FileFormat with DataSourceRegister {

  // Short name registered with Spark: spark.read.format("tar").
  def shortName(): String = "tar"

  /**
   * Writing not implemented yet. Not sure we need it.
   */
  override def prepareWrite(
                             sparkSession: SparkSession,
                             job: Job,
                             options: Map[String, String],
                             dataSchema: StructType
                           ): OutputWriterFactory =
    throw new UnsupportedOperationException("Writing not implemented.")

  /**
   * Schema is always a tuple of the name of the tarfile, the full path of the entry, and the bytes of the entry.
   */
  override def
  inferSchema(sparkSession: SparkSession, options: Map[String, String], files: Seq[FileStatus]): Option[StructType] =
    Some(StructType(Seq(
      StructField("tarname", StringType, nullable = false),
      StructField("filename", StringType, nullable = false),
      StructField("data", BinaryType, nullable = false)
    )))

  /**
   * Not even thinking about how to make this data splittable. In many cases this will handle, it's definitely not.
   */
  override def isSplitable(
                            sparkSession: SparkSession,
                            options: Map[String, String],
                            path: Path): Boolean = false

  /** Returns the per-file reader function Spark invokes on worker nodes. */
  override def buildReader(
                            spark: SparkSession,
                            dataSchema: StructType,
                            partitionSchema: StructType,
                            requiredSchema: StructType,
                            filters: Seq[Filter],
                            options: Map[String, String],
                            hadoopConf: Configuration): (PartitionedFile) => Iterator[InternalRow] = {
    //We avoid closing over the non-serializable HadoopConf by wrapping it in something serializable and
    //broadcasting it. Really, Hadoop? It's like, a hashtable. You could save us a lot of trouble by making it
    //Serializable. But we need a Configuration to laod the data using Hadoop's FileSystem class, which means we
    //should be able to read files from everywhere Spark can.
    val broadcastedConf = spark.sparkContext.broadcast(new SerializableConfiguration(hadoopConf))

    //this is a function that Spark will call on every file given as input. It's going to be run on a worker node,
    //not the master, so we have to take a little care not to close over anything that's not serializable.
    (file: PartitionedFile) => {
      //this contains the stream if it's something we can read, or None if not.
      val tarInputStreamOption: Option[TarInputStream] = loadStream(broadcastedConf, file)
      tarInputStreamOption match {
        //we got some data to iterate over
        case Some(tarInputStream) =>
          iterateFiles(file.filePath, tarInputStream)
        //The default case cowardly fails. Possibly a directory with
        //other files was passed, so we don't want to blow up
        case None =>
          Iterator.empty
      }
    }
  }
}
object DefaultSource {

  /**
   * Lazily walks a TarInputStream, producing one TarResult per archive entry.
   * Not annotated @tailrec (the compiler cannot prove it tail recursive), but
   * the Stream cons is lazy, so the recursion will not blow the stack.
   */
  def iter(tarInputStream: TarInputStream): Stream[TarResult] = {
    Option(tarInputStream.getNextEntry) match {
      case None =>
        Stream.empty
      case Some(entry) =>
        val result =
          if (entry.isDirectory)
            TarResult(
              UTF8String.fromString(entry.getName),
              None,
              isDirectory = true
            )
          else
            TarResult(
              UTF8String.fromString(entry.getName),
              Some(IOUtils.toByteArray(tarInputStream, entry.getSize)),
              isDirectory = false
            )
        result #:: iter(tarInputStream)
    }
  }

  /**
   * Converts the entries of a tar stream into InternalRows matching the
   * (tarname, filename, data) schema. Directory entries are skipped.
   *
   * InternalRow doesn't want a plain old String because that doesn't enforce
   * an encoding on disk; UTF8String also stores the data as bytes internally,
   * which is probably denser than native Strings.
   */
  def iterateFiles(filePath: String, tarInputStream: TarInputStream): Iterator[InternalRow] = {
    val filePathUTF8 = UTF8String.fromString(filePath)
    iter(tarInputStream)
      .filterNot(_.isDirectory) // we don't care about directories
      .map((result: TarResult) =>
        InternalRow(
          filePathUTF8, // fixed: reuse the converted path instead of re-encoding it for every row
          result.entryPath,
          result.data.getOrElse(Array[Byte]())
        )
      ).iterator
  }

  /**
   * Opens the given file as a TarInputStream, using the file extension to
   * decide which decompression (if any) to apply.
   *
   * @return the stream, or None when the extension is not recognized
   */
  def loadStream(broadcastedConf: Broadcast[SerializableConfiguration], file: PartitionedFile):
  Option[TarInputStream] = {
    val hadoopConf = broadcastedConf.value.value
    val path = new Path(new URI(file.filePath))
    // Fixed: resolve the FileSystem from the path itself so that paths on
    // non-default filesystems (s3a://, file://, ...) open correctly;
    // FileSystem.get(conf) always returned the cluster's default filesystem.
    val fs = path.getFileSystem(hadoopConf)
    file.filePath match {
      case name if name.endsWith("gz") || name.endsWith("tgz") =>
        Some(new TarInputStream(new GZIPInputStream(fs.open(path))))
      case name if name.endsWith("bz2") || name.endsWith("tbz2") =>
        // CBZip2InputStream expects the two-byte "BZ" header added by bzip2
        // to have been consumed already.
        val fileStream = fs.open(path)
        fileStream.skip(2)
        Some(new TarInputStream(new CBZip2InputStream(fileStream)))
      case name if name.endsWith("tar") =>
        Some(new TarInputStream(fs.open(path)))
      case _ => None // We don't recognize the extension.
    }
  }

  /**
   * This is one of those things that seems to exist in a lot of Hadoop ecosystem projects that Hadoop should just fix.
   * Basically this is just a wrapper that allows a Hadoop Configuration object to be sent over the wire.
   * Needed so that workers can find their files on whatever filesystem is passed in (HDFS, S3, etc.)
   *
   * @param value the Hadoop Configuration object to be sent.
   */
  class SerializableConfiguration(@transient var value: Configuration) extends Serializable {
    // Fixed: the previous `Try { ... } getOrElse (lambda)` silently swallowed
    // any serialization failure and returned an uninvoked function value.
    // Failures are now wrapped in an IOException as originally intended
    // (and writeObject's message no longer claims to be *reading*).
    private def writeObject(out: ObjectOutputStream): Unit =
      try {
        out.defaultWriteObject()
        value.write(out)
      } catch {
        case e: Exception => throw new IOException("Unable to write config to output stream.", e)
      }

    private def readObject(in: ObjectInputStream): Unit =
      try {
        value = new Configuration(false)
        value.readFields(in)
      } catch {
        case e: Exception => throw new IOException("Unable to read config from input stream.", e)
      }
  }

  /**
   * Case class for holding the result of a single tar entry iteration.
   *
   * @param entryPath Path of the entry in the tar file.
   * @param data Binary data in the entry
   * @param isDirectory If the entry represents a directory.
   */
  case class TarResult(entryPath: UTF8String, data: Option[Array[Byte]], isDirectory: Boolean)
}
| dpla/ingestion3 | src/main/scala/dpla/ingestion3/harvesters/file/tar/DefaultSource.scala | Scala | mit | 8,302 |
package org.skycastle.core.design
/**
*
*/
/**
 * Outcome of an attempted change: either a success or a failure.
 * The callback helpers run their block synchronously, and only when the
 * matching outcome holds.
 */
trait ChangeResult {
  /** True when the change was applied successfully. */
  val isSuccess: Boolean

  /** Runs `block` only for a successful result. */
  def onSuccess(block: => Unit): Unit = if (isSuccess) block else ()

  /** Runs `block` only for a failed result. */
  def onFailure(block: => Unit): Unit = if (isSuccess) () else block
}
/** Singleton result representing a successfully applied change. */
case object ChangeSuccess extends ChangeResult {
  override val isSuccess: Boolean = true
}
/**
 * Base trait for failed change results.
 * `message` defaults to the concrete failure's class name, so simple
 * failures need not override it.
 */
trait ChangeFailure extends ChangeResult {
  override val isSuccess: Boolean = false

  /** Human-readable description of why the change failed. */
  def message: String = getClass.getSimpleName
}
case class ChangeNotPossible(override val message: String) extends ChangeFailure
| zzorn/skycastle | src/main/scala/org/skycastle/core/design/ChangeResult.scala | Scala | gpl-2.0 | 502 |
package wrangler.api
import java.io.File
import scalaz._, Scalaz._
import sbt._, Path._
import org.eclipse.jgit.transport._
import org.eclipse.jgit.errors.UnsupportedCredentialItem
case class BasicLogin(id: String, password: String)
/**
* Parses netrc credentials to use for authentication.
* By default it will parse `~/.netrc`
*/
object Netrc {
  /** Location of the netrc file checked by default (`~/.netrc`). */
  val defaultPath = s"""${System.getProperty("user.home")}/.netrc"""

  /**
   * Looks up the login credentials for `host` in a netrc file, expecting the
   * conventional three-line layout:
   * {{{
   * machine <host>
   * login <user>
   * password <secret>
   * }}}
   *
   * @param host machine name to look for
   * @param path netrc file to parse, defaulting to `~/.netrc`
   * @return the credentials, or None when the file or entry is missing or malformed
   */
  def getLogin(host: String, path: String = defaultPath): Option[BasicLogin] = {
    val file = new File(path)
    // Fixed: dropped a stray debug println to stdout when the file is absent.
    if (!file.exists) None
    else {
      val lines = IO.readLines(file).toArray
      val pos = lines.indexWhere(_.startsWith(s"machine $host"))
      // Fixed off-by-one: both `pos + 1` and `pos + 2` must be valid indices,
      // so reject when `pos + 2 >= lines.length`. The old `>` comparison let
      // `pos + 2 == lines.length` through and crashed with an
      // ArrayIndexOutOfBoundsException when the machine line was second-last.
      if (pos == -1 || pos + 2 >= lines.length) None
      else {
        if (lines(pos + 1).startsWith("login")) {
          val login = lines(pos + 1).drop("login".length + 1)
          if (lines(pos + 2).startsWith("password"))
            Some(BasicLogin(login, lines(pos + 2).drop("password".length + 1)))
          else None
        } else None
      }
    }
  }
}
/**
 * JGit CredentialsProvider backed by a netrc file: username/password items
 * are filled in from the entry matching the URI's host.
 */
class NetrcCredentialsProvider(path: String = Netrc.defaultPath) extends CredentialsProvider {

  /** Credentials come from a file, so no user interaction is ever required. */
  override def isInteractive = false

  /** Only username/password style items can be satisfied from netrc. */
  override def supports(items: CredentialItem*) =
    items.forall(i => i.isInstanceOf[CredentialItem.Username] || i.isInstanceOf[CredentialItem.Password])

  /**
   * Fills in the requested items from the netrc entry for the URI's host.
   * Returns false when no entry is found; throws for item types that cannot
   * be satisfied from a netrc login.
   */
  override def get(uri: URIish, items: CredentialItem*) =
    Netrc.getLogin(uri.getHost, path).fold(false) { login =>
      items.foreach {
        case i: CredentialItem.Username => i.setValue(login.id)
        case i: CredentialItem.Password => i.setValue(login.password.toArray)
        case i: CredentialItem.StringType if i.getPromptText == "Password: " =>
          i.setValue(login.password)
        case i => throw new UnsupportedCredentialItem(uri, s"${i.getClass.getName}:${i.getPromptText}")
      }
      true
    }
}
| CommBank/wrangler | src/main/scala/wrangler/api/Netrc.scala | Scala | apache-2.0 | 1,985 |
package com.advancedspark.serving.prediction.pmml
import scala.util.parsing.json.JSON
import org.jpmml.evaluator.Evaluator
import org.jpmml.evaluator.ModelEvaluatorFactory
import org.jpmml.evaluator.visitors.PredicateInterner
import org.jpmml.evaluator.visitors.PredicateOptimizer
import org.jpmml.model.ImportFilter
import org.jpmml.model.JAXBUtil
import org.springframework.boot.SpringApplication
import org.springframework.boot.autoconfigure.SpringBootApplication
import org.springframework.cloud.netflix.hystrix.EnableHystrix
import org.springframework.http.HttpStatus
import org.springframework.http.ResponseEntity
import org.springframework.web.bind.annotation.PathVariable
import org.springframework.web.bind.annotation.RequestBody
import org.springframework.web.bind.annotation.RequestMapping
import org.springframework.web.bind.annotation.RequestMethod
import org.springframework.web.bind.annotation.RestController
import org.xml.sax.InputSource
import com.soundcloud.prometheus.hystrix.HystrixPrometheusMetricsPublisher
import io.prometheus.client.spring.boot.EnablePrometheusEndpoint
import io.prometheus.client.spring.boot.EnableSpringBootMetricsCollector
import io.prometheus.client.hotspot.StandardExports
@SpringBootApplication
@RestController
@EnableHystrix
@EnablePrometheusEndpoint
@EnableSpringBootMetricsCollector
class PredictionService {
  // Cache of compiled PMML evaluators keyed by "namespace/pmmlName/version".
  val pmmlRegistry = new scala.collection.mutable.HashMap[String, Evaluator]

  HystrixPrometheusMetricsPublisher.register("prediction_pmml")
  new StandardExports().register()

  /** Parses a PMML document, optimizes/interns its predicates and builds an evaluator. */
  private def buildEvaluator(pmmlSource: InputSource): Evaluator = {
    val transformedSource = ImportFilter.apply(pmmlSource)
    val pmml = JAXBUtil.unmarshalPMML(transformedSource)
    val predicateOptimizer = new PredicateOptimizer()
    predicateOptimizer.applyTo(pmml)
    val predicateInterner = new PredicateInterner()
    predicateInterner.applyTo(pmml)
    ModelEvaluatorFactory.newInstance().newModelEvaluator(pmml)
  }

  /**
   * Stores a new PMML model (XML body) on local disk and (re)builds its
   * evaluator in the in-memory registry.
   */
  @RequestMapping(path=Array("/update-pmml/{namespace}/{pmmlName}/{version}"),
                  method=Array(RequestMethod.POST),
                  produces=Array("application/xml; charset=UTF-8"))
  def updatePmml(@PathVariable("namespace") namespace: String,
                 @PathVariable("pmmlName") pmmlName: String,
                 @PathVariable("version") version: String,
                 @RequestBody pmmlString: String):
      ResponseEntity[HttpStatus] = {
    // Write the new pmml (XML format) to local disk so it survives restarts.
    val path = new java.io.File(s"store/${namespace}/${pmmlName}/${version}")
    if (!path.isDirectory()) {
      path.mkdirs()
    }
    val file = new java.io.File(s"store/${namespace}/${pmmlName}/${version}/${pmmlName}.pmml")
    if (!file.exists()) {
      file.createNewFile()
    }
    val fos = new java.io.FileOutputStream(file)
    try {
      fos.write(pmmlString.getBytes())
    } finally {
      // Fixed: the stream was previously never closed (or flushed), leaking
      // the descriptor and risking an incomplete file on later reads.
      fos.close()
    }

    val modelEvaluator = buildEvaluator(new InputSource(new java.io.StringReader(pmmlString)))

    // Update PMML in cache.
    pmmlRegistry.put(namespace + "/" + pmmlName + "/" + version, modelEvaluator)

    new ResponseEntity(HttpStatus.OK)
  }

  /**
   * Evaluates the named PMML model against the JSON map of inputs, loading
   * and caching the evaluator from disk on a registry miss.
   */
  @RequestMapping(path=Array("/evaluate-pmml/{namespace}/{pmmlName}/{version}"),
                  method=Array(RequestMethod.POST),
                  produces=Array("application/json; charset=UTF-8"))
  def evaluatePmml(@PathVariable("namespace") namespace: String,
                   @PathVariable("pmmlName") pmmlName: String,
                   @PathVariable("version") version: String,
                   @RequestBody inputJson: String): String = {
    val inputs: Map[String, Any] = JSON.parseFull(inputJson) match {
      case Some(parsedInput) => parsedInput.asInstanceOf[Map[String, Any]]
      case None => Map[String, Any]()
    }

    val cacheKey = namespace + "/" + pmmlName + "/" + version
    val modelEvaluator = pmmlRegistry.getOrElse(cacheKey, {
      // Cache miss: rebuild the evaluator from the copy stored on disk.
      val fis = new java.io.FileInputStream(s"store/${namespace}/${pmmlName}/${version}/${pmmlName}.pmml")
      val evaluator =
        try buildEvaluator(new InputSource(fis))
        finally fis.close() // fixed: the stream was previously leaked
      pmmlRegistry.put(cacheKey, evaluator)
      evaluator
    })

    val results = new PMMLEvaluationCommand(pmmlName, namespace, pmmlName, version, modelEvaluator, inputs, s"""{"result": "fallback"}""", 25, 20, 10)
      .execute()

    s"""{"results":[${results}]}"""
  }
}
/** Entry point: boots the Spring application hosting PredictionService. */
object PredictionServiceMain {
  def main(args: Array[String]): Unit =
    SpringApplication.run(classOf[PredictionService])
}
package org.geoscript.geocss
import scala.util.parsing.input.CharSequenceReader
import org.scalatest.FunSuite
import org.scalatest.matchers.{ Matcher, MatchResult, ShouldMatchers }
/**
 * Tests for specific low-level productions in the CSS grammar.
 * Each production is checked against lists of inputs it must accept and
 * inputs it must reject.
 */
class GrammarTest extends FunSuite with ShouldMatchers {
  import CssParser._

  /** Matches when the parser consumes the entire input successfully. */
  def succeedOn(text: String): Matcher[CssParser.Parser[_]] =
    new Matcher[CssParser.Parser[_]] {
      def apply(parser: CssParser.Parser[_]): MatchResult = {
        val accepted = CssParser.parseAll(parser, text).successful
        new MatchResult(
          accepted,
          "Parser %s did not accept %s" format(parser, text),
          "Parser %s accepted %s" format(parser, text)
        )
      }
    }

  /** Matches when the parser rejects the input (negation of succeedOn). */
  def failOn(text: String): Matcher[CssParser.Parser[_]] = not(succeedOn(text))

  test("property names") {
    for (good <- Seq("abc", "abc-123", "-gt-magic")) propname should succeedOn(good)
    for (bad <- Seq("123", "-123")) propname should failOn(bad)
  }

  test("numbers") {
    for (good <- Seq("123", "1.23", ".123", "123.", "-123", "-.123"))
      number should succeedOn(good)
    for (bad <- Seq(".-123", "1.2.3", "1..23", "1-23", "one, dash, twenty-three"))
      number should failOn(bad)
  }

  test("percentages") {
    for (good <- Seq("12%", "1.2%", "-12%", "-.12%")) percentage should succeedOn(good)
    for (bad <- Seq(".-12%", "12", "%")) percentage should failOn(bad)
  }

  test("urls") {
    url should succeedOn("url(http://example.com/foo.png)")
    for (bad <- Seq(
      "foo()",
      "url('http://example.com/icon.png')",
      """foo("http://example.com/icon.png")""",
      "rgb(50) 150, 250)",
      "rgb(50,150,250)"
    )) url should failOn(bad)
  }

  test("functions") {
    for (good <- Seq(
      "foo()",
      "url('http://example.com/icon.png')",
      """foo("http://example.com/icon.png")""",
      "rgb(50, 150, 250)",
      "rgb(50,150,250)"
    )) function should succeedOn(good)
  }
}
| dwins/geoscript.scala | geocss/src/test/scala/org/geoscript/geocss/GrammarTest.scala | Scala | mit | 2,348 |
/*
* Copyright 2017 Chris Nappin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers
import com.nappin.play.recaptcha.RecaptchaSettings._
import com.nappin.play.recaptcha.{NonceActionBuilder, RecaptchaVerifier, WidgetHelper}
import org.junit.runner.RunWith
import org.specs2.mock.Mockito
import org.specs2.runner.JUnitRunner
import org.specs2.specification.Scope
import play.api.Application
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.json.Json
import play.api.mvc.{AnyContent, ControllerComponents, Request}
import play.api.data.FormBinding
import play.api.test.{FakeRequest, PlaySpecification, WithApplication}
import play.api.test.CSRFTokenHelper._
import scala.concurrent.{ExecutionContext, Future}
/**
 * Unit test for the <code>JavascriptForm</code> controller, using a mocked out Verifier.
 *
 * Each spec builds a fresh controller via getController, priming the mock
 * RecaptchaVerifier with one of the VERIFIER_ACTION_* behaviours below.
 */
@RunWith(classOf[JUnitRunner])
class JavascriptFormSpec extends PlaySpecification with Mockito {

  private implicit val context = ExecutionContext.Implicits.global

  // Minimal recaptcha settings needed to construct the widget/controller.
  private val configuration: Map[String, String] = Map(
    PrivateKeyConfigProp -> "private-key",
    PublicKeyConfigProp -> "public-key",
    RequestTimeoutConfigProp -> "5 seconds")

  // Runs each example inside a Guice application configured as above.
  abstract class WithWidgetHelper(configProps: Map[String, AnyRef]) extends WithApplication(
    GuiceApplicationBuilder().configure(configProps).build()) with Scope

  "The Javascript form controller" should {

    "return the example form" in new WithWidgetHelper(configuration) {
      val controller = getController(app, VERIFIER_ACTION_NONE)
      val request = FakeRequest(GET, "/js-form").withCSRFToken

      val page = controller.show().apply(request)

      status(page) must equalTo(OK)
      contentType(page) must beSome("text/html")
      contentAsString(page) must contain("Example Javascript Form")
    }

    "load the data to pre-populate the form" in new WithWidgetHelper(configuration) {
      val controller = getController(app, VERIFIER_ACTION_NONE)
      val request = FakeRequest(GET, "/js-form/load").withCSRFToken

      val response = controller.load().apply(request)

      status(response) must equalTo(OK)
      contentType(response) must beSome("application/json")
      contentAsJson(response) must equalTo(Json.parse(
        "{\\"username\\":\\"user1\\"," +
          "\\"email\\":\\"user1@abc.com\\"," +
          "\\"age\\":42," +
          "\\"agree\\":true}"))
    }

    // An empty body makes the verifier throw, which the controller is
    // expected to propagate rather than swallow.
    "reject an empty form submission" in new WithWidgetHelper(configuration) {
      val controller = getController(app, VERIFIER_ACTION_EMPTY_FORM)
      val request = FakeRequest(POST, "/js-form")
        .withJsonBody(Json.obj())
        .withCSRFToken

      await(controller.submitForm().apply(request)) must throwAn[IllegalStateException]
    }

    "reject missing mandatory fields" in new WithWidgetHelper(configuration) {
      val controller = getController(app, VERIFIER_ACTION_USERNAME_MISSING)
      val request = FakeRequest(POST, "/js-form")
        .withJsonBody(Json.obj(
          "recaptcha_response_field" -> "r"))
        .withCSRFToken

      val response = controller.submitForm().apply(request)

      status(response) must equalTo(UNPROCESSABLE_ENTITY)
      contentType(response) must beSome("application/json")
      contentAsJson(response) must equalTo(Json.parse(
        "{\\"username\\":[\\"Username is required\\"]}"))
    }

    "reject recaptcha failure" in new WithWidgetHelper(configuration) {
      val controller = getController(app, VERIFIER_ACTION_RECAPTCHA_FAILURE)
      val request = FakeRequest(POST, "/js-form")
        .withJsonBody(Json.obj(
          "recaptcha_response_field" -> "r"))
        .withCSRFToken

      val response = controller.submitForm().apply(request)

      status(response) must equalTo(UNPROCESSABLE_ENTITY)
      contentType(response) must beSome("application/json")
      contentAsJson(response) must equalTo(Json.parse(
        "{\\"captcha\\":[\\"Incorrect, please try again\\"]}"))
    }

    "handle recaptcha success" in new WithWidgetHelper(configuration) {
      val controller = getController(app, VERIFIER_ACTION_RECAPTCHA_SUCCESS)
      val request = FakeRequest(POST, "/js-form")
        .withJsonBody(Json.obj(
          "recaptcha_response_field" -> "r",
          "username" -> "a",
          "age" -> "42"))
        .withCSRFToken

      val response = controller.submitForm().apply(request)

      status(response) must equalTo(OK)
      contentType(response) must beSome("application/json")
      contentAsJson(response) must equalTo(Json.parse(
        "{\\"title\\":\\"User Registered\\"," +
          "\\"feedback\\":\\"If we had a database, the user's registration would have probably been saved at this point.\\"}"))
    }
  }

  // Verifier behaviours that getController can prime the mock with.
  val VERIFIER_ACTION_NONE = 0
  val VERIFIER_ACTION_EMPTY_FORM = 1
  val VERIFIER_ACTION_USERNAME_MISSING = 2
  val VERIFIER_ACTION_RECAPTCHA_FAILURE = 3
  val VERIFIER_ACTION_RECAPTCHA_SUCCESS = 4

  /**
   * Get a controller, with mock dependencies populated and primed with the specified behaviour.
   *
   * @param app The current play app
   * @param verifierAction The verifier behaviour to prime
   * @return The controller
   */
  def getController(app: Application, verifierAction: Int): JavascriptForm = {
    // Only the verifier is mocked; the remaining collaborators are resolved
    // from the running application's injector.
    val widgetHelper = app.injector.instanceOf[WidgetHelper]
    val formTemplate = app.injector.instanceOf[views.html.javascriptForm]
    val nonceAction = app.injector.instanceOf[NonceActionBuilder]
    val verifier = mock[RecaptchaVerifier] // mock the verifier (rest of dependencies are real)
    val cc = app.injector.instanceOf[ControllerComponents]
    val controller = new JavascriptForm(formTemplate, nonceAction, verifier, widgetHelper, cc)

    verifierAction match {
      case VERIFIER_ACTION_NONE =>
        // does nothing
        ;

      case VERIFIER_ACTION_EMPTY_FORM =>
        // simulates empty form submission
        verifier.bindFromRequestAndVerify(any[play.api.data.Form[UserRegistration]])(
          any[Request[AnyContent]], any[ExecutionContext]) throws new IllegalStateException("Oops")

      case VERIFIER_ACTION_USERNAME_MISSING =>
        // simulates username field (mandatory field) missing
        verifier.bindFromRequestAndVerify(any[play.api.data.Form[JavascriptRegistration]])(
          any[Request[AnyContent]], any[ExecutionContext]) returns
            Future {
              controller.userForm.withError("username", "Username is required")
            }

      case VERIFIER_ACTION_RECAPTCHA_FAILURE =>
        // simulates recaptcha response incorrect
        verifier.bindFromRequestAndVerify(any[play.api.data.Form[JavascriptRegistration]])(
          any[Request[AnyContent]], any[ExecutionContext]) returns
            Future {
              controller.userForm.withError(
                RecaptchaVerifier.formErrorKey, "incorrect-captcha-sol")
            }

      case VERIFIER_ACTION_RECAPTCHA_SUCCESS =>
        // simulates a fully valid submission: the mock returns the form
        // bound from an equivalent fake request.
        val request = FakeRequest(POST, "/js-form").withJsonBody(Json.obj(
          "recaptcha_response_field" -> "r",
          "username" -> "a",
          "age" -> "42"))
        val formBinding = FormBinding.Implicits.formBinding
        verifier.bindFromRequestAndVerify(any[play.api.data.Form[JavascriptRegistration]])(
          any[Request[AnyContent]], any[ExecutionContext]) returns
            Future {
              controller.userForm.bindFromRequest()(request, formBinding)
            }
    }
    controller
  }
}
| chrisnappin/play-recaptcha-v2-example | test/controllers/JavascriptFormSpec.scala | Scala | apache-2.0 | 7,921 |
package com.charlesahunt.proteus.client
import cats.effect.Sync
import com.charlesahunt.proteus.config.ProteusConfig
import com.charlesahunt.proteus.models._
import com.charlesahunt.proteus.{DELETE, api, error, errorMessage, gharial, isError}
import com.typesafe.scalalogging.Logger
import io.circe.generic.auto._
import io.circe.parser.decode
import io.circe.syntax._
import scalaj.http._
/**
 * Manages Graph API operations (ArangoDB "gharial" endpoints) for a single
 * named graph.
 *
 * @param config connection configuration for the ArangoDB server
 * @param graphName name of the graph that all operations act upon
 */
class GraphClient[F[_]](config: ProteusConfig, graphName: String)(implicit override val sync: Sync[F])
  extends ArangoClient[F](config: ProteusConfig)(sync: Sync[F]) {

  private val logger = Logger[GraphClient[F]]

  /**
   * Creates the graph with the given edge definitions.
   *
   * @param edges edge definitions the graph starts out with
   * @return the created graph, or the API/decoding error
   */
  def createGraph(
    edges: List[EdgeDefinition]): F[Either[Exception, GraphResponse]] = sync.delay {
    val response = withAuth(Http(s"$arangoHost/$api/$gharial")).postData(Graph(graphName, edges).asJson.noSpaces).asString
    decode[ResultMessage](response.body) match {
      case Right(ok) =>
        // Fixed: this branch previously emitted the garbled literal
        // "Error creating graph with code {}}}"; report the API's error
        // message like every other method in this client.
        if (isError(ok)) error(errorMessage(ok.errorMessage))
        else ok.graph.toRight[Exception](new Exception("Graph response missing"))
      case Left(err) =>
        // Fixed: the message is now part of the format string; the old
        // two-argument call had no {} placeholder, so the detail was dropped.
        logger.error("GraphClient.createGraph: {}", err.getMessage)
        Left(err)
    }
  }

  /**
   * Drops the graph.
   *
   * @return true when the graph was removed, or the API/decoding error
   */
  def dropGraph: F[Either[Exception, Boolean]] = sync.delay {
    val response = withAuth(Http(s"$arangoHost/$api/$gharial/$graphName").method(DELETE)).asString
    decode[DropGraphResponse](response.body) match {
      case Right(ok) =>
        if (ok.error) error(s"Error dropping graph with code ${ok.code.toString}")
        else Right(ok.removed)
      case Left(err) =>
        logger.error("GraphClient.dropGraph: {}", err.getMessage)
        Left(err)
    }
  }

  /**
   * Adds a vertex collection to the graph.
   *
   * @param collectionName name of the vertex collection to add
   * @return the updated graph, or the API/decoding error
   */
  def createVertexCollection(collectionName: String): F[Either[Exception, GraphResponse]] = sync.delay {
    val collection = CollectionName(collectionName)
    val response = withAuth(Http(s"$arangoHost/$api/$gharial/$graphName/vertex").postData(collection.asJson.noSpaces)).asString
    decode[ResultMessage](response.body) match {
      case Right(ok) =>
        if (isError(ok)) error(errorMessage(ok.errorMessage))
        // Fixed typo in the error text: "reesponse" -> "response".
        else ok.graph.toRight[Exception](new Exception("Graph response missing"))
      case Left(err) =>
        logger.error("GraphClient.createVertexCollection: {}", err.getMessage)
        Left(err)
    }
  }

  /**
   * Adds a vertex to the given collection.
   *
   * @param vertexCollection collection to add the vertex to
   * @param json free-style JSON body describing the vertex
   * @return the created vertex, or the API/decoding error
   */
  def createVertex(
    vertexCollection: String,
    json: String
  ): F[Either[Exception, EdgeOrVertex]] = sync.delay {
    val response = withAuth(Http(s"$arangoHost/$api/$gharial/$graphName/vertex/$vertexCollection").postData(json)).asString
    decode[ResultMessage](response.body) match {
      case Right(ok) =>
        if (isError(ok)) error(errorMessage(ok.errorMessage))
        else ok.vertex.toRight[Exception](new Exception("Vertex missing from response"))
      case Left(err) =>
        logger.error("GraphClient.createVertex: {}", err.getMessage)
        Left(err)
    }
  }

  /**
   * Adds an additional edge definition to the graph.
   *
   * @param collectionName name of the edge collection
   * @param from names of the collections edges may start from
   * @param to names of the collections edges may point to
   * @return the graph's edge definitions, or the API/decoding error
   */
  def createEdgeCollection(
    collectionName: String,
    from: List[String],
    to: List[String]): F[Either[Exception, List[EdgeDefinition]]] = sync.delay {
    val edge = EdgeDefinition(collectionName, from, to).asJson.noSpaces
    val response = withAuth(Http(s"$arangoHost/$api/$gharial/$graphName/edge/").postData(edge)).asString
    decode[ResultMessage](response.body) match {
      case Right(ok) =>
        if (isError(ok)) error(errorMessage(ok.errorMessage))
        else ok.graph.map(_.edgeDefinitions).toRight[Exception](new Exception("Edge definition response missing"))
      case Left(err) =>
        logger.error("GraphClient.createEdgeCollection: {}", err.getMessage)
        Left(err)
    }
  }

  /**
   * Creates a new edge in the collection. The body must reference valid
   * vertices in the graph via _from and _to, and the edge has to be valid in
   * the definition of this edge collection.
   *
   * @param collectionName edge collection to create the edge in
   * @param edgeType type label for the edge
   * @param from id of the source vertex
   * @param to id of the target vertex
   * @return the created edge, or the API/decoding error
   */
  def createEdge(
    collectionName: String,
    edgeType: String,
    from: String,
    to: String): F[Either[Exception, EdgeOrVertex]] = sync.delay {
    val edge = Edge(edgeType, from, to).asJson.noSpaces
    val response = withAuth(Http(s"$arangoHost/$api/$gharial/$graphName/edge/$collectionName").postData(edge)).asString
    decode[ResultMessage](response.body) match {
      case Right(ok) =>
        if (isError(ok)) error(errorMessage(ok.errorMessage))
        else ok.edge.toRight[Exception](new Exception("Edge response missing"))
      case Left(err) =>
        logger.error("GraphClient.createEdge: {}", err.getMessage)
        Left(err)
    }
  }

  /**
   * Removes an edge from the collection.
   *
   * @param collectionName edge collection containing the edge
   * @param edgeKey key of the edge to remove
   */
  def deleteEdge(collectionName: String, edgeKey: String): F[Either[Exception, Unit]] = sync.delay {
    val response = withAuth(Http(s"$arangoHost/$api/$gharial/$graphName/edge/$collectionName/$edgeKey").method(DELETE)).asString
    decode[ResultMessage](response.body) match {
      case Right(ok) =>
        if (isError(ok)) error(errorMessage(ok.errorMessage))
        else Right(())
      case Left(err) =>
        logger.error("GraphClient.deleteEdge: {}", err.getMessage)
        Left(err)
    }
  }

  /**
   * Removes a vertex from the collection.
   *
   * @param collectionName vertex collection containing the vertex
   * @param vertexKey key of the vertex to remove
   */
  def deleteVertex(collectionName: String, vertexKey: String): F[Either[Exception, Unit]] = sync.delay {
    val response = withAuth(Http(s"$arangoHost/$api/$gharial/$graphName/vertex/$collectionName/$vertexKey").method(DELETE)).asString
    decode[ResultMessage](response.body) match {
      case Right(ok) =>
        if (isError(ok)) error(errorMessage(ok.errorMessage))
        else Right(())
      case Left(err) =>
        logger.error("GraphClient.deleteVertex: {}", err.getMessage)
        Left(err)
    }
  }

  /**
   * Removes one edge definition from the graph. This only removes the edge
   * collection; the vertex collections remain untouched and can still be
   * used in queries.
   *
   * @param collectionName edge collection to remove
   */
  def deleteEdgeCollection(collectionName: String): F[Either[Exception, Unit]] = sync.delay {
    val response = withAuth(Http(s"$arangoHost/$api/$gharial/$graphName/edge/$collectionName").method(DELETE)).asString
    decode[ResultMessage](response.body) match {
      case Right(ok) =>
        if (isError(ok)) error(errorMessage(ok.errorMessage))
        else Right(())
      case Left(err) =>
        logger.error("GraphClient.deleteEdgeCollection: {}", err.getMessage)
        Left(err)
    }
  }

  /**
   * Removes a vertex collection from the graph and optionally deletes the
   * collection, if it is not used in any other graph.
   *
   * @param collectionName vertex collection to remove
   */
  def deleteVertexCollection(collectionName: String): F[Either[Exception, Unit]] = sync.delay {
    val response = withAuth(Http(s"$arangoHost/$api/$gharial/$graphName/vertex/$collectionName").method(DELETE)).asString
    decode[ResultMessage](response.body) match {
      case Right(ok) =>
        if (isError(ok)) error(errorMessage(ok.errorMessage))
        else Right(())
      case Left(err) =>
        logger.error("GraphClient.deleteVertexCollection: {}", err.getMessage)
        Left(err)
    }
  }

  //TODO modify edge, replace edge, and same for collection of edges
  //TODO modify vertex, replace vertex, and same for collection of vertices
}
package yang.flexmapping
import yang.flexmapping.datas.DataHolder
import scala.collection.mutable.ArrayBuffer
/**
 * Mutable holder for a structured event's identifying fields plus the filter
 * data attached to it. Fields are populated after construction by callers.
 *
 * Created by y28yang on 4/9/2016.
 */
class StructuredEventWarpper {
  var domainName: String = ""
  var eventType: String = ""
  var eventName: String = ""
  var filterData: ArrayBuffer[DataHolder] = ArrayBuffer.empty[DataHolder]
}
| wjingyao2008/firsttry | NextGenAct/src/main/scala/yang/flexmapping/StructuredEventWarpper.scala | Scala | apache-2.0 | 299 |
package org.goingok.httpServer
import akka.http.scaladsl.Http
import org.goingok.BuildInfo
import org.goingok.data.persistence.db.DatabaseOps
import scala.util.{Failure, Success}
/** The akka-http server
  *
  * The server is started by [[org.goingok.Application]].
  * The generic API structure is described by [[org.goingok.httpServer.GenericApi]]
  * and bound to the server through the ''routes'' value; specific endpoints are
  * specified by [[org.goingok.httpServer.GoingOkAPI]], which inherits it.
  *
  * The ActorSystem, Materializer and Context are imported with [[org.goingok.GoingOkContext]].
  */
object Server extends GoingOkAPI {
  import org.goingok.GoingOkContext._

  // Flipped to true once the database has answered the version query.
  var dbOk = false

  /** Connects to the database, then binds the HTTP routes to address:port. */
  def startServer(address: String, port: Int) = {
    log.info("->> STARTING {} - version {} <<-", BuildInfo.name, BuildInfo.version)
    log.info("Connecting to DB server")
    connectDb
    log.info("Starting http server at {}:{}", address, port)
    Http().bindAndHandle(routes, address, port)
  }

  /** Asynchronously checks DB connectivity, then ensures tables exist and logs their sizes. */
  def connectDb: Unit = {
    DatabaseOps.version.onComplete {
      case Success(result: String) =>
        dbOk = true
        log.info("Current version is: " + result)
        // Create tables that don't exist
        DatabaseOps.checkAndCreateTables()
        // Get the number of rows for all tables
        val tableRows = DatabaseOps.tableSizes()
        if (tableRows.isLeft) tableRows.left.map(i => log.info("Database tables exist: {}", i))
        else log.error("There was a problem with accessing the database tables")
      case Failure(e: Exception) =>
        log.error("Could not get version from db: " + e.getMessage)
      case _ =>
        log.error("There was a problem getting the version from the database")
    }
  }
}
| GoingOK/goingok-server | src/main/scala/org/goingok/httpServer/Server.scala | Scala | apache-2.0 | 1,759 |
package com.aristocrat.mandrill.requests.Ips
import com.aristocrat.mandrill.requests.MandrillRequest
case class PoolInfo(key: String, pool: String) extends MandrillRequest
| aristocratic/mandrill | src/main/scala/com/aristocrat/mandrill/requests/Ips/PoolInfo.scala | Scala | mit | 174 |
import scala.reflect.runtime.universe._
import scala.tools.reflect.Eval
// Reflection regression fixture: evaluates a reify block that nests another
// reify and closes over lazy vals. NOTE(review): the statement order and the
// printed output are the behavior under test — do not reorder.
object Test extends dotty.runtime.LegacyApp {
  {
    var counter = 0
    // Each lazy val increments the shared counter exactly once, on first access.
    lazy val x = { counter += 1; counter }
    lazy val y = { counter += 1; counter }
    val code = reify {
      def foo = y // ensures that y is the first freevar we find
      val bar = reify { println(x * y) }
      bar.eval
      println(x)
      println(y)
    }
    code.eval
  }
}
| yusuke2255/dotty | tests/disabled/macro/run/reify_newimpl_51.scala | Scala | bsd-3-clause | 433 |
package mesosphere.marathon.core.appinfo.impl
import mesosphere.marathon.MarathonSpec
import mesosphere.marathon.core.appinfo.{ AppInfo, AppSelector }
import mesosphere.marathon.state._
import mesosphere.marathon.test.Mockito
import org.scalatest.{ Matchers, GivenWhenThen }
import scala.concurrent.Future
class DefaultAppInfoServiceTest extends MarathonSpec with GivenWhenThen with Mockito with Matchers {
import mesosphere.FutureTestSupport._
  // Test fixture: wires DefaultAppInfoService with mocked collaborators.
  // newBaseData always returns the same mock so interactions accumulate on
  // one object and can be verified.
  class Fixture {
    lazy val groupManager = mock[GroupManager]
    lazy val appRepo = mock[AppRepository]
    lazy val baseData = mock[AppInfoBaseData]
    def newBaseData(): AppInfoBaseData = baseData
    lazy val appInfoService = new DefaultAppInfoService(groupManager, appRepo, newBaseData)

    // Asserts no collaborator saw interactions beyond the explicitly verified ones.
    def verifyNoMoreInteractions(): Unit = {
      noMoreInteractions(groupManager)
      noMoreInteractions(appRepo)
      noMoreInteractions(baseData)
    }
  }
private val app1: AppDefinition = AppDefinition(PathId("/test1"))
val someApps = Set(
app1,
AppDefinition(PathId("/test2")),
AppDefinition(PathId("/test3"))
)
val someNestedApps = Set(
AppDefinition(PathId("/nested/test1")),
AppDefinition(PathId("/nested/test2"))
)
val someGroupWithNested = Group.empty.copy(
apps = someApps,
groups = Set(
Group.empty.copy(
id = PathId("/nested"),
apps = someNestedApps
)
)
)
test("queryForAppId") {
Given("a group repo with some apps")
val f = new Fixture
f.appRepo.currentVersion(app1.id) returns Future.successful(Some(app1))
f.baseData.appInfoFuture(any, any) answers { args =>
Future.successful(AppInfo(args.head.asInstanceOf[AppDefinition]))
}
When("querying for one App")
val appInfo = f.appInfoService.queryForAppId(id = app1.id, embed = Set.empty).futureValue
Then("we get an appInfo for the app from the appRepo/baseAppData")
appInfo.map(_.app.id).toSet should be(Set(app1.id))
verify(f.appRepo, times(1)).currentVersion(app1.id)
for (app <- Set(app1)) {
verify(f.baseData, times(1)).appInfoFuture(app, Set.empty)
}
And("no more interactions")
f.verifyNoMoreInteractions()
}
test("queryForAppId passes embed options along") {
Given("a group repo with some apps")
val f = new Fixture
f.appRepo.currentVersion(app1.id) returns Future.successful(Some(app1))
f.baseData.appInfoFuture(any, any) answers { args =>
Future.successful(AppInfo(args.head.asInstanceOf[AppDefinition]))
}
When("querying for one App")
val embed: Set[AppInfo.Embed] = Set(AppInfo.Embed.Tasks, AppInfo.Embed.Counts)
f.appInfoService.queryForAppId(id = app1.id, embed = embed).futureValue
Then("we get the baseData calls with the correct embed info")
for (app <- Set(app1)) {
verify(f.baseData, times(1)).appInfoFuture(app, embed)
}
}
test("queryAll") {
Given("an app repo with some apps")
val f = new Fixture
val someGroup = Group.empty.copy(apps = someApps)
f.groupManager.rootGroup() returns Future.successful(someGroup)
f.baseData.appInfoFuture(any, any) answers { args =>
Future.successful(AppInfo(args.head.asInstanceOf[AppDefinition]))
}
When("querying all apps")
val appInfos = f.appInfoService.queryAll(AppSelector(_ => true), embed = Set.empty).futureValue
Then("we get appInfos for each app from the appRepo/baseAppData")
appInfos.map(_.app.id).toSet should be(someApps.map(_.id))
verify(f.groupManager, times(1)).rootGroup()
for (app <- someApps) {
verify(f.baseData, times(1)).appInfoFuture(app, Set.empty)
}
And("no more interactions")
f.verifyNoMoreInteractions()
}
test("queryAll passes embed options along") {
Given("an app repo with some apps")
val f = new Fixture
val someGroup = Group.empty.copy(apps = someApps)
f.groupManager.rootGroup() returns Future.successful(someGroup)
f.baseData.appInfoFuture(any, any) answers { args =>
Future.successful(AppInfo(args.head.asInstanceOf[AppDefinition]))
}
When("querying all apps")
val embed: Set[AppInfo.Embed] = Set(AppInfo.Embed.Tasks, AppInfo.Embed.Counts)
f.appInfoService.queryAll(AppSelector(_ => true), embed = embed).futureValue
Then("we get the base data calls with the correct embed")
for (app <- someApps) {
verify(f.baseData, times(1)).appInfoFuture(app, embed)
}
}
test("queryAll filters") {
Given("an app repo with some apps")
val f = new Fixture
val someGroup = Group.empty.copy(apps = someApps)
f.groupManager.rootGroup() returns Future.successful(someGroup)
When("querying all apps with a filter that filters all apps")
val appInfos = f.appInfoService.queryAll(AppSelector(_ => false), embed = Set.empty).futureValue
Then("we get appInfos for no app from the appRepo/baseAppData")
appInfos.map(_.app.id).toSet should be(Set.empty)
verify(f.groupManager, times(1)).rootGroup()
And("no more interactions")
f.verifyNoMoreInteractions()
}
test("queryForGroupId") {
Given("a group repo with some apps below the queried group id")
val f = new Fixture
f.groupManager.group(PathId("/nested")) returns Future.successful(someGroupWithNested.group(PathId("/nested")))
f.baseData.appInfoFuture(any, any) answers { args =>
Future.successful(AppInfo(args.head.asInstanceOf[AppDefinition]))
}
When("querying all apps in that group")
val appInfos = f.appInfoService.queryAllInGroup(PathId("/nested"), embed = Set.empty).futureValue
Then("we get appInfos for each app from the groupRepo/baseAppData")
appInfos.map(_.app.id).toSet should be(someNestedApps.map(_.id))
verify(f.groupManager, times(1)).group(PathId("/nested"))
for (app <- someNestedApps) {
verify(f.baseData, times(1)).appInfoFuture(app, Set.empty)
}
And("no more interactions")
f.verifyNoMoreInteractions()
}
test("queryForGroupId passes embed infos along") {
Given("a group repo with some apps below the queried group id")
val f = new Fixture
f.groupManager.group(PathId("/nested")) returns Future.successful(someGroupWithNested.group(PathId("/nested")))
f.baseData.appInfoFuture(any, any) answers { args =>
Future.successful(AppInfo(args.head.asInstanceOf[AppDefinition]))
}
When("querying all apps in that group")
val embed: Set[AppInfo.Embed] = Set(AppInfo.Embed.Tasks, AppInfo.Embed.Counts)
f.appInfoService.queryAllInGroup(PathId("/nested"), embed = embed).futureValue
Then("baseData was called with the correct embed options")
for (app <- someNestedApps) {
verify(f.baseData, times(1)).appInfoFuture(app, embed)
}
}
}
| Kosta-Github/marathon | src/test/scala/mesosphere/marathon/core/appinfo/impl/DefaultAppInfoServiceTest.scala | Scala | apache-2.0 | 6,734 |
package de.thomasvolk.easy.web
/*
* Copyright 2014 Thomas Volk
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
import org.apache.catalina.startup.Tomcat
import java.io.File
import org.apache.catalina.connector.Connector
/** Entry point that boots an embedded Tomcat serving the Easy webapp. */
object EasyServer {

  /** Starts Tomcat with an HTTP connector and an additional AJP connector,
    * deploys the webapp under the "/easy" context, and blocks until the
    * server is shut down.
    *
    * Configuration comes from the environment, with defaults:
    * EASY_HTTP_PORT (10080), EASY_AJP_PORT (10009),
    * EASY_AJP_REDIRECT_PORT (10443), EASY_SERVER_DIR (".").
    */
  def main(args: Array[String]) {
    val port = sys.env.getOrElse("EASY_HTTP_PORT", "10080")
    val ajpPort = sys.env.getOrElse("EASY_AJP_PORT", "10009")
    val ajpRedirectPort = sys.env.getOrElse("EASY_AJP_REDIRECT_PORT", "10443")
    val baseDir = sys.env.getOrElse("EASY_SERVER_DIR", ".")
    // Resolve the webapp directory relative to app.home when launched from a
    // packaged install, falling back to the in-repo layout for development.
    val webAppDirLocation = sys.props.getOrElse("app.home", "web/src/main") + "/webapp"

    val tomcat = new Tomcat
    tomcat.setBaseDir(baseDir)

    val ajpConnector = new Connector("org.apache.coyote.ajp.AjpProtocol")
    ajpConnector.setPort(ajpPort.toInt)
    ajpConnector.setProtocol("AJP/1.3")
    ajpConnector.setRedirectPort(ajpRedirectPort.toInt)
    ajpConnector.setEnableLookups(false)
    // NOTE: previous setProperty("redirectPort"/"protocol"/"enableLookups")
    // calls were removed — Connector.setProperty dispatches to the same typed
    // setters already invoked above, so they were redundant.
    tomcat.getService.addConnector(ajpConnector)

    tomcat.setPort(port.toInt)
    tomcat.addWebapp("/easy", new File(webAppDirLocation).getAbsolutePath())
    tomcat.start()
    // Block the main thread until the server is stopped.
    tomcat.getServer().await()
  }
}
| thomasvolk/easy | web/src/main/scala/de/thomasvolk/easy/web/EasyServer.scala | Scala | apache-2.0 | 2,136 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.boot
import akka.actor.{ActorRef, ActorSystem}
import com.typesafe.config.Config
import org.apache.toree.boot.layer._
import org.apache.toree.interpreter.Interpreter
import org.apache.toree.kernel.api.Kernel
import org.apache.toree.kernel.protocol.v5.KernelStatusType._
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import org.apache.toree.security.KernelSecurityManager
import org.apache.toree.utils.LogLike
import org.apache.spark.repl.Main
import org.zeromq.ZMQ
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.util.Try
/** Orchestrates kernel startup and shutdown: bare actor-system bootstrap,
  * component wiring, message-handler registration and JVM hook installation.
  * The concrete initialization steps are contributed by the layer traits
  * required through the self-type.
  */
class KernelBootstrap(config: Config) extends LogLike {
  this: BareInitialization with ComponentInitialization
    with HandlerInitialization with HookInitialization =>

  private val DefaultAppName                = SparkKernelInfo.banner
  private val DefaultActorSystemName        = "spark-kernel-actor-system"

  // Mutable state populated by initialize(); unset until then.
  private var actorSystem: ActorSystem = _
  private var actorLoader: ActorLoader = _
  private var kernelMessageRelayActor: ActorRef = _
  private var statusDispatch: ActorRef = _
  private var kernel: Kernel = _

  // Interpreters started during initialization; stopped again in shutdown().
  private var interpreters: Seq[Interpreter] = Nil

  private val rootDir = Main.rootDir
  private val outputDir = Main.outputDir

  /**
   * Initializes all kernel systems.
   */
  def initialize() = {
    // TODO: Investigate potential to initialize System out/err/in to capture
    //       Console DynamicVariable initialization (since takes System fields)
    //       and redirect it to a workable location (like an actor) with the
    //       thread's current information attached
    //
    // E.G. System.setOut(customPrintStream) ... all new threads will have
    //      customPrintStream as their initial Console.out value
    //

    // ENSURE THAT WE SET THE RIGHT SPARK PROPERTIES
    val execUri = System.getenv("SPARK_EXECUTOR_URI")
    System.setProperty("spark.repl.class.outputDir", outputDir.getAbsolutePath)
    if (execUri != null) {
      System.setProperty("spark.executor.uri", execUri)
    }

    displayVersionInfo()

    // Do this first to support shutting down quickly before entire system
    // is ready
    initializeShutdownHook()

    // Initialize the bare minimum to report a starting message
    val (actorSystem, actorLoader, kernelMessageRelayActor, statusDispatch) =
      initializeBare(
        config = config,
        actorSystemName = DefaultActorSystemName
      )

    this.actorSystem = actorSystem
    this.actorLoader = actorLoader
    this.kernelMessageRelayActor = kernelMessageRelayActor
    this.statusDispatch = statusDispatch

    // Indicate that the kernel is now starting
    publishStatus(KernelStatusType.Starting)

    // Initialize components needed elsewhere
    val (commStorage, commRegistrar, commManager, interpreter,
      kernel, dependencyDownloader,
      magicManager, pluginManager, responseMap) =
      initializeComponents(
        config = config,
        appName = DefaultAppName,
        actorLoader = actorLoader
      )
    this.interpreters ++= Seq(interpreter)

    this.kernel = kernel

    // Initialize our handlers that take care of processing messages
    initializeHandlers(
      actorSystem = actorSystem,
      actorLoader = actorLoader,
      kernel = kernel,
      interpreter = interpreter,
      commRegistrar = commRegistrar,
      commStorage = commStorage,
      pluginManager = pluginManager,
      magicManager = magicManager,
      responseMap = responseMap
    )

    // Initialize our non-shutdown hooks that handle various JVM events
    initializeHooks(
      interpreter = interpreter
    )

    logger.debug("Initializing security manager")
    System.setSecurityManager(new KernelSecurityManager)

    logger.info("Marking relay as ready for receiving messages")
    kernelMessageRelayActor ! true

    this
  }

  /**
   * Shuts down all kernel systems.
   */
  def shutdown() = {
    logger.info("Shutting down interpreters")
    // Best-effort: log failures instead of propagating so shutdown continues.
    Try(interpreters.foreach(_.stop())).failed.foreach(
      logger.error("Failed to shutdown interpreters", _: Throwable)
    )

    logger.info("Shutting down actor system")
    Try(actorSystem.terminate()).failed.foreach(
      logger.error("Failed to shutdown actor system", _: Throwable)
    )

    this
  }

  /**
   * Waits for the main actor system to terminate.
   */
  def waitForTermination() = {
    logger.debug("Waiting for actor system to terminate")
    // actorSystem.awaitTermination()
    Await.result(actorSystem.whenTerminated, Duration.Inf)

    this
  }

  // Sends a status update to the status dispatcher, optionally tied to the
  // message (parent header) that triggered the transition.
  private def publishStatus(
    status: KernelStatusType,
    parentHeader: Option[ParentHeader] = None
  ): Unit = {
    parentHeader match {
      case Some(header) => statusDispatch ! ((status, header))
      case None         => statusDispatch ! status
    }
  }

  @inline private def displayVersionInfo() = {
    logger.info("Kernel version: " + SparkKernelInfo.implementationVersion)
    // NOTE(review): if language_info is a Scala Map, .get returns an Option
    // and this logs e.g. "Scala version: Some(2.11)" — confirm intended.
    logger.info("Scala version: " + SparkKernelInfo.language_info.get("version"))
    logger.info("ZeroMQ (JeroMQ) version: " + ZMQ.getVersionString)
  }
}
| poplav/incubator-toree | kernel/src/main/scala/org/apache/toree/boot/KernelBootstrap.scala | Scala | apache-2.0 | 6,092 |
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.util
import org.geotools.data.DataUtilities
import org.geotools.data.store.DataFeatureCollection
import org.geotools.geometry.jts.ReferencedEnvelope
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import scala.collection.JavaConversions._
/**
* Build a unique feature collection based on feature ID
*/
/** Feature collection that merges several batches of simple features and
  * keeps one feature per feature ID. When several features share an ID, the
  * one encountered last wins.
  */
class UniqueMultiCollection(schema: SimpleFeatureType, collections: Iterator[Iterable[SimpleFeature]]) extends DataFeatureCollection {

  // Eagerly drain the (single-use) iterator of batches into an ID-keyed map;
  // put() overwrites, so later duplicates replace earlier ones.
  private val dedupedFeatures = {
    val byId = collection.mutable.HashMap.empty[String, SimpleFeature]
    for (batch <- collections; feature <- batch) byId.put(feature.getID, feature)
    byId.values
  }

  override def getBounds: ReferencedEnvelope = DataUtilities.bounds(this)

  override def getCount: Int = openIterator.size

  override protected def openIterator = dedupedFeatures.iterator

  override def toArray: Array[AnyRef] = openIterator.toArray

  override def getSchema: SimpleFeatureType = schema
}
| mmatz-ccri/geomesa | geomesa-core/src/main/scala/org/locationtech/geomesa/core/util/UniqueMultiCollection.scala | Scala | apache-2.0 | 1,618 |
package games
/** Abstract description of a game board's dimensions. */
trait Board {
  // Common board values
  val size: Int // total number of cells — presumably rows * cols; confirm in implementations
  val rows: Int // number of rows
  val cols: Int // number of columns
} | minhprg/mcts_in_quoridor | src/main/scala/games/Board.scala | Scala | gpl-2.0 | 101 |
/*
* Copyright (C) 2016-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.persistence.cassandra.query
import java.time.format.DateTimeFormatter
import java.time.{ LocalDateTime, ZoneOffset }
import akka.actor.NoSerializationVerificationNeeded
import akka.annotation.InternalApi
import akka.persistence.cassandra.journal.{ CassandraJournalConfig, Day, Hour, TimeBucket }
import com.datastax.driver.core.ConsistencyLevel
import com.typesafe.config.Config
import scala.concurrent.duration._
/**
 * INTERNAL API
 *
 * Settings for the Cassandra read journal, parsed from the query plugin's
 * `config` section plus values shared with the write-side plugin config so
 * that both sides agree on keyspace/table layout.
 */
@InternalApi private[akka] class CassandraReadJournalConfig(config: Config, writePluginConfig: CassandraJournalConfig)
  extends NoSerializationVerificationNeeded {

  // Pattern for the "first-time-bucket" setting, always interpreted as UTC.
  private val timeBucketFormat = "yyyyMMdd'T'HH:mm"
  private val timeBucketFormatter: DateTimeFormatter = DateTimeFormatter.ofPattern(timeBucketFormat).withZone(ZoneOffset.UTC)

  val refreshInterval: FiniteDuration = config.getDuration("refresh-interval", MILLISECONDS).millis
  val gapFreeSequenceNumbers: Boolean = config.getBoolean("gap-free-sequence-numbers")
  val maxBufferSize: Int = config.getInt("max-buffer-size")
  val fetchSize: Int = config.getInt("max-result-size-query")

  // TODO use for the events by tag query too
  val fetchMoreThreshold: Double = config.getDouble("fetch-more-threshold")
  require(
    0.0 <= fetchMoreThreshold && fetchMoreThreshold <= 1.0,
    s"fetch-more-threshold must be between 0.0 and 1.0, was $fetchMoreThreshold"
  )

  val readConsistency: ConsistencyLevel = ConsistencyLevel.valueOf(config.getString("read-consistency"))
  val readRetries: Int = config.getInt("read-retries")

  // Normalizes "first-time-bucket" to the full yyyyMMdd'T'HH:mm form before
  // parsing: Hour-sized buckets may omit the minutes, Day-sized buckets the
  // whole time part. Anything else is rejected.
  val firstTimeBucket: TimeBucket = {
    val firstBucket = config.getString("first-time-bucket")
    val firstBucketPadded = (writePluginConfig.bucketSize, firstBucket) match {
      case (_, fb) if fb.length == 14    => fb
      case (Hour, fb) if fb.length == 11 => s"${fb}:00"
      case (Day, fb) if fb.length == 8   => s"${fb}T00:00"
      case _                             => throw new IllegalArgumentException("Invalid first-time-bucket format. Use: " + timeBucketFormat)
    }
    val date: LocalDateTime = LocalDateTime.parse(firstBucketPadded, timeBucketFormatter)
    TimeBucket(
      date.toInstant(ZoneOffset.UTC).toEpochMilli,
      writePluginConfig.bucketSize
    )
  }

  val deserializationParallelism: Int = config.getInt("deserialization-parallelism")

  val pluginDispatcher: String = config.getString("plugin-dispatcher")

  // Settings inherited from the write-side plugin configuration.
  val keyspace: String = writePluginConfig.keyspace
  val targetPartitionSize: Long = writePluginConfig.targetPartitionSize
  val table: String = writePluginConfig.table
  val pubsubNotification: Boolean = writePluginConfig.tagWriterSettings.pubsubNotification

  val eventsByPersistenceIdEventTimeout: FiniteDuration = config.getDuration("events-by-persistence-id-gap-timeout", MILLISECONDS).millis

  val eventsByTagGapTimeout: FiniteDuration = config.getDuration("events-by-tag.gap-timeout", MILLISECONDS).millis
  // Explicit type annotation added for consistency with the sibling duration settings.
  val eventsByTagNewPersistenceIdScanTimeout: FiniteDuration = config.getDuration("events-by-tag.new-persistence-id-scan-timeout", MILLISECONDS).millis
  val eventsByTagOffsetScanning: FiniteDuration = config.getDuration("events-by-tag.offset-scanning-period", MILLISECONDS).millis
}
| ktoso/akka-persistence-cassandra | core/src/main/scala/akka/persistence/cassandra/query/CassandraReadJournalConfig.scala | Scala | apache-2.0 | 3,258 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.linker
import scala.concurrent._
import org.junit.Test
import org.junit.Assert._
import org.scalajs.ir.ClassKind
import org.scalajs.ir.EntryPointsInfo
import org.scalajs.ir.Definitions._
import org.scalajs.ir.Trees._
import org.scalajs.logging._
import org.scalajs.junit.async._
import org.scalajs.linker._
import org.scalajs.linker.irio._
import org.scalajs.linker.testutils._
import org.scalajs.linker.testutils.TestIRBuilder._
/** Tests for the standard linker: minimal hello-world linking against the
  * minilib, and recovery behavior of a linker whose previous run failed.
  */
class LinkerTest {
  import scala.concurrent.ExecutionContext.Implicits.global

  import LinkerTest._

  /** Makes sure that the minilib is sufficient to completely link a hello
   *  world.
   */
  @Test
  def linkHelloWorld(): AsyncResult = {
    val name = "LHelloWorld$"
    // Body of the main method: console.log("Hello world!")
    val mainMethodBody = {
      JSBracketMethodApply(JSGlobalRef(Ident("console")), StringLiteral("log"),
          List(StringLiteral("Hello world!")))
    }
    val classDefs = Seq(
        classDef(name, kind = ClassKind.ModuleClass,
            superClass = Some(ObjectClass),
            memberDefs = List(
                trivialCtor(name),
                mainMethodDef(mainMethodBody)
            )
        )
    )
    val moduleInitializers = List(
        ModuleInitializer.mainMethodWithArgs("HelloWorld", "main")
    )
    await(testLink(classDefs, moduleInitializers))
  }

  /** This test exposes a problem where a linker in error state is called
   *  multiple times and ends up thinking it is being used concurrently.
   */
  @Test
  def clean_linking_state(): AsyncResult = {
    class DummyException extends Exception

    // A sequence whose every access throws, to force the linker into a
    // failed state on its first use.
    val badSeq = new IndexedSeq[VirtualScalaJSIRFile] {
      def apply(x: Int): VirtualScalaJSIRFile = throw new DummyException()
      def length: Int = throw new DummyException()
    }

    val linker = StandardLinker(StandardLinker.Config())

    def callLink(): Future[Unit] = {
      val out = LinkerOutput(new WritableMemVirtualBinaryFile)
      linker.link(badSeq, Nil, out, NullLogger)
    }

    // Call first time. Get exception from badSeq.
    // Note that the call must not throw immediately.
    val firstRun = callLink().failed.map {
      case e: DummyException => // ok.
      case _                 => fail("Expected DummyException")
    }

    // Subsequent calls must fail with IllegalStateException, but must NOT
    // claim concurrent use of the linker.
    def callInFailedState(prev: Future[Unit]): Future[Unit] = {
      prev.flatMap(_ => callLink()).failed.map {
        case e: IllegalStateException =>
          if (e.getMessage.contains("concurrent")) {
            fail("Found bad message in exception: " + e.getMessage)
          }
        case _ => fail("Expected IllegalStateException")
      }
    }

    await((1 to 4).foldLeft(firstRun)((p, _) => callInFailedState(p)))
  }
}
object LinkerTest {
  /** Links the given class defs (wrapped as in-memory IR files) together with
    * the minilib against a standard linker, writing to an in-memory output.
    * The returned future fails if linking fails.
    */
  def testLink(classDefs: Seq[ClassDef],
      moduleInitializers: List[ModuleInitializer])(
      implicit ec: ExecutionContext): Future[Unit] = {
    val linker = StandardLinker(StandardLinker.Config())

    // Wrap each ClassDef as an unversioned in-memory IR file.
    val classDefsFiles = classDefs.map { classDef =>
      new VirtualScalaJSIRFile {
        val path: String = "mem://" + classDef.name.name + ".sjsir"
        val version: Option[String] = None

        def tree(implicit ec: ExecutionContext): Future[ClassDef] =
          Future(classDef)
        def entryPointsInfo(implicit ec: ExecutionContext): Future[EntryPointsInfo] =
          tree.map(EntryPointsInfo.forClassDef)
      }
    }

    val output = LinkerOutput(new WritableMemVirtualBinaryFile)

    TestIRRepo.minilib.stdlibIRFiles.flatMap { stdLibFiles =>
      linker.link(stdLibFiles ++ classDefsFiles, moduleInitializers,
          output, new ScalaConsoleLogger(Level.Error))
    }
  }
}
| SebsLittleHelpers/scala-js | linker/shared/src/test/scala/org/scalajs/linker/LinkerTest.scala | Scala | apache-2.0 | 3,866 |
/*
* Licensed to Tuplejump Software Pvt. Ltd. under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Tuplejump Software Pvt. Ltd. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.tuplejump.calliope.utils
import java.nio.ByteBuffer
import org.apache.cassandra.utils.ByteBufferUtil
import scala.language.implicitConversions
import java.util.Date
import java.util.UUID
import java.net.InetAddress
import com.datastax.driver.core.DataType
import java.math.BigInteger
import scala.collection.JavaConverters._
import org.joda.time.DateTime
/** Implicit two-way conversions between `ByteBuffer` and common Scala/Java
  * types (primitives, dates, UUIDs, big numbers, collections, tuples and
  * `Option`s), delegating to Cassandra's `ByteBufferUtil` and the DataStax
  * driver's `DataType` codecs.
  *
  * NOTE(review): some deserializers advance the buffer position (e.g. the
  * Boolean/Byte readers call `buffer.get()`), while others read at absolute
  * offsets (the UUID reader uses `getLong(buffer.position())`), so mixing
  * them on a shared buffer is position-sensitive — confirm callers' usage.
  */
object RichByteBuffer {

  /* ByteBuffer to Typed Objects */
  implicit def ByteBuffer2Int(buffer: ByteBuffer): Int = ByteBufferUtil.toInt(buffer)
  implicit def ByteBuffer2Double(buffer: ByteBuffer): Double = ByteBufferUtil.toDouble(buffer)
  implicit def ByteBuffer2Float(buffer: ByteBuffer): Float = ByteBufferUtil.toFloat(buffer)
  implicit def ByteBuffer2Long(buffer: ByteBuffer): Long = ByteBufferUtil.toLong(buffer)
  implicit def ByteBuffer2String(buffer: ByteBuffer): String = ByteBufferUtil.string(buffer)
  // Consumes one byte from the buffer; a value of 1 means true.
  implicit def ByteBuffer2Boolean(buffer: ByteBuffer): Boolean = buffer.get() == 1
  // Dates are stored as epoch milliseconds.
  implicit def ByteBuffer2Date(buffer: ByteBuffer): Date = new Date(ByteBufferUtil.toLong(buffer))
  implicit def ByteBuffer2DateTime(buffer: ByteBuffer): DateTime = new DateTime(ByteBufferUtil.toLong(buffer))
  // Absolute reads: does not advance the buffer position.
  implicit def ByteBuffer2UUID(buffer: ByteBuffer): UUID = new UUID(buffer.getLong(buffer.position()), buffer.getLong(buffer.position() + 8))
  implicit def ByteBuffer2Byte(buffer: ByteBuffer): Byte = buffer.get()
  implicit def ByteBuffer2ByteArray(buffer: ByteBuffer): Array[Byte] = ByteBufferUtil.getArray(buffer)
  implicit def ByteBuffer2BigDecimal(buffer: ByteBuffer): BigDecimal = DataType.decimal().deserialize(buffer).asInstanceOf[java.math.BigDecimal]
  implicit def ByteBuffer2BigInteger(buffer: ByteBuffer): BigInteger = new BigInteger(ByteBufferUtil.getArray(buffer))
  implicit def ByteBuffer2InetAddress(buffer: ByteBuffer): InetAddress = DataType.inet().deserialize(buffer).asInstanceOf[InetAddress]

  // Tuple/collection conversions built on the element-level implicits above.
  implicit def TupleBB2TupleSS(t: (ByteBuffer, ByteBuffer)): (String, String) = (t._1, t._2)
  implicit def TupleBB2TupleSI(t: (ByteBuffer, ByteBuffer)): (String, Int) = (t._1, t._2)
  implicit def TupleBB2TupleSL(t: (ByteBuffer, ByteBuffer)): (String, Long) = (t._1, t._2)
  implicit def TupleBB2TupleSF(t: (ByteBuffer, ByteBuffer)): (String, Float) = (t._1, t._2)
  implicit def TupleBB2TupleSD(t: (ByteBuffer, ByteBuffer)): (String, Double) = (t._1, t._2)

  implicit def ListBB2ListString(l: List[ByteBuffer]): List[String] = l.map(x => ByteBufferUtil.string(x))

  implicit def MapSB2MapSS(m: Map[String, ByteBuffer]): Map[String, String] = m.map {
    case (k, v) => new Tuple2[String, String](k, v)
  }.toMap

  implicit def MapSB2MapSI(m: Map[String, ByteBuffer]): Map[String, Int] = m.map {
    case (k, v) => new Tuple2[String, Int](k, v)
  }.toMap

  //implicit def ByteBuffer2String(buffer: ByteBuffer, charset: Charset): String = ByteBufferUtil.string(buffer, charset)

  implicit def MapBB2MapSS(m: Map[ByteBuffer, ByteBuffer]): Map[String, String] = m.map {
    case (k, v) => new Tuple2[String, String](k, v)
  }.toMap

  implicit def MapBB2MapSB(m: Map[ByteBuffer, ByteBuffer]) = m.map {
    case (k, v) => new Tuple2[String, ByteBuffer](k, v)
  }.toMap

  /* Typed objects to ByteBuffer */
  implicit def String2ByteBuffer(str: String): ByteBuffer = ByteBufferUtil.bytes(str)
  implicit def Int2ByteBuffer(i: Int): ByteBuffer = ByteBufferUtil.bytes(i)
  implicit def Double2ByteBuffer(d: Double): ByteBuffer = ByteBufferUtil.bytes(d)
  implicit def String2ByteBuffer(f: Float): ByteBuffer = ByteBufferUtil.bytes(f)
  implicit def Long2ByteBuffer(l: Long): ByteBuffer = ByteBufferUtil.bytes(l)
  // Encoded as a single byte: 1 for true, 0 for false.
  implicit def Boolean2ByteBuffer(bool: Boolean): ByteBuffer = if (bool) Array(1.toByte) else Array(0.toByte)
  implicit def Date2ByteBuffer(date: Date): ByteBuffer = ByteBufferUtil.bytes(date.getTime)
  implicit def DateTime2ByteBuffer(date: DateTime): ByteBuffer = ByteBufferUtil.bytes(date.getMillis)
  implicit def UUID2ByteBuffer(uuid: UUID): ByteBuffer = ByteBufferUtil.bytes(uuid)
  implicit def Byte2ByteBuffer(byte: Byte): ByteBuffer = Array(byte)
  implicit def ByteArray2ByteBuffer(bytes: Array[Byte]): ByteBuffer = ByteBuffer.wrap(bytes)
  implicit def BigDecimal2ByteBuffer(bigDec: BigDecimal): ByteBuffer = DataType.decimal().serialize(bigDec.bigDecimal)
  implicit def BigInteger2ByteBuffer(bigInt: BigInteger): ByteBuffer = bigInt.toByteArray
  // NOTE(review): name says ByteBuffer2InetAddress but it serializes an
  // InetAddress to a ByteBuffer — presumably a naming slip; confirm callers.
  implicit def ByteBuffer2InetAddress(address: InetAddress): ByteBuffer = ByteBufferUtil.bytes(address)

  implicit def TupleSS2TupleBB(t: (String, String)): (ByteBuffer, ByteBuffer) = (t._1, t._2)
  implicit def TupleSI2TupleBB(t: (String, Int)): (ByteBuffer, ByteBuffer) = (t._1, t._2)
  implicit def TupleSL2TupleBB(t: (String, Long)): (ByteBuffer, ByteBuffer) = (t._1, t._2)
  implicit def TupleSF2TupleBB(t: (String, Float)): (ByteBuffer, ByteBuffer) = (t._1, t._2)
  implicit def TupleSD2TupleBB(t: (String, Double)): (ByteBuffer, ByteBuffer) = (t._1, t._2)

  implicit def MapSS2MapBB(m: Map[String, String]) = m.map {
    case (k, v) => new Tuple2[ByteBuffer, ByteBuffer](k, v)
  }.toMap

  /* Conversions for Collections */
  implicit def MapSS2ByteBuffer(map: Map[String, String]): ByteBuffer = DataType.map(DataType.text(), DataType.text()).serialize(map.asJava)
  implicit def ByteBuffer2MapSS(buffer: ByteBuffer): Map[String, String] = DataType.map(DataType.text(), DataType.text()).deserialize(buffer).asInstanceOf[java.util.Map[String, String]].asScala.toMap

  implicit def StringS2ByteBuffer(ss: Set[String]): ByteBuffer =
    DataType.set(DataType.text()).serialize(ss.asJava)
  implicit def ByteBuffer2StringS(buffer: ByteBuffer): Set[String] =
    DataType.set(DataType.text()).deserialize(buffer)
      .asInstanceOf[java.util.Set[String]].asScala.toSet

  implicit def ListS2ByteBuffer(ls: List[String]): ByteBuffer =
    DataType.list(DataType.text()).serialize(ls.asJava)
  implicit def ByteBuffer2ListS(buffer: ByteBuffer): List[String] =
    DataType.list(DataType.text()).deserialize(buffer)
      .asInstanceOf[java.util.List[String]].asScala.toList

  /* Conversions for Option */
  // None maps to a null ByteBuffer and vice versa; the underlying codecs
  // cannot represent absence themselves.
  def Option2ByteBuffer[A](maybe: Option[A])(implicit serialize: A => ByteBuffer): ByteBuffer =
    maybe match {
      case None => null: ByteBuffer // Avoid trying to serialize an
                                    // empty value because that
                                    // doesn't work.
      case Some(really) => serialize(really)
    }

  def ByteBuffer2Option[A](buffer: ByteBuffer)(implicit deserialize: ByteBuffer => A): Option[A] =
    buffer match {
      case null => None // Avoid trying to deserialize a null because
                        // that doesn't work
      case _ => Some[A](deserialize(buffer))
    }

  // Eta-expanded Option codecs for each supported element type.
  implicit def OptionString2ByteBuffer = Option2ByteBuffer[String] _
  implicit def ByteBuffer2OptionString = ByteBuffer2Option[String] _

  implicit def OptionBoolean2ByteBuffer = Option2ByteBuffer[Boolean] _
  implicit def ByteBuffer2OptionBoolean = ByteBuffer2Option[Boolean] _

  implicit def OptionInt2ByteBuffer = Option2ByteBuffer[Int] _
  implicit def ByteBuffer2OptionInt = ByteBuffer2Option[Int] _

  implicit def OptionLong2ByteBuffer = Option2ByteBuffer[Long] _
  implicit def ByteBuffer2OptionLong = ByteBuffer2Option[Long] _

  implicit def OptionBigInteger2ByteBuffer = Option2ByteBuffer[BigInteger] _
  implicit def ByteBuffer2OptionBigInteger = ByteBuffer2Option[BigInteger] _

  implicit def OptionFloat2ByteBuffer = Option2ByteBuffer[Float] _
  implicit def ByteBuffer2OptionFloat = ByteBuffer2Option[Float] _

  implicit def OptionDouble2ByteBuffer = Option2ByteBuffer[Double] _
  implicit def ByteBuffer2OptionDouble = ByteBuffer2Option[Double] _

  implicit def OptionBigDecimal2ByteBuffer = Option2ByteBuffer[BigDecimal] _
  implicit def ByteBuffer2OptionBigDecimal = ByteBuffer2Option[BigDecimal] _

  implicit def OptionDate2ByteBuffer = Option2ByteBuffer[Date] _
  implicit def ByteBuffer2OptionDate = ByteBuffer2Option[Date] _

  implicit def OptionDateTime2ByteBuffer = Option2ByteBuffer[DateTime] _
  implicit def ByteBuffer2OptionDateTime = ByteBuffer2Option[DateTime] _

  implicit def OptionUuid2ByteBuffer = Option2ByteBuffer[UUID] _
  implicit def ByteBuffer2OptionUuid = ByteBuffer2Option[UUID] _

  implicit def OptionInetAddress2ByteBuffer = Option2ByteBuffer[InetAddress] _
  implicit def ByteBuffer2OptionInetAddress = ByteBuffer2Option[InetAddress] _
}
| tuplejump/calliope | core/src/main/scala/com/tuplejump/calliope/utils/RichByteBuffer.scala | Scala | apache-2.0 | 9,273 |
package com.twitter.finagle.ssl
import java.net.Socket
import java.security.cert.X509Certificate
import javax.net.ssl.SSLEngine
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
@RunWith(classOf[JUnitRunner])
/** Tests that IgnorantTrustManager accepts any certificate chain: every
  * checkClientTrusted/checkServerTrusted overload must return without
  * throwing, and it must advertise no accepted issuers.
  */
class IgnorantTrustManagerTest extends FunSuite with MockitoSugar {

  // Shared fixtures: a mocked socket/engine and a one-element cert chain.
  val authType = "DHE_DSS"
  val socket = mock[Socket]
  val engine = mock[SSLEngine]
  val cert = mock[X509Certificate]
  val chain = Array(cert)

  test("an IgnorantTrustManager can be created") {
    val tm = new IgnorantTrustManager()
    assert(tm != null)
  }

  test("an IgnorantTrustManager has no accepted issuers") {
    val tm = new IgnorantTrustManager()
    val issuers = tm.getAcceptedIssuers()
    assert(issuers.length == 0)
  }

  test("checkClientTrusted does not throw") {
    val tm = new IgnorantTrustManager()
    tm.checkClientTrusted(chain, authType)
  }

  test("checkClientTrusted with socket does not throw") {
    val tm = new IgnorantTrustManager()
    tm.checkClientTrusted(chain, authType, socket)
  }

  test("checkClientTrusted with engine does not throw") {
    val tm = new IgnorantTrustManager()
    tm.checkClientTrusted(chain, authType, engine)
  }

  test("checkServerTrusted does not throw") {
    val tm = new IgnorantTrustManager()
    tm.checkServerTrusted(chain, authType)
  }

  test("checkServerTrusted with socket does not throw") {
    val tm = new IgnorantTrustManager()
    tm.checkServerTrusted(chain, authType, socket)
  }

  test("checkServerTrusted with engine does not throw") {
    val tm = new IgnorantTrustManager()
    tm.checkServerTrusted(chain, authType, engine)
  }
}
| spockz/finagle | finagle-core/src/test/scala/com/twitter/finagle/ssl/IgnorantTrustManagerTest.scala | Scala | apache-2.0 | 1,702 |
package changestream.actors
import akka.actor.{ActorRefFactory, Props}
import akka.testkit.{TestActorRef, TestProbe}
import changestream.actors.PositionSaver.EmitterResult
import changestream.helpers.{Config, Emitter}
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._
import scala.language.postfixOps
class SnsActorSpec extends Emitter with Config {
  val probe = TestProbe()
  val maker = (_: ActorRefFactory) => probe.ref
  val actorRef = TestActorRef(Props(classOf[SnsActor], maker, awsConfig))

  // Same awsConfig, but with a topic name containing {database}/{tableName}
  // placeholders that the actor is expected to interpolate.
  val configWithInterpolation = ConfigFactory
    .parseString("""aws.sns.topic = "__integration_tests-{database}-{tableName}"""")
    .withFallback(awsConfig)
  val snsWithInterpolation = TestActorRef(Props(classOf[SnsActor], maker, configWithInterpolation))

  /** Asserts that the probe sees an EmitterResult carrying the message's next position. */
  private def expectEmittedPosition(): Unit = {
    val emitted = probe.expectMsgType[EmitterResult](5000 milliseconds)
    emitted.position should be(message.nextPosition)
  }

  "When SnsActor receives a single valid message" should {
    "Immediately publish the message to SNS" in {
      actorRef ! message
      expectEmittedPosition()
    }
  }

  "When SnsActor receives a message" should {
    "Should correctly publish the message when the topic contains interpolated database and/or tableName" in {
      snsWithInterpolation ! message
      expectEmittedPosition()
    }
  }
}
| mavenlink/changestream | src/test/scala/changestream/actors/SnsActorSpec.scala | Scala | mit | 1,396 |
package com.twitter.chill
import _root_.java.io.Serializable
import com.twitter.bijection.Injection
/**
* @author Oscar Boykin
* @author Sam Ritchie
*
* This KryoSerializer extension delegates the actual serialization to
* an instance of Injection[T,Array[Byte]].
*/
/** Factory for [[InjectiveSerializer]] instances. */
object InjectiveSerializer {
  // Importing this implicit into scope will allow bijections to be
  // registered as Kryo Serializers, given an instance of Kryo.
  /**
   * Builds a Kryo serializer backed by the implicitly available
   * `Injection[T, Array[Byte]]`.
   *
   * Explicit return type added: public API members should not rely on
   * type inference.
   */
  def asKryo[T](implicit injection: Injection[T, Array[Byte]]): InjectiveSerializer[T] =
    new InjectiveSerializer(injection)
}
/**
 * A KSerializer that delegates the actual serialization to an instance of
 * `Injection[T, Array[Byte]]`.
 *
 * Wire format: a variable-length (optimize-positive) int length prefix
 * followed by the raw injected bytes.
 */
class InjectiveSerializer[T] private (injection: Injection[T, Array[Byte]]) extends KSerializer[T] with Serializable {
  /** Serializes `obj` as a length-prefixed byte array. (Deprecated procedure syntax replaced with `: Unit =`.) */
  def write(kser: Kryo, out: Output, obj: T): Unit = {
    val bytes = injection(obj)
    out.writeInt(bytes.length, true) // true = optimize-positive varint encoding
    out.writeBytes(bytes)
  }

  /**
   * Reads a length-prefixed byte array and inverts it back to a T.
   * `invert` returns a Try; `.get` rethrows the underlying failure if the
   * payload cannot be decoded.
   */
  def read(kser: Kryo, in: Input, cls: Class[T]): T = {
    val bytes = new Array[Byte](in.readInt(true))
    in.readBytes(bytes)
    injection.invert(bytes).get
  }
}
| steveloughran/chill | chill-bijection/src/main/scala/com/twitter/chill/InjectiveSerializer.scala | Scala | apache-2.0 | 980 |
package rpm4s.data
/**
 * Value class wrapping the RPM per-file flags bit field. All operators are
 * pure and return fresh instances; the underlying Int is never mutated.
 */
class FileFlags(val value: Int) extends AnyVal {
  def &(that: FileFlags): FileFlags = FileFlags(value & that.value)
  def |(that: FileFlags): FileFlags = FileFlags(value | that.value)
  def ^(that: FileFlags): FileFlags = FileFlags(value ^ that.value)

  /** Set-style aliases over the bitwise operators. */
  def union(that: FileFlags): FileFlags = this | that
  def intersection(that: FileFlags): FileFlags = this & that
  def diff(that: FileFlags): FileFlags = this ^ that

  /** Complement relative to FileFlags.All (not the full 32-bit word). */
  def inverse: FileFlags = this ^ FileFlags.All
  def -(that: FileFlags): FileFlags = this & that.inverse

  /** True when every bit set in `attributes` is also set here. */
  def containsAll(attributes: FileFlags): Boolean =
    (value & attributes.value) == attributes.value

  /** True when at least one bit of `attributes` is set here. */
  def containsAny(attributes: FileFlags): Boolean =
    (value & attributes.value) != 0

  override def toString: String = {
    // Table-driven rendering; order and labels (including the historical
    // "Licence" spelling for the License flag) match the established output.
    val labels = Seq(
      FileFlags.Config -> "Config",
      FileFlags.Doc -> "Doc",
      FileFlags.DoNotUse -> "DoNotUse",
      FileFlags.MissingOk -> "MissingOk",
      FileFlags.NoReplace -> "NoReplace",
      FileFlags.SpecFile -> "SpecFile",
      FileFlags.Ghost -> "Ghost",
      FileFlags.License -> "Licence",
      FileFlags.Readme -> "Readme",
      FileFlags.Exclude -> "Exclude",
      FileFlags.PubKey -> "PubKey"
    ).collect { case (flag, label) if containsAll(flag) => label }
    s"FileFlags(${labels.mkString("|")})"
  }
}

/** Known RPM file flag bits (mirrors rpmfiles.h RPMFILE_* values). */
object FileFlags {
  def apply(byte: Int): FileFlags = new FileFlags(byte)

  // NOTE: deliberately shadows scala.None inside this object's scope.
  val None = FileFlags(0)
  //TODO: config seems to be specified like this %config(noreplace) so it probably can never be alone
  val Config = FileFlags(1 << 0)
  val Doc = FileFlags(1 << 1)
  val DoNotUse = FileFlags(1 << 2)
  val MissingOk = FileFlags(1 << 3)
  val NoReplace = FileFlags(1 << 4)
  val SpecFile = FileFlags(1 << 5)
  val Ghost = FileFlags(1 << 6)
  val License = FileFlags(1 << 7)
  val Readme = FileFlags(1 << 8)
  val Exclude = FileFlags(1 << 9)
  // Bit 10 (RPMFILE_UNPATCHED in rpm) is intentionally unassigned here.
  val PubKey = FileFlags(1 << 11)
  // Bits 0-9 only — PubKey (bit 11) is NOT part of All, so `inverse`/`-`
  // never touch it. This mirrors the original behavior; verify intent
  // before widening the mask.
  val All = FileFlags((1 << 10) - 1)
}
| lucidd/rpm4s | shared/src/main/scala/rpm4s/data/FileFlags.scala | Scala | mit | 2,462 |
//package de.sciss.anemone
//
//import de.sciss.lucre.synth.Txn
//import de.sciss.nuages.{DSL, Nuages, ScissProcs}
//import de.sciss.proc
//import de.sciss.proc.{ParamSpec, Warp}
//import de.sciss.synth.GE
//
//object PikselGens {
// def apply[T <: Txn[T]](dsl:DSL[T], sConfig: ScissProcs.Config, nConfig: Nuages.Config)
// (implicit tx: T, nuages: Nuages[T]): Unit = {
// import dsl._
// import sConfig.genNumChannels
//
//// implicit val _nuages: Nuages[T] = nuages
//
// import de.sciss.synth.ugen._
// import de.sciss.synth.GEOps._
//
// def filterF (name: String)(fun: GE => GE): proc.Proc[T] =
// filter (name, if (DSL.useScanFixed) genNumChannels else -1)(fun)
//
// def default(in: Double): ControlValues =
// if (genNumChannels <= 0)
// in
// else
// Vector.fill(genNumChannels)(in)
//
// def mix(in: GE, flt: GE, mix: GE): GE = LinXFade2.ar(in, flt, mix * 2 - 1)
// def mkMix(df: Double = 0.0): GE = pAudio("mix", ParamSpec(0, 1), default(df))
//
// // latch and quantize
// filterF("quatch") { in =>
// shortcut = "Q"
// val pQuant = pAudio("quant" , ParamSpec(0.0001, 1.0, Warp.Exp), default(0.01))
// val pLatchAmt = pAudio("amt" , ParamSpec(0.0, 1.0), default(0))
// val pLatchIn = pAudio("mod" , ParamSpec(0.0, 1.0), default(0.0))
// val pLeak = pAudio("leak" , ParamSpec(0, 1, Warp.Int), default(1))
// val pMix = mkMix()
//
// val quant = in.roundTo(pQuant)
// val latch = Latch.ar(quant, pLatchIn)
// val sig = quant * (-pLatchAmt + 1) + latch * pLatchAmt
// val leak = LeakDC.ar(sig)
// val flt = sig * (-pLeak + 1) + leak * pLeak
//
// mix(in, flt, pMix)
// }
// }
//}
| Sciss/AnemoneActiniaria | src/main/scala/de/sciss/anemone/PikselGens.scala | Scala | gpl-3.0 | 1,790 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.integration.torch
import com.intel.analytics.bigdl.nn.CMinTable
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.RandomGenerator._
import com.intel.analytics.bigdl.utils.Table
import scala.collection.mutable.HashMap
import scala.util.Random
@com.intel.analytics.bigdl.tags.Serial
class CMinTableSpec extends TorchSpec {
  // Fixed: the description previously said "A CMaxTable Module" — a
  // copy-paste leftover from CMaxTableSpec; this spec exercises CMinTable.
  "A CMinTable Module" should "generate correct output and grad" in {
    torchCheck()
    val seed = 100
    RNG.setSeed(seed)
    val module = new CMinTable[Double]()

    // Random inputs/gradients; the seed above only fixes the module's RNG,
    // Torch is seeded separately inside the generated Lua code.
    val input1 = Tensor[Double](5).apply1(e => Random.nextDouble())
    val input2 = Tensor[Double](5).apply1(e => Random.nextDouble())
    val gradOutput = Tensor[Double](5).apply1(e => Random.nextDouble())
    val input = new Table()
    // Table is keyed by Double here to match Torch's Lua table indices.
    input(1.toDouble) = input1
    input(2.toDouble) = input2

    val start = System.nanoTime()
    val output = module.forward(input)
    val gradInput = module.backward(input, gradOutput)
    val end = System.nanoTime()
    val scalaTime = end - start

    // Run the reference Torch implementation and compare both directions.
    val code = "torch.manualSeed(" + seed + ")\n" +
      "module = nn.CMinTable()\n" +
      "output = module:forward(input)\n" +
      "gradInput = module:backward(input,gradOutput)\n"
    val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
      Array("output", "gradInput"))
    val luaOutput1 = torchResult("output").asInstanceOf[Tensor[Double]]
    val luaOutput2 = torchResult("gradInput").asInstanceOf[Table]

    luaOutput1 should be (output)
    luaOutput2 should be (gradInput)

    println("Test case : CMinTable, Torch : " + luaTime +
      " s, Scala : " + scalaTime / 1e9 + " s")
  }
}
| zhangxiaoli73/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/integration/torch/CMinTableSpec.scala | Scala | apache-2.0 | 2,303 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.json
import com.fasterxml.jackson.core.{JsonParser, JsonToken}
private object JacksonUtils {
  /**
   * Advances the parser by one token and reports whether iteration should
   * continue: returns false once the stream is exhausted (null token) or
   * the given stop token has been reached.
   */
  def nextUntil(parser: JsonParser, stopOn: JsonToken): Boolean = {
    val token = parser.nextToken()
    token != null && token != stopOn
  }
}
| tophua/spark1.52 | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JacksonUtils.scala | Scala | apache-2.0 | 1,244 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.frs102.retriever.Frs102AccountsBoxRetriever
import uk.gov.hmrc.ct.box._
/**
 * CT accounts box AC128A (FRS 102): depreciation of tangible fixtures and
 * fittings at the start of the period of account.
 */
case class AC128A(value: Option[Int])
  extends CtBoxIdentifier(name = "Tangible assets - Fixtures and fittings - depreciation at POA START")
  with CtOptionalInteger
  with Input
  with ValidatableBox[Frs102AccountsBoxRetriever]
  with Validators {

  /** Single rule: when present, the amount must be a non-negative monetary value. */
  override def validate(boxRetriever: Frs102AccountsBoxRetriever): Set[CtValidation] =
    collectErrors(validateMoney(value, min = 0))
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC128A.scala | Scala | apache-2.0 | 1,161 |
package breeze.stats
package distributions
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import breeze.numerics._
import breeze.optimize.DiffFunction
import org.apache.commons.math3.distribution.GammaDistribution
import scala.annotation.tailrec
import scala.math.pow
/**
* Represents a Gamma distribution.
* E[X] = shape * scale
*
* @author dlwh
*/
/**
 * Represents a Gamma distribution with shape parameter k (`shape`) and
 * scale parameter θ (`scale`).
 *
 * E[X] = shape * scale, Var[X] = shape * scale^2.
 *
 * @author dlwh
 */
case class Gamma(shape: Double, scale: Double)(implicit rand: RandBasis)
    extends ContinuousDistr[Double]
    with Moments[Double, Double]
    with HasCdf
    with HasInverseCdf {

  if (shape <= 0.0 || scale <= 0.0)
    throw new IllegalArgumentException("Shape and scale must be positive")

  // One shared commons-math backing instance for cdf/inverseCdf/probability;
  // previously a new GammaDistribution was allocated on every call.
  private lazy val backing = new GammaDistribution(shape, scale)

  /** Density at x, handling the x == 0 boundary explicitly per shape regime. */
  override def pdf(x: Double): Double = {
    if (x < 0) {
      0.0
    } else if (x > 0) {
      math.exp(logPdf(x))
    } else {
      // x == 0: density is 0 for k > 1, 1/θ (the normalizer) for k == 1,
      // and diverges for k < 1.
      if (shape > 1.0) {
        0.0
      } else if (shape == 1.0) {
        normalizer
      } else {
        Double.PositiveInfinity
      }
    }
  }

  /** Log of the normalization constant: lnΓ(k) + k·ln θ. */
  lazy val logNormalizer: Double = lgamma(shape) + shape * log(scale)

  override def unnormalizedLogPdf(x: Double) = (shape - 1) * log(x) - x / scale

  override def toString = "Gamma(" + shape + "," + scale + ")"

  /**
   * Draws log(X). For k < 1 the sample is produced directly in log space
   * (numpy-derived algorithm); otherwise it is log of a regular draw.
   */
  def logDraw() =
    if (shape < 1) {
      // adapted from numpy distributions.c which is Copyright 2005 Robert Kern (robert.kern@gmail.com) under BSD
      @tailrec
      def rec: Double = {
        val u = rand.uniform.draw()
        val v = -math.log(rand.uniform.draw())
        val logU = log(u)
        if (logU <= math.log1p(-shape)) {
          val logV = log(v)
          val logX = logU / shape
          if (logX <= logV) logX
          else rec
        } else {
          val y = -log((1 - u) / shape)
          val logX = math.log(1.0 - shape + shape * y) / shape
          if (logX <= math.log(v + y)) logX
          else rec
        }
      }
      rec + math.log(scale)
    } else {
      math.log(draw())
    }

  /**
   * Samples one value: exponential shortcut for k == 1, rejection samplers
   * (Kern / Marsaglia-Tsang style) for k < 1 and k > 1 respectively.
   */
  def draw() = {
    if (shape == 1.0) {
      // Gamma(1, θ) is Exponential(θ): inverse-CDF draw.
      scale * -math.log(rand.uniform.draw())
    } else if (shape < 1.0) {
      // from numpy distributions.c which is Copyright 2005 Robert Kern (robert.kern@gmail.com) under BSD
      @tailrec
      def rec: Double = {
        val u = rand.uniform.draw()
        val v = -math.log(rand.uniform.draw())
        if (u <= 1.0 - shape) {
          val x = pow(u, 1.0 / shape)
          if (x <= v) x
          else rec
        } else {
          val y = -log((1 - u) / shape)
          val x = pow(1.0 - shape + shape * y, 1.0 / shape)
          if (x <= (v + y)) x
          else rec
        }
      }
      scale * rec
    } else {
      // from numpy distributions.c which is Copyright 2005 Robert Kern (robert.kern@gmail.com) under BSD
      val d = shape - 1.0 / 3.0
      val c = 1.0 / math.sqrt(9.0 * d)
      var r = 0.0
      var ok = false
      while (!ok) {
        var v = 0.0
        var x = 0.0
        var continue = true
        while (continue) {
          x = rand.generator.nextGaussian()
          v = 1.0 + c * x
          continue = v <= 0
        }
        v = v * v * v
        val x2 = x * x
        val u = rand.uniform.draw()
        if (u < 1.0 - 0.0331 * (x2 * x2)
          || log(u) < 0.5 * x2 + d * (1.0 - v + log(v))) {
          r = (scale * d * v)
          ok = true
        }
      }
      r
    }
  }

  def mean = shape * scale
  def variance = mean * scale
  /** Mode (k - 1)·θ; only defined for k >= 1. */
  def mode = { require(shape >= 1); mean - scale }

  // Differential entropy: k + ln θ + ln Γ(k) + (1 - k) ψ(k).
  // Fixed: the previous expression `logNormalizer - (shape - 1) * digamma(shape) + shape`
  // dropped the -(shape - 1) * ln θ term and was incorrect whenever scale != 1.
  // Derivation: H = logNormalizer - (k-1) E[log X] + E[X]/θ, with
  // E[log X] = ψ(k) + ln θ and E[X]/θ = k.
  def entropy = logNormalizer - (shape - 1) * (digamma(shape) + log(scale)) + shape

  /** P(x < X <= y), delegated to commons-math. */
  override def probability(x: Double, y: Double): Double =
    backing.probability(x, y)

  /** Quantile function, delegated to commons-math. */
  override def inverseCdf(p: Double): Double =
    backing.inverseCumulativeProbability(p)

  /** CDF, delegated to commons-math. */
  override def cdf(x: Double): Double =
    backing.cumulativeProbability(x)
}
/**
 * Exponential-family machinery for Gamma: sufficient statistics, maximum
 * likelihood estimation (moment-matching start + Newton-Raphson refinement)
 * and the negative log-likelihood with gradients.
 */
object Gamma extends ExponentialFamily[Gamma, Double] with ContinuousDistributionUFuncProvider[Double, Gamma] {
  type Parameter = (Double, Double)
  import breeze.stats.distributions.{SufficientStatistic => BaseSuffStat}

  /**
   * Streaming sufficient statistics: observation count `n`, running mean of
   * log-observations and running mean of observations.
   */
  case class SufficientStatistic(n: Double, meanOfLogs: Double, mean: Double)
    extends BaseSuffStat[SufficientStatistic] {
    // Reweighting scales only the count; the two means are weight-invariant.
    def *(weight: Double) = SufficientStatistic(n * weight, meanOfLogs, mean)
    // Numerically stable merge of two running means (delta-weighted update).
    def +(t: SufficientStatistic) = {
      val delta = t.mean - mean
      val newMean = mean + delta * (t.n / (t.n + n))
      val logDelta = t.meanOfLogs - meanOfLogs
      val newMeanLogs = meanOfLogs + logDelta * (t.n / (t.n + n))
      SufficientStatistic(t.n + n, newMeanLogs, newMean)
    }
  }

  def emptySufficientStatistic = SufficientStatistic(0, 0, 0)

  def sufficientStatisticFor(t: Double) = SufficientStatistic(1, math.log(t), t)

  // change from Timothy Hunter. Thanks!
  /**
   * MLE for (shape, scale): closed-form approximation for the shape followed
   * by Newton-Raphson refinement; the scale follows from mean = k·θ.
   * By Jensen's inequality s = log(mean) - mean(logs) must be positive.
   */
  def mle(ss: SufficientStatistic) = {
    val s = math.log(ss.mean) - ss.meanOfLogs
    assert(s > 0, s) // check concavity
    val k_approx = approx_k(s)
    assert(k_approx > 0, k_approx)
    val k = Nwt_Rph_iter_for_k(k_approx, s)
    val theta = ss.mean / (k)
    (k, theta)
  }

  /*
   * s = log( x_hat) - log_x_hat
   */
  /** Closed-form initial estimate of the shape k from s. */
  def approx_k(s: Double): Double = {
    // correct within 1.5%
    (3 - s + math.sqrt(math.pow((s - 3), 2) + 24 * s)) / (12 * s)
  }

  private val MaxIter = 50

  /** Newton-Raphson refinement of k; stops on convergence or after MaxIter steps. */
  private def Nwt_Rph_iter_for_k(k: Double, s: Double, iter: Int = 0): Double = {
    /*
     * For a more precise estimate, use Newton-Raphson updates
     */
    val k_new = k - (math.log(k) - digamma(k) - s) / (1.0 / k - trigamma(k))
    if (closeTo(k, k_new) || iter >= MaxIter)
      k_new
    else
      Nwt_Rph_iter_for_k(k_new, s, iter + 1)
  }

  override def distribution(p: Parameter)(implicit rand: RandBasis) = new Gamma(p._1, p._2)

  /**
   * Negative log-likelihood of the data summarized by `stats`, with partial
   * derivatives w.r.t. shape (a) and scale (b), for gradient-based optimizers.
   */
  def likelihoodFunction(stats: SufficientStatistic) = new DiffFunction[(Double, Double)] {
    val SufficientStatistic(n, meanOfLogs, mean) = stats
    def calculate(x: (Double, Double)) = {
      val (a, b) = x
      val obj = -n * ((a - 1) * meanOfLogs - lgamma(a) - a * log(b) - mean / b)
      val gradA = -n * (meanOfLogs - digamma(a) - log(b))
      val gradB = -n * (-a / b + mean / b / b)
      (obj, (gradA, gradB))
    }
  }
}
| scalanlp/breeze | math/src/main/scala/breeze/stats/distributions/Gamma.scala | Scala | apache-2.0 | 7,304 |
package nl.iljabooij.garmintrainer.gui
import java.awt.{Color,Graphics,Graphics2D,GraphicsEnvironment}
import java.beans.{PropertyChangeEvent,PropertyChangeListener}
import scala.swing._
import com.google.inject.Inject
import nl.iljabooij.garmintrainer.gui.chart.AltitudeDiagramPainter
import nl.iljabooij.garmintrainer.model.{Activity,ApplicationState}
/**
 * Swing component that renders the altitude diagram for the currently
 * selected activity, repainting whenever the current activity changes.
 */
class ScalaChartComponent @Inject()
  (val applicationState: ApplicationState,
   val altitudeDiagramPainter: AltitudeDiagramPainter)
  extends Component with LoggerHelper {

  // Repaint on every activity change; the new activity itself is re-read
  // from applicationState during paintComponent.
  applicationState.addActivityChangeListener(updateChart)

  private def updateChart(activityOption: Option[Activity]) = repaint

  /** Java-beans style listener that triggers a repaint on any property change. */
  def reactToNewActivity: PropertyChangeListener = {
    new PropertyChangeListener {
      def propertyChange(event: PropertyChangeEvent) = {
        repaint
      }
    }
  }

  protected override def paintComponent(g: Graphics) = {
    val g2d = g.asInstanceOf[Graphics2D]
    g2d.setBackground(Color.white)
    g2d.clearRect(0, 0, size.width, size.height)

    // Nothing to draw when no activity is loaded; background stays white.
    val currentActivity = applicationState.currentActivity
    if (currentActivity.isDefined) {
      drawChart(g2d, currentActivity.get)
    }
  }

  /** Paints the diagram into an off-screen image and blits it onto the component. */
  private def drawChart(g2d: Graphics2D, activity: Activity) = {
    val graphImage = GraphicsEnvironment
      .getLocalGraphicsEnvironment().getDefaultScreenDevice()
      .getDefaultConfiguration().createCompatibleImage(
        size.width, size.height)
    val imageGraphics = graphImage.createGraphics
    try {
      imageGraphics.setBackground(Color.white)
      imageGraphics.clearRect(0, 0, graphImage.getWidth, graphImage.getHeight)
    } finally {
      // Graphics contexts hold native resources; they must be disposed
      // explicitly (was leaked before). Also removed a stray debug println.
      imageGraphics.dispose()
    }
    altitudeDiagramPainter.paintDiagram(activity, graphImage)
    g2d.drawImage(graphImage, 0, 0, null)
  }
}
| chmandrade/garmintrainer | src/main/scala/nl/iljabooij/garmintrainer/gui/ScalaChartComponent.scala | Scala | gpl-3.0 | 1,697 |
/*
* Derived from https://github.com/spray/spray/blob/v1.1-M7/spray-http/src/main/scala/spray/http/parser/AcceptEncodingHeader.scala
*
* Copyright (C) 2011-2012 spray.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
package parser
import org.parboiled2._
import ContentCoding._
import org.http4s.headers.`Accept-Encoding`
import org.http4s.util.CaseInsensitiveString
/** Parboiled2-based parser for the HTTP Accept-Encoding request header. */
private[parser] trait AcceptEncodingHeader {
  /** Parses a raw header value into a typed `Accept-Encoding`, or a ParseResult failure. */
  def ACCEPT_ENCODING(value: String): ParseResult[`Accept-Encoding`] =
    new AcceptEncodingParser(value).parse

  private class AcceptEncodingParser(input: ParserInput) extends Http4sHeaderParser[`Accept-Encoding`](input) {
    // Grammar: 1#( codings [ ";" "q" "=" qvalue ] ) — a non-empty,
    // comma-separated list, so head/tail below are safe.
    def entry: Rule1[`Accept-Encoding`] = rule {
      oneOrMore(EncodingRangeDecl).separatedBy(ListSep) ~ EOL ~> { xs: Seq[ContentCoding] =>
        `Accept-Encoding`(xs.head, xs.tail: _*)
      }
    }

    // One coding with its quality; q == 1 keeps the canonical instance
    // instead of allocating a copy via withQValue.
    def EncodingRangeDecl: Rule1[ContentCoding] = rule {
      (EncodingRangeDef ~ EncodingQuality) ~> { (coding: ContentCoding, q: QValue) =>
        if (q == org.http4s.QValue.One) coding
        else coding.withQValue(q)
      }
    }

    // Either the "*" wildcard or a token resolved (case-insensitively)
    // against the registered content codings.
    def EncodingRangeDef: Rule1[ContentCoding] = rule {
      "*" ~ push(`*`) | Token ~> { s: String =>
        val cis = CaseInsensitiveString(s)
        org.http4s.ContentCoding.getOrElseCreate(cis)
      }
    }

    // Optional ";q=..." parameter; defaults to q = 1 when absent (the
    // alternative branch only pushes, consuming no input).
    def EncodingQuality: Rule1[QValue] = rule {
      ";" ~ OptWS ~ "q" ~ "=" ~ QValue | push(org.http4s.QValue.One)
    }
  }
}
| m4dc4p/http4s | core/src/main/scala/org/http4s/parser/AcceptEncodingHeader.scala | Scala | apache-2.0 | 1,965 |
package sylvestris.slick
import scalaz.{ \\/, EitherT }
import scalaz.std, std.anyVal._, std.list._
import scalaz.syntax._, either._, equal._, traverse._
import scala.slick.ast.ColumnOption.DBType
import scala.slick.driver.PostgresDriver.simple.{ Tag => _, _ }
import scala.slick.jdbc.meta.MTable
import spray.json._
import sylvestris.core, core._
import sylvestris.slick.SlickGraph._
// TODO : update to slick 3.0
// TODO : .transact
/**
 * Slick-backed implementation of the sylvestris Graph API. All operations
 * run against the implicit session and surface failures as Error values
 * rather than thrown exceptions (see `slick { ... }`).
 */
class SlickGraph(implicit session: Session) extends Graph {
  // Create the backing tables on first use if they do not exist yet.
  for (t <- List(slickNodes, slickEdges) if MTable.getTables(t.baseTableRow.tableName).list.isEmpty) {
    t.ddl.create
  }

  /** All nodes of type T, or the accumulated decode errors. */
  def nodes[T : NodeManifest](): EitherT[GraphM, List[Error], Set[Node[T]]] = EitherTGraphM {
    // Fixed: filter by T's tag (as getNode does). Previously every row was
    // decoded as T, so the presence of any other node type made this fail.
    slickNodes
      .filter(_.tag === NodeManifest[T].tag.v)
      .list
      .map(slickNodeToNode[T])
      .sequenceU
      .bimap(List(_), _.toSet)
  }

  /** The unique node of type T with the given id; errors on absence or duplicates. */
  def getNode[T : NodeManifest](id: Id): EitherT[GraphM, Error, Node[T]] = slick {
    slickNodes
      .filter(d => d.id === id.v && d.tag === NodeManifest[T].tag.v)
      .list
      .map(slickNodeToNode[T])
      .sequenceU
      .flatMap {
        case h :: Nil => h.right
        case Nil => Error(s"$id not found").left
        case nodes => Error(s"more than one node found for $id, $nodes").left
      }
  }

  /** Inserts a node; fails if a node with the same id already exists (any tag). */
  def addNode[T : NodeManifest](node: Node[T]): EitherT[GraphM, Error, Node[T]] = slick {
    if (slickNodes.filter(_.id === node.id.v).run.nonEmpty) {
      Error(s"$node already defined").left
    }
    else {
      slickNodes += nodeToSlickNode(node)
      node.right
    }
  }

  /** Replaces the stored row for node.id; errors unless exactly one row was updated. */
  def updateNode[T : NodeManifest](node: Node[T]): EitherT[GraphM, Error, Node[T]] = slick {
    val updatedCount = slickNodes.filter(_.id === node.id.v).update(nodeToSlickNode(node))
    if (updatedCount =/= 1) Error(s"updated $updatedCount for $node").left
    else node.right
  }

  /** Loads then deletes the node, returning the removed value. */
  def removeNode[T : NodeManifest](id: Id): EitherT[GraphM, Error, Node[T]] =
    getNode(id).flatMap { node => slick {
      val deletedCount = slickNodes.filter(_.id === id.v).delete
      if (deletedCount < 1) Error(s"$id not deleted").left
      else node.right
    }}

  /** All edges leaving (id, tag). */
  def getEdges(id: Id, tag: Tag): EitherT[GraphM, Error, Set[Edge]] = slick {
    filterEdgesQuery(id, tag)
      .list
      .map(slickEdgeToEdge)
      .toSet
      .right
  }

  /** Edges from (idA, tagA) to any node tagged tagB, optionally restricted by label. */
  def getEdges(label: Option[Label], idA: Id, tagA: Tag, tagB: Tag): EitherT[GraphM, Error, Set[Edge]] = slick {
    filterEdgesQuery(label, idA, tagA, tagB)
      .list
      .map(slickEdgeToEdge)
      .toSet
      .right
  }

  /** Bulk-inserts the given edges. */
  def addEdges(edges: Set[Edge]): EitherT[GraphM, Error, Set[Edge]] = slick {
    slickEdges ++= edges.map(edgeToSlickEdge)
    edges.right
  }

  /** Deletes exactly the given edges; errors unless every edge was deleted. */
  def removeEdges(edges: Set[Edge]): EitherT[GraphM, Error, Set[Edge]] = slick {
    // Fixed: reduce on an empty set throws UnsupportedOperationException;
    // an empty request is trivially satisfied.
    if (edges.isEmpty) edges.right
    else {
      val deletedCount = edges.map(filterEdgesQuery).reduce(_++_).delete
      if (deletedCount =/= edges.size) Error(s"$deletedCount of ${edges.size} deleted, ${edges}").left
      else edges.right
    }
  }

  /** Deletes all edges from (idA, tagA) to nodes tagged tagB, returning the removed edges. */
  def removeEdges(idA: Id, tagA: Tag, tagB: Tag): EitherT[GraphM, Error, Set[Edge]] =
    getEdges(None, idA, tagA, tagB).flatMap { edges => slick {
      val deletedCount = filterEdgesQuery(idA, tagA, tagB).delete
      if (deletedCount =/= edges.size) Error(s"$deletedCount of ${edges.size} deleted, ${edges}").left
      else edges.right
    }}
}
/**
 * Slick table definitions and row <-> domain conversions for SlickGraph.
 */
object SlickGraph {
  import scala.slick.driver.PostgresDriver.simple.Tag

  /** Raw node row: id, type tag and JSON-serialized content. */
  case class SlickNode(id: String, tag: String, content: String)

  // TODO : migration
  // - rename table documents → nodes
  // - rename nodes : poid → id
  // - rename nodes : type → tag
  // - rename edges : name → label
  // - rename edges : from → a_id
  // - add column a_tag to edges
  // - rename edges : to → b_id
  // - add column b_tag to edges
  class SlickNodes(t: Tag) extends Table[SlickNode](t, "nodes") {
    def id = column[String]("id", O.PrimaryKey)
    def tag = column[String]("tag")
    // TEXT column: node content is stored as JSON of unbounded length.
    def content = column[String]("content", DBType("TEXT"))
    def * = (id, tag, content) <> (SlickNode.tupled, SlickNode.unapply)
    def idxType = index("idx_type", tag)
  }
  val slickNodes = TableQuery[SlickNodes]

  // TODO : update variable names to be in line with Edge
  /** Raw edge row: optional label plus (id, tag) for each endpoint. */
  case class SlickEdge(label: Option[String], idA: String, tagA: String, idB: String, tagB: String)

  class SlickEdges(t: Tag) extends Table[SlickEdge](t, "edges") {
    def label = column[Option[String]]("label")
    def idA = column[String]("a_id")
    def tagA = column[String]("a_tag")
    def idB = column[String]("b_id")
    def tagB = column[String]("b_tag")
    def * = (label, idA, tagA, idB, tagB) <> (SlickEdge.tupled, SlickEdge.unapply)
    // TODO : do we want delete cascade?
    // NOTE(review): constraint names are inconsistent ("a_fk" vs "to_fk") —
    // presumably leftovers from the from/to naming; verify before renaming.
    def aFk = foreignKey("a_fk", idA, slickNodes)(_.id, onDelete = ForeignKeyAction.Cascade)
    def bFk = foreignKey("to_fk", idB, slickNodes)(_.id, onDelete = ForeignKeyAction.Cascade)
    def idx = index("idx_all", (label, idA, tagA, idB, tagB), unique = true)
    def idxA = index("idx_a", (idA, tagA))
    def idxB = index("idx_b", (idB, tagB))
  }
  val slickEdges = TableQuery[SlickEdges]

  /** Query for all edges leaving (idA, tagA). */
  def filterEdgesQuery(idA: Id, tagA: core.Tag)
    : Query[SlickEdges, SlickEdges#TableElementType, Seq] =
    filterEdgesQuery(None, idA, tagA, None, None)

  /** Query for edges from (idA, tagA) to any node tagged tagB. */
  def filterEdgesQuery(idA: Id, tagA: core.Tag, tagB: core.Tag): Query[SlickEdges, SlickEdges#TableElementType, Seq] =
    filterEdgesQuery(None, idA, tagA, None, Some(tagB))

  /** Query for labeled edges from (idA, tagA) to nodes tagged tagB. */
  def filterEdgesQuery(label: Option[Label], idA: Id, tagA: core.Tag, tagB: core.Tag)
    : Query[SlickEdges, SlickEdges#TableElementType, Seq] =
    filterEdgesQuery(label, idA, tagA, None, Some(tagB))

  /** Query matching exactly the given edge. */
  def filterEdgesQuery(edge: Edge): Query[SlickEdges, SlickEdges#TableElementType, Seq] =
    filterEdgesQuery(edge.label, edge.idA, edge.tagA, Some(edge.idB), Some(edge.tagB))

  /**
   * Master query builder: mandatory (idA, tagA) filter, with each optional
   * criterion narrowing the query only when present.
   */
  def filterEdgesQuery(label: Option[Label], idA: Id, tagA: core.Tag, idB: Option[Id], tagB: Option[core.Tag])
    : Query[SlickEdges, SlickEdges#TableElementType, Seq] = {
    val q1 = slickEdges.filter(e => e.idA === idA.v && e.tagA === tagA.v)
    val q2 = label.fold(q1)(l => q1.filter(_.label === label.map(_.v)))
    val q3 = idB.fold(q2)(i => q2.filter(_.idB === i.v))
    tagB.fold(q3)(t => q3.filter(_.tagB === t.v))
  }

  /** Decodes a row's JSON content into a typed Node, reporting parse failures as Error. */
  def slickNodeToNode[T : NodeManifest](v: SlickNode): Error \/ Node[T] =
    \/.fromTryCatchNonFatal(v.content.parseJson.convertTo[T](NodeManifest[T].jsonFormat))
      .bimap(t => Error(s"unable to parse $v to Node", Some(t)), Node[T](Id(v.id), _))

  /** Encodes a typed Node into a row, serializing content via its JsonFormat. */
  def nodeToSlickNode[T : NodeManifest](v: Node[T]): SlickNode =
    SlickNode(v.id.v, NodeManifest[T].tag.v, v.content.toJson(NodeManifest[T].jsonFormat).compactPrint)

  def slickEdgeToEdge(v: SlickEdge): Edge = Edge(v.label.map(Label(_)), Id(v.idA), Tag(v.tagA), Id(v.idB), Tag(v.tagB))

  def edgeToSlickEdge(v: Edge): SlickEdge = SlickEdge(v.label.map(_.v), v.idA.v, v.tagA.v, v.idB.v, v.tagB.v)

  /** Runs a slick operation, converting any non-fatal exception into an Error. */
  def slick[T](op: => Error \/ T): EitherT[GraphM, Error, T] = EitherTGraphM {
    \/.fromTryCatchNonFatal(op).fold(e => Error("unhandled slick error", Some(e)).left, identity)
  }
}
| drostron/sylvestris | slick/src/main/scala/sylvestris/slick/SlickGraph.scala | Scala | mit | 7,000 |
/**
* This file is part of the TA Buddy project.
* Copyright (c) 2012-2015 Alexey Aksenov ezh@ezh.msk.ru
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Affero General Global License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED
* BY Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS»,
* Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS» DISCLAIMS
* THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Global License for more details.
* You should have received a copy of the GNU Affero General Global License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://www.gnu.org/licenses/agpl.html
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Global License.
*
* In accordance with Section 7(b) of the GNU Affero General Global License,
* you must retain the producer line in every report, form or document
* that is created or manipulated using TA Buddy.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the TA Buddy software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers,
* serving files in a web or/and network application,
* shipping TA Buddy with a closed source product.
*
* For more information, please contact Digimead Team at this
* address: ezh@ezh.msk.ru
*/
package org.digimead.tabuddy.desktop.model.definition.ui.action
import javax.inject.Inject
import org.digimead.digi.lib.aop.log
import org.digimead.digi.lib.log.api.XLoggable
import org.digimead.tabuddy.desktop.core.definition.{ Context, Operation }
import org.digimead.tabuddy.desktop.core.support.App
import org.digimead.tabuddy.desktop.core.ui.definition.widget.{ AppWindow, VComposite }
import org.digimead.tabuddy.desktop.logic.{ Messages ⇒ LogicMessages }
import org.digimead.tabuddy.desktop.logic.operation.OperationModifyTypeSchemaList
import org.digimead.tabuddy.desktop.logic.payload.TypeSchema
import org.digimead.tabuddy.desktop.logic.payload.marker.GraphMarker
import org.digimead.tabuddy.desktop.model.definition.bundleId
import org.eclipse.core.runtime.jobs.Job
import org.eclipse.e4.core.contexts.Active
import org.eclipse.e4.core.di.annotations.Optional
import org.eclipse.jface.action.{ Action ⇒ JFaceAction, IAction }
import org.eclipse.swt.widgets.Event
/**
* Modify type schema list.
*/
/**
 * Modify type schema list.
 *
 * JFace action bound to a window context; enabled only while the active
 * view exposes a GraphMarker. Running it launches the
 * OperationModifyTypeSchemaList operation and persists the result.
 */
class ActionModifyTypeSchemaList @Inject() (windowContext: Context) extends JFaceAction(LogicMessages.types_text) with XLoggable {
  setId(ActionModifyTypeSchemaList.id)
  /** Flag indicating whether the action is enabled. */
  // Context of the currently active view, if any; drives isEnabled.
  @volatile protected var vContext = Option.empty[Context]

  // The injected context must belong to an application window.
  if (windowContext.getLocal(classOf[AppWindow]) == null)
    throw new IllegalArgumentException(s"${windowContext} does not contain AppWindow.")

  /** Enabled only when the active view context provides a GraphMarker. */
  override def isEnabled(): Boolean = super.isEnabled &&
    vContext.map { context ⇒ context.getActive(classOf[GraphMarker]) != null }.getOrElse(false)

  /** Runs this action, passing the triggering SWT event. */
  @log
  override def runWithEvent(event: Event) = for {
    context ← vContext
    marker ← Option(context.get(classOf[GraphMarker]))
  } marker.safeRead { state ⇒
    // Snapshot schemas on the UI thread, then hand them to the operation.
    val (allTypeSchemas, activeTypeSchema) = App.execNGet { (state.payload.typeSchemas.values.toSet, state.payload.typeSchema.value) }
    OperationModifyTypeSchemaList(state.graph, allTypeSchemas, activeTypeSchema).foreach { operation ⇒
      // Prefer redo of a previously executed operation; fall back to a fresh execute.
      val job = if (operation.canRedo())
        Some(operation.redoJob())
      else if (operation.canExecute())
        Some(operation.executeJob())
      else
        None
      job match {
        case Some(job) ⇒
          job.setPriority(Job.LONG)
          job.onComplete(_ match {
            case Operation.Result.OK(result, message) ⇒
              log.info(s"Operation completed successfully: ${result}")
              // Persist the edited schemas and switch the active one on the UI thread.
              result.foreach {
                case (schemas, activeSchema) ⇒ App.exec {
                  TypeSchema.save(marker, schemas)
                  state.payload.typeSchema.value = activeSchema
                }
              }
            case Operation.Result.Cancel(message) ⇒
              log.warn(s"Operation canceled, reason: ${message}.")
            case other ⇒
              log.error(s"Unable to complete operation: ${other}.")
          }).schedule()
        case None ⇒
          throw new RuntimeException(s"Unable to create job for ${operation}.")
      }
    }
  }

  /** Update enabled action state. */
  @log
  protected def updateEnabled() = if (isEnabled)
    firePropertyChange(IAction.ENABLED, java.lang.Boolean.FALSE, java.lang.Boolean.TRUE)
  else
    firePropertyChange(IAction.ENABLED, java.lang.Boolean.TRUE, java.lang.Boolean.FALSE)

  /** Invoked on view activation. */
  // NOTE(review): this synchronizes on ActionModifyElementTemplateList (a
  // sibling action) rather than this class's companion — presumably a
  // copy-paste leftover; verify the intended lock.
  @Inject @Optional
  protected def onViewChanged(@Active @Optional vComposite: VComposite, @Active @Optional marker: GraphMarker) =
    ActionModifyElementTemplateList.synchronized {
      // NOTE(review): the for-comprehension binds `composite` but then uses
      // `vComposite` directly; behavior is the same, but the binding is unused.
      val newContext = {
        for {
          composite ← Option(vComposite)
          marker ← Option(marker)
        } yield vComposite.getContext
      } getOrElse None
      if (newContext != vContext) {
        vContext = newContext
        App.exec { updateEnabled() }
      }
    }
}
/** Companion: shared identifier for [[ActionModifyTypeSchemaList]] instances. */
object ActionModifyTypeSchemaList {
  /** Unique JFace action id, scoped to this bundle. */
  val id = s"${bundleId}#ModifyTypeSchemaList"
}
| digimead/digi-TABuddy-desktop | part-model-definition/src/main/scala/org/digimead/tabuddy/desktop/model/definition/ui/action/ActionModifyTypeSchemaList.scala | Scala | agpl-3.0 | 6,227 |
package jgo.tools.compiler
package parser
package scoped
import scope._
import interm._
import symbol._
/** A [[Scoped]] parser component whose current scope accepts new bindings. */
trait GrowablyScoped extends Scoped {
  self: Base =>

  /** The scope into which new symbols are placed. */
  protected def growable: GrowableScope

  /**
   * Attempts to bind `name` to `target` in the current scope, yielding the
   * bound symbol on success and a positioned error if the name is already
   * taken here.
   *
   * NOTE(author): the result type may later become Err[Unit] rather than
   * Err[S], since callers appear to always discard the symbol (following
   * bind with `withResult ()` or equivalent).
   */
  protected def bind[S <: Symbol](name: String, target: S)(implicit pos: Pos): Err[S] =
    if (growable.alreadyDefined(name))
      problem("symbol `%s' already defined in current scope", name)
    else {
      growable.put(name, target)
      result(target)
    }
}
| thomasmodeneis/jgo | src/src/main/scala/jgo/tools/compiler/parser/scoped/GrowablyScoped.scala | Scala | gpl-3.0 | 689 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.commons.util
import io.gatling.BaseSpec
import io.gatling.commons.util.PathHelper._
/** Unit tests for the path enrichments provided by [[PathHelper]]. */
class PathHelperSpec extends BaseSpec {

  // Base path used by every test case below.
  val root = string2path("foo")

  "ancestor" should "throw an IllegalArgumentException when ancestor rank is negative" in {
    an[IllegalArgumentException] should be thrownBy root.ancestor(-1)
  }

  it should "throw an IllegalArgumentException when asked rank > nb of parents" in {
    an[IllegalArgumentException] should be thrownBy (root / "bar").ancestor(3)
  }

  // Fix: test description typo "otherwiser" -> "otherwise".
  it should "get the parent of rank n otherwise" in {
    (root / "foo" / "bar").ancestor(1) shouldBe (root / "foo")
  }

  "extension" should "return an empty String when the specified path has no extension" in {
    root.extension shouldBe ""
  }

  it should "return the file extension if the specified path has one" in {
    (root / "foo.json").extension shouldBe "json"
  }

  "hasExtension" should "return true if the file has one of the specified extension, ignoring case" in {
    (root / "foo.json").hasExtension("json") shouldBe true
    (root / "foo.json").hasExtension("JSON") shouldBe true
    (root / "foo.json").hasExtension("sql", "mp3", "JSON") shouldBe true
  }

  it should "return false if the file has none of the specified extensions" in {
    (root / "foo.json").hasExtension("sql") shouldBe false
  }

  "stripExtension" should "not modify the path if it has no extension" in {
    root.stripExtension shouldBe "foo"
  }

  it should "remove the file extension if the specified path has one" in {
    string2path("foo.json").stripExtension shouldBe "foo"
  }
}
| ryez/gatling | gatling-commons/src/test/scala/io/gatling/commons/util/PathHelperSpec.scala | Scala | apache-2.0 | 2,219 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.memory
import org.scalatest.PrivateMethodTester
import org.apache.spark.SparkConf
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Tests._
import org.apache.spark.storage.TestBlockId
import org.apache.spark.storage.memory.MemoryStore
/**
 * Tests for [[UnifiedMemoryManager]]: the dynamic sharing of a single memory
 * region between execution and storage, eviction of cached blocks when
 * execution needs room, the one-way nature of that borrowing (storage never
 * evicts execution), and configuration/sizing edge cases.
 */
class UnifiedMemoryManagerSuite extends MemoryManagerSuite with PrivateMethodTester {
  // Block id used for every storage request in these tests.
  private val dummyBlock = TestBlockId("--")
  // Fraction of unified memory initially reserved for storage (spark.memory.storageFraction).
  private val storageFraction: Double = 0.5
  /**
   * Make a [[UnifiedMemoryManager]] and a [[MemoryStore]] with limited class dependencies.
   */
  private def makeThings(maxMemory: Long): (UnifiedMemoryManager, MemoryStore) = {
    val mm = createMemoryManager(maxMemory)
    val ms = makeMemoryStore(mm)
    (mm, ms)
  }
  // Builds a manager whose usable memory equals exactly the test sizes above
  // (MEMORY_FRACTION = 1.0 so no overhead is subtracted).
  override protected def createMemoryManager(
      maxOnHeapExecutionMemory: Long,
      maxOffHeapExecutionMemory: Long): UnifiedMemoryManager = {
    val conf = new SparkConf()
      .set(MEMORY_FRACTION, 1.0)
      .set(TEST_MEMORY, maxOnHeapExecutionMemory)
      .set(MEMORY_OFFHEAP_SIZE, maxOffHeapExecutionMemory)
      .set(MEMORY_STORAGE_FRACTION, storageFraction)
    UnifiedMemoryManager(conf, numCores = 1)
  }
  test("basic execution memory") {
    val maxMemory = 1000L
    val taskAttemptId = 0L
    val (mm, _) = makeThings(maxMemory)
    val memoryMode = MemoryMode.ON_HEAP
    assert(mm.executionMemoryUsed === 0L)
    assert(mm.acquireExecutionMemory(10L, taskAttemptId, memoryMode) === 10L)
    assert(mm.executionMemoryUsed === 10L)
    assert(mm.acquireExecutionMemory(100L, taskAttemptId, memoryMode) === 100L)
    // Acquire up to the max
    assert(mm.acquireExecutionMemory(1000L, taskAttemptId, memoryMode) === 890L)
    assert(mm.executionMemoryUsed === maxMemory)
    assert(mm.acquireExecutionMemory(1L, taskAttemptId, memoryMode) === 0L)
    assert(mm.executionMemoryUsed === maxMemory)
    mm.releaseExecutionMemory(800L, taskAttemptId, memoryMode)
    assert(mm.executionMemoryUsed === 200L)
    // Acquire after release
    assert(mm.acquireExecutionMemory(1L, taskAttemptId, memoryMode) === 1L)
    assert(mm.executionMemoryUsed === 201L)
    // Release beyond what was acquired
    mm.releaseExecutionMemory(maxMemory, taskAttemptId, memoryMode)
    assert(mm.executionMemoryUsed === 0L)
  }
  test("basic storage memory") {
    val maxMemory = 1000L
    val (mm, ms) = makeThings(maxMemory)
    val memoryMode = MemoryMode.ON_HEAP
    assert(mm.storageMemoryUsed === 0L)
    assert(mm.acquireStorageMemory(dummyBlock, 10L, memoryMode))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.storageMemoryUsed === 10L)
    assert(mm.acquireStorageMemory(dummyBlock, 100L, memoryMode))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.storageMemoryUsed === 110L)
    // Acquire more than the max, not granted
    assert(!mm.acquireStorageMemory(dummyBlock, maxMemory + 1L, memoryMode))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.storageMemoryUsed === 110L)
    // Acquire up to the max, requests after this are still granted due to LRU eviction
    assert(mm.acquireStorageMemory(dummyBlock, maxMemory, memoryMode))
    assertEvictBlocksToFreeSpaceCalled(ms, 110L)
    assert(mm.storageMemoryUsed === 1000L)
    assert(evictedBlocks.nonEmpty)
    evictedBlocks.clear()
    assert(mm.acquireStorageMemory(dummyBlock, 1L, memoryMode))
    assertEvictBlocksToFreeSpaceCalled(ms, 1L)
    assert(evictedBlocks.nonEmpty)
    evictedBlocks.clear()
    // Note: We evicted 1 byte to put another 1-byte block in, so the storage memory used remains at
    // 1000 bytes. This is different from real behavior, where the 1-byte block would have evicted
    // the 1000-byte block entirely. This is set up differently so we can write finer-grained tests.
    assert(mm.storageMemoryUsed === 1000L)
    mm.releaseStorageMemory(800L, memoryMode)
    assert(mm.storageMemoryUsed === 200L)
    // Acquire after release
    assert(mm.acquireStorageMemory(dummyBlock, 1L, memoryMode))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.storageMemoryUsed === 201L)
    mm.releaseAllStorageMemory()
    assert(mm.storageMemoryUsed === 0L)
    assert(mm.acquireStorageMemory(dummyBlock, 1L, memoryMode))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.storageMemoryUsed === 1L)
    // Release beyond what was acquired
    mm.releaseStorageMemory(100L, memoryMode)
    assert(mm.storageMemoryUsed === 0L)
  }
  // Execution may reclaim memory that storage borrowed beyond its reserved fraction.
  test("execution evicts storage") {
    val maxMemory = 1000L
    val taskAttemptId = 0L
    val (mm, ms) = makeThings(maxMemory)
    val memoryMode = MemoryMode.ON_HEAP
    // Acquire enough storage memory to exceed the storage region
    assert(mm.acquireStorageMemory(dummyBlock, 750L, memoryMode))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.executionMemoryUsed === 0L)
    assert(mm.storageMemoryUsed === 750L)
    // Execution needs to request 250 bytes to evict storage memory
    assert(mm.acquireExecutionMemory(100L, taskAttemptId, memoryMode) === 100L)
    assert(mm.executionMemoryUsed === 100L)
    assert(mm.storageMemoryUsed === 750L)
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    // Execution wants 200 bytes but only 150 are free, so storage is evicted
    assert(mm.acquireExecutionMemory(200L, taskAttemptId, memoryMode) === 200L)
    assert(mm.executionMemoryUsed === 300L)
    assert(mm.storageMemoryUsed === 700L)
    assertEvictBlocksToFreeSpaceCalled(ms, 50L)
    assert(evictedBlocks.nonEmpty)
    evictedBlocks.clear()
    mm.releaseAllStorageMemory()
    require(mm.executionMemoryUsed === 300L)
    require(mm.storageMemoryUsed === 0, "bad test: all storage memory should have been released")
    // Acquire some storage memory again, but this time keep it within the storage region
    assert(mm.acquireStorageMemory(dummyBlock, 400L, memoryMode))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.storageMemoryUsed === 400L)
    assert(mm.executionMemoryUsed === 300L)
    // Execution cannot evict storage because the latter is within the storage fraction,
    // so grant only what's remaining without evicting anything, i.e. 1000 - 300 - 400 = 300
    assert(mm.acquireExecutionMemory(400L, taskAttemptId, memoryMode) === 300L)
    assert(mm.executionMemoryUsed === 600L)
    assert(mm.storageMemoryUsed === 400L)
    assertEvictBlocksToFreeSpaceNotCalled(ms)
  }
  test("execution memory requests smaller than free memory should evict storage (SPARK-12165)") {
    val maxMemory = 1000L
    val taskAttemptId = 0L
    val (mm, ms) = makeThings(maxMemory)
    val memoryMode = MemoryMode.ON_HEAP
    // Acquire enough storage memory to exceed the storage region size
    assert(mm.acquireStorageMemory(dummyBlock, 700L, memoryMode))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.executionMemoryUsed === 0L)
    assert(mm.storageMemoryUsed === 700L)
    // SPARK-12165: previously, MemoryStore would not evict anything because it would
    // mistakenly think that the 300 bytes of free space was still available even after
    // using it to expand the execution pool. Consequently, no storage memory was released
    // and the following call granted only 300 bytes to execution.
    assert(mm.acquireExecutionMemory(500L, taskAttemptId, memoryMode) === 500L)
    assertEvictBlocksToFreeSpaceCalled(ms, 200L)
    assert(mm.storageMemoryUsed === 500L)
    assert(mm.executionMemoryUsed === 500L)
    assert(evictedBlocks.nonEmpty)
  }
  // The borrowing is one-way: storage can never force execution to give memory back.
  test("storage does not evict execution") {
    val maxMemory = 1000L
    val taskAttemptId = 0L
    val (mm, ms) = makeThings(maxMemory)
    val memoryMode = MemoryMode.ON_HEAP
    // Acquire enough execution memory to exceed the execution region
    assert(mm.acquireExecutionMemory(800L, taskAttemptId, memoryMode) === 800L)
    assert(mm.executionMemoryUsed === 800L)
    assert(mm.storageMemoryUsed === 0L)
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    // Storage should not be able to evict execution
    assert(mm.acquireStorageMemory(dummyBlock, 100L, memoryMode))
    assert(mm.executionMemoryUsed === 800L)
    assert(mm.storageMemoryUsed === 100L)
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(!mm.acquireStorageMemory(dummyBlock, 250L, memoryMode))
    assert(mm.executionMemoryUsed === 800L)
    assert(mm.storageMemoryUsed === 100L)
    // Do not attempt to evict blocks, since evicting will not free enough memory:
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    mm.releaseExecutionMemory(maxMemory, taskAttemptId, memoryMode)
    mm.releaseStorageMemory(maxMemory, memoryMode)
    // Acquire some execution memory again, but this time keep it within the execution region
    assert(mm.acquireExecutionMemory(200L, taskAttemptId, memoryMode) === 200L)
    assert(mm.executionMemoryUsed === 200L)
    assert(mm.storageMemoryUsed === 0L)
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    // Storage should still not be able to evict execution
    assert(mm.acquireStorageMemory(dummyBlock, 750L, memoryMode))
    assert(mm.executionMemoryUsed === 200L)
    assert(mm.storageMemoryUsed === 750L)
    assertEvictBlocksToFreeSpaceNotCalled(ms) // since there were 800 bytes free
    assert(!mm.acquireStorageMemory(dummyBlock, 850L, memoryMode))
    assert(mm.executionMemoryUsed === 200L)
    assert(mm.storageMemoryUsed === 750L)
    // Do not attempt to evict blocks, since evicting will not free enough memory:
    assertEvictBlocksToFreeSpaceNotCalled(ms)
  }
  // Sizing: usable memory = (system - reserved) * fraction; too-small heaps are rejected.
  test("small heap") {
    val systemMemory = 1024L * 1024
    val reservedMemory = 300L * 1024
    val memoryFraction = 0.8
    val conf = new SparkConf()
      .set(MEMORY_FRACTION, memoryFraction)
      .set(TEST_MEMORY, systemMemory)
      .set(TEST_RESERVED_MEMORY, reservedMemory)
    val mm = UnifiedMemoryManager(conf, numCores = 1)
    val expectedMaxMemory = ((systemMemory - reservedMemory) * memoryFraction).toLong
    assert(mm.maxHeapMemory === expectedMaxMemory)
    // Try using a system memory that's too small
    val conf2 = conf.clone().set(TEST_MEMORY, reservedMemory / 2)
    val exception = intercept[IllegalArgumentException] {
      UnifiedMemoryManager(conf2, numCores = 1)
    }
    assert(exception.getMessage.contains("increase heap size"))
  }
  test("insufficient executor memory") {
    val systemMemory = 1024L * 1024
    val reservedMemory = 300L * 1024
    val memoryFraction = 0.8
    val conf = new SparkConf()
      .set(MEMORY_FRACTION, memoryFraction)
      .set(TEST_MEMORY, systemMemory)
      .set(TEST_RESERVED_MEMORY, reservedMemory)
    val mm = UnifiedMemoryManager(conf, numCores = 1)
    // Try using an executor memory that's too small
    val conf2 = conf.clone().set(EXECUTOR_MEMORY.key, (reservedMemory / 2).toString)
    val exception = intercept[IllegalArgumentException] {
      UnifiedMemoryManager(conf2, numCores = 1)
    }
    assert(exception.getMessage.contains("increase executor memory"))
  }
  test("execution can evict cached blocks when there are multiple active tasks (SPARK-12155)") {
    val conf = new SparkConf()
      .set(MEMORY_FRACTION, 1.0)
      .set(MEMORY_STORAGE_FRACTION, 0.0)
      .set(TEST_MEMORY, 1000L)
    val mm = UnifiedMemoryManager(conf, numCores = 2)
    val ms = makeMemoryStore(mm)
    val memoryMode = MemoryMode.ON_HEAP
    assert(mm.maxHeapMemory === 1000)
    // Have two tasks each acquire some execution memory so that the memory pool registers that
    // there are two active tasks:
    assert(mm.acquireExecutionMemory(100L, 0, memoryMode) === 100L)
    assert(mm.acquireExecutionMemory(100L, 1, memoryMode) === 100L)
    // Fill up all of the remaining memory with storage.
    assert(mm.acquireStorageMemory(dummyBlock, 800L, memoryMode))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.storageMemoryUsed === 800)
    assert(mm.executionMemoryUsed === 200)
    // A task should still be able to allocate 100 bytes execution memory by evicting blocks
    assert(mm.acquireExecutionMemory(100L, 0, memoryMode) === 100L)
    assertEvictBlocksToFreeSpaceCalled(ms, 100L)
    assert(mm.executionMemoryUsed === 300)
    assert(mm.storageMemoryUsed === 700)
    assert(evictedBlocks.nonEmpty)
  }
  test("SPARK-15260: atomically resize memory pools") {
    val conf = new SparkConf()
      .set(MEMORY_FRACTION, 1.0)
      .set(MEMORY_STORAGE_FRACTION, 0.0)
      .set(TEST_MEMORY, 1000L)
    val mm = UnifiedMemoryManager(conf, numCores = 2)
    makeBadMemoryStore(mm)
    val memoryMode = MemoryMode.ON_HEAP
    // Acquire 1000 then release 600 bytes of storage memory, leaving the
    // storage memory pool at 1000 bytes but only 400 bytes of which are used.
    assert(mm.acquireStorageMemory(dummyBlock, 1000L, memoryMode))
    mm.releaseStorageMemory(600L, memoryMode)
    // Before the fix for SPARK-15260, we would first shrink the storage pool by the amount of
    // unused storage memory (600 bytes), try to evict blocks, then enlarge the execution pool
    // by the same amount. If the eviction threw an exception, then we would shrink one pool
    // without enlarging the other, resulting in an assertion failure.
    intercept[RuntimeException] {
      mm.acquireExecutionMemory(1000L, 0, memoryMode)
    }
    // Invoke the manager's private invariant check to verify pool sizes still sum correctly.
    val assertInvariants = PrivateMethod[Unit](Symbol("assertInvariants"))
    mm.invokePrivate[Unit](assertInvariants())
  }
  test("not enough free memory in the storage pool --OFF_HEAP") {
    val conf = new SparkConf()
      .set(MEMORY_OFFHEAP_SIZE, 1000L)
      .set(TEST_MEMORY, 1000L)
      .set(MEMORY_OFFHEAP_ENABLED, true)
    val taskAttemptId = 0L
    val mm = UnifiedMemoryManager(conf, numCores = 1)
    val ms = makeMemoryStore(mm)
    val memoryMode = MemoryMode.OFF_HEAP
    assert(mm.acquireExecutionMemory(400L, taskAttemptId, memoryMode) === 400L)
    assert(mm.storageMemoryUsed === 0L)
    assert(mm.executionMemoryUsed === 400L)
    // Fail fast
    assert(!mm.acquireStorageMemory(dummyBlock, 700L, memoryMode))
    assert(mm.storageMemoryUsed === 0L)
    assert(mm.acquireStorageMemory(dummyBlock, 100L, memoryMode))
    assert(mm.storageMemoryUsed === 100L)
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    // Borrow 50 from execution memory
    assert(mm.acquireStorageMemory(dummyBlock, 450L, memoryMode))
    assertEvictBlocksToFreeSpaceNotCalled(ms)
    assert(mm.storageMemoryUsed === 550L)
    // Borrow 50 from execution memory and evict 50 to free space
    assert(mm.acquireStorageMemory(dummyBlock, 100L, memoryMode))
    assertEvictBlocksToFreeSpaceCalled(ms, 50)
    assert(mm.storageMemoryUsed === 600L)
  }
}
| maropu/spark | core/src/test/scala/org/apache/spark/memory/UnifiedMemoryManagerSuite.scala | Scala | apache-2.0 | 15,346 |
/*
* Copyright (c) 2012, 2013, 2014, 2015, 2016 SURFnet BV
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
* following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this list of conditions and the following
* disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided with the distribution.
* * Neither the name of the SURFnet BV nor the names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package nl.surfnet.safnari
import nl.surfnet.nsiv2.messages._
import nl.surfnet.nsiv2.persistence.MessageData
import nl.surfnet.nsiv2.soap.Conversion
import play.api.libs.json._
import scala.util.Success
/**
 * JSON (de)serialization of the internal [[Message]] hierarchy to the
 * persisted [[MessageData]] row format. Each message class is stored with a
 * string type tag plus a JSON payload keyed by its correlation id.
 */
object MessagePersistence {
  import MessageData._
  import PceMessage.ProviderEndPointFormat
  // Json.format doesn't work, so use manual conversion instead.
  implicit val FromRequesterFormat = unaryCaseClassFormat("message")(FromRequester.apply, FromRequester.unapply)
  implicit val ToRequesterFormat = unaryCaseClassFormat("message")(ToRequester.apply, ToRequester.unapply)
  implicit val FromProviderFormat = unaryCaseClassFormat("message")(FromProvider.apply, FromProvider.unapply)
  implicit val AckFromProviderFormat = unaryCaseClassFormat("message")(AckFromProvider.apply, AckFromProvider.unapply)
  implicit val ToProviderFormat = Json.format[ToProvider]
  implicit val FromPceFormat = Json.format[FromPce]
  implicit val AckFromPceFormat = Json.format[AckFromPce]
  implicit val ToPceFormat = Json.format[ToPce]
  implicit val MessageDeliveryFailureFormat = Json.format[MessageDeliveryFailure]
  implicit val PassedEndTimeFormat = Json.format[PassedEndTime]
  // Bidirectional Message <-> MessageData conversion. NOTE: AckFromProvider is
  // deliberately stored under the tag "ProviderAck" (not "AckFromProvider");
  // do not "normalize" it, or previously persisted rows become unreadable.
  implicit val MessageToMessageData = Conversion.build[Message, MessageData] {
    case message @ FromRequester(nsi) => Success(MessageData(Some(nsi.headers.correlationId), "FromRequester", formatJson(message)))
    case message @ ToRequester(nsi) => Success(MessageData(Some(nsi.headers.correlationId), "ToRequester", formatJson(message)))
    case message @ FromProvider(nsi) => Success(MessageData(Some(nsi.headers.correlationId), "FromProvider", formatJson(message)))
    case message @ AckFromProvider(nsi) => Success(MessageData(Some(nsi.headers.correlationId), "ProviderAck", formatJson(message)))
    case message @ ToProvider(nsi, _) => Success(MessageData(Some(nsi.headers.correlationId), "ToProvider", formatJson(message)))
    case message @ FromPce(pce) => Success(MessageData(Some(pce.correlationId), "FromPce", formatJson(message)))
    case message @ AckFromPce(pce) => Success(MessageData(Some(pce.correlationId), "AckFromPce", formatJson(message)))
    case message @ ToPce(pce) => Success(MessageData(Some(pce.correlationId), "ToPce", formatJson(message)))
    case message: MessageDeliveryFailure => Success(MessageData(Some(message.correlationId), "MessageDeliveryFailure", formatJson(message)))
    case message: PassedEndTime => Success(MessageData(Some(message.correlationId), "PassedEndTime", formatJson(message)))
  } { serialized =>
    // NOTE(review): this match is not exhaustive over arbitrary strings — an
    // unknown or corrupted type tag throws a MatchError. Consider an explicit
    // failure case if Conversion's result type supports it; verify against
    // Conversion.build's expected signature before changing.
    serialized.tpe match {
      case "FromRequester" => parseJson[FromRequester](serialized.content)
      case "ToRequester" => parseJson[ToRequester](serialized.content)
      case "FromProvider" => parseJson[FromProvider](serialized.content)
      case "ProviderAck" => parseJson[AckFromProvider](serialized.content)
      case "ToProvider" => parseJson[ToProvider](serialized.content)
      case "AckFromPce" => parseJson[AckFromPce](serialized.content)
      case "FromPce" => parseJson[FromPce](serialized.content)
      case "ToPce" => parseJson[ToPce](serialized.content)
      case "MessageDeliveryFailure" => parseJson[MessageDeliveryFailure](serialized.content)
      case "PassedEndTime" => parseJson[PassedEndTime](serialized.content)
    }
  }
}
| BandwidthOnDemand/nsi-safnari | app/nl/surfnet/safnari/MessagePersistence.scala | Scala | bsd-3-clause | 5,031 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.funsuite
import org.scalactic.{Resources => _, _}
import org.scalatest._
import scala.concurrent.Future
import Suite.autoTagClassAnnotations
import scala.util.Try
/**
* Implementation trait for class <code>AsyncFunSuite</code>, which represents
* a suite of tests in which each test is represented as a function value.
*
* <p>
* <a href="AsyncFunSuite.html"><code>AsyncFunSuite</code></a> is a class, not a trait,
* to minimize compile time given there is a slight compiler overhead to
* mixing in traits compared to extending classes. If you need to mix the
* behavior of <code>AsyncFunSuite</code> into some other class, you can use this
* trait instead, because class <code>AsyncFunSuite</code> does nothing more than
* extend this trait and add a nice <code>toString</code> implementation.
* </p>
*
* <p>
* See the documentation of the class for a <a href="AsyncFunSuite.html">detailed
* overview of <code>AsyncFunSuite</code></a>.
* </p>
*
* @author Bill Venners
*/
//SCALATESTJS-ONLY @scala.scalajs.reflect.annotation.EnableReflectiveInstantiation
@Finders(Array("org.scalatest.finders.FunSuiteFinder"))
trait AsyncFunSuiteLike extends AsyncTestSuite with AsyncTestRegistration with Informing with Notifying with Alerting with Documenting { thisSuite =>
private final val engine = new AsyncEngine(Resources.concurrentFunSuiteMod, "FunSuite")
import engine._
/**
* Returns an <code>Informer</code> that during test execution will forward strings passed to its
* <code>apply</code> method to the current reporter. If invoked in a constructor, it
* will register the passed string for forwarding later during test execution. If invoked from inside a scope,
* it will forward the information to the current reporter immediately. If invoked from inside a test function,
* it will record the information and forward it to the current reporter only after the test completed, as <code>recordedEvents</code>
* of the test completed event, such as <code>TestSucceeded</code>. If invoked at any other time, it will print to the standard output.
* This method can be called safely by any thread.
*/
protected def info: Informer = atomicInformer.get
/**
* Returns a <code>Notifier</code> that during test execution will forward strings passed to its
* <code>apply</code> method to the current reporter. If invoked in a constructor, it
* will register the passed string for forwarding later during test execution. If invoked while this
* <code>AsyncFunSuite</code> is being executed, such as from inside a test function, it will forward the information to
* the current reporter immediately. If invoked at any other time, it will
* print to the standard output. This method can be called safely by any thread.
*/
protected def note: Notifier = atomicNotifier.get
/**
* Returns an <code>Alerter</code> that during test execution will forward strings passed to its
* <code>apply</code> method to the current reporter. If invoked in a constructor, it
* will register the passed string for forwarding later during test execution. If invoked while this
* <code>AsyncFunSuite</code> is being executed, such as from inside a test function, it will forward the information to
* the current reporter immediately. If invoked at any other time, it will
* print to the standard output. This method can be called safely by any thread.
*/
protected def alert: Alerter = atomicAlerter.get
/**
* Returns a <code>Documenter</code> that during test execution will forward strings passed to its
* <code>apply</code> method to the current reporter. If invoked in a constructor, it
* will register the passed string for forwarding later during test execution. If invoked from inside a scope,
* it will forward the information to the current reporter immediately. If invoked from inside a test function,
* it will record the information and forward it to the current reporter only after the test completed, as <code>recordedEvents</code>
* of the test completed event, such as <code>TestSucceeded</code>. If invoked at any other time, it will print to the standard output.
* This method can be called safely by any thread.
*/
protected def markup: Documenter = atomicDocumenter.get
final def registerAsyncTest(testText: String, testTags: Tag*)(testFun: => Future[compatible.Assertion])(implicit pos: source.Position): Unit = {
engine.registerAsyncTest(testText, transformToOutcome(testFun), Resources.testCannotBeNestedInsideAnotherTest, None, None, pos, testTags: _*)
}
final def registerIgnoredAsyncTest(testText: String, testTags: Tag*)(testFun: => Future[compatible.Assertion])(implicit pos: source.Position): Unit = {
engine.registerIgnoredAsyncTest(testText, transformToOutcome(testFun), Resources.testCannotBeNestedInsideAnotherTest, None, pos, testTags: _*)
}
/**
* Register a test with the specified name, optional tags, and function value that takes no arguments.
* This method will register the test for later execution via an invocation of one of the <code>run</code>
* methods. The passed test name must not have been registered previously on
* this <code>AsyncFunSuite</code> instance.
*
* @param testName the name of the test
* @param testTags the optional list of tags for this test
* @param testFun the test function
* @throws TestRegistrationClosedException if invoked after <code>run</code> has been invoked on this suite
* @throws DuplicateTestNameException if a test with the same name has been registered previously
* @throws NotAllowedException if <code>testName</code> had been registered previously
* @throws NullArgumentException if <code>testName</code> or any passed test tag is <code>null</code>
*/
protected def test(testName: String, testTags: Tag*)(testFun: => Future[compatible.Assertion])(implicit pos: source.Position): Unit = {
engine.registerAsyncTest(testName, transformToOutcome(testFun), Resources.testCannotAppearInsideAnotherTest, None, None, pos, testTags: _*)
}
/**
* Register a test to ignore, which has the specified name, optional tags, and function value that takes no arguments.
* This method will register the test for later ignoring via an invocation of one of the <code>run</code>
* methods. This method exists to make it easy to ignore an existing test by changing the call to <code>test</code>
* to <code>ignore</code> without deleting or commenting out the actual test code. The test will not be run, but a
* report will be sent that indicates the test was ignored. The passed test name must not have been registered previously on
* this <code>AsyncFunSuite</code> instance.
*
* @param testName the name of the test
* @param testTags the optional list of tags for this test
* @param testFun the test function
* @throws TestRegistrationClosedException if invoked after <code>run</code> has been invoked on this suite
* @throws DuplicateTestNameException if a test with the same name has been registered previously
* @throws NotAllowedException if <code>testName</code> had been registered previously
*/
protected def ignore(testName: String, testTags: Tag*)(testFun: => Future[compatible.Assertion])(implicit pos: source.Position): Unit = {
// Same registration path as test(), but the engine marks the test as ignored
// so it is reported without being executed.
engine.registerIgnoredAsyncTest(testName, transformToOutcome(testFun), Resources.ignoreCannotAppearInsideATest, None, pos, testTags: _*)
}
/**
* An immutable <code>Set</code> of test names. If this <code>AsyncFunSuite</code> contains no tests, this method returns an empty <code>Set</code>.
*
* <p>
* This trait's implementation of this method will return a set that contains the names of all registered tests. The set's iterator will
* return those names in the order in which the tests were registered.
* </p>
*/
// Snapshot of the registered test names; InsertionOrderSet preserves the
// order in which the tests were registered (see scaladoc above).
override def testNames: Set[String] = {
val registeredNames = atomic.get.testNamesList
InsertionOrderSet(registeredNames)
}
/**
* Run a test. This trait's implementation runs the test registered with the name specified by <code>testName</code>.
*
* @param testName the name of one test to run.
* @param args the <code>Args</code> for this run
* @return a <code>Status</code> object that indicates when the test started by this method has completed, and whether or not it failed .
*
* @throws IllegalArgumentException if <code>testName</code> is defined but a test with that name does not exist on this <code>AsyncFunSuite</code>
* @throws NullArgumentException if any of <code>testName</code>, <code>reporter</code>, <code>stopper</code>, or <code>configMap</code>
* is <code>null</code>.
*/
protected override def runTest(testName: String, args: Args): Status = {
// Wraps the registered test function in a NoArgAsyncTest carrying the test's
// metadata, runs it through withFixture, and adapts the result for the engine.
def invokeWithAsyncFixture(theTest: TestLeaf, onCompleteFun: Try[Outcome] => Unit): AsyncOutcome = {
val theConfigMap = args.configMap
val testData = testDataFor(testName, theConfigMap)
FutureAsyncOutcome(
withFixture(
new NoArgAsyncTest {
val name = testData.name
def apply(): FutureOutcome = { theTest.testFun().toFutureOutcome }
val configMap = testData.configMap
val scopes = testData.scopes
val text = testData.text
val tags = testData.tags
val pos = testData.pos
}
).underlying,
onCompleteFun
)
}
// The `true` flag and parallelAsyncTestExecution are forwarded to the shared
// runTestImpl helper, which performs the actual lookup and execution.
runTestImpl(thisSuite, testName, args, true, parallelAsyncTestExecution, invokeWithAsyncFixture)
}
/**
* A <code>Map</code> whose keys are <code>String</code> names of tagged tests and whose associated values are
* the <code>Set</code> of tags for the test. If this <code>AsyncFunSuite</code> contains no tags, this method returns an empty <code>Map</code>.
*
* <p>
* This trait's implementation returns tags that were passed as strings contained in <code>Tag</code> objects passed to
* methods <code>test</code> and <code>ignore</code>.
* </p>
*
* <p>
* In addition, this trait's implementation will also auto-tag tests with class level annotations.
* For example, if you annotate <code>@Ignore</code> at the class level, all test methods in the class will be auto-annotated with
* <code>org.scalatest.Ignore</code>.
* </p>
*/
// Merges explicitly registered tags with tags derived from class-level annotations.
override def tags: Map[String, Set[String]] = autoTagClassAnnotations(atomic.get.tagsMap, this)
/**
* Run zero to many of this <code>AsyncFunSuite</code>'s tests.
*
* @param testName an optional name of one test to run. If <code>None</code>, all relevant tests should be run.
* I.e., <code>None</code> acts like a wildcard that means run all relevant tests in this <code>Suite</code>.
* @param args the <code>Args</code> for this run
* @return a <code>Status</code> object that indicates when all tests started by this method have completed, and whether or not a failure occurred.
*
* @throws NullArgumentException if any of the passed parameters is <code>null</code>.
* @throws IllegalArgumentException if <code>testName</code> is defined, but no test with the specified test name
* exists in this <code>Suite</code>
*/
protected override def runTests(testName: Option[String], args: Args): Status = {
// Delegates to the shared implementation, passing this suite's runTest as the
// per-test callback.
runTestsImpl(thisSuite, testName, args, true, parallelAsyncTestExecution, runTest)
}
// Entry point for running this suite; wraps super.run with the shared runImpl helper.
override def run(testName: Option[String], args: Args): Status = {
runImpl(thisSuite, testName, args, parallelAsyncTestExecution, super.run)
}
/**
* Registers shared tests.
*
* <p>
* This method enables the following syntax for shared tests in a <code>AsyncFunSuite</code>:
* </p>
*
* <pre class="stHighlight">
* testsFor(nonEmptyStack(lastValuePushed))
* </pre>
*
* <p>
* This method just provides syntax sugar intended to make the intent of the code clearer.
* Because the parameter passed to it is
* type <code>Unit</code>, the expression will be evaluated before being passed, which
* is sufficient to register the shared tests. For examples of shared tests, see the
* <a href="#sharedTests">Shared tests section</a> in the main documentation for this trait.
* </p>
*/
// Intentionally a no-op: the Unit argument is evaluated before the call, and that
// evaluation is what performs the shared-test registrations (see scaladoc above).
protected def testsFor(unit: Unit): Unit = {}
/**
* Suite style name.
*
* @return `org.scalatest.funsuite.AsyncFunSuite`
*/
// Identifies this suite style to ScalaTest tooling (e.g. style checking).
final override val styleName: String = "org.scalatest.funsuite.AsyncFunSuite"
// Inherits scaladoc; builds the metadata bundle (name, tags, scopes, position)
// for a single named test, used by runTest's fixture above.
override def testDataFor(testName: String, theConfigMap: ConfigMap = ConfigMap.empty): TestData = createTestDataFor(testName, theConfigMap, this)
}
| dotty-staging/scalatest | scalatest/src/main/scala/org/scalatest/funsuite/AsyncFunSuiteLike.scala | Scala | apache-2.0 | 13,330 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.h2o.sparkling.backend.utils
/**
 * Renders a fixed-width textual progress bar of the form
 * `|███____...___| 42%` and prints it to stdout, overwriting the
 * current terminal line via a carriage return.
 */
object ProgressBar {
private final val BarLength = 50
private final val CarriageReturn = "\\r"
private final val Empty = "_"
private final val Filled = "█"
private final val Side = "|"
/**
 * Prints the bar for `progress` (expected in [0, 1]). By default the line is
 * terminated with a carriage return so the next call overwrites it; pass
 * `leaveTheProgressBarVisible = true` to end with a newline instead.
 */
private[sparkling] def printProgressBar(progress: Float, leaveTheProgressBarVisible: Boolean = false): Unit = {
val terminator =
if (leaveTheProgressBarVisible) System.lineSeparator
else CarriageReturn
print(s"$CarriageReturn${renderProgressBar(progress)}$terminator")
}
/** Builds the bar string without printing it; `progress` is a fraction in [0, 1]. */
private[sparkling] def renderProgressBar(progress: Float): String = {
// ceil so any nonzero progress shows at least one filled cell
val filledCells = (progress * BarLength).ceil.toInt
val bar = (Filled * filledCells) + (Empty * (BarLength - filledCells))
val percent = (progress * 100).toInt
s"$Side$bar$Side $percent%"
}
}
| h2oai/sparkling-water | core/src/main/scala/ai/h2o/sparkling/backend/utils/ProgressBar.scala | Scala | apache-2.0 | 1,723 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bigtop.bigpetstore.spark.analytics
import java.io.File
import java.sql.Timestamp
import scala.Nothing
import org.apache.spark.sql._
import org.apache.spark.{SparkContext, SparkConf}
import org.apache.spark.SparkContext._
import org.apache.spark.rdd._
import org.joda.time.DateTime
import org.json4s.JsonDSL.WithBigDecimal._
import org.apache.bigtop.bigpetstore.spark.datamodel._
object PetStoreStatistics {
// Prints CLI usage to stderr; called from main when argument parsing fails.
private def printUsage() {
val usage: String = "BigPetStore Analytics Module." +
"\\n" +
"Usage: spark-submit ... inputDir outputFile\\n " +
"inputDir - (string) Path to ETL'd data\\n" +
"outputFile - (string) is a JSON file. For schema, see the code.\\n"
System.err.println(usage)
}
/**
* Scala details. Some or None are an idiomatic way, in scala, to
* return an optional value. This allows us to signify, to the caller, that the
* method may fail. The caller can decide how to deal with failure (i.e. using getOrElse).
* @param args command-line arguments: expected (inputDir, outputFile)
* @return pair of (inputDir, outputFile); each is None when the argument is missing
*/
def parseArgs(args: Array[String]):(Option[String],Option[String]) = {
if(args.length < 1) {
(None, None)
} else if (args.length == 1) {
(Some(args(0)), None)
} else {
(Some(args(0)), Some(args(1)))
}
}
// Indexes products by productId for lookup. NOTE(review): appears unused in
// this file — confirm external callers before removing.
def productMap(r:Array[Product]) : Map[Long,Product] = {
r map (prod => prod.productId -> prod) toMap
}
// Counts transactions per month over the registered "Transactions" table.
def queryTxByMonth(sqlContext: SQLContext): Array[StatisticsTxByMonth] = {
import sqlContext._
val results: SchemaRDD = sql("SELECT count(*), month FROM Transactions GROUP BY month")
val transactionsByMonth = results.collect()
for(x<-transactionsByMonth){
println(x)
}
// Column 0 is the count, column 1 the month.
transactionsByMonth.map { r =>
StatisticsTxByMonth(r.getInt(1), r.getLong(0))
}
}
// Counts transactions per (productId, store zipcode) by joining on storeId.
def queryTxByProductZip(sqlContext: SQLContext): Array[StatisticsTxByProductZip] = {
import sqlContext._
val results: SchemaRDD = sql(
"""SELECT count(*) c, productId, zipcode
FROM Transactions t
JOIN Stores s ON t.storeId = s.storeId
GROUP BY productId, zipcode""")
val groupedProductZips = results.collect()
//get list of all transactionsData
for(x<-groupedProductZips){
println("grouped product:zip " + x)
}
//Map JDBC Row into a Serializable case class.
groupedProductZips.map { r =>
StatisticsTxByProductZip(r.getLong(1),r.getString(2),r.getLong(0))
}
}
// Counts transactions per productId.
def queryTxByProduct(sqlContext: SQLContext): Array[StatisticsTxByProduct] = {
import sqlContext._
val results: SchemaRDD = sql(
"""SELECT count(*) c, productId FROM Transactions GROUP BY productId""")
val groupedProducts = results.collect()
//Map JDBC Row into a Serializable case class.
groupedProducts.map { r =>
StatisticsTxByProduct(r.getLong(1),r.getLong(0))
}
}
// Registers the ETL'd RDDs as SQL tables, runs all three aggregate queries and
// bundles the results into a single Statistics value.
// r = (locations, stores, customers, products, transactions)
def runQueries(r:(RDD[Location], RDD[Store], RDD[Customer], RDD[Product],
RDD[Transaction]), sc: SparkContext): Statistics = {
val sqlContext = new org.apache.spark.sql.SQLContext(sc)
import sqlContext._
// Transform the Non-SparkSQL Calendar into a SparkSQL-friendly field.
val mappableTransactions:RDD[TransactionSQL] =
r._5.map { trans => trans.toSQL() }
r._1.registerTempTable("Locations")
r._2.registerTempTable("Stores")
r._3.registerTempTable("Customers")
r._4.registerTempTable("Product")
mappableTransactions.registerTempTable("Transactions")
val txByMonth = queryTxByMonth(sqlContext)
val txByProduct = queryTxByProduct(sqlContext)
val txByProductZip = queryTxByProductZip(sqlContext)
return Statistics(
txByMonth.map { s => s.count }.reduce(_+_), // Total number of transactions
txByMonth,
txByProduct,
txByProductZip,
r._4.collect()) // Product details
}
/**
* We keep a "run" method which can be called easily from tests and also is used by main.
*/
def run(txInputDir:String, statsOutputFile:String,
sc:SparkContext) {
System.out.println("Running w/ input = " + txInputDir)
System.out.println("input : " + txInputDir)
val etlData = IOUtils.load(sc, txInputDir)
val stats = runQueries(etlData, sc)
IOUtils.saveLocalAsJSON(new File(statsOutputFile), stats)
System.out.println("Output JSON Stats stored : " + statsOutputFile)
}
def main(args: Array[String]) {
// Get or else : On failure (else) we exit.
val (inputPath,outputPath) = parseArgs(args)
if(! (inputPath.isDefined && outputPath.isDefined)) {
printUsage()
System.exit(1)
}
val sc = new SparkContext(new SparkConf().setAppName("PetStoreStatistics"))
run(inputPath.get, outputPath.get, sc)
sc.stop()
}
}
| jagatsingh/bigtop | bigtop-bigpetstore/bigpetstore-spark/src/main/scala/org/apache/bigpetstore/spark/analytics/PetStoreStatistics.scala | Scala | apache-2.0 | 5,546 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle.file
import org.junit.Test
import org.scalatest.junit.AssertionsForJUnit
// scalastyle:off magic.number multiple.string.literals
// Exercises FileLineLengthChecker: each test feeds a source snippet plus a
// maxLineLength parameter and asserts which lines are flagged as too long.
class FileLineLengthCheckerTest extends AssertionsForJUnit with CheckerTest {
val key = "line.size.limit"
val classUnderTest = classOf[FileLineLengthChecker]
// All lines fit within the limit: no errors expected.
@Test def testNoMax(): Unit = {
val source = """
package foobar
object Foobar {
}
"""
assertErrors(List(), source, Map("maxLineLength" -> "20"))
}
// Exactly one line ("object Foobar {") exceeds the 15-char limit.
@Test def testWithOneMax(): Unit = {
val source = """
package foobar
object Foobar {
}
"""
assertErrors(List(lineError(4, List("15"))), source, Map("maxLineLength" -> "15"))
}
// With ignoreImports=true only non-import lines are flagged; without it the
// long import lines are flagged too.
@Test def testWithImports(): Unit = {
val source = """
package foobar
import org.scalastyle.file.SuperLongImportClass
object Foobar {
import org.scalastyle.file._
}
"""
assertErrors(
List(lineError(5, List("15"))),
source,
Map("maxLineLength" -> "15", "ignoreImports" -> "true"))
assertErrors(
List(lineError(3, List("15")), lineError(5, List("15")), lineError(6, List("15"))),
source,
Map("maxLineLength" -> "15"))
}
// Two separate over-long lines are both reported.
@Test def testWithTwoMax(): Unit = {
val source = """
package foobar
object Foobar {
}
object Barbar {
}
"""
assertErrors(List(lineError(4, List("15")), lineError(6, List("15"))), source, Map("maxLineLength" -> "15"))
}
// Tabs (substituted for '#') must count toward the line length as well.
@Test def testWithSpacesTabs(): Unit = {
val source = """
package foobar
import# #java.lang._
object Barbar {
}
""".replaceAll("#","\t")
assertErrors(List(lineError(4, List("14")), lineError(5, List("14"))), source, Map("maxLineLength" -> "14"))
}
}
| scalastyle/scalastyle | src/test/scala/org/scalastyle/file/FileLineLengthCheckerTest.scala | Scala | apache-2.0 | 2,401 |
package sampler.io
import java.time.LocalDateTime
import play.api.libs.json._
import scala.sys.process._
// Singleton instance so the helpers can be used via `Meta.` as well as mixed in.
object Meta extends Meta
/**
 * Helpers for stamping provenance metadata ("breadcrumbs") onto JSON documents:
 * capture date/host/user/call-stack details, chain in the breadcrumbs of
 * upstream documents, then inject everything under a "breadcrumbs" key.
 */
trait Meta {
/** Entry points added onto any [[JsValue]] for starting a metadata builder. */
implicit class JsonMeta(json: JsValue) {
/** Starts a builder pre-populated with system details (date, host, user, stack). */
def addSystemMeta(): MetaBuilder = {
MetaBuilder.init(json).addSystemMeta()
}
/** Starts a builder containing the single supplied key/value pair. */
def add(key: String, value: JsValue): MetaBuilder = {
MetaBuilder.init(json).add(key, value)
}
/** Starts a builder whose upstream lineage is read from `arr`'s breadcrumbs. */
def addUpstreamFrom(arr: JsValue): MetaBuilder = {
// Bug fix: this previously passed `json` itself to addUpstream, silently
// ignoring the `arr` argument.
MetaBuilder.init(json).addUpstream(arr)
}
}
/**
 * Immutable accumulator pairing the wrapped document (`json`) with the metadata
 * gathered so far (`metaMap`) and the breadcrumbs of upstream documents.
 *
 * @param metaMap  key/value metadata accumulated so far
 * @param json     the document the metadata will be injected into by [[build]]
 * @param upstream breadcrumbs objects collected from upstream documents
 */
case class MetaBuilder(metaMap: JsObject, json: JsValue, upstream: JsArray = JsArray.empty) {
def add(key: String, value: String): MetaBuilder = add(key, JsString(value))
def add(key: String, value: JsValue): MetaBuilder = copy(metaMap = metaMap + (key -> value))
/** Records timestamp, hostname (via the `hostname` shell command), user name and a simplified call stack. */
def addSystemMeta() = add("date", LocalDateTime.now.toString)
.add("hostname", "hostname".!!.takeWhile(_ != System.lineSeparator.charAt(0)))
.add("username", System.getProperty("user.name"))
.add("simplified-stack", {
// Keep only frames where the (outer) class name changes, collapsing
// consecutive frames from the same class into one entry.
val distinctStringMatcher: PartialFunction[Seq[String], String] = {
case Seq(a, b) if a != b => b
}
val simpleStack: Seq[JsString] = Thread.currentThread
.getStackTrace
.toSeq
.map(_.getClassName.takeWhile(_ != '$'))
.sliding(2)
.collect(distinctStringMatcher)
.map(JsString)
.toSeq
JsArray(simpleStack)
})
def addProject(project: String): MetaBuilder = add("project", project)
def addTask(name: String): MetaBuilder = add("task", name)
def addKind(text: String): MetaBuilder = add("kind", text)
/** Appends the "breadcrumbs" object of the given upstream document to the lineage array. */
def addUpstream(json: JsValue): MetaBuilder = {
val breadcrumbPicker = (__ \\ "breadcrumbs").json.pick[JsObject]
json.validate(breadcrumbPicker).fold(
// TODO, warn, don't fail
invalid = _ => throw new AssertionError("No breadcrumbs found in provided document: "+json),
valid = breadcrumbs => this.copy(upstream = upstream.append(breadcrumbs))
)
}
/** Injects the accumulated metadata (plus upstream lineage) into `json` under "breadcrumbs". */
def build(): JsValue = {
val allMeta = metaMap + ("upstream" -> upstream)
val putter: Reads[JsObject] = {
(__).json.update(
__.read[JsObject].map(root => JsObject(
// Prepend so "breadcrumbs" appears first among the document's fields.
("breadcrumbs", allMeta.as[JsValue]) +: root.as[JsObject].fields
))
)
}
json.transform(putter).get
}
}
object MetaBuilder{
def init(json: JsValue): MetaBuilder = {
// Assume that the JSON we are adding meta to has no existing meta.
// NOTE(review): this guards against a 'meta key, but build() writes under
// "breadcrumbs" — confirm whether this check should look for 'breadcrumbs.
assume(
json.validate( (__ \\ 'meta).json.pick ).isError,
"can't add meta to a document that already contains meta"
)
MetaBuilder(JsObject.empty, json)
}
}
}
} | tearne/Sampler | sampler-core/src/main/scala/sampler/io/Meta.scala | Scala | apache-2.0 | 3,188 |
package jerimum
import br.edu.ifrn.potigol.Potigolutil.{ Inteiro, Lista }
// Time-based frame animation: cycles through `imagens`, showing each frame for
// `velocidade` milliseconds.
case class Animacao(velocidade: Inteiro, imagens: Lista[Imagem]) {
// Wall-clock time at which the animation was constructed.
private[this] val inicio = System.currentTimeMillis()
private[this] val tamanho = imagens.length
// Returns the frame for the current instant: elapsed-time / frame-duration,
// wrapped around the total number of frames.
def imagem(): Imagem = {
val indice = ((System.currentTimeMillis() - inicio) / velocidade % tamanho).toInt
imagens(indice)
}
}
package com.blrest.model
/**
* Created by ctcarrier on 12/25/13.
*/
// HTTP methods accepted by the Neo4j REST batch API.
object Neo4jRestMethod {
val POST = "POST"
val GET = "GET"
}
// Target paths for Neo4j REST batch operations.
object Neo4jRestTo {
val NODE = "/node"
// NOTE(review): node id 101 is hard-coded here — confirm this is intentional
// rather than a placeholder for a per-request node id.
val LABEL = "/node/101/labels"
}
// One entry of a Neo4j REST batch request: method + target path + batch id + payload.
case class Neo4jRestTask(method: String, to: String, id: Int, body: AnyRef)
| ctcarrier/bl-rest | src/main/scala/com/blrest/model/Neo4jRestTask.scala | Scala | mit | 294 |
package aima.core.search.contingency
import aima.core.fp.Show
import scala.annotation.tailrec
import scala.reflect.ClassTag
/**
*
* <pre>
* <code>
* function AND-OR-GRAPH-SEARCH(problem) returns a conditional plan, or failure
* OR-SEARCH(problem.INITIAL-STATE, problem, [])
*
* ---------------------------------------------------------------------------------
*
* function OR-SEARCH(state, problem, path) returns a conditional plan, or failure
* if problem.GOAL-TEST(state) then return the empty plan
* if state is on path then return failure
* for each action in problem.ACTIONS(state) do
* plan <- AND-SEARCH(RESULTS(state, action), problem, [state | path])
* if plan != failure then return [action | plan]
* return failure
*
* ---------------------------------------------------------------------------------
*
* function AND-SEARCH(states, problem, path) returns a conditional plan, or failure
* for each s<sub>i</sub> in states do
* plan<sub>i</sub> <- OR-SEARCH(s<sub>i</sub>, problem, path)
* if plan<sub>i</sub> = failure then return failure
* return [if s<sub>1</sub> then plan<sub>1</sub> else if s<sub>2</sub> then plan<sub>2</sub> else ... if s<sub>n-1</sub> then plan<sub>n-1</sub> else plan<sub>n</sub>]
* </code>
* </pre>
*
* @author Shawn Garner
*/
trait AndOrGraphSearch[ACTION, STATE] {
// ClassTags for the type parameters; used by pattern matches over Step elsewhere.
implicit val aCT: ClassTag[ACTION]
implicit val sCT: ClassTag[STATE]
// Entry point: AND-OR-GRAPH-SEARCH from the pseudo-code above.
def andOrGraphSearch(problem: NondeterministicProblem[ACTION, STATE]): ConditionPlanResult =
orSearch(problem.initialState(), problem, Nil)
// OR node: succeed if any action from `state` leads to a plan. `path` holds the
// states already on the current branch, used to detect cycles.
def orSearch(
state: STATE,
problem: NondeterministicProblem[ACTION, STATE],
path: List[STATE]
): ConditionPlanResult = {
if (problem.isGoalState(state)) {
ConditionalPlan.emptyPlan
} else if (path.contains(state)) {
// Revisiting a state on this branch would loop forever: fail this branch.
ConditionalPlanningFailure
} else {
val statePlusPath = state :: path
val actions: List[ACTION] = problem.actions(state)
// Try each action in turn; first action whose outcomes all have plans wins.
@tailrec def recurse(a: List[ACTION]): ConditionPlanResult = a match {
case Nil => ConditionalPlanningFailure
case action :: rest =>
andSearch(problem.results(state, action), problem, statePlusPath) match {
case conditionalPlan: ConditionalPlan => newPlan(action, conditionalPlan)
case ConditionalPlanningFailure => recurse(rest)
}
}
recurse(actions)
}
}
// AND node: every possible outcome state must have a plan, else the whole
// action fails. Accumulates (state, plan) pairs for the conditional plan.
def andSearch(
states: List[STATE],
problem: NondeterministicProblem[ACTION, STATE],
path: List[STATE]
): ConditionPlanResult = {
@tailrec def recurse(currentStates: List[STATE], acc: List[(STATE, ConditionalPlan)]): ConditionPlanResult =
currentStates match {
case Nil => newPlan(acc)
case si :: rest =>
orSearch(si, problem, path) match {
case ConditionalPlanningFailure => ConditionalPlanningFailure
case plani: ConditionalPlan => recurse(rest, acc :+ (si -> plani))
}
}
recurse(states, List.empty)
}
// Builds the "[if s1 then plan1 else ...]" structure; a single outcome needs
// no condition, so its plan is returned directly.
def newPlan(l: List[(STATE, ConditionalPlan)]): ConditionalPlan = l match {
case (_, cp: ConditionalPlan) :: Nil => cp
case ls => ConditionalPlan(ls.map(statePlan => ConditionedSubPlan(statePlan._1, statePlan._2)))
}
// Prefixes an action onto an existing plan: "[action | plan]".
def newPlan(action: ACTION, plan: ConditionalPlan): ConditionalPlan =
ConditionalPlan(ActionStep(action) :: plan.steps)
}
// One element of a conditional plan: either an action to take, or a sub-plan
// guarded by the state it applies to.
sealed trait Step
final case class ActionStep[ACTION: ClassTag](action: ACTION) extends Step
final case class ConditionedSubPlan[STATE: ClassTag](state: STATE, subPlan: ConditionalPlan) extends Step
// Result of the AND-OR search: either a conditional plan or failure.
sealed trait ConditionPlanResult
case object ConditionalPlanningFailure extends ConditionPlanResult
final case class ConditionalPlan(steps: List[Step]) extends ConditionPlanResult
object ConditionalPlan {
// The empty plan returned for goal states.
val emptyPlan = ConditionalPlan(List.empty)
object Implicits {
import Show.Implicits._
// Renders a plan as e.g. "[a1, if State = s1 then [...] else [...]]".
// The ClassTag context bounds make the `a: ACTION` / `state: STATE` type
// tests in the patterns checkable at runtime despite erasure.
implicit def showConditionalPlan[STATE: ClassTag: Show, ACTION: ClassTag: Show]: Show[ConditionalPlan] =
new Show[ConditionalPlan] {
override def show(conditionalPlan: ConditionalPlan): String = {
// lastStepAction tracks whether the previous step was an action, which
// decides between "if" (first branch) and " else if" (later branches).
@tailrec def recurse(steps: List[Step], acc: String, lastStepAction: Boolean): String = steps match {
case Nil => acc
case ActionStep(a: ACTION) :: Nil => recurse(Nil, acc + a.show, true)
case ActionStep(a: ACTION) :: rest => recurse(rest, acc + a.show + ", ", true)
case ConditionedSubPlan(state: STATE, subPlan) :: rest if lastStepAction =>
recurse(rest, acc + s"if State = ${state.show} then ${show(subPlan)}", false)
case ConditionedSubPlan(_, subPlan) :: Nil =>
recurse(Nil, acc + s" else ${show(subPlan)}", false)
case ConditionedSubPlan(_, subPlan) :: ActionStep(a) :: rest =>
recurse(ActionStep(a) :: rest, acc + s" else ${show(subPlan)}", false)
case ConditionedSubPlan(state: STATE, subPlan) :: rest =>
recurse(rest, acc + s" else if State = ${state.show} then ${show(subPlan)}", false)
}
recurse(conditionalPlan.steps, "[", true) + "]"
}
}
}
}
// A search problem whose actions may have several possible outcome states,
// consumed by AndOrGraphSearch above.
trait NondeterministicProblem[ACTION, STATE] {
def initialState(): STATE
// Actions applicable in state s.
def actions(s: STATE): List[ACTION]
// All states that may result from applying a in s (nondeterminism).
def results(s: STATE, a: ACTION): List[STATE]
def isGoalState(s: STATE): Boolean
// Cost of reaching childPrime from s via a.
def stepCost(s: STATE, a: ACTION, childPrime: STATE): Double
}
| aimacode/aima-scala | core/src/main/scala/aima/core/search/contingency/AndOrGraphSearch.scala | Scala | mit | 5,574 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.raster.iterators
import org.apache.accumulo.core.data.{Key, Value}
import org.apache.accumulo.core.iterators.{IteratorEnvironment, SortedKeyValueIterator}
/**
* This iterator returns as its nextKey and nextValue responses the key and value
* from the DATA iterator, not from the INDEX iterator. The assumption is that
* the data rows are what we care about; that we do not care about the index
* rows that merely helped us find the data rows quickly.
*
* The other trick to remember about iterators is that they essentially pre-fetch
* data. "hasNext" really means, "was there a next record that you already found".
*/
class SpatioTemporalIntersectingIterator
extends GeomesaFilteringIterator
with HasFeatureType
with SetTopUnique
with SetTopFilterUnique
with SetTopTransformUnique
with SetTopFilterTransformUnique {
// Chosen once in init() based on which of filter/transform/uniqueness apply;
// null until init() runs (Accumulo always calls init before iteration).
var setTopOptimized: (Key) => Unit = null
override def init(source: SortedKeyValueIterator[Key, Value],
options: java.util.Map[String, String],
env: IteratorEnvironment) = {
super.init(source, options, env)
initFeatureType(options)
init(featureType, options)
// pick the execution path once based on the filters and transforms we need to apply
// see org.locationtech.geomesa.core.iterators.IteratorFunctions
// null here means "not configured"; each combination maps to the matching
// setTop* implementation from the mixed-in traits.
setTopOptimized = (filter, transform, checkUniqueId) match {
case (null, null, null) => setTopInclude
case (null, null, _) => setTopUnique
case (_, null, null) => setTopFilter
case (_, null, _) => setTopFilterUnique
case (null, _, null) => setTopTransform
case (null, _, _) => setTopTransformUnique
case (_, _, null) => setTopFilterTransform
case (_, _, _) => setTopFilterTransformUnique
}
}
// Hot path: delegates to the strategy selected in init().
override def setTopConditionally(): Unit = setTopOptimized(source.getTopKey)
}
| jahhulbert-ccri/geomesa | geomesa-accumulo/geomesa-accumulo-raster/src/main/scala/org/locationtech/geomesa/raster/iterators/SpatioTemporalIntersectingIterator.scala | Scala | apache-2.0 | 2,395 |
package org.genericConfig.admin.controllers
import akka.actor.ActorSystem
import akka.stream.Materializer
import javax.inject._
import org.genericConfig.admin.controllers.websocket.{ClientActor, WebClientsMgr}
import play.api.libs.json.JsValue
import play.api.libs.streams.ActorFlow
import play.api.mvc._
/**
* This controller creates an `Action` to handle HTTP requests to the
* application's home page.
*/
@Singleton
class HomeController @Inject()(cc: ControllerComponents) (implicit actorSystem: ActorSystem, mat: Materializer) extends AbstractController(cc) {
/**
* Create an Action to render an HTML page.
*
* The configuration in the `routes` file means that this method
* will be called when the application receives a `GET` request with
* a path of `/`.
*/
def index() = Action { implicit request: Request[AnyContent] =>
Ok(views.html.index(""))
}
// Single manager actor shared by all websocket client actors.
val webClientsMgr = actorSystem.actorOf(WebClientsMgr.props(), "WebClientsMgr")
// Accepts a JSON websocket connection and spawns one ClientActor per socket,
// wired to the shared manager.
def websocket: WebSocket = WebSocket.accept[JsValue, JsValue] { request =>
ActorFlow.actorRef { out =>
ClientActor.props(out, webClientsMgr)
}
}
}
| gennadij/admin | server/app/org/genericConfig/admin/controllers/HomeController.scala | Scala | apache-2.0 | 1,144 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.math.BigDecimal
import java.sql.Timestamp
import org.apache.spark.sql.test.SharedSQLContext
/**
* A test suite for functions added for compatibility with other databases such as Oracle, MSSQL.
*
* These functions are typically implemented using the trait
* [[org.apache.spark.sql.catalyst.expressions.RuntimeReplaceable]].
*/
class SQLCompatibilityFunctionSuite extends QueryTest with SharedSQLContext {
// ifnull(a, b): b when a is null, otherwise a.
test("ifnull") {
checkAnswer(
sql("SELECT ifnull(null, 'x'), ifnull('y', 'x'), ifnull(null, null)"),
Row("x", "y", null))
// Type coercion
checkAnswer(
sql("SELECT ifnull(1, 2.1d), ifnull(null, 2.1d)"),
Row(1.0, 2.1))
}
// nullif(a, b): null when a equals b, otherwise a.
test("nullif") {
checkAnswer(
sql("SELECT nullif('x', 'x'), nullif('x', 'y')"),
Row(null, "x"))
// Type coercion
checkAnswer(
sql("SELECT nullif(1, 2.1d), nullif(1, 1.0d)"),
Row(1.0, null))
}
// nvl: Oracle-style alias of ifnull.
test("nvl") {
checkAnswer(
sql("SELECT nvl(null, 'x'), nvl('y', 'x'), nvl(null, null)"),
Row("x", "y", null))
// Type coercion
checkAnswer(
sql("SELECT nvl(1, 2.1d), nvl(null, 2.1d)"),
Row(1.0, 2.1))
}
// nvl2(a, b, c): b when a is not null, otherwise c.
test("nvl2") {
checkAnswer(
sql("SELECT nvl2(null, 'x', 'y'), nvl2('n', 'x', 'y'), nvl2(null, null, null)"),
Row("y", "x", null))
// Type coercion
checkAnswer(
sql("SELECT nvl2(null, 1, 2.1d), nvl2('n', 1, 2.1d)"),
Row(2.1, 1.0))
}
// Hive-style cast aliases: boolean(x), int(x), date(x), etc.
test("SPARK-16730 cast alias functions for Hive compatibility") {
checkAnswer(
sql("SELECT boolean(1), tinyint(1), smallint(1), int(1), bigint(1)"),
Row(true, 1.toByte, 1.toShort, 1, 1L))
checkAnswer(
sql("SELECT float(1), double(1), decimal(1)"),
Row(1.toFloat, 1.0, new BigDecimal(1)))
checkAnswer(
sql("SELECT date(\\"2014-04-04\\"), timestamp(date(\\"2014-04-04\\"))"),
Row(new java.util.Date(114, 3, 4), new Timestamp(114, 3, 4, 0, 0, 0, 0)))
checkAnswer(
sql("SELECT string(1)"),
Row("1"))
// Error handling: only one argument
val errorMsg = intercept[AnalysisException](sql("SELECT string(1, 2)")).getMessage
assert(errorMsg.contains("Function string accepts only one argument"))
}
}
| gioenn/xSpark | sql/core/src/test/scala/org/apache/spark/sql/SQLCompatibilityFunctionSuite.scala | Scala | apache-2.0 | 3,041 |
package chandu0101.scalajs.react.components.materialui
import chandu0101.macros.tojs.JSMacro
import japgolly.scalajs.react._
import materialui.Mui
import scala.scalajs.js
// Scala.js facade for material-ui's Slider component. Each field maps 1:1 to a
// React prop; the JSMacro reflects the case-class fields into a JS props object.
case class MuiSlider(onBlur : js.UndefOr[js.Function] = js.undefined ,
name : String ,
onDragStart : js.UndefOr[js.Function] = js.undefined ,
step : js.UndefOr[Int] = js.undefined,
style : js.UndefOr[js.Any] = js.undefined,
description : js.UndefOr[String] = js.undefined,
onChange : js.UndefOr[(ReactEventH,Double) => Unit] = js.undefined,
min : js.UndefOr[Double] = js.undefined,
ref : js.UndefOr[String] = js.undefined,
key : js.UndefOr[String] = js.undefined,
onDragStop : js.UndefOr[js.Function] = js.undefined ,
max : js.UndefOr[Double] = js.undefined,
error : js.UndefOr[String] = js.undefined,
onFocus : js.UndefOr[js.Function] = js.undefined ,
disabled : js.UndefOr[Boolean]=js.undefined,
required : js.UndefOr[Boolean]=js.undefined,
defaultValue : js.UndefOr[Double] = js.undefined,
value : js.UndefOr[Double] = js.undefined) {
// Materializes the props and wraps Mui.Slider in a React factory.
def apply() = {
val props = JSMacro[MuiSlider](this)
val f = React.asInstanceOf[js.Dynamic].createFactory(Mui.Slider)
f(props).asInstanceOf[ReactComponentU_]
}
}
| mproch/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/materialui/MuiSlider.scala | Scala | apache-2.0 | 1,397 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.ems.ws.user
import org.beangle.commons.collection.{Collections, Properties}
import org.beangle.data.dao.{EntityDao, OqlBuilder}
import org.beangle.web.action.support.{ActionSupport, EntitySupport}
import org.beangle.web.action.annotation.{mapping, param, response}
import org.beangle.ems.core.config.model.{App, AppType}
import org.beangle.ems.core.config.service.DomainService
import org.beangle.ems.core.security.model.FuncPermission
import org.beangle.ems.core.user.model.{Root, User}
import org.beangle.ems.core.user.service.UserService
/**
* @author chaostone
*/
/** Web service exposing the list of webapps visible to a given user. */
class AppWS(userService: UserService, entityDao: EntityDao) extends ActionSupport with EntitySupport[User] {

  var domainService: DomainService = _

  /** Lists the enabled webapps of the current domain that the user may access,
    * either through a role-based function permission or by being a root user of
    * the app. An optional request parameter `q` filters the apps by title.
    * Unknown user codes yield an empty result.
    */
  @response(cacheable = true)
  @mapping("{userCode}")
  def index(@param("userCode") userCode: String): collection.Seq[Properties] = {
    userService.get(userCode) match {
      case None => Seq.empty
      case Some(user) =>
        val domain = domainService.getDomain
        // Apps reachable through function permissions of the user's roles.
        val permittedQuery = OqlBuilder.from[App](classOf[FuncPermission].getName, "fp")
          .join("fp.role.members", "m")
          .where("m.user=:user and m.member=true", user)
          .where("fp.resource.app.enabled=true")
          .where("fp.resource.app.domain=:domain", domain)
          .where(s"fp.resource.app.appType.name='${AppType.Webapp}'")
          .select("distinct fp.resource.app").cacheable()
        // Apps on which the user is registered as a root user.
        val rootQuery = OqlBuilder.from(classOf[Root], "root")
          .where("root.app.domain=:domain", domain)
          .where(s"root.user=:user and root.app.enabled=true and root.app.appType.name='${AppType.Webapp}'", user)
          .cacheable()
        val accessible = Collections.newSet[App]
        accessible ++= entityDao.search(permittedQuery)
        accessible ++= entityDao.search(rootQuery).map(_.app)
        val sortedApps = accessible.toBuffer.sorted
        // Optional title filter from the `q` request parameter.
        val matched = get("q") match {
          case Some(q) => sortedApps.filter(_.title.contains(q))
          case None    => sortedApps
        }
        matched.map { app =>
          val p = new Properties(app, "id", "name", "title", "base", "url", "logoUrl", "navStyle")
          p.add("group", app.group, "id", "name", "title")
          p
        }
    }
  }
}
| beangle/ems | service/src/main/scala/org/beangle/ems/ws/user/AppWS.scala | Scala | lgpl-3.0 | 2,991 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.frontend.logicalplan
import slamdata.Predef._
import quasar._, RenderTree.ops._
import quasar.common.{JoinType, SortDir}
import quasar.common.data.Data
import quasar.contrib.pathy.{FPath, refineTypeAbs}
import quasar.contrib.shapeless._
import quasar.fp._
import quasar.fp.binder._
import quasar.time.TemporalPart
import scala.Symbol
import scala.Predef.$conforms
import matryoshka._
import matryoshka.implicits._
import scalaz._, Scalaz._
import shapeless.{Nat, Sized}
import pathy.Path.posixCodec
// Root of the logical-plan AST; one node layer parameterized by the child type `A`.
sealed abstract class LogicalPlan[A] extends Product with Serializable {
  // TODO this should be removed, but usage of `==` is so pervasive in
  // external dependencies (and scalac) that removal may never be possible
  // NOTE(review): `hashCode` is not overridden alongside `equals`, so plans equal
  // under this method may hash differently; the `case lp: LogicalPlan[A]` match is
  // also unchecked due to erasure, and children are compared with universal equality.
  override def equals(that: scala.Any): Boolean = that match {
    case lp: LogicalPlan[A] => LogicalPlan.equal(Equal.equalA[A]).equal(this, lp)
    case _ => false
  }
}
// Constructors of the `LogicalPlan` ADT.

/** Reads data from the given file path. */
final case class Read[A](path: FPath) extends LogicalPlan[A]
/** A literal data value. */
final case class Constant[A](data: Data) extends LogicalPlan[A]
/** Application of a standard-library function to `N` argument subplans. */
final case class Invoke[N <: Nat, A](func: GenericFunc[N], values: Func.Input[A, N])
    extends LogicalPlan[A]
// TODO we create a custom `unapply` to bypass a scalac pattern matching bug
// https://issues.scala-lang.org/browse/SI-5900
object InvokeUnapply {
  def unapply[N <: Nat, A](in: Invoke[N, A])
      : Some[(GenericFunc[N], Func.Input[A, N])] =
    Some((in.func, in.values))
}
/** Reference to one side of a join by name. */
final case class JoinSideName[A](name: Symbol) extends LogicalPlan[A]
// NB: `JoinCondition` is payload carried by `Join`; it is not itself a plan node.
final case class JoinCondition[A](leftName: Symbol, rightName: Symbol, value: A)
/** Join of two subplans under the given join type and condition. */
final case class Join[A](left: A, right: A, tpe: JoinType, condition: JoinCondition[A])
    extends LogicalPlan[A]
/** A free variable, resolved against an enclosing `Let` binding. */
final case class Free[A](name: Symbol) extends LogicalPlan[A]
/** Binds `form` to `let` within `in`. */
final case class Let[A](let: Symbol, form: A, in: A) extends LogicalPlan[A]
/** Sorts `src` by the given (expression, direction) keys. */
final case class Sort[A](src: A, order: NonEmptyList[(A, SortDir)])
    extends LogicalPlan[A]
/** Truncates a temporal value in `src` to the given part. */
final case class TemporalTrunc[A](part: TemporalPart, src: A) extends LogicalPlan[A]
object LogicalPlan {
  import quasar.std.StdLib._
  // Traverse instance: rebuilds each node while threading an applicative
  // effect through its children (also supplies map/foldMap/foldRight).
  implicit val traverse: Traverse[LogicalPlan] =
    new Traverse[LogicalPlan] {
      def traverseImpl[G[_], A, B](
        fa: LogicalPlan[A])(
        f: A => G[B])(
        implicit G: Applicative[G]):
          G[LogicalPlan[B]] =
        fa match {
          case Read(coll) => G.point(Read(coll))
          case Constant(data) => G.point(Constant(data))
          case Invoke(func, values) => values.traverse(f).map(Invoke(func, _))
          case JoinSideName(v) => G.point(JoinSideName(v))
          case Join(l, r, tpe, JoinCondition(lName, rName, cond)) =>
            (f(l) ⊛ f(r) ⊛ f(cond))((v1, v2, v3) => Join(v1, v2, tpe, JoinCondition(lName, rName, v3)))
          case Free(v) => G.point(Free(v))
          case Let(ident, form, in) => (f(form) ⊛ f(in))(Let(ident, _, _))
          case Sort(src, ords) =>
            (f(src) ⊛ ords.traverse { case (a, d) => f(a) strengthR d })(Sort(_, _))
          case TemporalTrunc(part, src) => f(src) ∘ (TemporalTrunc(part, _))
        }
      // Pure structural map over the children; mirrors traverseImpl without effects.
      override def map[A, B](v: LogicalPlan[A])(f: A => B): LogicalPlan[B] =
        v match {
          case Read(coll) => Read(coll)
          case Constant(data) => Constant(data)
          case Invoke(func, values) => Invoke(func, values.map(f))
          case JoinSideName(v) => JoinSideName(v)
          case Join(l, r, tpe, JoinCondition(lName, rName, cond)) =>
            Join(f(l), f(r), tpe, JoinCondition(lName, rName, f(cond)))
          case Free(v) => Free(v)
          case Let(ident, form, in) => Let(ident, f(form), f(in))
          case Sort(src, ords) => Sort(f(src), ords map (f.first))
          case TemporalTrunc(part, src) => TemporalTrunc(part, f(src))
        }
      // Combines the children monoidally; leaf nodes contribute B.zero.
      override def foldMap[A, B](fa: LogicalPlan[A])(f: A => B)(implicit B: Monoid[B]): B =
        fa match {
          case Read(_) => B.zero
          case Constant(_) => B.zero
          case Invoke(_, values) => values.foldMap(f)
          case JoinSideName(_) => B.zero
          case Join(l, r, _, JoinCondition(_, _, v)) =>
            f(l) ⊹ f(r) ⊹ f(v)
          case Free(_) => B.zero
          case Let(_, form, in) => f(form) ⊹ f(in)
          case Sort(src, ords) => f(src) ⊹ ords.foldMap { case (a, _) => f(a) }
          case TemporalTrunc(_, src) => f(src)
        }
      // Right fold over the children in declaration order (lazy in the accumulator).
      override def foldRight[A, B](fa: LogicalPlan[A], z: => B)(f: (A, => B) => B): B =
        fa match {
          case Read(_) => z
          case Constant(_) => z
          case Invoke(_, values) => values.foldRight(z)(f)
          case JoinSideName(_) => z
          case Join(l, r, _, JoinCondition(_, _, v)) =>
            f(l, f(r, f(v, z)))
          case Free(_) => z
          case Let(ident, form, in) => f(form, f(in, z))
          case Sort(src, ords) => f(src, ords.foldRight(z) { case ((a, _), b) => f(a, b) })
          case TemporalTrunc(_, src) => f(src, z)
        }
    }
  // Show instance: renders one node layer as a Cord, given a Show for children.
  implicit val show: Delay[Show, LogicalPlan] =
    new Delay[Show, LogicalPlan] {
      def apply[A](sa: Show[A]): Show[LogicalPlan[A]] = {
        implicit val showA: Show[A] = sa
        Show.show {
          case Read(v) =>
            Cord("Read(") ++ v.show ++ Cord(")")
          case Constant(v) =>
            Cord("Constant(") ++ v.show ++ Cord(")")
          case Invoke(func, values) =>
            // TODO remove trailing comma
            func.show ++ Cord("(") ++
              values.foldLeft(Cord("")){ case (acc, v) => acc ++ sa.show(v) ++ Cord(", ") } ++ Cord(")")
          case JoinSideName(n) =>
            Cord("JoinSideName(") ++ Cord(n.toString) ++ Cord(")")
          case Join(l, r, tpe, JoinCondition(lName, rName, v)) =>
            Cord("Join(") ++
              l.show ++ Cord(", ") ++
              r.show ++ Cord(", ") ++
              tpe.show ++ Cord(", ") ++
              Cord(lName.toString) ++ Cord(", ") ++
              Cord(rName.toString) ++ Cord(", ") ++
              v.show ++ Cord(")")
          case Free(n) =>
            Cord("Free(") ++ Cord(n.toString) ++ Cord(")")
          case Let(n, f, b) =>
            Cord("Let(") ++ Cord(n.toString) ++ Cord(",") ++
              sa.show(f) ++ Cord(",") ++ sa.show(b) ++ Cord(")")
          case Sort(src, ords) =>
            Cord("Sort(") ++ sa.show(src) ++ Cord(", ") ++ ords.show ++ Cord(")")
          case TemporalTrunc(part, src) =>
            Cord("TemporalTrunc(") ++ part.show ++ Cord(",") ++ sa.show(src) ++ Cord(")")
        }
      }
    }
  // RenderTree instance: pretty tree rendering, with special-cased compact
  // output for string constants and simple MapProject invocations.
  implicit val renderTree: Delay[RenderTree, LogicalPlan] =
    new Delay[RenderTree, LogicalPlan] {
      def apply[A](ra: RenderTree[A]): RenderTree[LogicalPlan[A]] =
        new RenderTree[LogicalPlan[A]] {
          val nodeType = "LogicalPlan" :: Nil
          def render(v: LogicalPlan[A]) = v match {
            // NB: a couple of special cases for readability
            case Constant(Data.Str(str)) => Terminal("Str" :: "Constant" :: nodeType, Some(str.shows))
            case InvokeUnapply(func @ structural.MapProject, Sized(expr, name)) =>
              (ra.render(expr), ra.render(name)) match {
                case (exprR @ RenderedTree(_, Some(_), Nil), RenderedTree(_, Some(n), Nil)) =>
                  Terminal("MapProject" :: nodeType, Some(exprR.shows + "{" + n + "}"))
                case (x, n) => NonTerminal("Invoke" :: nodeType, Some(func.shows), x :: n :: Nil)
              }
            case Read(file) => Terminal("Read" :: nodeType, Some(posixCodec.printPath(file)))
            case Constant(data) => Terminal("Constant" :: nodeType, Some(data.shows))
            case InvokeUnapply(func, args) => NonTerminal("Invoke" :: nodeType, Some(func.shows), args.unsized.map(ra.render))
            case JoinSideName(name) => Terminal("JoinSideName" :: nodeType, Some(name.toString))
            case Join(l, r, t, JoinCondition(lName, rName, c)) =>
              NonTerminal("Join" :: nodeType, None, List(
                ra.render(l),
                ra.render(r),
                RenderTree[JoinType].render(t),
                Terminal("LeftSide" :: nodeType, Some(lName.toString)),
                Terminal("RightSide" :: nodeType, Some(rName.toString)),
                ra.render(c)))
            case Free(name) => Terminal("Free" :: nodeType, Some(name.toString))
            case Let(ident, form, body) => NonTerminal("Let" :: nodeType, Some(ident.toString), List(ra.render(form), ra.render(body)))
            case Sort(src, ords) =>
              NonTerminal("Sort" :: nodeType, None,
                (ra.render(src) :: ords.list.flatMap {
                  case (a, d) => IList(ra.render(a), d.render)
                }).toList)
            case TemporalTrunc(part, src) =>
              NonTerminal("TemporalTrunc" :: nodeType, Some(part.shows), List(ra.render(src)))
          }
        }
    }
  // Equal instance: structural equality of one layer, given Equal for children.
  // Read paths are compared after refineTypeAbs; functions still use universal `==`.
  @SuppressWarnings(Array("org.wartremover.warts.Equals"))
  implicit val equal: Delay[Equal, LogicalPlan] =
    new Delay[Equal, LogicalPlan] {
      def apply[A](fa: Equal[A]) = {
        implicit val eqA: Equal[A] = fa
        Equal.equal {
          case (Read(n1), Read(n2)) => refineTypeAbs(n1) ≟ refineTypeAbs(n2)
          case (Constant(d1), Constant(d2)) => d1 ≟ d2
          case (InvokeUnapply(f1, v1), InvokeUnapply(f2, v2)) => f1 == f2 && v1.unsized ≟ v2.unsized // TODO impl `scalaz.Equal` for `GenericFunc`
          case (JoinSideName(n1), JoinSideName(n2)) => n1 ≟ n2
          case (Join(l1, r1, t1, JoinCondition(lName1, rName1, c1)), Join(l2, r2, t2, JoinCondition(lName2, rName2, c2))) =>
            l1 ≟ l2 && r1 ≟ r2 && t1 ≟ t2 && lName1 ≟ lName2 && rName1 ≟ rName2 && c1 ≟ c2
          case (Free(n1), Free(n2)) => n1 ≟ n2
          case (Let(ident1, form1, in1), Let(ident2, form2, in2)) =>
            ident1 ≟ ident2 && form1 ≟ form2 && in1 ≟ in2
          case (Sort(s1, o1), Sort(s2, o2)) => s1 ≟ s2 && o1 ≟ o2
          case (TemporalTrunc(part1, src1), TemporalTrunc(part2, src2)) =>
            part1 ≟ part2 && src1 ≟ src2
          case _ => false
        }
      }
    }
  // Unzip instance: splits a layer of pairs into a pair of layers via two maps.
  implicit val unzip: Unzip[LogicalPlan] = new Unzip[LogicalPlan] {
    def unzip[A, B](f: LogicalPlan[(A, B)]) = (f.map(_._1), f.map(_._2))
  }
  // Binder instance: `Let` introduces a binding; `Free` looks one up by symbol.
  implicit val binder: Binder[LogicalPlan] = new Binder[LogicalPlan] {
    type G[A] = Map[Symbol, A]
    val G = Traverse[G]
    def initial[A] = Map[Symbol, A]()
    def bindings[T, A]
      (t: LogicalPlan[T], b: G[A])
      (f: LogicalPlan[T] => A)
      (implicit T: Recursive.Aux[T, LogicalPlan])
        : G[A] =
      t match {
        case Let(ident, form, _) => b + (ident -> f(form.project))
        case _ => b
      }
    def subst[T, A]
      (t: LogicalPlan[T], b: G[A])
      (implicit T: Recursive.Aux[T, LogicalPlan])
        : Option[A] =
      t match {
        case Free(symbol) => b.get(symbol)
        case _ => None
      }
  }
}
| slamdata/slamengine | frontend/src/main/scala/quasar/frontend/logicalplan/LogicalPlan.scala | Scala | apache-2.0 | 11,831 |
package chapter6
// Placeholder for the candy-machine state exercise ("Functional Programming
// in Scala", chapter 6); intentionally empty.
object CandyMachine {
  //TODO: Implement this later
}
package org.orbeon.dom.tree
import java.{lang ⇒ jl, util ⇒ ju}
import org.orbeon.dom._
/** Shared implementation for DOM nodes that contain children (documents and elements). */
abstract class AbstractBranch extends AbstractNode with Branch {
  // Backing child list supplied by the concrete subclass.
  protected def internalContent: ju.List[Node]
  override def hasContent: Boolean = nodeCount > 0
  // Concatenation of the text of all Text children (non-Text children contribute "").
  override def getText: String = {
    val list = internalContent
    if (list ne null) {
      val size = list.size
      if (size >= 1) {
        val first = list.get(0)
        val firstText = getContentAsText(first)
        if (size == 1) {
          // Common single-child case: no StringBuilder needed.
          return firstText
        } else {
          val buffer = new jl.StringBuilder(firstText)
          for (i ← 1 until size) {
            val node = list.get(i)
            buffer.append(getContentAsText(node))
          }
          return buffer.toString
        }
      }
    }
    ""
  }
  // Return the text value of the Text node.
  private def getContentAsText(content: Node): String =
    content match {
      case _: Text ⇒ content.getText
      case _ ⇒ ""
    }
  // Like getText, but with runs of whitespace collapsed to single spaces
  // (StringTokenizer's default delimiters) and leading/trailing whitespace dropped.
  def getTextTrim: String = {
    val text = getText
    val textContent = new jl.StringBuilder
    val tokenizer = new ju.StringTokenizer(text)
    while (tokenizer.hasMoreTokens) {
      val str = tokenizer.nextToken()
      textContent.append(str)
      if (tokenizer.hasMoreTokens) {
        textContent.append(" ")
      }
    }
    textContent.toString
  }
  // Creates an element with the given name, appends it to this branch, and returns it.
  def addElement(name: String): Element = {
    val node = DocumentFactory.createElement(name)
    add(node)
    node
  }
  def addElement(qname: QName): Element = {
    val node = DocumentFactory.createElement(qname)
    add(node)
    node
  }
  def addElement(name: String, prefix: String, uri: String): Element = {
    val namespace = Namespace(prefix, uri)
    val qName = QName(name, namespace)
    addElement(qName)
  }
  // Dispatches to the typed add/remove overloads; other node types are rejected.
  def add(node: Node) = node match {
    case n: Element ⇒ add(n)
    case n: Comment ⇒ add(n)
    case n: ProcessingInstruction ⇒ add(n)
    case n ⇒ invalidNodeTypeException(n)
  }
  def remove(node: Node): Boolean = node match {
    case n: Element ⇒ remove(n)
    case n: Comment ⇒ remove(n)
    case n: ProcessingInstruction ⇒ remove(n)
    case n ⇒ invalidNodeTypeException(n)
  }
  def add(comment: Comment) : Unit = addNode(comment)
  def add(element: Element) : Unit
  def add(pi: ProcessingInstruction) : Unit = addNode(pi)
  def remove(comment: Comment) : Boolean = removeNode(comment)
  def remove(element: Element) : Boolean
  def remove(pi: ProcessingInstruction) : Boolean = removeNode(pi)
  // Appends deep copies of the other branch's children to this branch.
  def appendContent(branch: Branch): Unit = {
    for (i ← 0 until branch.nodeCount) {
      val node = branch.node(i)
      add(node.deepCopy)
    }
  }
  def node(index: Int): Node =
    internalContent.get(index) match {
      case node: Node ⇒ node
      case _ ⇒ null
    }
  def nodeCount: Int = internalContent.size
  def nodeIterator: ju.Iterator[Node] = internalContent.iterator
  // Low-level child-list mutation hooks implemented by subclasses.
  protected def addNode(node: Node): Unit
  protected def addNode(index: Int, node: Node): Unit
  protected def removeNode(node: Node): Boolean
  /**
   * Called when a new child node has been added to me to allow any parent
   * relationships to be created or events to be fired.
   */
  protected[dom] def childAdded(node: Node): Unit
  /**
   * Called when a child node has been removed to allow any parent
   * relationships to be deleted or events to be fired.
   */
  protected[dom] def childRemoved(node: Node): Unit
  private def invalidNodeTypeException(node: Node) =
    throw new IllegalAddException("Invalid node type. Cannot add node: " + node + " to this branch: " + this)
}
| brunobuzzi/orbeon-forms | dom/src/main/scala/org/orbeon/dom/tree/AbstractBranch.scala | Scala | lgpl-2.1 | 3,770 |
package org.jetbrains.plugins.scala.debugger.evaluation
import java.io.File
import com.intellij.openapi.extensions.ExtensionPointName
import com.intellij.openapi.module.Module
/**
* Nikolay.Tropin
* 2014-11-27
*/
/** Companion giving access to registered evaluator compile-helper extensions. */
object EvaluatorCompileHelper {

  /** Extension point under which compile helpers are registered. */
  val EP_NAME = ExtensionPointName.create[EvaluatorCompileHelper]("org.intellij.scala.evaluatorCompileHelper")

  /** The built-in compile server is only required when no helper extension is registered. */
  def needCompileServer: Boolean = {
    val registeredHelpers = EP_NAME.getExtensions
    registeredHelpers.isEmpty
  }
}
// Extension interface for compiling evaluator-generated sources.
trait EvaluatorCompileHelper {
  /**
   * Compiles the given source text in the context of `module`.
   *
   * @param fileText full text of the source file to compile
   * @param module   module supplying the compilation context
   * @return Array of all classfiles generated from a given source with corresponding dot-separated full qualified names
   * (like "java.lang.Object" or "scala.None$")
   */
  def compile(fileText: String, module: Module): Array[(File, String)]
}
package org.jetbrains.plugins.scala.tasty
import java.util.UUID
// See dotty.tools.tasty.TastyHeader
/** Metadata decoded from the header section of a TASTy file. */
private case class TastyHeader(
  uuid: UUID,
  majorVersion: Int,
  minorVersion: Int,
  experimentalVersion: Int,
  toolingVersion: String
)
| JetBrains/intellij-scala | tasty/runtime/src/org/jetbrains/plugins/scala/tasty/TastyHeader.scala | Scala | apache-2.0 | 359 |
package edu.colorado.hopper.synthesis
import com.ibm.wala.classLoader.{IClass, IClassLoader, IField, IMethod}
import com.ibm.wala.ipa.cha.IClassHierarchy
import com.ibm.wala.types.{Selector, TypeName, TypeReference}
import com.ibm.wala.types.annotations.Annotation
import com.ibm.wala.util.strings.Atom
import edu.colorado.walautil.ClassUtil
import scala.collection.JavaConversions._
trait DummyIClass extends IClass
object DummyIClass {
  /** @return - a dummy subclass of @param toSubclass that has been added to the class hierarchy @param cha */
  def findOrCreateDummySubclass(toSubclass : IClass, cha : IClassHierarchy) : DummyIClass = {
    // TODO: would like to cache, but it is causing problems
    //val dummyClass = dummyImpls.getOrElseUpdate(toSubclass, new DummyIClassImpl(toSubclass, cha))
    // NOTE(review): despite the name, this always creates a fresh dummy (caching is
    // disabled above); the println below looks like leftover debug output — consider
    // routing through a logger.
    val dummyClass = new DummyIClassImpl(toSubclass, cha)
    println("Creating dummyImpl " + dummyClass)
    // IMPORTANT! otherwise lookups of the class will fail. Also, it's important to do this here rather than in DummyClassImpl
    // because if we reuse the dummy impl map across analysis of multiple apps that use the same interface type (as we do in
    // the regression tests), then we need to be careful that the dummy subclass is always in the class hierarchy for app under analysis
    cha.addClass(dummyClass)
    dummyClass
  }
}
/** Concrete dummy subclass of an interface/abstract class `clazz`; abstract methods
  * are replaced with `DummyIMethod` stubs, everything else delegates to `clazz`. */
private class DummyIClassImpl(clazz : IClass, cha : IClassHierarchy) extends DummyIClass {
  require(clazz.isInterface() || clazz.isAbstract()) // for now, only expecting this to be called on interface/abstract classes, though could easily be extended
  // avoid NPE in the case that clazz lives in default package (WALA returns null for this)
  val pkg = clazz.getName().getPackage()
  val pkgName = if (pkg != null) pkg.toString() else "DUMMY"
  // Type of the dummy: same package/classloader as clazz, name prefixed with "DUMMY_".
  val dummyClassType = TypeReference.findOrCreateClass(clazz.getClassLoader().getReference(), pkgName,
                                                      "DUMMY_" + clazz.getName().getClassName().toString())
  // Split clazz's methods: abstract ones get DummyIMethod stubs (these become our
  // declared methods); concrete ones are inherited as-is.
  val (allMethods, declaredMethods) = clazz.getAllMethods().foldLeft ((List.empty[IMethod], List.empty[IMethod])) ((pair, m) => {
    val (allMethods, declaredMethods) = pair
    if (m.isAbstract()) {
      val newM = new DummyIMethod(m, this, cha)
      (newM :: allMethods, newM :: declaredMethods)
    } else (m :: allMethods, declaredMethods)
  })
  // Selector -> method index used by getMethod lookups.
  val methodMap = allMethods.foldLeft (Map.empty[Selector,IMethod]) ((map, m) => map + (m.getSelector() -> m))
  // NOTE(review): registering `this` in the hierarchy during construction leaks a
  // partially-constructed instance; the companion also re-adds it after construction.
  cha.addClass(this)
  // meaningful overrides
  override def getName() : TypeName = dummyClassType.getName()
  override def getReference() : TypeReference = dummyClassType
  override def isInterface() : Boolean = false
  override def isAbstract() : Boolean = false
  override def getSuperclass() : IClass = clazz
  override def getSourceFileName() : String = null
  override def getSource() = null
  override def getDirectInterfaces() : java.util.Collection[_ <: IClass] = java.util.Collections.singleton(clazz)
  override def getAllImplementedInterfaces : java.util.Collection[IClass] = {
    val l = new java.util.LinkedList(clazz.getAllImplementedInterfaces())
    l.add(clazz)
    l
  }
  override def getDeclaredMethods() : java.util.Collection[IMethod] = declaredMethods
  override def getMethod(selector : Selector) : IMethod = methodMap(selector)
  override def getAllMethods() : java.util.Collection[IMethod] = allMethods
  // all other methods delegate to superclass
  override def getClassLoader() : IClassLoader = clazz.getClassLoader()
  override def isPublic() : Boolean = clazz.isPublic()
  override def isPrivate() : Boolean = clazz.isPrivate()
  override def getModifiers() : Int = clazz.getModifiers()
  override def getField(name : Atom) : IField = clazz.getField(name)
  override def getField(name : Atom, typ : TypeName) : IField = clazz.getField(name, typ)
  override def getClassInitializer() : IMethod = clazz.getClassInitializer()
  override def isArrayClass() : Boolean = clazz.isArrayClass()
  override def getAllInstanceFields() : java.util.Collection[IField] = clazz.getAllInstanceFields()
  override def getAllStaticFields() : java.util.Collection[IField] = clazz.getAllStaticFields()
  override def getAllFields() : java.util.Collection[IField] = clazz.getAllFields()
  override def getDeclaredInstanceFields() : java.util.Collection[IField] = clazz.getDeclaredInstanceFields()
  override def getDeclaredStaticFields() : java.util.Collection[IField] = clazz.getDeclaredStaticFields()
  override def isReferenceType() : Boolean = clazz.isReferenceType()
  override def getAnnotations() : java.util.Collection[Annotation] = clazz.getAnnotations()
  override def getClassHierarchy() : IClassHierarchy = clazz.getClassHierarchy()
  override def toString : String = "DUMMY_" + ClassUtil.pretty(clazz)
}
package org.velvia.filo.vectors
import org.scalatest.{FunSpec, Matchers}
import org.velvia.filo.{FiloVector, GrowableVector, ZeroCopyUTF8String, BuilderEncoder}
/** Unit tests for the UTF8 string vector family: flexible appending vectors,
  * fixed-max-length vectors, optimization heuristics, and dictionary encoding. */
class UTF8VectorTest extends FunSpec with Matchers {
  import ZeroCopyUTF8String._
  import BuilderEncoder._
  // Flexible appending UTF8 vectors: NA handling, freezing, round-tripping, growth.
  describe("UTF8Vector") {
    it("should be able to append all NAs") {
      val utf8vect = UTF8Vector.flexibleAppending(5, 1024)
      utf8vect.addNA()
      utf8vect.addNA()
      utf8vect.length should equal (2)
      utf8vect.frozenSize should equal (12)
      utf8vect.isAvailable(0) should equal (false)
      utf8vect.isAvailable(1) should equal (false)
      // should be able to apply read back NA values and get back empty string
      utf8vect(0).length should equal (0)
      utf8vect.isAllNA should equal (true)
      utf8vect.noNAs should equal (false)
    }
    it("should be able to append mix of strings and NAs") {
      val strs = Seq("apple", "", "Charlie").map(ZeroCopyUTF8String.apply)
      val utf8vect = UTF8Vector.flexibleAppending(5, 1024)
      utf8vect.addNA()
      strs.foreach(utf8vect.addData)
      utf8vect.addNA()
      utf8vect.length should equal (5)
      utf8vect.toSeq should equal (strs)
      utf8vect.isAllNA should equal (false)
      utf8vect.noNAs should equal (false)
      utf8vect.isAvailable(0) should equal (false)
      utf8vect.isAvailable(1) should equal (true)
      utf8vect.isAvailable(2) should equal (true)
      utf8vect.numBytes should equal (36)
      utf8vect.frozenSize should equal (36)
    }
    it("should be able to calculate min, max # bytes for all elements") {
      val utf8vect = UTF8Vector.flexibleAppending(5, 1024)
      Seq("apple", "zoe", "bananas").foreach(s => utf8vect.addData(ZeroCopyUTF8String(s)))
      utf8vect.addNA() // NA or empty string should not affect min/max len
      val inner = utf8vect.asInstanceOf[GrowableVector[_]].inner.asInstanceOf[UTF8AppendableVector]
      inner.minMaxStrLen should equal ((3, 7))
      val utf8vect2 = UTF8Vector.flexibleAppending(5, 1024)
      Seq("apple", "", "bananas").foreach(s => utf8vect2.addData(ZeroCopyUTF8String(s)))
      utf8vect2.noNAs should equal (true)
      val inner2 = utf8vect2.asInstanceOf[GrowableVector[_]].inner.asInstanceOf[UTF8AppendableVector]
      inner2.minMaxStrLen should equal ((0, 7))
    }
    it("should be able to freeze and minimize bytes used") {
      val strs = Seq("apple", "zoe", "bananas").map(ZeroCopyUTF8String.apply)
      val utf8vect = UTF8Vector.flexibleAppending(10, 1024)
      strs.foreach(utf8vect.addData)
      utf8vect.length should equal (3)
      utf8vect.noNAs should equal (true)
      utf8vect.frozenSize should equal (4 + 12 + 5 + 3 + 7)
      val frozen = utf8vect.freeze()
      frozen.length should equal (3)
      frozen.toSeq should equal (strs)
      frozen.numBytes should equal (4 + 12 + 5 + 3 + 7)
    }
    it("should be able toFiloBuffer and parse back with FiloVector") {
      val strs = Seq("apple", "zoe", "bananas").map(ZeroCopyUTF8String.apply)
      val utf8vect = UTF8Vector.flexibleAppending(strs.length, 1024)
      strs.foreach(utf8vect.addData)
      val buffer = utf8vect.toFiloBuffer
      val readVect = FiloVector[ZeroCopyUTF8String](buffer)
      readVect.toSeq should equal (strs)
      val buffer2 = UTF8Vector(strs).optimize().toFiloBuffer
      val readVect2 = FiloVector[ZeroCopyUTF8String](buffer2)
      readVect2.toSeq should equal (strs)
    }
    it("should be able to grow the UTF8Vector if run out of initial maxBytes") {
      // Purposefully test when offsets grow beyond 32k
      val strs = (1 to 10000).map(i => ZeroCopyUTF8String("string" + i))
      val utf8vect = UTF8Vector.flexibleAppending(50, 16384)
      strs.foreach(utf8vect.addData)
      val buffer = utf8vect.toFiloBuffer
      val readVect = FiloVector[ZeroCopyUTF8String](buffer)
      readVect.toSeq should equal (strs)
      val vect2 = UTF8Vector.appendingVector(50)
      vect2 shouldBe a[GrowableVector[_]]
      vect2.asInstanceOf[GrowableVector[_]].inner shouldBe a[UTF8PtrAppendable]
      strs.foreach(vect2.addData)
      val readVect2 = FiloVector[ZeroCopyUTF8String](vect2.optimize().toFiloBuffer)
      readVect2.toSeq should equal (strs)
    }
  }
  // Fixed-width vectors: every element must fit within the declared max length.
  describe("FixedMaxUTF8Vector") {
    it("should throw if try to append item longer than max") {
      val cb = UTF8Vector.fixedMaxAppending(5, 4)
      // OK: 3 chars, or 4 chars
      cb.addData(ZeroCopyUTF8String("zoe"))
      cb.addData(ZeroCopyUTF8String("card"))
      cb.length should be (2)
      cb.frozenSize should be (1 + 5 + 5)
      // Not OK, will throw: 5 chars
      intercept[IllegalArgumentException] {
        cb.addData(ZeroCopyUTF8String("money"))
      }
    }
    it("should add multiple items, create buffer and read it back") {
      val strs = Seq("apple", "zoe", "jack").map(ZeroCopyUTF8String.apply)
      val cb = UTF8Vector.fixedMaxAppending(3, 5)
      strs.foreach(cb.addData)
      val buffer = cb.toFiloBuffer
      val readVect = FiloVector[ZeroCopyUTF8String](buffer)
      readVect.toSeq should equal (strs)
    }
    it("should handle NA items as well as empty strings") {
      val cb = UTF8Vector.fixedMaxAppending(3, 4)
      cb.addData("zoe".utf8)
      cb.addNA()
      cb.addData("".utf8)
      val buffer = cb.toFiloBuffer
      val readVect = FiloVector[ZeroCopyUTF8String](buffer)
      readVect(0) should equal ("zoe".utf8)
      readVect.isAvailable(1) should equal (false)
      readVect.isAvailable(2) should equal (true)
      readVect(1) should equal ("".utf8)
      readVect(2) should equal ("".utf8)
    }
  }
  // optimize() heuristics: picks fixed-max, flexible, or const representation.
  describe("UTF8PtrAppendable.optimize") {
    it("should produce a FixedMaxUTF8Vector if strings mostly same length") {
      val strs = Seq("apple", "zoe", "jack").map(ZeroCopyUTF8String.apply)
      val buffer = UTF8Vector(strs).optimize().toFiloBuffer
      val reader = FiloVector[ZeroCopyUTF8String](buffer)
      reader shouldBe a [FixedMaxUTF8Vector]
      reader.toSeq should equal (strs)
    }
    it("should produce a UTF8Vector if one string much longer") {
      val strs = Seq("apple", "zoe", "jacksonhole, wyoming").map(ZeroCopyUTF8String.apply)
      val buffer = UTF8Vector(strs).optimize().toFiloBuffer
      val reader = FiloVector[ZeroCopyUTF8String](buffer)
      reader shouldBe a [UTF8Vector]
      reader.toSeq should equal (strs)
    }
    it("should produce a UTF8ConstVector if all strings the same") {
      val strs = Seq.fill(50)("apple").map(ZeroCopyUTF8String.apply)
      val buffer = UTF8Vector(strs).optimize().toFiloBuffer
      val reader = FiloVector[ZeroCopyUTF8String](buffer)
      reader shouldBe a [UTF8ConstVector]
      reader.toSeq should equal (strs)
    }
    it("should not produce a FixedMaxUTF8Vector if longest str >= 255 chars") {
      val str1 = ZeroCopyUTF8String("apple" * 51)
      val str2 = ZeroCopyUTF8String("beach" * 52)
      val strs = Seq(str1, str2, str2, str1, str1)
      val buffer = UTF8Vector(strs).optimize().toFiloBuffer
      val reader = FiloVector[ZeroCopyUTF8String](buffer)
      reader shouldBe a [UTF8Vector]
      reader.toSeq should equal (strs)
    }
    it("should handle adding a NULL using addData()") {
      val appender = UTF8Vector.appendingVector(5)
      val rawData = Seq("apple", "zoe", "jacksonhole", "wyoming").map(_.utf8) :+ null
      rawData.foreach(appender.addData)
      appender.length shouldEqual rawData.length
      // Now, add one more which will cause vector to grow
      appender.addData("beelzeebub".utf8)
      val buffer = appender.optimize().toFiloBuffer
      val reader = FiloVector[ZeroCopyUTF8String](buffer)
      reader shouldBe a [UTF8Vector]
      reader.length shouldEqual 6
      reader.isAvailable(4) shouldEqual false
    }
  }
  // Dictionary encoding: applied when enough strings repeat to pay for the dict.
  describe("DictUTF8Vector") {
    it("shouldMakeDict when source strings are mostly repeated") {
      val strs = Seq("apple", "zoe", "grape").permutations.flatten.toList.map(ZeroCopyUTF8String.apply)
      val dictInfo = DictUTF8Vector.shouldMakeDict(UTF8Vector(strs), samplingRate=0.5)
      dictInfo should be ('defined)
      dictInfo.get.codeMap.size should equal (3)
      dictInfo.get.dictStrings.length should equal (4)
    }
    it("should not makeDict when source strings are all unique") {
      val strs = (0 to 9).map(_.toString).map(ZeroCopyUTF8String.apply)
      val dictInfo = DictUTF8Vector.shouldMakeDict(UTF8Vector(strs))
      dictInfo should be ('empty)
    }
    it("should optimize UTF8Vector to DictVector with NAs and read it back") {
      val strs = ZeroCopyUTF8String.NA +:
                 Seq("apple", "zoe", "grape").permutations.flatten.toList.map(ZeroCopyUTF8String.apply)
      val appender = UTF8Vector.appendingVector(strs.length)
      strs.foreach(s => appender.addData(s))
      val buffer = appender.optimize(AutoDictString(samplingRate=0.5)).toFiloBuffer
      val reader = FiloVector[ZeroCopyUTF8String](buffer)
      reader shouldBe a [DictUTF8Vector]
      reader.length should equal (strs.length)
      reader.toSeq should equal (strs.drop(1))
      reader.isAvailable(0) should be (false)
      reader(0) should equal (ZeroCopyUTF8String(""))
    }
    // Negative byte values might not get converted to ints properly, leading
    // to an ArrayOutOfBoundsException.
    it("should ensure proper conversion when there are 128-255 unique strings") {
      val orig = (0 to 130).map(_.toString).map(ZeroCopyUTF8String.apply)
      val buffer = UTF8Vector(orig).optimize(AutoDictString(spaceThreshold = 1.1)).toFiloBuffer
      val binarySeq = FiloVector[ZeroCopyUTF8String](buffer)
      binarySeq shouldBe a [DictUTF8Vector]
      binarySeq.length should equal (orig.length)
      binarySeq.toSeq should equal (orig)
    }
  }
}
/*
Copyright (c) 2013-2016 Karol M. Stasiak
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package io.github.karols.units.benchmarks
import com.google.caliper.SimpleBenchmark
import io.github.karols.units._
import io.github.karols.units.SI._
import io.github.karols.units.arrays._
/** Caliper microbenchmark comparing `foreach` summation over a raw Long array
  * versus the unit-tagged `IntUArray[metre]` wrapper. */
class IntUArrayForeachBenchmark extends SimpleBenchmark {
  // Parallel fixtures: same 1000 squared values, with and without unit tagging.
  val array = new Array[Long](1000)
  val arrayW = new IntUArray[metre](1000);
  override def setUp (){
    var i = 0
    while (i < 1000) {
      array(i) = i*i
      arrayW(i) = (i*i).of[metre]
      i += 1
    }
  }
  override def tearDown(){}
  // Baseline: foreach-sum over the raw array, repeated 2*reps times.
  def timeRaw(reps: Int) = {
    var result = 0L
    for(x<-0 until 2*reps){
      array.foreach {
        result += _
      }
    }
    result
  }
  // Same workload over the unit-tagged array; result returned to defeat dead-code elimination.
  def timeWithUnits(reps: Int) = {
    var result = 0.of[metre]
    for(x<-0 until 2*reps){
      arrayW.foreach {
        result += _
      }
    }
    result.value
  }
}
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v2
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.box.retriever.BoxRetriever
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
/** CT600 v2 box RSQ2: an optional boolean answer with an optional default.
 *
 * When a ComputationsBoxRetriever is available, validity depends on CP287
 * (losses carried back):
 *  - an input value must NOT be supplied when CP287 > 0;
 *  - an input value IS required when CP287 is absent or not positive.
 * Retrievers without computations (e.g. charities) simply validate the box
 * as mandatory.
 *
 * Private predicates renamed from the misspelled UpperCamelCase originals
 * (CP287GreaterThenZeroAndHaveInputValue / CP287NotExistsAndNoInputValue)
 * to conventional lowerCamelCase; behaviour is unchanged.
 */
case class RSQ2(inputValue: Option[Boolean], defaultValue: Option[Boolean]) extends CtBoxIdentifier
  with CtOptionalBoolean with InputWithDefault[Boolean] with ValidatableBox[BoxRetriever] with Validators {

  override def validate(boxRetriever: BoxRetriever): Set[CtValidation] = {
    boxRetriever match {
      case compsRetriever: ComputationsBoxRetriever =>
        collectErrors(
          cannotExistIf(cp287GreaterThanZeroAndHaveInputValue(compsRetriever)),
          requiredIf(cp287NotExistsAndNoInputValue(compsRetriever))
        )
      case _ => validateAsMandatory(this) // Charities may not have Computations, but still need to validate as mandatory
    }
  }

  // True when CP287 holds a positive amount AND the user supplied a value.
  private def cp287GreaterThanZeroAndHaveInputValue(retriever: ComputationsBoxRetriever)() =
    retriever.cp287().value.exists(_ > 0) && inputValue.isDefined

  // True when CP287 is absent or non-positive AND no value was supplied.
  private def cp287NotExistsAndNoInputValue(retriever: ComputationsBoxRetriever)() =
    !retriever.cp287().value.exists(_ > 0) && inputValue.isEmpty
}
object RSQ2 {
  // Convenience constructor for a box that carries no default value.
  def apply(inputValue: Option[Boolean]): RSQ2 = RSQ2(inputValue, None)
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v2/RSQ2.scala | Scala | apache-2.0 | 1,876 |
package sbtbuildinfo
import sbt._, Keys._
import java.io.File
object BuildInfoPlugin extends AutoPlugin {
// Alias for entries accepted by buildInfoKeys; the existential keeps the key set heterogeneous.
type BuildInfoKey = BuildInfoKey.Entry[_]

// Enable automatically for any JVM project.
override def requires = plugins.JvmPlugin

// By default, generation is wired into the Compile configuration only;
// addBuildInfoToConfig (below) lets users target other configurations.
override def projectSettings: Seq[Def.Setting[_]] =
  buildInfoScopedSettings(Compile) ++ buildInfoDefaultSettings

// Names re-exported into every build definition that enables this plugin.
object autoImport extends BuildInfoKeys {
  val BuildInfoKey = sbtbuildinfo.BuildInfoKey
  type BuildInfoKey = sbtbuildinfo.BuildInfoKey
  val BuildInfoOption = sbtbuildinfo.BuildInfoOption
  type BuildInfoOption = sbtbuildinfo.BuildInfoOption
  val BuildInfoType = sbtbuildinfo.BuildInfoType
  type BuildInfoType = sbtbuildinfo.BuildInfoType
  val addBuildInfoToConfig = buildInfoScopedSettings _
  val buildInfoValues: TaskKey[Seq[BuildInfoResult]] =
    taskKey("BuildInfo keys/values/types for use in the sbt build")
}
import autoImport._
/** Reads, then increments, a persistent build counter stored in
 * `dir/buildinfo.properties`, and returns the value read BEFORE the
 * increment (0 when the file does not yet exist).
 *
 * Fix: the original leaked the FileInputStream and FileOutputStream
 * handles; both are now closed in `finally` blocks.
 *
 * @param dir       directory holding the properties file
 * @param increment amount added to the stored counter
 */
def buildNumberTask(dir: File, increment: Int): Int = {
  // Plain java.io.File resolution; behaviourally identical to sbt's `dir / name`.
  val file: File = new File(dir, "buildinfo.properties")
  val prop = new java.util.Properties

  // Load the current counter value, always releasing the stream.
  def readProp: Int = {
    val in = new java.io.FileInputStream(file)
    try {
      prop.load(in)
      prop.getProperty("buildnumber", "0").toInt
    } finally in.close()
  }

  // Persist the next counter value, always releasing the stream.
  def writeProp(value: Int): Unit = {
    prop.setProperty("buildnumber", value.toString)
    val out = new java.io.FileOutputStream(file)
    try prop.store(out, null)
    finally out.close()
  }

  val current = if (file.exists) readProp
  else 0
  writeProp(current + increment)
  current
}
import TupleSyntax._
/** Settings that generate the BuildInfo source/resource inside the given
 * configuration. Exposed to users as `addBuildInfoToConfig`.
 * NOTE(review): kept byte-identical — the tuple-flatMap/task-macro wiring is
 * order-sensitive and must match sbt's `.value` expansion rules exactly.
 */
def buildInfoScopedSettings(conf: Configuration): Seq[Def.Setting[_]] = inConfig(conf)(Seq(
  buildInfo := (
    (
      buildInfoRenderer,
      sourceManaged,
      resourceManaged,
      buildInfoUsePackageAsPath,
      buildInfoPackage,
      buildInfoObject,
      buildInfoKeys,
      buildInfoOptions,
      thisProjectRef,
      state,
      streams,
    ) flatMap { (
      renderer: BuildInfoRenderer,
      srcDir: File,
      resDir: File,
      usePackageAsPath: Boolean,
      packageName: String,
      obj: String,
      keys: Seq[BuildInfoKey],
      opts: Seq[BuildInfoOption],
      pr: ProjectRef,
      s: State,
      taskStreams: TaskStreams,
    ) =>
      // Pick the output root by file type, then either mirror the package as
      // a directory path or fall back to a flat "sbt-buildinfo" folder.
      val dir = {
        val parentDir = renderer.fileType match {
          case BuildInfoType.Source => srcDir
          case BuildInfoType.Resource => resDir
        }
        if (usePackageAsPath)
          packageName match {
            case "" => parentDir
            case _ => parentDir / (packageName split '.' mkString "/")
          }
        else
          parentDir / "sbt-buildinfo"
      }
      BuildInfo(dir, renderer, obj, keys, opts, pr, s, taskStreams.cacheDirectory) map (Seq(_))
    }
  ).value,
  // Raw key/value/type triples, for builds that want the data without a generated file.
  buildInfoValues := (
    (buildInfoKeys, buildInfoOptions, thisProjectRef, state) flatMap ((keys, opts, pr, s) =>
      BuildInfo.results(keys, opts, pr, s)
    )
  ).value,
  // Register the generator with the matching pipeline (source vs resource).
  sourceGenerators ++= (if (buildInfoRenderer.value.isSource) Seq(buildInfo.taskValue) else Nil),
  resourceGenerators ++= (if (buildInfoRenderer.value.isResource) Seq(buildInfo.taskValue) else Nil),
  buildInfoRenderer := buildInfoRenderFactory.value.apply(
    buildInfoOptions.value,
    buildInfoPackage.value,
    buildInfoObject.value)
)
)
/** Project-level defaults: generate `buildinfo.BuildInfo` as a Scala case
 * object exposing name/version/scalaVersion/sbtVersion.
 */
def buildInfoDefaultSettings: Seq[Setting[_]] = Seq(
  buildInfoObject := "BuildInfo",
  buildInfoPackage := "buildinfo",
  buildInfoUsePackageAsPath := false,
  buildInfoKeys := Seq(name, version, scalaVersion, sbtVersion),
  // Each evaluation bumps the persisted counter by 1.
  buildInfoBuildNumber := buildNumberTask(baseDirectory.value, 1),
  buildInfoOptions := Seq(),
  buildInfoRenderFactory := ScalaCaseObjectRenderer.apply
)
}
| sbt/sbt-buildinfo | src/main/scala/sbtbuildinfo/BuildInfoPlugin.scala | Scala | mit | 3,809 |
package tscfg
import org.scalatest.wordspec.AnyWordSpec
import tscfg.buildWarnings.{
DefaultListElemWarning,
MultElemListWarning,
OptListElemWarning
}
import tscfg.exceptions.ObjectDefinitionException
import tscfg.model._
import tscfg.model.durations._
import tscfg.ns.NamespaceMan
class ModelBuilderSpec extends AnyWordSpec {
/** Runs the model builder over `source`, optionally echoing the input text
 * and the resulting object type to stdout for debugging.
 */
def build(source: String, showOutput: Boolean = false): ModelBuildResult = {
  // Prefixes every line with " |" so the dump stands out visually.
  def margined(s: String): String = "\n |" + s.replaceAll("\n", "\n |")

  if (showOutput) {
    println("\nsource:" + margined(source))
  }
  val built = ModelBuilder(new NamespaceMan, source)
  val builtType = built.objectType
  if (showOutput) {
    println("\nobjectType: " + builtType)
    println("\nobjectType:" + margined(model.util.format(builtType)))
  }
  built
}
/** Asserts that `objType` has a member `memberName` with exactly the given
 * type, optionality, default value and comments.
 * (Parameter names are part of the call sites below, which use named
 * arguments, so they are kept unchanged.)
 */
private def verify(
    objType: ObjectType,
    memberName: String,
    t: Type,
    optional: Boolean = false,
    default: Option[String] = None,
    comments: Option[String] = None
): Unit = {
  val member = objType.members(memberName)
  assert(member.t === t)
  assert(member.optional === optional)
  assert(member.default === default)
  assert(member.comments === comments)
}
"with empty input" should {
val result = build("")
"build empty ObjectType" in {
assert(result.objectType === ObjectType())
}
}
"with empty list" should {
"throw" in {
assertThrows[IllegalArgumentException] {
build("""
|my_list: [ ]
""".stripMargin)
}
}
}
"with list with multiple elements" should {
val result = build("my_list: [ true, false ]")
"generate warning" in {
val warns = result.warnings.filter(_.isInstanceOf[MultElemListWarning])
assert(warns.map(_.source) contains "[true,false]")
}
}
"with list element indicating optional" should {
val result = build("""my_list: [ "string?" ]""")
"generate warning" in {
val warns = result.warnings.filter(_.isInstanceOf[OptListElemWarning])
assert(warns.map(_.source) contains "string?")
}
}
"with list element indicating a default value" should {
val result = build("""
|my_list: [ "double | 3.14" ]
""".stripMargin)
"generate warning" in {
val warns = result.warnings
.filter(_.isInstanceOf[DefaultListElemWarning])
.asInstanceOf[List[DefaultListElemWarning]]
assert(warns.map(_.default) contains "3.14")
}
}
"with list with literal int" should {
val result = build("""
|my_list: [ 99999999 ]
""".stripMargin)
"translate into ListType(INTEGER)" in {
assert(result.objectType.members("my_list").t === ListType(INTEGER))
}
}
"with list with literal long" should {
val result = build("""
|my_list: [ 99999999999 ]
""".stripMargin)
"translate into ListType(LONG)" in {
assert(result.objectType.members("my_list").t === ListType(LONG))
}
}
"with list with literal double" should {
val result = build("""
|my_list: [ 3.14 ]
""".stripMargin)
"translate into ListType(DOUBLE)" in {
assert(result.objectType.members("my_list").t === ListType(DOUBLE))
}
}
"with list with literal boolean" should {
val result = build("""
|my_list: [ false ]
""".stripMargin)
"translate into ListType(BOOLEAN)" in {
assert(result.objectType.members("my_list").t === ListType(BOOLEAN))
}
}
"with literal integer" should {
val result = build("""
|optInt: 21
""".stripMargin)
"translate into ListType(BOOLEAN)" in {
val at = result.objectType.members("optInt")
assert(at.t === INTEGER)
assert(at.optional)
assert(at.default contains "21")
}
}
"with literal duration (issue 22)" should {
val result = build("""
|idleTimeout = 75 seconds
""".stripMargin)
"translate into DURATION(ms) with given default" in {
val at = result.objectType.members("idleTimeout")
assert(at.t === DURATION(ms))
assert(at.optional)
assert(at.default contains "75 seconds")
}
}
"with good input" should {
val result = build("""
|foo {
| reqStr = string
| reqInt = integer
| reqLong = long
| reqDouble = double
| reqBoolean = boolean
| reqDuration = duration
| duration_ns = "duration : ns"
| duration_µs = "duration : us"
| duration_ms = "duration : ms"
| duration_se = "duration : s"
| duration_mi = "duration : m"
| duration_hr = "duration : h"
| duration_dy = "duration : d"
| optStr = "string?"
| optInt = "int?"
| optLong = "long?"
| optDouble = "double?"
| optBoolean = "boolean?"
| optDuration = "duration?"
| dflStr = "string | hi"
| dflInt = "int | 3"
| dflLong = "long | 999999999"
| dflDouble = "double | 3.14"
| dflBoolean = "boolean | false"
| dflDuration = "duration | 21d"
| listStr = [ string ]
| listInt = [ integer ]
| listLong = [ long ]
| listDouble = [ double ]
| listBoolean = [ boolean ]
| listDuration = [ duration ]
| listDuration_se = [ "duration : second" ]
|}
""".stripMargin)
val objType = result.objectType
"build expected objType" in {
assert(objType.members.keySet === Set("foo"))
val foo = objType.members("foo")
assert(foo.optional === false)
assert(foo.default.isEmpty)
assert(foo.comments.isEmpty)
assert(foo.t.isInstanceOf[ObjectType])
val fooObj = foo.t.asInstanceOf[ObjectType]
assert(
fooObj.members.keySet === Set(
"reqStr",
"reqInt",
"reqLong",
"reqDouble",
"reqBoolean",
"reqDuration",
"duration_ns",
"duration_µs",
"duration_ms",
"duration_se",
"duration_mi",
"duration_hr",
"duration_dy",
"optStr",
"optInt",
"optLong",
"optDouble",
"optBoolean",
"optDuration",
"dflStr",
"dflInt",
"dflLong",
"dflDouble",
"dflBoolean",
"dflDuration",
"listStr",
"listInt",
"listLong",
"listBoolean",
"listDouble",
"listDuration",
"listDuration_se"
)
)
verify(fooObj, "reqStr", STRING)
verify(fooObj, "reqInt", INTEGER)
verify(fooObj, "reqLong", LONG)
verify(fooObj, "reqDouble", DOUBLE)
verify(fooObj, "reqBoolean", BOOLEAN)
verify(fooObj, "reqDuration", DURATION(ms))
verify(fooObj, "duration_ns", DURATION(ns))
verify(fooObj, "duration_µs", DURATION(us))
verify(fooObj, "duration_ms", DURATION(ms))
verify(fooObj, "duration_se", DURATION(second))
verify(fooObj, "duration_mi", DURATION(minute))
verify(fooObj, "duration_hr", DURATION(hour))
verify(fooObj, "duration_dy", DURATION(day))
verify(fooObj, "optStr", STRING, optional = true)
verify(fooObj, "optInt", INTEGER, optional = true)
verify(fooObj, "optLong", LONG, optional = true)
verify(fooObj, "optDouble", DOUBLE, optional = true)
verify(fooObj, "optBoolean", BOOLEAN, optional = true)
verify(fooObj, "optDuration", DURATION(ms), optional = true)
verify(fooObj, "dflStr", STRING, optional = true, default = Some("hi"))
verify(fooObj, "dflInt", INTEGER, optional = true, default = Some("3"))
verify(
fooObj,
"dflLong",
LONG,
optional = true,
default = Some("999999999")
)
verify(
fooObj,
"dflDouble",
DOUBLE,
optional = true,
default = Some("3.14")
)
verify(
fooObj,
"dflBoolean",
BOOLEAN,
optional = true,
default = Some("false")
)
verify(
fooObj,
"dflDuration",
DURATION(ms),
optional = true,
default = Some("21d")
)
verify(fooObj, "listStr", ListType(STRING))
verify(fooObj, "listInt", ListType(INTEGER))
verify(fooObj, "listLong", ListType(LONG))
verify(fooObj, "listDouble", ListType(DOUBLE))
verify(fooObj, "listBoolean", ListType(BOOLEAN))
verify(fooObj, "listDuration", ListType(DURATION(ms)))
verify(fooObj, "listDuration_se", ListType(DURATION(second)))
}
}
"invalid @defines" should {
"check Missing name after `extends`" in {
val e = intercept[ObjectDefinitionException] {
build("""#@define extends
|foo {x:int}
|""".stripMargin)
}
assert(e.getMessage contains "Missing name after `extends`")
}
"check Unrecognized @define construct" in {
val e = intercept[ObjectDefinitionException] {
build("""#@define dummy
|foo {x:int}
|""".stripMargin)
}
assert(e.getMessage contains "Unrecognized @define construct")
}
}
}
| carueda/tscfg | src/test/scala/tscfg/ModelBuilderSpec.scala | Scala | apache-2.0 | 9,349 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.security.authorizer
import java.io.File
import java.net.InetAddress
import java.nio.charset.StandardCharsets.UTF_8
import java.nio.file.Files
import java.util.{Collections, UUID}
import java.util.concurrent.{Executors, Semaphore, TimeUnit}
import kafka.Kafka
import kafka.api.{ApiVersion, KAFKA_2_0_IV0, KAFKA_2_0_IV1}
import kafka.security.authorizer.AclEntry.{WildcardHost, WildcardPrincipalString}
import kafka.server.KafkaConfig
import kafka.utils.TestUtils
import kafka.zk.{ZkAclStore, ZooKeeperTestHarness}
import kafka.zookeeper.{GetChildrenRequest, GetDataRequest, ZooKeeperClient}
import org.apache.kafka.common.acl._
import org.apache.kafka.common.acl.AclOperation._
import org.apache.kafka.common.acl.AclPermissionType.{ALLOW, DENY}
import org.apache.kafka.common.errors.{ApiException, UnsupportedVersionException}
import org.apache.kafka.common.requests.RequestContext
import org.apache.kafka.common.resource.{PatternType, ResourcePattern, ResourcePatternFilter, ResourceType}
import org.apache.kafka.common.resource.Resource.CLUSTER_NAME
import org.apache.kafka.common.resource.ResourcePattern.WILDCARD_RESOURCE
import org.apache.kafka.common.resource.ResourceType._
import org.apache.kafka.common.resource.PatternType.{LITERAL, MATCH, PREFIXED}
import org.apache.kafka.common.security.auth.KafkaPrincipal
import org.apache.kafka.server.authorizer._
import org.apache.kafka.common.utils.{Time, SecurityUtils => JSecurityUtils}
import org.apache.zookeeper.client.ZKClientConfig
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{AfterEach, BeforeEach, Test}
import scala.jdk.CollectionConverters._
import scala.collection.mutable
class AclAuthorizerTest extends ZooKeeperTestHarness with BaseAuthorizerTest {
// Canned ACL entries: wildcard principal and host with specific operations.
private val allowReadAcl = new AccessControlEntry(WildcardPrincipalString, WildcardHost, READ, ALLOW)
private val allowWriteAcl = new AccessControlEntry(WildcardPrincipalString, WildcardHost, WRITE, ALLOW)
private val denyReadAcl = new AccessControlEntry(WildcardPrincipalString, WildcardHost, READ, DENY)

// Resource patterns covering wildcard, prefixed and literal-cluster matching.
private val wildCardResource = new ResourcePattern(TOPIC, WILDCARD_RESOURCE, LITERAL)
private val prefixedResource = new ResourcePattern(TOPIC, "foo", PREFIXED)
private val clusterResource = new ResourcePattern(CLUSTER, CLUSTER_NAME, LITERAL)
private val wildcardPrincipal = JSecurityUtils.parseKafkaPrincipal(WildcardPrincipalString)

// Two independent authorizer instances so cross-instance (ZooKeeper-propagated)
// behaviour can be exercised.
private val aclAuthorizer = new AclAuthorizer
private val aclAuthorizer2 = new AclAuthorizer

// Principal whose equals() is always false: used to verify that authorization
// matches principals by type/name, not by object equality.
class CustomPrincipal(principalType: String, name: String) extends KafkaPrincipal(principalType, name) {
  override def equals(o: scala.Any): Boolean = false
}

override def authorizer: Authorizer = aclAuthorizer
@BeforeEach
override def setUp(): Unit = {
  super.setUp()
  // Increase maxUpdateRetries to avoid transient failures
  aclAuthorizer.maxUpdateRetries = Int.MaxValue
  aclAuthorizer2.maxUpdateRetries = Int.MaxValue
  // Broker config with the super.users list so superuser tests have principals to use.
  val props = TestUtils.createBrokerConfig(0, zkConnect)
  props.put(AclAuthorizer.SuperUsersProp, superUsers)
  config = KafkaConfig.fromProps(props)
  aclAuthorizer.configure(config.originals)
  aclAuthorizer2.configure(config.originals)
  // Random topic name per test run so leftover ZK state cannot leak between tests.
  resource = new ResourcePattern(TOPIC, "foo-" + UUID.randomUUID(), LITERAL)
  zooKeeperClient = new ZooKeeperClient(zkConnect, zkSessionTimeout, zkConnectionTimeout, zkMaxInFlightRequests,
    Time.SYSTEM, "kafka.test", "AclAuthorizerTest", new ZKClientConfig, "AclAuthorizerTest")
}

@AfterEach
override def tearDown(): Unit = {
  // Close authorizers and the raw ZK client before the harness shuts ZooKeeper down.
  aclAuthorizer.close()
  aclAuthorizer2.close()
  zooKeeperClient.close()
  super.tearDown()
}
@Test
def testAuthorizeThrowsOnNonLiteralResource(): Unit = {
  // authorize() only accepts LITERAL resource patterns; PREFIXED is rejected up front.
  assertThrows(classOf[IllegalArgumentException], () => authorize(aclAuthorizer, requestContext, READ,
    new ResourcePattern(TOPIC, "something", PREFIXED)))
}

@Test
def testAuthorizeWithEmptyResourceName(): Unit = {
  // Without ACLs the empty-named group is denied; a wildcard group ACL allows it.
  assertFalse(authorize(aclAuthorizer, requestContext, READ, new ResourcePattern(GROUP, "", LITERAL)))
  addAcls(aclAuthorizer, Set(allowReadAcl), new ResourcePattern(GROUP, WILDCARD_RESOURCE, LITERAL))
  assertTrue(authorize(aclAuthorizer, requestContext, READ, new ResourcePattern(GROUP, "", LITERAL)))
}

// Authorizing the empty resource is not supported because we create a znode with the resource name.
@Test
def testEmptyAclThrowsException(): Unit = {
  val e = assertThrows(classOf[ApiException],
    () => addAcls(aclAuthorizer, Set(allowReadAcl), new ResourcePattern(GROUP, "", LITERAL)))
  assertTrue(e.getCause.isInstanceOf[IllegalArgumentException], s"Unexpected exception $e")
}
@Test
def testTopicAcl(): Unit = {
  // Three users and two hosts exercising per-host allow/deny combinations,
  // plus the implied DESCRIBE from READ/WRITE.
  val user1 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username)
  val user2 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "rob")
  val user3 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "batman")
  val host1 = InetAddress.getByName("192.168.1.1")
  val host2 = InetAddress.getByName("192.168.1.2")
  //user1 has READ access from host1 and host2.
  val acl1 = new AccessControlEntry(user1.toString, host1.getHostAddress, READ, ALLOW)
  val acl2 = new AccessControlEntry(user1.toString, host2.getHostAddress, READ, ALLOW)
  //user1 does not have READ access from host1.
  val acl3 = new AccessControlEntry(user1.toString, host1.getHostAddress, READ, DENY)
  //user1 has WRITE access from host1 only.
  val acl4 = new AccessControlEntry(user1.toString, host1.getHostAddress, WRITE, ALLOW)
  //user1 has DESCRIBE access from all hosts.
  val acl5 = new AccessControlEntry(user1.toString, WildcardHost, DESCRIBE, ALLOW)
  //user2 has READ access from all hosts.
  val acl6 = new AccessControlEntry(user2.toString, WildcardHost, READ, ALLOW)
  //user3 has WRITE access from all hosts.
  val acl7 = new AccessControlEntry(user3.toString, WildcardHost, WRITE, ALLOW)
  val acls = Set(acl1, acl2, acl3, acl4, acl5, acl6, acl7)
  changeAclAndVerify(Set.empty, acls, Set.empty)
  val host1Context = newRequestContext(user1, host1)
  val host2Context = newRequestContext(user1, host2)
  assertTrue(authorize(aclAuthorizer, host2Context, READ, resource), "User1 should have READ access from host2")
  assertFalse(authorize(aclAuthorizer, host1Context, READ, resource), "User1 should not have READ access from host1 due to denyAcl")
  assertTrue(authorize(aclAuthorizer, host1Context, WRITE, resource), "User1 should have WRITE access from host1")
  assertFalse(authorize(aclAuthorizer, host2Context, WRITE, resource), "User1 should not have WRITE access from host2 as no allow acl is defined")
  assertTrue(authorize(aclAuthorizer, host1Context, DESCRIBE, resource), "User1 should not have DESCRIBE access from host1")
  assertTrue(authorize(aclAuthorizer, host2Context, DESCRIBE, resource), "User1 should have DESCRIBE access from host2")
  assertFalse(authorize(aclAuthorizer, host1Context, ALTER, resource), "User1 should not have edit access from host1")
  assertFalse(authorize(aclAuthorizer, host2Context, ALTER, resource), "User1 should not have edit access from host2")
  //test if user has READ and write access they also get describe access
  val user2Context = newRequestContext(user2, host1)
  val user3Context = newRequestContext(user3, host1)
  assertTrue(authorize(aclAuthorizer, user2Context, DESCRIBE, resource), "User2 should have DESCRIBE access from host1")
  assertTrue(authorize(aclAuthorizer, user3Context, DESCRIBE, resource), "User3 should have DESCRIBE access from host2")
  assertTrue(authorize(aclAuthorizer, user2Context, READ, resource), "User2 should have READ access from host1")
  assertTrue(authorize(aclAuthorizer, user3Context, WRITE, resource), "User3 should have WRITE access from host2")
}
/**
 * CustomPrincipals should be compared with their principal type and name,
 * not via equals() (which CustomPrincipal deliberately breaks).
 */
@Test
def testAllowAccessWithCustomPrincipal(): Unit = {
  val user = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username)
  val customUserPrincipal = new CustomPrincipal(KafkaPrincipal.USER_TYPE, username)
  val host1 = InetAddress.getByName("192.168.1.1")
  val host2 = InetAddress.getByName("192.168.1.2")
  // user has READ access from host2 but not from host1
  val acl1 = new AccessControlEntry(user.toString, host1.getHostAddress, READ, DENY)
  val acl2 = new AccessControlEntry(user.toString, host2.getHostAddress, READ, ALLOW)
  val acls = Set(acl1, acl2)
  changeAclAndVerify(Set.empty, acls, Set.empty)
  val host1Context = newRequestContext(customUserPrincipal, host1)
  val host2Context = newRequestContext(customUserPrincipal, host2)
  assertTrue(authorize(aclAuthorizer, host2Context, READ, resource), "User1 should have READ access from host2")
  assertFalse(authorize(aclAuthorizer, host1Context, READ, resource), "User1 should not have READ access from host1 due to denyAcl")
}

@Test
def testDenyTakesPrecedence(): Unit = {
  // A matching DENY entry must win over an all-encompassing ALLOW.
  val user = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username)
  val host = InetAddress.getByName("192.168.2.1")
  val session = newRequestContext(user, host)
  val allowAll = new AccessControlEntry(WildcardPrincipalString, WildcardHost, AclOperation.ALL, ALLOW)
  val denyAcl = new AccessControlEntry(user.toString, host.getHostAddress, AclOperation.ALL, DENY)
  val acls = Set(allowAll, denyAcl)
  changeAclAndVerify(Set.empty, acls, Set.empty)
  assertFalse(authorize(aclAuthorizer, session, READ, resource), "deny should take precedence over allow.")
}
@Test
def testAllowAllAccess(): Unit = {
  // Wildcard principal/host/operation ALLOW grants access to any user from any host.
  val allowAllAcl = new AccessControlEntry(WildcardPrincipalString, WildcardHost, AclOperation.ALL, ALLOW)
  changeAclAndVerify(Set.empty, Set(allowAllAcl), Set.empty)
  val context = newRequestContext(new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "random"), InetAddress.getByName("192.0.4.4"))
  assertTrue(authorize(aclAuthorizer, context, READ, resource), "allow all acl should allow access to all.")
}

@Test
def testSuperUserHasAccess(): Unit = {
  // Superusers bypass ACL evaluation entirely, even a deny-everything ACL.
  val denyAllAcl = new AccessControlEntry(WildcardPrincipalString, WildcardHost, AclOperation.ALL, DENY)
  changeAclAndVerify(Set.empty, Set(denyAllAcl), Set.empty)
  val session1 = newRequestContext(new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "superuser1"), InetAddress.getByName("192.0.4.4"))
  val session2 = newRequestContext(new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "superuser2"), InetAddress.getByName("192.0.4.4"))
  assertTrue(authorize(aclAuthorizer, session1, READ, resource), "superuser always has access, no matter what acls.")
  assertTrue(authorize(aclAuthorizer, session2, READ, resource), "superuser always has access, no matter what acls.")
}

/**
 * CustomPrincipals should be compared with their principal type and name;
 * superuser matching must therefore also work for a principal subclass.
 */
@Test
def testSuperUserWithCustomPrincipalHasAccess(): Unit = {
  val denyAllAcl = new AccessControlEntry(WildcardPrincipalString, WildcardHost, AclOperation.ALL, DENY)
  changeAclAndVerify(Set.empty, Set(denyAllAcl), Set.empty)
  val session = newRequestContext(new CustomPrincipal(KafkaPrincipal.USER_TYPE, "superuser1"), InetAddress.getByName("192.0.4.4"))
  assertTrue(authorize(aclAuthorizer, session, READ, resource), "superuser with custom principal always has access, no matter what acls.")
}
@Test
def testWildCardAcls(): Unit = {
  // ACLs on the wildcard topic resource apply to every literal topic;
  // a wildcard DENY overrides a literal ALLOW.
  assertFalse(authorize(aclAuthorizer, requestContext, READ, resource), "when acls = [], authorizer should fail close.")
  val user1 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username)
  val host1 = InetAddress.getByName("192.168.3.1")
  val readAcl = new AccessControlEntry(user1.toString, host1.getHostAddress, READ, ALLOW)
  val acls = changeAclAndVerify(Set.empty, Set(readAcl), Set.empty, wildCardResource)
  val host1Context = newRequestContext(user1, host1)
  assertTrue(authorize(aclAuthorizer, host1Context, READ, resource), "User1 should have READ access from host1")
  //allow WRITE to specific topic.
  val writeAcl = new AccessControlEntry(user1.toString, host1.getHostAddress, WRITE, ALLOW)
  changeAclAndVerify(Set.empty, Set(writeAcl), Set.empty)
  //deny WRITE to wild card topic.
  val denyWriteOnWildCardResourceAcl = new AccessControlEntry(user1.toString, host1.getHostAddress, WRITE, DENY)
  changeAclAndVerify(acls, Set(denyWriteOnWildCardResourceAcl), Set.empty, wildCardResource)
  assertFalse(authorize(aclAuthorizer, host1Context, WRITE, resource), "User1 should not have WRITE access from host1")
}

@Test
def testNoAclFound(): Unit = {
  // Default policy is fail-closed when no ACL matches.
  assertFalse(authorize(aclAuthorizer, requestContext, READ, resource), "when acls = [], authorizer should deny op.")
}

@Test
def testNoAclFoundOverride(): Unit = {
  // allow.everyone.if.no.acl.found=true flips the default to fail-open.
  val props = TestUtils.createBrokerConfig(1, zkConnect)
  props.put(AclAuthorizer.AllowEveryoneIfNoAclIsFoundProp, "true")
  val cfg = KafkaConfig.fromProps(props)
  val testAuthorizer = new AclAuthorizer
  try {
    testAuthorizer.configure(cfg.originals)
    assertTrue(authorize(testAuthorizer, requestContext, READ, resource),
      "when acls = null or [], authorizer should allow op with allow.everyone = true.")
  } finally {
    testAuthorizer.close()
  }
}
@Test
def testAclManagementAPIs(): Unit = {
  // Covers add (additive), list-by-principal, list-all, remove-some,
  // remove-all, and znode cleanup once a resource has no ACLs left.
  val user1 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username)
  val user2 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "bob")
  val host1 = "host1"
  val host2 = "host2"
  val acl1 = new AccessControlEntry(user1.toString, host1, READ, ALLOW)
  val acl2 = new AccessControlEntry(user1.toString, host1, WRITE, ALLOW)
  val acl3 = new AccessControlEntry(user2.toString, host2, READ, ALLOW)
  val acl4 = new AccessControlEntry(user2.toString, host2, WRITE, ALLOW)
  var acls = changeAclAndVerify(Set.empty, Set(acl1, acl2, acl3, acl4), Set.empty)
  //test addAcl is additive
  val acl5 = new AccessControlEntry(user2.toString, WildcardHost, READ, ALLOW)
  acls = changeAclAndVerify(acls, Set(acl5), Set.empty)
  //test get by principal name.
  TestUtils.waitUntilTrue(() => Set(acl1, acl2).map(acl => new AclBinding(resource, acl)) == getAcls(aclAuthorizer, user1),
    "changes not propagated in timeout period")
  TestUtils.waitUntilTrue(() => Set(acl3, acl4, acl5).map(acl => new AclBinding(resource, acl)) == getAcls(aclAuthorizer, user2),
    "changes not propagated in timeout period")
  val resourceToAcls = Map[ResourcePattern, Set[AccessControlEntry]](
    new ResourcePattern(TOPIC, WILDCARD_RESOURCE, LITERAL) -> Set(new AccessControlEntry(user2.toString, WildcardHost, READ, ALLOW)),
    new ResourcePattern(CLUSTER, WILDCARD_RESOURCE, LITERAL) -> Set(new AccessControlEntry(user2.toString, host1, READ, ALLOW)),
    new ResourcePattern(GROUP, WILDCARD_RESOURCE, LITERAL) -> acls,
    new ResourcePattern(GROUP, "test-ConsumerGroup", LITERAL) -> acls
  )
  resourceToAcls foreach { case (key, value) => changeAclAndVerify(Set.empty, value, Set.empty, key) }
  // Listing everything should return ACLs across all resources just added.
  val expectedAcls = (resourceToAcls + (resource -> acls)).flatMap {
    case (res, resAcls) => resAcls.map { acl => new AclBinding(res, acl) }
  }.toSet
  TestUtils.waitUntilTrue(() => expectedAcls == getAcls(aclAuthorizer), "changes not propagated in timeout period.")
  //test remove acl from existing acls.
  acls = changeAclAndVerify(acls, Set.empty, Set(acl1, acl5))
  //test remove all acls for resource
  removeAcls(aclAuthorizer, Set.empty, resource)
  TestUtils.waitAndVerifyAcls(Set.empty[AccessControlEntry], aclAuthorizer, resource)
  assertFalse(zkClient.resourceExists(resource))
  //test removing last acl also deletes ZooKeeper path
  acls = changeAclAndVerify(Set.empty, Set(acl1), Set.empty)
  changeAclAndVerify(acls, Set.empty, acls)
  assertFalse(zkClient.resourceExists(resource))
}
@Test
def testLoadCache(): Unit = {
  // A freshly-configured authorizer must bootstrap its cache from ZooKeeper
  // even when no change notifications remain.
  val user1 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username)
  val acl1 = new AccessControlEntry(user1.toString, "host-1", READ, ALLOW)
  val acls = Set(acl1)
  addAcls(aclAuthorizer, acls, resource)
  val user2 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "bob")
  val resource1 = new ResourcePattern(TOPIC, "test-2", LITERAL)
  val acl2 = new AccessControlEntry(user2.toString, "host3", READ, DENY)
  val acls1 = Set(acl2)
  addAcls(aclAuthorizer, acls1, resource1)
  // Remove notifications so the new instance can only learn ACLs via the full load.
  zkClient.deleteAclChangeNotifications()
  val authorizer = new AclAuthorizer
  try {
    authorizer.configure(config.originals)
    assertEquals(acls, getAcls(authorizer, resource))
    assertEquals(acls1, getAcls(authorizer, resource1))
  } finally {
    authorizer.close()
  }
}

/**
 * Verify that there is no timing window between loading ACL cache and setting
 * up ZK change listener. Cache must be loaded before creating change listener
 * in the authorizer to avoid the timing window.
 */
@Test
def testChangeListenerTiming(): Unit = {
  val configureSemaphore = new Semaphore(0)
  val listenerSemaphore = new Semaphore(0)
  val executor = Executors.newSingleThreadExecutor
  // Subclass pauses between cache load and listener registration so an ACL
  // change can be injected into exactly that window.
  val aclAuthorizer3 = new AclAuthorizer {
    override private[authorizer] def startZkChangeListeners(): Unit = {
      configureSemaphore.release()
      listenerSemaphore.acquireUninterruptibly()
      super.startZkChangeListeners()
    }
  }
  try {
    val future = executor.submit((() => aclAuthorizer3.configure(config.originals)): Runnable)
    configureSemaphore.acquire()
    val user1 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username)
    val acls = Set(new AccessControlEntry(user1.toString, "host-1", READ, DENY))
    addAcls(aclAuthorizer, acls, resource)
    listenerSemaphore.release()
    future.get(10, TimeUnit.SECONDS)
    assertEquals(acls, getAcls(aclAuthorizer3, resource))
  } finally {
    aclAuthorizer3.close()
    executor.shutdownNow()
  }
}
@Test
def testLocalConcurrentModificationOfResourceAcls(): Unit = {
  // Two adds through the same instance must merge, not overwrite.
  val commonResource = new ResourcePattern(TOPIC, "test", LITERAL)
  val user1 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username)
  val acl1 = new AccessControlEntry(user1.toString, WildcardHost, READ, ALLOW)
  val user2 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "bob")
  val acl2 = new AccessControlEntry(user2.toString, WildcardHost, READ, DENY)
  addAcls(aclAuthorizer, Set(acl1), commonResource)
  addAcls(aclAuthorizer, Set(acl2), commonResource)
  TestUtils.waitAndVerifyAcls(Set(acl1, acl2), aclAuthorizer, commonResource)
}

@Test
def testDistributedConcurrentModificationOfResourceAcls(): Unit = {
  // Adds/removes through two authorizer instances must converge via ZooKeeper.
  val commonResource = new ResourcePattern(TOPIC, "test", LITERAL)
  val user1 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username)
  val acl1 = new AccessControlEntry(user1.toString, WildcardHost, READ, ALLOW)
  val user2 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "bob")
  val acl2 = new AccessControlEntry(user2.toString, WildcardHost, READ, DENY)
  // Add on each instance
  addAcls(aclAuthorizer, Set(acl1), commonResource)
  addAcls(aclAuthorizer2, Set(acl2), commonResource)
  TestUtils.waitAndVerifyAcls(Set(acl1, acl2), aclAuthorizer, commonResource)
  TestUtils.waitAndVerifyAcls(Set(acl1, acl2), aclAuthorizer2, commonResource)
  val user3 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "joe")
  val acl3 = new AccessControlEntry(user3.toString, WildcardHost, READ, DENY)
  // Add on one instance and delete on another
  addAcls(aclAuthorizer, Set(acl3), commonResource)
  val deleted = removeAcls(aclAuthorizer2, Set(acl3), commonResource)
  assertTrue(deleted, "The authorizer should see a value that needs to be deleted")
  TestUtils.waitAndVerifyAcls(Set(acl1, acl2), aclAuthorizer, commonResource)
  TestUtils.waitAndVerifyAcls(Set(acl1, acl2), aclAuthorizer2, commonResource)
}
/** Runs 51 add (and some remove) operations concurrently, alternating between
 * the two authorizer instances, and verifies both instances converge on the
 * same final ACL set. (Fixes the misspelled local `concurrentFuctions`.)
 */
@Test
def testHighConcurrencyModificationOfResourceAcls(): Unit = {
  val commonResource = new ResourcePattern(TOPIC, "test", LITERAL)
  val acls = (0 to 50).map { i =>
    val useri = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, i.toString)
    (new AccessControlEntry(useri.toString, WildcardHost, READ, ALLOW), i)
  }
  // Even ids are added via aclAuthorizer, odd ids via aclAuthorizer2;
  // every id divisible by 10 is subsequently removed again.
  val concurrentFunctions = acls.map { case (acl, aclId) =>
    () => {
      if (aclId % 2 == 0) {
        addAcls(aclAuthorizer, Set(acl), commonResource)
      } else {
        addAcls(aclAuthorizer2, Set(acl), commonResource)
      }
      if (aclId % 10 == 0) {
        removeAcls(aclAuthorizer2, Set(acl), commonResource)
      }
    }
  }
  // Everything except the removed multiples of 10 should survive.
  val expectedAcls = acls.filter { case (_, aclId) =>
    aclId % 10 != 0
  }.map(_._1).toSet
  TestUtils.assertConcurrent("Should support many concurrent calls", concurrentFunctions, 30 * 1000)
  TestUtils.waitAndVerifyAcls(expectedAcls, aclAuthorizer, commonResource)
  TestUtils.waitAndVerifyAcls(expectedAcls, aclAuthorizer2, commonResource)
}
/**
 * Test ACL inheritance, as described in #{org.apache.kafka.common.acl.AclOperation}
 */
@Test
def testAclInheritance(): Unit = {
  val allConcreteOps: Set[AclOperation] = Set(READ, WRITE, CREATE, DELETE, ALTER, DESCRIBE,
    CLUSTER_ACTION, DESCRIBE_CONFIGS, ALTER_CONFIGS, IDEMPOTENT_WRITE)
  // ALL implies every concrete operation, for both ALLOW and DENY.
  testImplicationsOfAllow(AclOperation.ALL, allConcreteOps)
  testImplicationsOfDeny(AclOperation.ALL, allConcreteOps)
  // READ/WRITE/DELETE/ALTER each imply DESCRIBE when allowed ...
  Seq(READ, WRITE, DELETE, ALTER).foreach(op => testImplicationsOfAllow(op, Set(DESCRIBE)))
  // ... but denying DESCRIBE does not deny anything else.
  testImplicationsOfDeny(DESCRIBE, Set())
  // Same relationship between ALTER_CONFIGS and DESCRIBE_CONFIGS.
  testImplicationsOfAllow(ALTER_CONFIGS, Set(DESCRIBE_CONFIGS))
  testImplicationsOfDeny(DESCRIBE_CONFIGS, Set())
}
/** Grants only `parentOp` on the cluster and checks exactly `allowedOps` (plus `parentOp`) are authorized. */
private def testImplicationsOfAllow(parentOp: AclOperation, allowedOps: Set[AclOperation]): Unit = {
  val userPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username)
  val context = newRequestContext(userPrincipal, InetAddress.getByName("192.168.3.1"))
  val parentAcl = new AccessControlEntry(userPrincipal.toString, WildcardHost, parentOp, ALLOW)
  addAcls(aclAuthorizer, Set(parentAcl), clusterResource)
  for (op <- AclOperation.values if validOp(op)) {
    val authorized = authorize(aclAuthorizer, context, op, clusterResource)
    if (allowedOps.contains(op) || op == parentOp)
      assertTrue(authorized, s"ALLOW $parentOp should imply ALLOW $op")
    else
      assertFalse(authorized, s"ALLOW $parentOp should not imply ALLOW $op")
  }
  // Clean up so subsequent invocations start from an empty cluster resource.
  removeAcls(aclAuthorizer, Set(parentAcl), clusterResource)
}
/** Allows ALL but denies `parentOp`; the deny must win for `parentOp` and exactly `deniedOps`. */
private def testImplicationsOfDeny(parentOp: AclOperation, deniedOps: Set[AclOperation]): Unit = {
  val userPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username)
  val context = newRequestContext(userPrincipal, InetAddress.getByName("192.168.3.1"))
  val acls = Set(
    new AccessControlEntry(userPrincipal.toString, WildcardHost, parentOp, DENY),
    new AccessControlEntry(userPrincipal.toString, WildcardHost, AclOperation.ALL, ALLOW))
  addAcls(aclAuthorizer, acls, clusterResource)
  for (op <- AclOperation.values if validOp(op)) {
    val authorized = authorize(aclAuthorizer, context, op, clusterResource)
    if (deniedOps.contains(op) || op == parentOp)
      assertFalse(authorized, s"DENY $parentOp should imply DENY $op")
    else
      assertTrue(authorized, s"DENY $parentOp should not imply DENY $op")
  }
  // Clean up so subsequent invocations start from an empty cluster resource.
  removeAcls(aclAuthorizer, acls, clusterResource)
}
@Test
def testHighConcurrencyDeletionOfResourceAcls(): Unit = {
  val userPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username)
  val acl = new AccessControlEntry(userPrincipal.toString, WildcardHost, AclOperation.ALL, ALLOW)
  // Each task adds on one instance and immediately deletes on the other, repeatedly
  // creating and removing the backing ZooKeeper path; the end state must be empty.
  val concurrentFunctions = (0 to 50).map { _ =>
    () => {
      addAcls(aclAuthorizer, Set(acl), resource)
      removeAcls(aclAuthorizer2, Set(acl), resource)
    }
  }
  TestUtils.assertConcurrent("Should support many concurrent calls", concurrentFunctions, 30 * 1000)
  TestUtils.waitAndVerifyAcls(Set.empty[AccessControlEntry], aclAuthorizer, resource)
  TestUtils.waitAndVerifyAcls(Set.empty[AccessControlEntry], aclAuthorizer2, resource)
}
@Test
def testAccessAllowedIfAllowAclExistsOnWildcardResource(): Unit = {
  // An ALLOW READ ACL on the wildcard pattern must grant READ on a literal topic.
  addAcls(aclAuthorizer, Set(allowReadAcl), wildCardResource)
  val readAllowed = authorize(aclAuthorizer, requestContext, READ, resource)
  assertTrue(readAllowed)
}
@Test
def testDeleteAclOnWildcardResource(): Unit = {
  // Removing one of two ACLs from the wildcard pattern must leave the other intact.
  addAcls(aclAuthorizer, Set(allowReadAcl, allowWriteAcl), wildCardResource)
  removeAcls(aclAuthorizer, Set(allowReadAcl), wildCardResource)
  val remaining = getAcls(aclAuthorizer, wildCardResource)
  assertEquals(Set(allowWriteAcl), remaining)
}
@Test
def testDeleteAllAclOnWildcardResource(): Unit = {
  // Passing an empty set to removeAcls deletes every ACL on the pattern,
  // leaving the authorizer with no bindings at all.
  addAcls(aclAuthorizer, Set(allowReadAcl), wildCardResource)
  removeAcls(aclAuthorizer, Set.empty, wildCardResource)
  val allBindings = getAcls(aclAuthorizer)
  assertEquals(Set.empty, allBindings)
}
@Test
def testAccessAllowedIfAllowAclExistsOnPrefixedResource(): Unit = {
  // An ALLOW READ ACL on a prefixed pattern must grant READ on a matching literal topic.
  addAcls(aclAuthorizer, Set(allowReadAcl), prefixedResource)
  val readAllowed = authorize(aclAuthorizer, requestContext, READ, resource)
  assertTrue(readAllowed)
}
@Test
def testDeleteAclOnPrefixedResource(): Unit = {
  // Removing one of two ACLs from a prefixed pattern must leave the other intact.
  addAcls(aclAuthorizer, Set(allowReadAcl, allowWriteAcl), prefixedResource)
  removeAcls(aclAuthorizer, Set(allowReadAcl), prefixedResource)
  val remaining = getAcls(aclAuthorizer, prefixedResource)
  assertEquals(Set(allowWriteAcl), remaining)
}
@Test
def testDeleteAllAclOnPrefixedResource(): Unit = {
  // Passing an empty set to removeAcls deletes every ACL on the prefixed pattern.
  addAcls(aclAuthorizer, Set(allowReadAcl, allowWriteAcl), prefixedResource)
  removeAcls(aclAuthorizer, Set.empty, prefixedResource)
  val allBindings = getAcls(aclAuthorizer)
  assertEquals(Set.empty, allBindings)
}
@Test
def testAddAclsOnLiteralResource(): Unit = {
  // Two adds on the same literal pattern accumulate (the second does not overwrite
  // the first), and the ACLs are not visible via the wildcard or prefixed patterns.
  addAcls(aclAuthorizer, Set(allowReadAcl, allowWriteAcl), resource)
  addAcls(aclAuthorizer, Set(allowWriteAcl, denyReadAcl), resource)
  val expected = Set(allowReadAcl, allowWriteAcl, denyReadAcl)
  assertEquals(expected, getAcls(aclAuthorizer, resource))
  assertEquals(Set.empty, getAcls(aclAuthorizer, wildCardResource))
  assertEquals(Set.empty, getAcls(aclAuthorizer, prefixedResource))
}
@Test
def testAddAclsOnWildcardResource(): Unit = {
  // Two adds on the wildcard pattern accumulate, and the ACLs are not visible
  // via the literal or prefixed patterns.
  addAcls(aclAuthorizer, Set(allowReadAcl, allowWriteAcl), wildCardResource)
  addAcls(aclAuthorizer, Set(allowWriteAcl, denyReadAcl), wildCardResource)
  val expected = Set(allowReadAcl, allowWriteAcl, denyReadAcl)
  assertEquals(expected, getAcls(aclAuthorizer, wildCardResource))
  assertEquals(Set.empty, getAcls(aclAuthorizer, resource))
  assertEquals(Set.empty, getAcls(aclAuthorizer, prefixedResource))
}
@Test
def testAddAclsOnPrefixedResource(): Unit = {
  // Two adds on the prefixed pattern accumulate, and the ACLs are not visible
  // via the wildcard or literal patterns.
  addAcls(aclAuthorizer, Set(allowReadAcl, allowWriteAcl), prefixedResource)
  addAcls(aclAuthorizer, Set(allowWriteAcl, denyReadAcl), prefixedResource)
  val expected = Set(allowReadAcl, allowWriteAcl, denyReadAcl)
  assertEquals(expected, getAcls(aclAuthorizer, prefixedResource))
  assertEquals(Set.empty, getAcls(aclAuthorizer, wildCardResource))
  assertEquals(Set.empty, getAcls(aclAuthorizer, resource))
}
@Test
def testAuthorizeWithPrefixedResource(): Unit = {
  // Surround the one matching prefix grant with DENY READ ACLs on many patterns
  // that must not apply to `resource`; the prefixed ALLOW must still win.
  val nonMatchingPatterns = Seq(
    new ResourcePattern(TOPIC, "a_other", LITERAL),
    new ResourcePattern(TOPIC, "a_other", PREFIXED),
    new ResourcePattern(TOPIC, "foo-" + UUID.randomUUID(), PREFIXED),
    new ResourcePattern(TOPIC, "foo-" + UUID.randomUUID(), PREFIXED),
    new ResourcePattern(TOPIC, "foo-" + UUID.randomUUID() + "-zzz", PREFIXED),
    new ResourcePattern(TOPIC, "fooo-" + UUID.randomUUID(), PREFIXED),
    new ResourcePattern(TOPIC, "fo-" + UUID.randomUUID(), PREFIXED),
    new ResourcePattern(TOPIC, "fop-" + UUID.randomUUID(), PREFIXED),
    new ResourcePattern(TOPIC, "fon-" + UUID.randomUUID(), PREFIXED),
    new ResourcePattern(TOPIC, "fon-", PREFIXED),
    new ResourcePattern(TOPIC, "z_other", PREFIXED),
    new ResourcePattern(TOPIC, "z_other", LITERAL))
  nonMatchingPatterns.foreach(pattern => addAcls(aclAuthorizer, Set(denyReadAcl), pattern))
  addAcls(aclAuthorizer, Set(allowReadAcl), prefixedResource)
  assertTrue(authorize(aclAuthorizer, requestContext, READ, resource))
}
@Test
def testSingleCharacterResourceAcls(): Unit = {
  // Local helpers keep the pattern construction and authorization calls readable.
  def literal(name: String): ResourcePattern = new ResourcePattern(TOPIC, name, LITERAL)
  def canRead(pattern: ResourcePattern): Boolean =
    authorize(aclAuthorizer, requestContext, READ, pattern)
  // A single-character literal name matches exactly and nothing else.
  addAcls(aclAuthorizer, Set(allowReadAcl), literal("f"))
  assertTrue(canRead(literal("f")))
  assertFalse(canRead(literal("foo")))
  // A single-character prefix matches any name starting with it, including itself,
  // but not names that merely contain the character.
  addAcls(aclAuthorizer, Set(allowReadAcl), new ResourcePattern(TOPIC, "_", PREFIXED))
  assertTrue(canRead(literal("_foo")))
  assertTrue(canRead(literal("_")))
  assertFalse(canRead(literal("foo_")))
}
@Test
def testGetAclsPrincipal(): Unit = {
  // An ACL bound to a specific principal is only returned for that principal ...
  val specificAcl = new AccessControlEntry(principal.toString, WildcardHost, WRITE, ALLOW)
  addAcls(aclAuthorizer, Set(specificAcl), resource)
  assertEquals(0, getAcls(aclAuthorizer, wildcardPrincipal).size,
    "acl on specific should not be returned for wildcard request")
  assertEquals(1, getAcls(aclAuthorizer, principal).size,
    "acl on specific should be returned for specific request")
  val equalPrincipal = new KafkaPrincipal(principal.getPrincipalType, principal.getName)
  assertEquals(1, getAcls(aclAuthorizer, equalPrincipal).size,
    "acl on specific should be returned for different principal instance")
  removeAcls(aclAuthorizer, Set.empty, resource)
  // ... while a wildcard-principal ACL is only returned for the wildcard lookup.
  val wildcardAcl = new AccessControlEntry(WildcardPrincipalString, WildcardHost, WRITE, ALLOW)
  addAcls(aclAuthorizer, Set(wildcardAcl), resource)
  assertEquals(1, getAcls(aclAuthorizer, wildcardPrincipal).size,
    "acl on wildcard should be returned for wildcard request")
  assertEquals(0, getAcls(aclAuthorizer, principal).size,
    "acl on wildcard should not be returned for specific request")
}
@Test
def testAclsFilter(): Unit = {
  val resource1 = new ResourcePattern(TOPIC, "foo-" + UUID.randomUUID(), LITERAL)
  val resource2 = new ResourcePattern(TOPIC, "bar-" + UUID.randomUUID(), LITERAL)
  val prefixedResource = new ResourcePattern(TOPIC, "bar-", PREFIXED)
  // Two bindings on resource1, one on resource2, and one wildcard-principal
  // binding on the "bar-" prefix (which covers resource2's name).
  val acl1 = new AclBinding(resource1, new AccessControlEntry(principal.toString, WildcardHost, READ, ALLOW))
  val acl2 = new AclBinding(resource1, new AccessControlEntry(principal.toString, "192.168.0.1", WRITE, ALLOW))
  val acl3 = new AclBinding(resource2, new AccessControlEntry(principal.toString, WildcardHost, DESCRIBE, ALLOW))
  val acl4 = new AclBinding(prefixedResource, new AccessControlEntry(wildcardPrincipal.toString, WildcardHost, READ, ALLOW))
  aclAuthorizer.createAcls(requestContext, List(acl1, acl2, acl3, acl4).asJava)
  // Lookups: ANY returns everything; an exact-pattern filter returns only bindings
  // on that pattern; a MATCH filter on resource2's name also picks up acl4.
  assertEquals(Set(acl1, acl2, acl3, acl4), aclAuthorizer.acls(AclBindingFilter.ANY).asScala.toSet)
  assertEquals(Set(acl1, acl2), aclAuthorizer.acls(new AclBindingFilter(resource1.toFilter, AccessControlEntryFilter.ANY)).asScala.toSet)
  assertEquals(Set(acl4), aclAuthorizer.acls(new AclBindingFilter(prefixedResource.toFilter, AccessControlEntryFilter.ANY)).asScala.toSet)
  val matchingFilter = new AclBindingFilter(new ResourcePatternFilter(ResourceType.ANY, resource2.name, MATCH), AccessControlEntryFilter.ANY)
  assertEquals(Set(acl3, acl4), aclAuthorizer.acls(matchingFilter).asScala.toSet)
  // Delete with four filters; the index-based assertions below rely on this order.
  val filters = List(matchingFilter,
    acl1.toFilter,
    new AclBindingFilter(resource2.toFilter, AccessControlEntryFilter.ANY),
    new AclBindingFilter(new ResourcePatternFilter(TOPIC, "baz", PatternType.ANY), AccessControlEntryFilter.ANY))
  val deleteResults = aclAuthorizer.deleteAcls(requestContext, filters.asJava).asScala.map(_.toCompletableFuture.get)
  // No filter-level or binding-level failures are expected.
  assertEquals(List.empty, deleteResults.filter(_.exception.isPresent))
  filters.indices.foreach { i =>
    assertEquals(Set.empty, deleteResults(i).aclBindingDeleteResults.asScala.toSet.filter(_.exception.isPresent))
  }
  // Filters apply in order: the MATCH filter removes acl3 and acl4 first, acl1's own
  // filter removes acl1, and the two remaining filters have nothing left to match.
  assertEquals(Set(acl3, acl4), deleteResults(0).aclBindingDeleteResults.asScala.map(_.aclBinding).toSet)
  assertEquals(Set(acl1), deleteResults(1).aclBindingDeleteResults.asScala.map(_.aclBinding).toSet)
  assertEquals(Set.empty, deleteResults(2).aclBindingDeleteResults.asScala.map(_.aclBinding).toSet)
  assertEquals(Set.empty, deleteResults(3).aclBindingDeleteResults.asScala.map(_.aclBinding).toSet)
}
@Test
def testThrowsOnAddPrefixedAclIfInterBrokerProtocolVersionTooLow(): Unit = {
  // With the inter-broker protocol pinned below 2.0-IV1, adding a PREFIXED ACL
  // must fail with an UnsupportedVersionException wrapped in an ApiException.
  givenAuthorizerWithProtocolVersion(Option(KAFKA_2_0_IV0))
  val prefixedPattern = new ResourcePattern(TOPIC, "z_other", PREFIXED)
  val e = assertThrows(classOf[ApiException],
    () => addAcls(aclAuthorizer, Set(denyReadAcl), prefixedPattern))
  assertTrue(e.getCause.isInstanceOf[UnsupportedVersionException], s"Unexpected exception $e")
}
@Test
def testWritesExtendedAclChangeEventIfInterBrokerProtocolNotSet(): Unit = {
  // No explicit inter-broker protocol version: a PREFIXED ACL change must still
  // be written in the extended change-event format.
  givenAuthorizerWithProtocolVersion(Option.empty)
  val prefixedPattern = new ResourcePattern(TOPIC, "z_other", PREFIXED)
  val expected = new String(
    ZkAclStore(PREFIXED).changeStore.createChangeNode(prefixedPattern).bytes, UTF_8)
  addAcls(aclAuthorizer, Set(denyReadAcl), prefixedPattern)
  assertEquals(expected, getAclChangeEventAsString(PREFIXED))
}
@Test
def testWritesExtendedAclChangeEventWhenInterBrokerProtocolAtLeastKafkaV2(): Unit = {
  // With inter-broker protocol >= 2.0-IV1, a PREFIXED ACL change is written
  // in the extended change-event format.
  givenAuthorizerWithProtocolVersion(Option(KAFKA_2_0_IV1))
  val prefixedPattern = new ResourcePattern(TOPIC, "z_other", PREFIXED)
  val expected = new String(
    ZkAclStore(PREFIXED).changeStore.createChangeNode(prefixedPattern).bytes, UTF_8)
  addAcls(aclAuthorizer, Set(denyReadAcl), prefixedPattern)
  assertEquals(expected, getAclChangeEventAsString(PREFIXED))
}
@Test
// NOTE(review): this method name looks like two names merged by a bad edit
// ("testWritesLiteralAclChangeEventWhenInterBrokerProtocolLessThanKafkaV2" and
// "...LiteralAclChangesForOlderProtocolVersions"). Kept as-is here because this
// pass does not rename public members — consider fixing it separately.
def testWritesLiteralWritesLiteralAclChangeEventWhenInterBrokerProtocolLessThanKafkaV2eralAclChangesForOlderProtocolVersions(): Unit = {
  // With inter-broker protocol below 2.0-IV1, a LITERAL ACL change is written
  // in the legacy (literal) change-event format.
  givenAuthorizerWithProtocolVersion(Option(KAFKA_2_0_IV0))
  val resource = new ResourcePattern(TOPIC, "z_other", LITERAL)
  val expected = new String(ZkAclStore(LITERAL).changeStore
    .createChangeNode(resource).bytes, UTF_8)
  addAcls(aclAuthorizer, Set(denyReadAcl), resource)
  val actual = getAclChangeEventAsString(LITERAL)
  assertEquals(expected, actual)
}
@Test
def testWritesLiteralAclChangeEventWhenInterBrokerProtocolIsKafkaV2(): Unit = {
  // With inter-broker protocol 2.0-IV1, a LITERAL ACL change is still written
  // in the literal change-event format.
  givenAuthorizerWithProtocolVersion(Option(KAFKA_2_0_IV1))
  val literalPattern = new ResourcePattern(TOPIC, "z_other", LITERAL)
  val expected = new String(
    ZkAclStore(LITERAL).changeStore.createChangeNode(literalPattern).bytes, UTF_8)
  addAcls(aclAuthorizer, Set(denyReadAcl), literalPattern)
  assertEquals(expected, getAclChangeEventAsString(LITERAL))
}
@Test
def testAuthorizerNoZkConfig(): Unit = {
  // Without any ZooKeeper TLS settings in the broker config, none of the mapped
  // ZooKeeper client properties should be populated.
  val noTlsProps = Kafka.getPropsFromArgs(Array(prepareDefaultConfig))
  val zkClientConfig = AclAuthorizer.zkClientConfigFromKafkaConfigAndMap(
    KafkaConfig.fromProps(noTlsProps),
    noTlsProps.asInstanceOf[java.util.Map[String, Any]].asScala)
  for (propName <- KafkaConfig.ZkSslConfigToSystemPropertyMap.keys) {
    assertNull(zkClientConfig.getProperty(propName))
  }
}
@Test
def testAuthorizerZkConfigFromKafkaConfigWithDefaults(): Unit = {
  val kafkaValue = "kafkaValue"
  // Only a subset of the ZK TLS settings is configured explicitly; the remaining
  // mapped properties must resolve to their defaults.
  val configs = Map(
    "zookeeper.connect" -> "somewhere", // required, otherwise we would omit it
    KafkaConfig.ZkSslClientEnableProp -> "true",
    KafkaConfig.ZkClientCnxnSocketProp -> kafkaValue,
    KafkaConfig.ZkSslKeyStoreLocationProp -> kafkaValue,
    KafkaConfig.ZkSslKeyStorePasswordProp -> kafkaValue,
    KafkaConfig.ZkSslKeyStoreTypeProp -> kafkaValue,
    KafkaConfig.ZkSslTrustStoreLocationProp -> kafkaValue,
    KafkaConfig.ZkSslTrustStorePasswordProp -> kafkaValue,
    KafkaConfig.ZkSslTrustStoreTypeProp -> kafkaValue,
    KafkaConfig.ZkSslEnabledProtocolsProp -> kafkaValue,
    KafkaConfig.ZkSslCipherSuitesProp -> kafkaValue)
  val props = new java.util.Properties()
  configs.foreach { case (key, value) => props.put(key, value) }
  val zkClientConfig = AclAuthorizer.zkClientConfigFromKafkaConfigAndMap(
    KafkaConfig.fromProps(props), mutable.Map(configs.toSeq: _*))
  // Confirm every mapped property resolves to its configured value or its default.
  KafkaConfig.ZkSslConfigToSystemPropertyMap.keys.foreach { prop =>
    val actual = KafkaConfig.zooKeeperClientProperty(zkClientConfig, prop).getOrElse("<None>")
    prop match {
      case KafkaConfig.ZkSslClientEnableProp | KafkaConfig.ZkSslEndpointIdentificationAlgorithmProp =>
        assertEquals("true", actual)
      case KafkaConfig.ZkSslCrlEnableProp | KafkaConfig.ZkSslOcspEnableProp =>
        assertEquals("false", actual)
      case KafkaConfig.ZkSslProtocolProp =>
        assertEquals("TLSv1.2", actual)
      case _ =>
        assertEquals(kafkaValue, actual)
    }
  }
}
@Test
def testAuthorizerZkConfigFromKafkaConfig(): Unit = {
  val kafkaValue = "kafkaValue"
  // Every mapped ZK TLS setting is configured explicitly on the broker config.
  val configs = Map(
    "zookeeper.connect" -> "somewhere", // required, otherwise we would omit it
    KafkaConfig.ZkSslClientEnableProp -> "true",
    KafkaConfig.ZkClientCnxnSocketProp -> kafkaValue,
    KafkaConfig.ZkSslKeyStoreLocationProp -> kafkaValue,
    KafkaConfig.ZkSslKeyStorePasswordProp -> kafkaValue,
    KafkaConfig.ZkSslKeyStoreTypeProp -> kafkaValue,
    KafkaConfig.ZkSslTrustStoreLocationProp -> kafkaValue,
    KafkaConfig.ZkSslTrustStorePasswordProp -> kafkaValue,
    KafkaConfig.ZkSslTrustStoreTypeProp -> kafkaValue,
    KafkaConfig.ZkSslProtocolProp -> kafkaValue,
    KafkaConfig.ZkSslEnabledProtocolsProp -> kafkaValue,
    KafkaConfig.ZkSslCipherSuitesProp -> kafkaValue,
    KafkaConfig.ZkSslEndpointIdentificationAlgorithmProp -> "HTTPS",
    KafkaConfig.ZkSslCrlEnableProp -> "false",
    KafkaConfig.ZkSslOcspEnableProp -> "false")
  val props = new java.util.Properties()
  configs.foreach { case (key, value) => props.put(key, value.toString) }
  val zkClientConfig = AclAuthorizer.zkClientConfigFromKafkaConfigAndMap(
    KafkaConfig.fromProps(props), mutable.Map(configs.toSeq: _*))
  // Confirm every mapped property carries the configured value
  // (boolean-ish props are normalized to "true"/"false").
  KafkaConfig.ZkSslConfigToSystemPropertyMap.keys.foreach { prop =>
    val actual = KafkaConfig.zooKeeperClientProperty(zkClientConfig, prop).getOrElse("<None>")
    prop match {
      case KafkaConfig.ZkSslClientEnableProp | KafkaConfig.ZkSslEndpointIdentificationAlgorithmProp =>
        assertEquals("true", actual)
      case KafkaConfig.ZkSslCrlEnableProp | KafkaConfig.ZkSslOcspEnableProp =>
        assertEquals("false", actual)
      case _ =>
        assertEquals(kafkaValue, actual)
    }
  }
}
@Test
def testAuthorizerZkConfigFromPrefixOverrides(): Unit = {
  val kafkaValue = "kafkaValue"
  val prefixedValue = "prefixedValue"
  val prefix = "authorizer."
  // Broker-level ZK TLS values ...
  val brokerConfigs = Map(
    "zookeeper.connect" -> "somewhere", // required, otherwise we would omit it
    KafkaConfig.ZkSslClientEnableProp -> "false",
    KafkaConfig.ZkClientCnxnSocketProp -> kafkaValue,
    KafkaConfig.ZkSslKeyStoreLocationProp -> kafkaValue,
    KafkaConfig.ZkSslKeyStorePasswordProp -> kafkaValue,
    KafkaConfig.ZkSslKeyStoreTypeProp -> kafkaValue,
    KafkaConfig.ZkSslTrustStoreLocationProp -> kafkaValue,
    KafkaConfig.ZkSslTrustStorePasswordProp -> kafkaValue,
    KafkaConfig.ZkSslTrustStoreTypeProp -> kafkaValue,
    KafkaConfig.ZkSslProtocolProp -> kafkaValue,
    KafkaConfig.ZkSslEnabledProtocolsProp -> kafkaValue,
    KafkaConfig.ZkSslCipherSuitesProp -> kafkaValue,
    KafkaConfig.ZkSslEndpointIdentificationAlgorithmProp -> "HTTPS",
    KafkaConfig.ZkSslCrlEnableProp -> "false",
    KafkaConfig.ZkSslOcspEnableProp -> "false")
  // ... and "authorizer."-prefixed overrides that must win over the broker-level values.
  val overrideConfigs = Map(
    prefix + KafkaConfig.ZkSslClientEnableProp -> "true",
    prefix + KafkaConfig.ZkClientCnxnSocketProp -> prefixedValue,
    prefix + KafkaConfig.ZkSslKeyStoreLocationProp -> prefixedValue,
    prefix + KafkaConfig.ZkSslKeyStorePasswordProp -> prefixedValue,
    prefix + KafkaConfig.ZkSslKeyStoreTypeProp -> prefixedValue,
    prefix + KafkaConfig.ZkSslTrustStoreLocationProp -> prefixedValue,
    prefix + KafkaConfig.ZkSslTrustStorePasswordProp -> prefixedValue,
    prefix + KafkaConfig.ZkSslTrustStoreTypeProp -> prefixedValue,
    prefix + KafkaConfig.ZkSslProtocolProp -> prefixedValue,
    prefix + KafkaConfig.ZkSslEnabledProtocolsProp -> prefixedValue,
    prefix + KafkaConfig.ZkSslCipherSuitesProp -> prefixedValue,
    prefix + KafkaConfig.ZkSslEndpointIdentificationAlgorithmProp -> "",
    prefix + KafkaConfig.ZkSslCrlEnableProp -> "true",
    prefix + KafkaConfig.ZkSslOcspEnableProp -> "true")
  val configs = brokerConfigs ++ overrideConfigs
  val props = new java.util.Properties()
  configs.foreach { case (key, value) => props.put(key, value.toString) }
  val zkClientConfig = AclAuthorizer.zkClientConfigFromKafkaConfigAndMap(
    KafkaConfig.fromProps(props), mutable.Map(configs.toSeq: _*))
  // Confirm the prefixed values (not the broker-level ones) are picked up.
  KafkaConfig.ZkSslConfigToSystemPropertyMap.keys.foreach { prop =>
    val actual = KafkaConfig.zooKeeperClientProperty(zkClientConfig, prop).getOrElse("<None>")
    prop match {
      case KafkaConfig.ZkSslClientEnableProp | KafkaConfig.ZkSslCrlEnableProp | KafkaConfig.ZkSslOcspEnableProp =>
        assertEquals("true", actual)
      case KafkaConfig.ZkSslEndpointIdentificationAlgorithmProp =>
        assertEquals("false", actual)
      case _ =>
        assertEquals(prefixedValue, actual)
    }
  }
}
@Test
def testCreateDeleteTiming(): Unit = {
  val literalResource = new ResourcePattern(TOPIC, "foo-" + UUID.randomUUID(), LITERAL)
  val prefixedResource = new ResourcePattern(TOPIC, "bar-", PREFIXED)
  val wildcardResource = new ResourcePattern(TOPIC, "*", LITERAL)
  val ace = new AccessControlEntry(principal.toString, WildcardHost, READ, ALLOW)
  // Gate used to hold back deleteAuthorizer's change-notification processing so
  // the test controls exactly when an update becomes visible to it.
  val updateSemaphore = new Semaphore(1)
  // Creates one ACL binding through `createAuthorizer` and waits for completion.
  def createAcl(createAuthorizer: AclAuthorizer, resource: ResourcePattern): AclBinding = {
    val acl = new AclBinding(resource, ace)
    createAuthorizer.createAcls(requestContext, Collections.singletonList(acl)).asScala
      .foreach(_.toCompletableFuture.get(15, TimeUnit.SECONDS))
    acl
  }
  // Deletes bindings matching (resource name, deletePatternType) through
  // `deleteAuthorizer` and returns the bindings that were actually deleted.
  def deleteAcl(deleteAuthorizer: AclAuthorizer,
                resource: ResourcePattern,
                deletePatternType: PatternType): List[AclBinding] = {
    val filter = new AclBindingFilter(
      new ResourcePatternFilter(resource.resourceType(), resource.name(), deletePatternType),
      AccessControlEntryFilter.ANY)
    deleteAuthorizer.deleteAcls(requestContext, Collections.singletonList(filter)).asScala
      .map(_.toCompletableFuture.get(15, TimeUnit.SECONDS))
      .flatMap(_.aclBindingDeleteResults.asScala)
      .map(_.aclBinding)
      .toList
  }
  def listAcls(authorizer: AclAuthorizer): List[AclBinding] = {
    authorizer.acls(AclBindingFilter.ANY).asScala.toList
  }
  // Creates an ACL on `aclAuthorizer` and deletes it via `deleteAuthorizer` while
  // the latter's notification processing is blocked. Non-MATCH deletes succeed
  // immediately; a MATCH delete only removes bindings the deleting authorizer has
  // already loaded, so it must wait for the change notification first.
  def verifyCreateDeleteAcl(deleteAuthorizer: AclAuthorizer,
                            resource: ResourcePattern,
                            deletePatternType: PatternType): Unit = {
    updateSemaphore.acquire() // block deleteAuthorizer's notification processing
    assertEquals(List.empty, listAcls(deleteAuthorizer))
    val acl = createAcl(aclAuthorizer, resource)
    val deleted = deleteAcl(deleteAuthorizer, resource, deletePatternType)
    if (deletePatternType != PatternType.MATCH) {
      assertEquals(List(acl), deleted)
    } else {
      // Notification still blocked, so the MATCH filter finds nothing to delete yet.
      assertEquals(List.empty[AclBinding], deleted)
    }
    updateSemaphore.release()
    if (deletePatternType == PatternType.MATCH) {
      // After the notification is processed, the MATCH delete succeeds.
      TestUtils.waitUntilTrue(() => listAcls(deleteAuthorizer).nonEmpty, "ACL not propagated")
      assertEquals(List(acl), deleteAcl(deleteAuthorizer, resource, deletePatternType))
    }
    TestUtils.waitUntilTrue(() => listAcls(deleteAuthorizer).isEmpty, "ACL delete not propagated")
  }
  // Authorizer whose change-notification processing participates in the semaphore handshake.
  val deleteAuthorizer = new AclAuthorizer {
    override def processAclChangeNotification(resource: ResourcePattern): Unit = {
      updateSemaphore.acquire()
      try {
        super.processAclChangeNotification(resource)
      } finally {
        updateSemaphore.release()
      }
    }
  }
  try {
    deleteAuthorizer.configure(config.originals)
    // Exercise every resource pattern type with its own, ANY, and MATCH delete filters.
    List(literalResource, prefixedResource, wildcardResource).foreach { resource =>
      verifyCreateDeleteAcl(deleteAuthorizer, resource, resource.patternType())
      verifyCreateDeleteAcl(deleteAuthorizer, resource, PatternType.ANY)
      verifyCreateDeleteAcl(deleteAuthorizer, resource, PatternType.MATCH)
    }
  } finally {
    deleteAuthorizer.close()
  }
}
@Test
def testAuthorizeByResourceTypeNoAclFoundOverride(): Unit = {
  // With allow.everyone.if.no.acl.found=true, a fresh authorizer with no ACLs
  // must grant resource-type level READ and WRITE access.
  val props = TestUtils.createBrokerConfig(1, zkConnect)
  props.put(AclAuthorizer.AllowEveryoneIfNoAclIsFoundProp, "true")
  val cfg = KafkaConfig.fromProps(props)
  val permissiveAuthorizer = new AclAuthorizer
  try {
    permissiveAuthorizer.configure(cfg.originals)
    assertTrue(authorizeByResourceType(permissiveAuthorizer, requestContext, READ, resource.resourceType()),
      "If allow.everyone.if.no.acl.found = true, caller should have read access to at least one topic")
    assertTrue(authorizeByResourceType(permissiveAuthorizer, requestContext, WRITE, resource.resourceType()),
      "If allow.everyone.if.no.acl.found = true, caller should have write access to at least one topic")
  } finally {
    permissiveAuthorizer.close()
  }
}
/** Reconfigures the shared authorizer, optionally pinning the inter-broker protocol version. */
private def givenAuthorizerWithProtocolVersion(protocolVersion: Option[ApiVersion]): Unit = {
  aclAuthorizer.close()
  val props = TestUtils.createBrokerConfig(0, zkConnect)
  props.put(AclAuthorizer.SuperUsersProp, superUsers)
  protocolVersion.foreach { version =>
    props.put(KafkaConfig.InterBrokerProtocolVersionProp, version.toString)
  }
  config = KafkaConfig.fromProps(props)
  aclAuthorizer.configure(config.originals)
}
/** Expects exactly one change notification under the store's change path and returns its payload. */
private def getAclChangeEventAsString(patternType: PatternType) = {
  val store = ZkAclStore(patternType)
  val changePath = store.changeStore.aclChangePath
  val children = zooKeeperClient.handleRequest(GetChildrenRequest(changePath, registerWatch = true))
  children.maybeThrow()
  assertEquals(1, children.children.size, "Expecting 1 change event")
  val data = zooKeeperClient.handleRequest(GetDataRequest(s"$changePath/${children.children.head}"))
  data.maybeThrow()
  new String(data.data, UTF_8)
}
/**
 * Applies the given additions and removals against the authorizer, waits until it
 * converges on the expected resulting ACL set, and returns that set.
 */
private def changeAclAndVerify(originalAcls: Set[AccessControlEntry],
                               addedAcls: Set[AccessControlEntry],
                               removedAcls: Set[AccessControlEntry],
                               resource: ResourcePattern = resource): Set[AccessControlEntry] = {
  val afterAdd =
    if (addedAcls.isEmpty) originalAcls
    else {
      addAcls(aclAuthorizer, addedAcls, resource)
      originalAcls ++ addedAcls
    }
  val expected =
    if (removedAcls.isEmpty) afterAdd
    else {
      removeAcls(aclAuthorizer, removedAcls, resource)
      afterAdd -- removedAcls
    }
  TestUtils.waitAndVerifyAcls(expected, aclAuthorizer, resource)
  expected
}
/** Issues a single Action and reports whether the authorizer allowed it. */
private def authorize(authorizer: AclAuthorizer, requestContext: RequestContext, operation: AclOperation, resource: ResourcePattern): Boolean = {
  val action = new Action(operation, resource, 1, true, true)
  val result = authorizer.authorize(requestContext, List(action).asJava).asScala.head
  result == AuthorizationResult.ALLOWED
}
/** Fetches the bindings for exactly this resource pattern, stripped down to their entries. */
private def getAcls(authorizer: AclAuthorizer, resourcePattern: ResourcePattern): Set[AccessControlEntry] = {
  val filter = new AclBindingFilter(resourcePattern.toFilter, AccessControlEntryFilter.ANY)
  authorizer.acls(filter).asScala.map(_.entry).toSet
}
/** Fetches all bindings for the given principal, across every resource. */
private def getAcls(authorizer: AclAuthorizer, principal: KafkaPrincipal): Set[AclBinding] = {
  val entryFilter = new AccessControlEntryFilter(principal.toString, null, AclOperation.ANY, AclPermissionType.ANY)
  authorizer.acls(new AclBindingFilter(ResourcePatternFilter.ANY, entryFilter)).asScala.toSet
}
/** Fetches every binding known to the authorizer, regardless of resource or principal. */
private def getAcls(authorizer: AclAuthorizer): Set[AclBinding] =
  authorizer.acls(AclBindingFilter.ANY).asScala.toSet
/** ANY and UNKNOWN are filter placeholders, not real operations, so they are excluded. */
private def validOp(op: AclOperation): Boolean =
  !(op == AclOperation.ANY || op == AclOperation.UNKNOWN)
/** Minimal broker config file: just a broker id and a ZooKeeper connect string. */
private def prepareDefaultConfig: String = {
  prepareConfig(Array("broker.id=1", "zookeeper.connect=somewhere"))
}
/**
 * Writes the given config lines to a temporary .properties file and returns its
 * absolute path. The file is registered for deletion on JVM exit.
 *
 * @param lines properties entries, one "key=value" string per element
 * @return absolute path of the written temp file
 */
private def prepareConfig(lines: Array[String]): String = {
  val file = File.createTempFile("kafkatest", ".properties")
  file.deleteOnExit()
  val writer = Files.newOutputStream(file.toPath)
  try {
    lines.foreach { l =>
      writer.write(l.getBytes)
      // BUG FIX: the original wrote "\\n" — the two characters '\' and 'n' — so all
      // entries ended up on one logical properties line. A real newline is required
      // to terminate each entry.
      writer.write("\n".getBytes)
    }
    file.getAbsolutePath
  } finally writer.close()
}
}
| guozhangwang/kafka | core/src/test/scala/unit/kafka/security/authorizer/AclAuthorizerTest.scala | Scala | apache-2.0 | 50,778 |
package se.lu.nateko.cp.meta.test
import org.scalatest.funspec.AnyFunSpec
import java.net.URI
import se.lu.nateko.cp.meta.onto.Onto
import se.lu.nateko.cp.meta.onto.InstOnto
class InstOntoTests extends AnyFunSpec{
  // Shared fixtures: the ontology under test and an instance-ontology wrapper around it.
  val onto = new Onto(TestConfig.owlOnto)
  val instOnto = new InstOnto(TestConfig.instServer, onto)

  describe("getIndividual"){
    it("correctly constructs display name for Membership individual"){
      val membershipUri = new URI(TestConfig.instOntUri + "atcDirector")
      val individual = instOnto.getIndividual(membershipUri)
      assert(individual.resource.displayName === "Director at Atmosphere Thematic Centre")
    }
  }

  describe("getRangeValues"){
    it("lists LatLonBox instances as range values of hasSpatialCoverage (on Station class instances)"){
      val stationClass = new URI(TestConfig.ontUri + "Station")
      val spatialCovProp = new URI(TestConfig.ontUri + "hasSpatialCoverage")
      val globalBox = new URI(TestConfig.instOntUri + "globalLatLonBox")
      val rangeUris = instOnto.getRangeValues(stationClass, spatialCovProp).map(_.uri)
      assert(rangeUris.contains(globalBox))
    }
  }
}
| ICOS-Carbon-Portal/meta | src/test/scala/se/lu/nateko/cp/meta/test/InstOntoTests.scala | Scala | gpl-3.0 | 1,083 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.util
import org.apache.flink.table.plan.`trait`.{AccModeTraitDef, UpdateAsRetractionTraitDef}
import org.apache.flink.table.plan.nodes.calcite.Sink
import org.apache.flink.table.plan.nodes.exec.{ExecNode, ExecNodeVisitorImpl}
import org.apache.flink.table.plan.nodes.physical.stream.StreamPhysicalRel
import com.google.common.collect.{Maps, Sets}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.externalize.RelWriterImpl
import org.apache.calcite.sql.SqlExplainLevel
import org.apache.calcite.util.Pair
import java.io.{PrintWriter, StringWriter}
import java.util
import java.util.concurrent.atomic.AtomicInteger
import java.util.{ArrayList => JArrayList, List => JList}
import scala.collection.JavaConversions._
/**
 * A utility class for converting an exec node plan to a string in a tree style.
 *
 * The implementation is currently based on RelNode#explain.
 */
object ExecNodePlanDumper {
/**
 * Converts an [[ExecNode]] tree to a string as a tree style.
 *
 * @param node the ExecNode to convert
 * @param detailLevel detailLevel defines detail levels for EXPLAIN PLAN.
 * @param withExecNodeId whether including ID of ExecNode
 * @param withRetractTraits whether including Retraction Traits of RelNode corresponding to
 * an ExecNode (only apply to StreamPhysicalRel node at present)
 * @param withOutputType whether including output rowType
 * @param withResource whether including resource information of each node
 *                     (NOTE(review): inferred from the parameter name — confirm)
 * @return explain plan of ExecNode
 */
def treeToString(
    node: ExecNode[_, _],
    detailLevel: SqlExplainLevel = SqlExplainLevel.EXPPLAN_ATTRIBUTES,
    withExecNodeId: Boolean = false,
    withRetractTraits: Boolean = false,
    withOutputType: Boolean = false,
    withResource: Boolean = false): String = {
  // Single-tree case: delegate directly; no reuse bookkeeping is needed, so
  // stopExplainNodes and reuseInfoMap keep their default None.
  doConvertTreeToString(
    node,
    detailLevel = detailLevel,
    withExecNodeId = withExecNodeId,
    withRetractTraits = withRetractTraits,
    withOutputType = withOutputType,
    withResource = withResource)
}
/**
 * Converts an [[ExecNode]] DAG to a string as a tree style.
 *
 * @param nodes the ExecNodes to convert
 * @param detailLevel detailLevel defines detail levels for EXPLAIN PLAN.
 * @param withExecNodeId whether including ID of ExecNode
 * @param withRetractTraits whether including Retraction Traits of RelNode corresponding to
 * an ExecNode (only apply to StreamPhysicalRel node at present)
 * @param withOutputType whether including output rowType
 * @return explain plan of ExecNode
 */
def dagToString(
    nodes: Seq[ExecNode[_, _]],
    detailLevel: SqlExplainLevel = SqlExplainLevel.EXPPLAN_ATTRIBUTES,
    withExecNodeId: Boolean = false,
    withRetractTraits: Boolean = false,
    withOutputType: Boolean = false,
    withResource: Boolean = false): String = {
  // A single root degenerates to the plain tree case — no reuse bookkeeping needed.
  if (nodes.length == 1) {
    return treeToString(
      nodes.head,
      detailLevel,
      withExecNodeId = withExecNodeId,
      withRetractTraits = withRetractTraits,
      withOutputType = withOutputType,
      withResource = withResource)
  }
  // First pass: assign reuse ids to nodes that appear more than once in the DAG.
  val reuseInfoBuilder = new ReuseInfoBuilder()
  nodes.foreach(reuseInfoBuilder.visit)
  // node sets that stop explain when meet them
  val stopExplainNodes = Sets.newIdentityHashSet[ExecNode[_, _]]()
  // mapping node to reuse info, the map value is a tuple2,
  // the first value of the tuple is reuse id,
  // the second value is true if the node is first visited else false.
  val reuseInfoMap = Maps.newIdentityHashMap[ExecNode[_, _], (Integer, Boolean)]()
  // mapping node object to visited times
  val mapNodeToVisitedTimes = Maps.newIdentityHashMap[ExecNode[_, _], Int]()
  val sb = new StringBuilder()
  // Second pass: print each Sink root and each first-visited reused subtree once;
  // later references to a reused subtree are cut off via stopExplainNodes.
  val visitor = new ExecNodeVisitorImpl {
    override def visit(node: ExecNode[_, _]): Unit = {
      // Only recurse into children on the first visit of a shared node.
      val visitedTimes = mapNodeToVisitedTimes.getOrDefault(node, 0) + 1
      mapNodeToVisitedTimes.put(node, visitedTimes)
      if (visitedTimes == 1) {
        super.visit(node)
      }
      val reuseId = reuseInfoBuilder.getReuseId(node)
      val isReuseNode = reuseId.isDefined
      if (node.isInstanceOf[Sink] || (isReuseNode && !reuseInfoMap.containsKey(node))) {
        if (isReuseNode) {
          // Mark as "first visit" so the subtree body is printed in full this time.
          reuseInfoMap.put(node, (reuseId.get, true))
        }
        val reusePlan = doConvertTreeToString(
          node,
          detailLevel = detailLevel,
          withExecNodeId = withExecNodeId,
          withRetractTraits = withRetractTraits,
          withOutputType = withOutputType,
          stopExplainNodes = Some(stopExplainNodes),
          reuseInfoMap = Some(reuseInfoMap),
          withResource = withResource)
        sb.append(reusePlan).append(System.lineSeparator)
        if (isReuseNode) {
          // update visit info after the reuse node visited
          stopExplainNodes.add(node)
          reuseInfoMap.put(node, (reuseId.get, false))
        }
      }
    }
  }
  nodes.foreach(visitor.visit)
  if (sb.length() > 0) {
    // delete last line separator
    sb.deleteCharAt(sb.length - 1)
  }
  sb.toString()
}
private def doConvertTreeToString(
node: ExecNode[_, _],
detailLevel: SqlExplainLevel = SqlExplainLevel.EXPPLAN_ATTRIBUTES,
withExecNodeId: Boolean = false,
withRetractTraits: Boolean = false,
withOutputType: Boolean = false,
stopExplainNodes: Option[util.Set[ExecNode[_, _]]] = None,
reuseInfoMap: Option[util.IdentityHashMap[ExecNode[_, _], (Integer, Boolean)]] = None,
withResource: Boolean = false
): String = {
// TODO refactor this part of code
// get ExecNode explain value by RelNode#explain now
val sw = new StringWriter
val planWriter = new NodeTreeWriterImpl(
node,
new PrintWriter(sw),
explainLevel = detailLevel,
withExecNodeId = withExecNodeId,
withRetractTraits = withRetractTraits,
withOutputType = withOutputType,
stopExplainNodes = stopExplainNodes,
reuseInfoMap = reuseInfoMap,
withResource = withResource)
node.asInstanceOf[RelNode].explain(planWriter)
sw.toString
}
}
/**
 * Assigns reuse ids to the nodes of an ExecNode DAG that are referenced
 * from more than one parent (i.e. have multiple outputs).
 */
class ReuseInfoBuilder extends ExecNodeVisitorImpl {
  // nodes already encountered during traversal (identity semantics)
  private val seen = Sets.newIdentityHashSet[ExecNode[_, _]]()
  // shared node -> its assigned reuse id
  private val reuseIds = Maps.newIdentityHashMap[ExecNode[_, _], Integer]()
  private val idCounter = new AtomicInteger(0)

  override def visit(node: ExecNode[_, _]): Unit = {
    // `add` returns false when the node was seen before: that makes it reusable
    if (!seen.add(node)) {
      if (!reuseIds.containsKey(node)) {
        reuseIds.put(node, idCounter.incrementAndGet())
      }
    } else {
      // first encounter: recurse into the inputs
      super.visit(node)
    }
  }

  /**
   * Returns reuse id if the given node is a reuse node (that means it has multiple outputs),
   * else None.
   */
  def getReuseId(node: ExecNode[_, _]): Option[Integer] =
    Option(reuseIds.get(node))
}
/**
 * Convert node tree to string as a tree style.
 *
 * When `stopExplainNodes`/`reuseInfoMap` are supplied (DAG mode, driven by
 * `dagToString`), already-explained reuse nodes are printed as `Reused`
 * references and their sub-trees are not expanded again.
 */
class NodeTreeWriterImpl(
    node: ExecNode[_, _],
    pw: PrintWriter,
    explainLevel: SqlExplainLevel = SqlExplainLevel.EXPPLAN_ATTRIBUTES,
    withExecNodeId: Boolean = false,
    withRetractTraits: Boolean = false,
    withOutputType: Boolean = false,
    stopExplainNodes: Option[util.Set[ExecNode[_, _]]] = None,
    reuseInfoMap: Option[util.IdentityHashMap[ExecNode[_, _], (Integer, Boolean)]] = None,
    withResource: Boolean = false)
  extends RelWriterImpl(pw, explainLevel, false) {

  // the two DAG-mode parameters must be provided together (or not at all)
  require((stopExplainNodes.isEmpty && reuseInfoMap.isEmpty) ||
    (stopExplainNodes.isDefined && reuseInfoMap.isDefined))

  // use reuse info based on `reuseInfoMap` if it's not None,
  // else rebuild it using `ReuseInfoBuilder`
  class ReuseInfo {
    // mapping node object to visited times
    var mapNodeToVisitedTimes: util.Map[ExecNode[_, _], Int] = _
    var reuseInfoBuilder: ReuseInfoBuilder = _

    // tree mode only: build reuse info for this single tree up front
    if (reuseInfoMap.isEmpty) {
      reuseInfoBuilder = new ReuseInfoBuilder()
      reuseInfoBuilder.visit(node)
      mapNodeToVisitedTimes = Maps.newIdentityHashMap[ExecNode[_, _], Int]()
    }

    /**
     * Returns reuse id if the given node is a reuse node, else None.
     */
    def getReuseId(node: ExecNode[_, _]): Option[Integer] = {
      reuseInfoMap match {
        case Some(map) => if (map.containsKey(node)) Some(map.get(node)._1) else None
        case _ => reuseInfoBuilder.getReuseId(node)
      }
    }

    /**
     * Returns true if the given node is first visited, else false.
     */
    def isFirstVisited(node: ExecNode[_, _]): Boolean = {
      reuseInfoMap match {
        case Some(map) => if (map.containsKey(node)) map.get(node)._2 else true
        case _ => mapNodeToVisitedTimes.get(node) == 1
      }
    }

    /**
     * Updates visited times for given node if `reuseInfoMap` is None.
     */
    def addVisitedTimes(node: ExecNode[_, _]): Unit = {
      reuseInfoMap match {
        case Some(_) => // do nothing
        case _ =>
          val visitedTimes = mapNodeToVisitedTimes.getOrDefault(node, 0) + 1
          mapNodeToVisitedTimes.put(node, visitedTimes)
      }
    }
  }

  val reuseInfo = new ReuseInfo

  // per ancestor level: whether that ancestor was the last child of its parent;
  // drives the branch-prefix characters drawn before each node name
  var lastChildren: Seq[Boolean] = Nil

  // current depth in the rendered tree (0 for the root node)
  var depth = 0

  override def explain_(rel: RelNode, values: JList[Pair[String, AnyRef]]): Unit = {
    val node = rel.asInstanceOf[ExecNode[_, _]]
    reuseInfo.addVisitedTimes(node)
    val inputs = rel.getInputs
    // whether explain input nodes of current node
    val explainInputs = needExplainInputs(node)
    val mq = rel.getCluster.getMetadataQuery
    if (explainInputs && !mq.isVisibleInExplain(rel, explainLevel)) {
      // render children in place of this, at same level
      inputs.toSeq.foreach(_.explain(this))
      return
    }
    val s = new StringBuilder
    if (depth > 0) {
      // draw the tree branch prefix for the current row
      lastChildren.init.foreach { isLast =>
        s.append(if (isLast) " " else ": ")
      }
      s.append(if (lastChildren.last) "+- " else ":- ")
    }
    val reuseId = reuseInfo.getReuseId(node)
    val isReuseNode = reuseId.isDefined
    val isFirstVisited = reuseInfo.isFirstVisited(node)
    // whether output detail
    val printDetail = !isReuseNode || isFirstVisited
    if (isReuseNode && !isFirstVisited) {
      s.append("Reused")
    } else {
      // strip the physical-plan prefix ("BatchExec"/"StreamExec") from the name
      rel.getRelTypeName match {
        case name if name.startsWith("BatchExec") => s.append(name.substring(9))
        case name if name.startsWith("StreamExec") => s.append(name.substring(10))
        case name => s.append(name)
      }
    }
    val printValues = new JArrayList[Pair[String, AnyRef]]()
    if (printDetail) {
      if (explainLevel != SqlExplainLevel.NO_ATTRIBUTES) {
        printValues.addAll(values)
      }
      if (withExecNodeId) {
        // use RelNode's id now
        printValues.add(Pair.of("__id__", rel.getId.toString))
      }
      if (withResource) {
        printValues.add(Pair.of("resource", node.getResource))
      }
      if (withRetractTraits) {
        // retraction traits only exist on stream physical nodes
        rel match {
          case streamRel: StreamPhysicalRel =>
            val traitSet = streamRel.getTraitSet
            printValues.add(
              Pair.of("updateAsRetraction",
                traitSet.getTrait(UpdateAsRetractionTraitDef.INSTANCE)))
            printValues.add(
              Pair.of("accMode", traitSet.getTrait(AccModeTraitDef.INSTANCE)))
          case _ => // ignore
        }
      }
    }
    if (isReuseNode) {
      // first occurrence advertises its reuse_id, later ones reference it
      if (isFirstVisited) {
        printValues.add(Pair.of("reuse_id", reuseId.get))
      } else {
        printValues.add(Pair.of("reference_id", reuseId.get))
      }
    }
    if (!printValues.isEmpty) {
      // render "(k1=[v1], k2=[v2], ...)", skipping RelNode-valued attributes
      var j = 0
      printValues.toSeq.foreach {
        case value if value.right.isInstanceOf[RelNode] => // do nothing
        case value =>
          if (j == 0) s.append("(") else s.append(", ")
          j = j + 1
          s.append(value.left).append("=[").append(value.right).append("]")
      }
      if (j > 0) s.append(")")
    }
    if (withOutputType) {
      s.append(", rowType=[").append(rel.getRowType.toString).append("]")
    }
    if (explainLevel == SqlExplainLevel.ALL_ATTRIBUTES && printDetail) {
      s.append(": rowcount = ")
        .append(mq.getRowCount(rel))
        .append(", cumulative cost = ")
        .append(mq.getCumulativeCost(rel))
    }
    pw.println(s)
    // recurse into all inputs but the last, marked as non-last children...
    if (explainInputs && inputs.length > 1 && printDetail) {
      inputs.toSeq.init.foreach { rel =>
        depth = depth + 1
        lastChildren = lastChildren :+ false
        rel.explain(this)
        depth = depth - 1
        lastChildren = lastChildren.init
      }
    }
    // ...then the last input, marked as the last child of this node
    if (explainInputs && !inputs.isEmpty && printDetail) {
      depth = depth + 1
      lastChildren = lastChildren :+ true
      inputs.toSeq.last.explain(this)
      depth = depth - 1
      lastChildren = lastChildren.init
    }
  }

  /**
   * Returns false if `stopExplainNodes` contains the given node (its sub-tree has
   * already been explained elsewhere and must not be expanded again), else true.
   */
  private def needExplainInputs(node: ExecNode[_, _]): Boolean = {
    stopExplainNodes match {
      case Some(nodes) => !nodes.contains(node)
      case _ => true
    }
  }
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/util/ExecNodePlanDumper.scala | Scala | apache-2.0 | 14,437 |
/*package doodle
package js
import doodle.core.{Color, Stroke, Vec}
import doodle.backend.Canvas
import org.scalajs.dom
class HtmlCanvas(canvas: dom.raw.HTMLCanvasElement) extends Canvas {
val context = canvas.getContext("2d").asInstanceOf[dom.raw.CanvasRenderingContext2D]
var animationFrameCallbackHandle: Option[Int] = None
// The origin in canvas coordinates
var center = Vec(0, 0)
// Convert from canvas coordinates to screen coordinates
def canvasToScreen(x: Double, y: Double): Vec = {
val offsetX = canvas.width / 2
val offsetY = canvas.height / 2
val Vec(centerX, centerY) = center
Vec(x - centerX + offsetX, offsetY - y + centerY)
}
def setOrigin(x: Int, y: Int): Unit = {
center = Vec(x, y)
}
def clear(color: Color): Unit = {
val oldFill = context.fillStyle
context.fillStyle = color.toCanvas
context.fillRect(0, 0, canvas.width, canvas.height)
context.fillStyle = oldFill
}
def setSize(width: Int, height: Int): Unit = {
canvas.width = width
canvas.height = height
}
def setStroke(stroke: Stroke): Unit = {
context.lineWidth = stroke.width
context.lineCap = stroke.cap.toCanvas
context.lineJoin = stroke.join.toCanvas
context.strokeStyle = stroke.color.toCanvas
}
def stroke(): Unit =
context.stroke()
def setFill(color: Color): Unit = {
context.fillStyle = color.toCanvas
}
def fill(): Unit =
context.fill()
def beginPath(): Unit =
context.beginPath()
def moveTo(x: Double, y: Double): Unit = {
val Vec(screenX, screenY) = canvasToScreen(x, y)
context.moveTo(screenX, screenY)
}
def lineTo(x: Double, y: Double): Unit = {
val Vec(screenX, screenY) = canvasToScreen(x, y)
context.lineTo(screenX, screenY)
}
def bezierCurveTo(cp1x: Double, cp1y: Double, cp2x: Double, cp2y: Double, endX: Double, endY: Double): Unit = {
val Vec(screenCp1X, screenCp1Y) = canvasToScreen(cp1x, cp1y)
val Vec(screenCp2X, screenCp2Y) = canvasToScreen(cp2x, cp2y)
val Vec(screenEndX, screenEndY) = canvasToScreen(endX, endY)
context.bezierCurveTo(
screenCp1X , screenCp1Y,
screenCp2X , screenCp2Y,
screenEndX , screenEndY
)
}
def endPath(): Unit =
context.closePath()
def setAnimationFrameCallback(callback: () => Unit): Unit = {
animationFrameCallbackHandle.foreach(handle => dom.window.cancelAnimationFrame(handle))
animationFrameCallbackHandle =
Some(dom.window.requestAnimationFrame((_: Double)=> callback()))
}
}
object HtmlCanvas {
implicit def canvas(implicit elt: dom.raw.HTMLCanvasElement): Canvas =
new HtmlCanvas(elt)
def fromElementId(id: String): Canvas = {
val elt = dom.document.getElementById(id).asInstanceOf[dom.raw.HTMLCanvasElement]
canvas(elt)
}
}
*/
| Angeldude/doodle | js/src/main/scala/doodle/js/HtmlCanvas.scala | Scala | apache-2.0 | 2,806 |
/**
* Copyright (C) 2013 Stefan Niederhauser (nidin@gmx.ch)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package guru.nidi.atlassian.remote.query
import org.scalatest.FlatSpec
/**
*
*/
/**
 * Unit tests covering JqlBuilder's JQL concatenation helpers:
 * combining two queries with "and" and merging their "order by" clauses.
 */
class JqlBuilderTest extends FlatSpec {

  behavior of "JqlBuilder"

  it should "concat query correctly with and" in {
    assertResult("(a) and (b)")(JqlBuilder.and("a", "b"))
    // empty or null operands collapse to the other side
    assertResult("b")(JqlBuilder.and("", "b"))
    assertResult("b")(JqlBuilder.and(null, "b"))
    assertResult("a")(JqlBuilder.and("a", ""))
    assertResult("a")(JqlBuilder.and("a", null))
  }

  it should "concat order correctly with and" in {
    // "order by" clauses are hoisted to the end and merged in operand order
    assertResult("(a) and (b) order by c")(JqlBuilder.and("a order by c", "b"))
    assertResult("(a) and (b) order by c")(JqlBuilder.and("a", "b order by c"))
    assertResult("(a) and (b) order by c,d")(JqlBuilder.and("a order by c", "b order by d"))
  }

  it should "construct correct jql strings" in {
    assertResult("(a) and (b)")(JqlBuilder.jql("a", "b", null))
    assertResult("(a) and (b) order by c")(JqlBuilder.jql("a", "b", "c"))
  }
}
| nidi3/simple-remote-atlassian | src/test/scala/guru/nidi/atlassian/remote/query/JqlBuilderTest.scala | Scala | apache-2.0 | 1,558 |
package com.sksamuel.elastic4s.requests.searches.template
import com.fasterxml.jackson.annotation.JsonProperty
import com.sksamuel.elastic4s.requests.searches.{GetSearchTemplateRequest, PutSearchTemplateRequest, RemoveSearchTemplateRequest, SearchResponse, TemplateSearchRequest}
import com.sksamuel.elastic4s.{ElasticRequest, Handler, HttpEntity, HttpResponse, IndexesAndTypes, ResponseHandler}
import com.sksamuel.exts.OptionImplicits._
/**
 * elastic4s request handlers for Elasticsearch search templates: executing a
 * template search and storing / fetching / deleting templates under `/_scripts/{name}`.
 */
trait SearchTemplateHandlers {

  /** Executes a template search, scoping the endpoint by indexes and/or types when given. */
  implicit object TemplateSearchHandler extends Handler[TemplateSearchRequest, SearchResponse] {
    override def build(req: TemplateSearchRequest): ElasticRequest = {
      // endpoint shape depends on which of indexes / types were specified
      val endpoint = req.indexesAndTypes match {
        case IndexesAndTypes(Nil, Nil) => "/_search/template"
        case IndexesAndTypes(indexes, Nil) => "/" + indexes.mkString(",") + "/_search/template"
        case IndexesAndTypes(Nil, types) => "/_all/" + types.mkString(",") + "/_search/template"
        case IndexesAndTypes(indexes, types) =>
          "/" + indexes.mkString(",") + "/" + types.mkString(",") + "/_search/template"
      }
      val body = TemplateSearchBuilderFn(req).string()
      ElasticRequest("POST", endpoint, HttpEntity(body, "application/json"))
    }
  }

  /** Deletes a stored search template by name. */
  implicit object RemoveSearchTemplateHandler
    extends Handler[RemoveSearchTemplateRequest, RemoveSearchTemplateResponse] {
    override def build(req: RemoveSearchTemplateRequest): ElasticRequest = {
      val endpoint = "/_scripts/" + req.name
      ElasticRequest("DELETE", endpoint)
    }
  }

  /** Stores (creates or replaces) a search template under the given name. */
  implicit object PutSearchTemplateHandler extends Handler[PutSearchTemplateRequest, PutSearchTemplateResponse] {
    override def build(req: PutSearchTemplateRequest): ElasticRequest = {
      val endpoint = "/_scripts/" + req.name
      val body = PutSearchTemplateBuilderFn(req).string()
      val entity = HttpEntity(body, "application/json")
      ElasticRequest("POST", endpoint, entity)
    }
  }

  /** Fetches a stored search template; a 404 is surfaced as None rather than a failure. */
  implicit object GetSearchTemplateHandler
    extends Handler[GetSearchTemplateRequest, Option[GetSearchTemplateResponse]] {

    override def responseHandler: ResponseHandler[Option[GetSearchTemplateResponse]] =
      new ResponseHandler[Option[GetSearchTemplateResponse]] {

        /**
         * Accepts a HttpResponse and returns an Either of an ElasticError or a type specific to the request
         * as determined by the instance of this handler.
         */
        override def handle(response: HttpResponse) =
          response.statusCode match {
            case 200 => Right(ResponseHandler.fromResponse[GetSearchTemplateResponse](response).some)
            case 404 => Right(None) // template not found: success with empty result
            case _ => sys.error(response.entity.map(_.content).getOrElse(""))
          }
      }

    override def build(req: GetSearchTemplateRequest): ElasticRequest = {
      val endpoint = "/_scripts/" + req.name
      ElasticRequest("GET", endpoint)
    }
  }
}
// Response to storing a search template; `acknowledged` mirrors Elasticsearch's ack flag.
case class PutSearchTemplateResponse(acknowledged: Boolean)

// Response to fetching a stored search template; `_id` is mapped onto `id` via Jackson.
case class GetSearchTemplateResponse(@JsonProperty("_id") id: String, lang: String, found: Boolean, template: String)

// Response to deleting a stored search template.
case class RemoveSearchTemplateResponse(acknowledged: Boolean)
| stringbean/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/searches/template/SearchTemplateHandlers.scala | Scala | apache-2.0 | 3,246 |
package org.shapelogic.sc.streams
/**
 * The idea is that you can call a plugin in both active and lazy mode.
 * <br />
 *
 * How should this be instantiated?<br />
 * <br />
 * There should be a constructor where Imageprocessor is set.<br />
 *
 * @tparam E element type of the stream produced by this plugin
 * @author Sami Badawi
 */
trait LazyPlugInFilter[E] {

  /** Used when calling in lazy mode: returns the lazily evaluated result stream. */
  def getStream(): ListStream[E]

  /** Maybe getStreamName would be better. Returns the name identifying the stream. */
  def getStreamName(): String

  /**
   * Maybe setStreamName would be better. Sets the name identifying the stream.
   * Explicit `: Unit` added — abstract defs without a result type (procedure
   * syntax) are deprecated in modern Scala.
   */
  def setStreamName(name: String): Unit
}
| sami-badawi/shapelogic-scala | src/main/scala/org/shapelogic/sc/streams/LazyPlugInFilter.scala | Scala | mit | 541 |
package smarthouse.restapi.http.ws.models
/** Base type for all messages exchanged over the WebSocket API. */
trait ApiMessage {
  // discriminator identifying the concrete message type
  def messageType: String
}

/** An API message that additionally carries an authentication token. */
trait ApiMessageSecure extends ApiMessage
{
  def token: String
}

/** An API message that correlates a reply with its originating request. */
trait CallbackApiMessage extends ApiMessage
{
  def requestId: String
}
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail
import cats.implicits._
import cats.effect.{ContextShift, IO, Timer}
import minitest.SimpleTestSuite
import monix.catnap.ProducerF
import monix.execution.BufferCapacity.{Bounded, Unbounded}
import monix.execution.ChannelType.{MultiProducer, SingleProducer}
import monix.execution.internal.Platform
import monix.execution.{BufferCapacity, Scheduler}
import monix.catnap.SchedulerEffect
/**
 * Stress tests for `Iterant.channel`: N producers concurrently push a known
 * sequence of numbers while M consumers each sum what they receive; the grand
 * total must match the closed-form expectation.
 */
object IterantChannelSuite extends SimpleTestSuite {
  implicit val ec: Scheduler = Scheduler.global

  implicit def contextShift(implicit s: Scheduler): ContextShift[IO] =
    SchedulerEffect.contextShift[IO](s)(IO.ioEffect)
  implicit def timer(implicit s: Scheduler): Timer[IO] =
    SchedulerEffect.timerLiftIO[IO](s)(IO.ioEffect)

  /** Registers an `IO[Unit]`-based test as an async minitest test case. */
  def testIO(name: String)(f: => IO[Unit]) =
    testAsync(name)(f.unsafeToFuture())

  // Items pushed per producer; kept small on JS where execution is slower.
  private val itemsPerProducer = if (Platform.isJVM) 100000 else 100

  // Register one test per (producers, consumers) pair and buffer capacity.
  // This replaces eight previously hand-written, nearly identical test bodies
  // while preserving the original test names and registration order. A fresh
  // capacity instance is created per test, matching the old behavior.
  for {
    (producers, consumers) <- List((4, 4), (1, 4), (4, 1), (1, 1))
    (capacityName, mkCapacity) <- List[(String, () => BufferCapacity)](
      "Bounded(16)" -> (() => Bounded(16)),
      "Unbounded" -> (() => Unbounded()))
  } {
    testIO(s"concurrent sum; producers=$producers, consumers=$consumers, capacity=$capacityName") {
      testConcurrentSum(producers, consumers, mkCapacity(), count = itemsPerProducer)
    }
  }

  /**
   * Core scenario: each of `producers` workers pushes `count, count-1, ..., 1`
   * into the channel; each of `consumers` workers folds the stream into a sum.
   * The asserted total is `producers * consumers * count * (count + 1) / 2`,
   * i.e. every consumer observes every pushed item.
   */
  def testConcurrentSum(producers: Int, consumers: Int, capacity: BufferCapacity, count: Int) = {
    // One logical producer; replicated and run in parallel when producers > 1.
    // Stops early if `push` reports the channel was halted.
    def produce(channel: ProducerF[IO, Option[Throwable], Int]): IO[Unit] = {
      def loop(channel: ProducerF[IO, Option[Throwable], Int], n: Int): IO[Unit] =
        if (n > 0) channel.push(n).flatMap {
          case true => loop(channel, n - 1)
          case false => IO.unit
        }
        else {
          IO.unit
        }

      val task = loop(channel, count)
      if (producers < 2)
        task
      else
        (0 until producers).map(_ => task).toList.parSequence_
    }

    // Each consumer sums the stream; the per-consumer sums are added together.
    def consumeMany(channel: Iterant[IO, Int]): IO[Long] = {
      val task = channel.foldLeftL(0L)(_ + _)

      if (consumers < 2) {
        task
      } else {
        val list = (0 until consumers).map(_ => task).toList
        list.parSequence.map(_.sum)
      }
    }

    val pt = if (producers > 1) MultiProducer else SingleProducer
    Iterant[IO].channel[Int](capacity, producerType = pt).flatMap {
      case (producer, stream) =>
        for {
          fiber <- consumeMany(stream).start
          _ <- producer.awaitConsumers(consumers)
          _ <- produce(producer)
          _ <- producer.halt(None)
          sum <- fiber.join
        } yield {
          val perProducer = count.toLong * (count + 1) / 2
          assertEquals(sum, perProducer * producers * consumers)
        }
    }
  }
}
| monixio/monix | monix-tail/shared/src/test/scala/monix/tail/IterantChannelSuite.scala | Scala | apache-2.0 | 4,776 |
package controllers
import javax.inject._
import play.api.Logger
import play.api.libs.functional.syntax._
import play.api.libs.json.Reads._
import play.api.libs.json._
import play.api.mvc._
import play.modules.reactivemongo._
import play.api.libs.concurrent.Execution.Implicits._
import reactivemongo.api.ReadPreference
import reactivemongo.play.json._
import reactivemongo.play.json.collection.JSONCollection
import service.PlayerService
import scala.concurrent.{ExecutionContext, Future}
import models.PlayerModel
import models.NewPlayer
@Singleton
class PlayerController @Inject() (playerService: PlayerService) extends Controller {

  // JSON validator that keeps only the "name" and "session" string branches of
  // the incoming payload; any other shape fails the transform below.
  val transformer: Reads[JsObject] =
    Reads.jsPickBranch[JsString](__ \\ "name") and
      Reads.jsPickBranch[JsString](__ \\ "session") reduce

  /** POST endpoint: validates the JSON body and creates a new player via the service. */
  def create = Action.async(parse.json) { request =>
    request.body.transform(transformer) match {
      case JsSuccess(player, _) => playerService.createNewPlayer(NewPlayer.fromJson(player)) map { res => Ok(res) }
      case _ => Future.successful(BadRequest("Invalid JSON"))
    } }

  /** GET endpoint: looks up a player by name and returns the service's result. */
  def findByName(name: String) = Action.async {
    playerService.findPlayer(name) map { res => Ok(res) }
  }
}
package org.broadinstitute.clio.client.util
import java.io.{IOException, PrintWriter, StringWriter}
import java.net.URI
import akka.stream.scaladsl.Sink
import better.files.File
import com.google.cloud.storage.BlobInfo
import com.google.cloud.storage.contrib.nio.testing.LocalStorageHelper
import io.circe.syntax._
import org.broadinstitute.clio.client.BaseClientSpec
import org.broadinstitute.clio.transfer.model.{CramIndex, UbamIndex}
import org.broadinstitute.clio.transfer.model.ubam.UbamMetadata
import org.scalamock.scalatest.AsyncMockFactory
import org.scalatest.AsyncTestSuite
import scala.collection.JavaConverters._
import scala.collection.immutable
/**
 * Unit tests for [[IoUtil]], exercised against Google's in-memory storage
 * emulation (`LocalStorageHelper`), so no real GCS access is needed.
 *
 * Change vs. previous revision: the deprecated lowercase long-literal suffix
 * (`10l`) is replaced with `10L`, which cannot be confused with the digit 1.
 */
class IoUtilSpec extends BaseClientSpec with AsyncTestSuite with AsyncMockFactory {
  behavior of "IoUtil"

  /** Builds the `BlobInfo` used to seed the fake storage for a gs:// URI. */
  private def uriToBlobInfo(uri: URI) = {
    BlobInfo.newBuilder(IoUtil.toBlobId(uri)).build()
  }

  /** Returns a fresh in-memory Storage service, isolating state between tests. */
  private def createStorage = {
    LocalStorageHelper.getOptions.getService
  }

  it should "read a file from file location" in {
    val contents = "I'm a file!"
    File.temporaryFile() { f =>
      new IoUtil(createStorage)
        .readFile(f.write(contents).uri) should be(contents)
    }
  }

  it should "identify google directories" in {
    val ioUtil = new IoUtil(createStorage)
    ioUtil.isGoogleDirectory(URI.create("gs://bucket/directory/")) should be(true)
    ioUtil.isGoogleDirectory(URI.create("gs://bucket/file.txt")) should be(false)
    ioUtil.isGoogleDirectory(URI.create("foo")) should be(false)
  }

  it should "build streams for deleting multiple cloud objects" in {
    val uris = immutable.Iterable(
      URI.create("gs://bucket/to/the.object"),
      URI.create("gs://bucket/to/the/other.object")
    )
    val storage = createStorage
    uris.foreach(uri => storage.create(uriToBlobInfo(uri)))

    val ioUtil = new IoUtil(storage)
    ioUtil.deleteCloudObjects(uris).runWith(Sink.head).map { result =>
      result should be(())
      // both objects must be gone afterwards
      storage.list("bucket").getValues.asScala should be(empty)
    }
  }

  it should "not fail when building a stream for zero deletes" in {
    val stream = new IoUtil(createStorage)
      .deleteCloudObjects(immutable.Iterable.empty)
    stream.runWith(Sink.head).map(_ should be(()))
  }

  it should "include all failures in the exception message when parallel deletes fail" in {
    val urisToFail =
      Set("gs://path/to/the.object", "gs://path/to/the/other.object").map(URI.create)
    val uriToSucceed = URI.create("gs://some/other/object")

    val storage = createStorage
    storage.create(uriToBlobInfo(uriToSucceed))

    val stream = new IoUtil(storage).deleteCloudObjects(urisToFail + uriToSucceed)
    recoverToExceptionIf[IOException](stream.runWith(Sink.ignore)).map { ex =>
      // render the full stack trace so we can assert on the aggregated message
      val sw = new StringWriter
      ex.printStackTrace(new PrintWriter(sw))
      val errorText = sw.toString

      urisToFail.foreach { uri =>
        errorText should include(uri.toString)
      }
      // the delete of the existing object should still have gone through
      storage.list("bucket").getValues.asScala should be(empty)
    }
  }

  it should "read google object data" in {
    val location = URI.create("gs://bucket/path/data")
    val contents = "my data"

    val storage = createStorage
    storage.create(uriToBlobInfo(location), contents.getBytes)

    new IoUtil(storage).readFile(location) should be(contents)
  }

  it should "write google object data" in {
    val location = URI.create("gs://bucket/path/data")
    val contents = "my data"

    val storage = createStorage
    new IoUtil(storage).writeGoogleObjectData(contents, location)
    storage.readAllBytes(IoUtil.toBlobId(location)) should be(contents.getBytes)
  }

  it should "write google object data when the file already exists" in {
    val location = URI.create("gs://bucket/path/data")
    val contents = "my data"

    val storage = createStorage
    storage.create(uriToBlobInfo(location), "original data".getBytes)

    // the write must overwrite, not append or fail
    new IoUtil(storage).writeGoogleObjectData(contents, location)
    storage.readAllBytes(IoUtil.toBlobId(location)) should be(contents.getBytes)
  }

  it should "detect if a google object exists or not" in {
    val location = URI.create("gs://bucket/path/data")

    val storage = createStorage
    val ioutil = new IoUtil(storage)

    ioutil.googleObjectExists(location) should be(false)
    storage.create(uriToBlobInfo(location), "data".getBytes)
    ioutil.googleObjectExists(location) should be(true)
  }

  it should "copy a google object" in {
    val source = URI.create("gs://bucket/path/data")
    val destination = URI.create("gs://bucket/path/newdata")
    val contents = "my data"

    val storage = createStorage
    storage.create(uriToBlobInfo(source), contents.getBytes)

    new IoUtil(storage).copyGoogleObject(source, destination)
    storage.readAllBytes(IoUtil.toBlobId(destination)) should be(contents.getBytes)
  }

  it should "list all children of objects" in {
    val source = URI.create("gs://bucket/path/data")
    val source2 = URI.create("gs://bucket/path/data2")
    val expected = Seq(source, source2)
    val contents = "my data"

    val storage = createStorage
    storage.create(uriToBlobInfo(source), contents.getBytes)
    storage.create(uriToBlobInfo(source2), contents.getBytes)

    val ioUtil = new IoUtil(storage)
    ioUtil.listGoogleObjects(URI.create("gs://bucket/path/")) should contain theSameElementsAs expected
  }

  it should "parse a metadata json" in {
    val metadata = UbamMetadata(ubamSize = Some(10L), ubamMd5 = Some(Symbol("md5")))
    val metadataFile =
      File.newTemporaryFile().deleteOnExit().write(metadata.asJson.printWith(implicitly))

    new IoUtil(createStorage)
      .readMetadata(UbamIndex)(metadataFile.uri)
      .map { readMetadata =>
        readMetadata should be(metadata)
      }
      .runWith(Sink.head)
  }

  it should "fail to decode incorrect metadata types" in {
    val metadata = UbamMetadata(ubamSize = Some(10L), ubamMd5 = Some(Symbol("md5")))
    val metadataFile =
      File.newTemporaryFile().deleteOnExit().write(metadata.asJson.printWith(implicitly))

    // reading Ubam metadata through the Cram index must fail decoding
    recoverToSucceededIf[IllegalArgumentException] {
      new IoUtil(createStorage)
        .readMetadata(CramIndex)(metadataFile.uri)
        .runWith(Sink.head)
    }
  }

  it should "fail to parse invalid json" in {
    val metadataFile = File.newTemporaryFile().deleteOnExit().write("{not valid JSON'")

    recoverToSucceededIf[IllegalArgumentException] {
      new IoUtil(createStorage)
        .readMetadata(CramIndex)(metadataFile.uri)
        .runWith(Sink.ignore)
    }
  }
}
| broadinstitute/clio | clio-client/src/test/scala/org/broadinstitute/clio/client/util/IoUtilSpec.scala | Scala | bsd-3-clause | 6,524 |
import sbt._
import Keys._
object build extends Build {
  // Settings shared by every subproject: compiler flags, publishing defaults
  // (publishing disabled here; enabled selectively in publishableSettings),
  // POM metadata, and the macro-paradise compiler plugin.
  lazy val sharedSettings = Defaults.defaultSettings ++ Seq(
    scalaVersion := "2.11.6",
    crossVersion := CrossVersion.binary,
    version := "0.1.0-SNAPSHOT",
    organization := "org.scalameta",
    description := "Metaprogramming and hosting APIs of scala.meta",
    resolvers += Resolver.sonatypeRepo("snapshots"),
    resolvers += Resolver.sonatypeRepo("releases"),
    publishMavenStyle := true,
    publishArtifact in Compile := false,
    publishArtifact in Test := false,
    scalacOptions ++= Seq("-deprecation", "-feature", "-optimise", "-unchecked"),
    scalacOptions in (Compile, doc) ++= Seq("-skip-packages", "scala.meta.internal.ast:scala.meta.internal.semantic:scala.meta.internal.tql"),
    scalacOptions in (Compile, doc) ++= Seq("-implicits", "-implicits-hide:.,scala.meta.syntactic.Api.XtensionInputLike,scala.meta.ui.Api.XtensionShow"),
    scalacOptions in (Compile, doc) ++= Seq("-groups"),
    parallelExecution in Test := false, // hello, reflection sync!!
    logBuffered := false,
    // allow overriding the Scala distribution via -Dcore.scala.home=<path>
    scalaHome := {
      val scalaHome = System.getProperty("core.scala.home")
      if (scalaHome != null) {
        println(s"Going for custom scala home at $scalaHome")
        Some(file(scalaHome))
      } else None
    },
    publishMavenStyle := true,
    publishOnlyWhenOnMaster := publishOnlyWhenOnMasterImpl.value,
    // snapshots and releases go to the respective Sonatype repositories
    publishTo <<= version { v: String =>
      val nexus = "https://oss.sonatype.org/"
      if (v.trim.endsWith("SNAPSHOT"))
        Some("snapshots" at nexus + "content/repositories/snapshots")
      else
        Some("releases" at nexus + "service/local/staging/deploy/maven2")
    },
    pomIncludeRepository := { x => false },
    pomExtra := (
      <url>https://github.com/scalameta/scalameta</url>
      <inceptionYear>2014</inceptionYear>
      <licenses>
        <license>
          <name>BSD-like</name>
          <url>http://www.scala-lang.org/downloads/license.html</url>
          <distribution>repo</distribution>
        </license>
      </licenses>
      <scm>
        <url>git://github.com/scalameta/scalameta.git</url>
        <connection>scm:git:git://github.com/scalameta/scalameta.git</connection>
      </scm>
      <issueManagement>
        <system>GitHub</system>
        <url>https://github.com/scalameta/scalameta/issues</url>
      </issueManagement>
      <developers>
        <developer>
          <id>xeno-by</id>
          <name>Eugene Burmako</name>
          <url>http://xeno.by</url>
        </developer>
        <developer>
          <id>densh</id>
          <name>Denys Shabalin</name>
          <url>http://den.sh</url>
        </developer>
      </developers>
    ),
    addCompilerPlugin("org.scalamacros" % "paradise" % "2.1.0-M5" cross CrossVersion.full)
  )
// http://stackoverflow.com/questions/20665007/how-to-publish-only-when-on-master-branch-under-travis-and-sbt-0-13
val publishOnlyWhenOnMaster = taskKey[Unit]("publish task for Travis (don't publish when building pull requests, only publish when the build is triggered by merge into master)")
def publishOnlyWhenOnMasterImpl = Def.taskDyn {
import scala.util.Try
val travis = Try(sys.env("TRAVIS")).getOrElse("false") == "true"
val pr = Try(sys.env("TRAVIS_PULL_REQUEST")).getOrElse("false") != "false"
val branch = Try(sys.env("TRAVIS_BRANCH")).getOrElse("??")
val snapshot = version.value.trim.endsWith("SNAPSHOT")
(travis, pr, branch, snapshot) match {
case (true, false, "master", true) => publish
case _ => Def.task ()
}
}
  // sharedSettings plus artifact publishing enabled; Sonatype credentials are
  // loaded from -Dmaven.settings.file=<settings.xml> when provided, otherwise
  // from SCALAMETA_MAVEN_* environment variables.
  lazy val publishableSettings = sharedSettings ++ Seq(
    publishArtifact in Compile := true,
    publishArtifact in Test := false,
    credentials ++= {
      val mavenSettingsFile = System.getProperty("maven.settings.file")
      if (mavenSettingsFile != null) {
        println("Loading Sonatype credentials from " + mavenSettingsFile)
        try {
          import scala.xml._
          val settings = XML.loadFile(mavenSettingsFile)
          def readServerConfig(key: String) = (settings \\ "settings" \\ "servers" \\ "server" \\ key).head.text
          Some(Credentials(
            "Sonatype Nexus Repository Manager",
            "oss.sonatype.org",
            readServerConfig("username"),
            readServerConfig("password")
          ))
        } catch {
          case ex: Exception =>
            // a malformed/missing settings file downgrades to "no credentials"
            println("Failed to load Maven settings from " + mavenSettingsFile + ": " + ex)
            None
        }
      } else {
        // all four variables must be present, otherwise no credentials are added
        for {
          realm <- sys.env.get("SCALAMETA_MAVEN_REALM")
          domain <- sys.env.get("SCALAMETA_MAVEN_DOMAIN")
          user <- sys.env.get("SCALAMETA_MAVEN_USER")
          password <- sys.env.get("SCALAMETA_MAVEN_PASSWORD")
        } yield {
          println("Loading Sonatype credentials from environment variables")
          Credentials(realm, domain, user, password)
        }
      }
    }.toList
  )
  // Aggregating build root: compiling/testing it fans out to all modules,
  // but it publishes no artifacts of its own (packagedArtifacts is emptied).
  lazy val root = Project(
    id = "root",
    base = file("root")
  ) settings (
    sharedSettings : _*
  ) settings (
    test in Test := (test in tests in Test).value,
    packagedArtifacts := Map.empty
  ) aggregate (scalameta, tokens, foundation, tests)
  // Lowest-level utilities; every other published module depends on it.
  lazy val foundation = Project(
    id = "foundation",
    base = file("foundation")
  ) settings (
    publishableSettings: _*
  ) settings (
    libraryDependencies <+= (scalaVersion)("org.scala-lang" % "scala-reflect" % _ % "provided")
  )
  // Tokenization layer; needs scala-compiler at compile time only ("provided").
  lazy val tokens = Project(
    id = "tokens",
    base = file("tokens")
  ) settings (
    publishableSettings: _*
  ) settings (
    libraryDependencies <+= (scalaVersion)("org.scala-lang" % "scala-reflect" % _ % "provided"),
    libraryDependencies <+= (scalaVersion)("org.scala-lang" % "scala-compiler" % _ % "provided")
  ) dependsOn (foundation)
  // The main published module.
  lazy val scalameta = Project(
    id = "scalameta",
    base = file("scalameta")
  ) settings (
    publishableSettings: _*
  ) settings (
    libraryDependencies <+= (scalaVersion)("org.scala-lang" % "scala-reflect" % _ % "provided"),
    libraryDependencies <+= (scalaVersion)("org.scala-lang" % "scala-compiler" % _ % "provided")
  ) dependsOn (foundation, tokens)
  // Local experimentation area; sources live directly in the project base
  // directory (scalaSource is pointed at the base), never published.
  lazy val sandbox = Project(
    id = "sandbox",
    base = file("sandbox")
  ) settings (
    sharedSettings: _*
  ) settings (
    scalaSource in Compile <<= (baseDirectory in Compile)(base => base)
  ) dependsOn (scalameta)
  // Test suite; exports its own source path as a system property so tests
  // can locate fixture files at runtime. Not published.
  lazy val tests = Project(
    id = "tests",
    base = file("tests")
  ) settings (
    sharedSettings: _*
  ) settings (
    libraryDependencies <+= (scalaVersion)("org.scala-lang" % "scala-reflect" % _),
    libraryDependencies += "org.scalatest" %% "scalatest" % "2.1.3" % "test",
    libraryDependencies += "org.scalacheck" %% "scalacheck" % "1.11.3" % "test",
    packagedArtifacts := Map.empty,
    sourceDirectory in Test := {
      val defaultValue = (sourceDirectory in Test).value
      System.setProperty("sbt.paths.tests.source", defaultValue.getAbsolutePath)
      defaultValue
    }
  ) dependsOn (scalameta)
}
| mdemarne/scalameta | project/build.scala | Scala | bsd-3-clause | 7,081 |
package com.github.jarlakxen.scalatra.rest.queryable
import org.scalatra.ScalatraBase
import scala.reflect.runtime.universe._
import java.util.concurrent.ConcurrentHashMap
import scala.reflect.runtime.universe
/**
 * Mixin for Scalatra routes that converts request parameters into typed
 * values based on the constructor fields of a case class `T`: only params
 * whose name matches a (queryable) constructor field are kept, and their
 * string values are coerced to the field's primitive type where possible.
 */
trait QueryableSupport {
  self : ScalatraBase =>
  /** Name and runtime class of a single case-class constructor field. */
  case class CaseClassField( name : String, `type` : RuntimeClass )
  private val mirror = runtimeMirror( getClass.getClassLoader )
  // Per-TypeTag cache of the reflective field extraction, shared across requests.
  private val cache = new ConcurrentHashMap[TypeTag[_], Seq[CaseClassField]]
  /** Overload that takes `ignoreNotQueryable` explicitly (first parameter list). */
  def paramsOf[T]( ignoreNotQueryable : Boolean )( implicit ttag : TypeTag[T] ) : Map[String, Any] = paramsOf[T]( ttag, ignoreNotQueryable )
  /**
   * Returns the request params that correspond to constructor fields of `T`,
   * with values coerced to Boolean/Int/Long/Float/Double where the field has
   * that type; any other field type keeps the raw String value.
   */
  def paramsOf[T]( implicit ttag : TypeTag[T], ignoreNotQueryable : Boolean = false ) : Map[String, Any] = {
    // Bug fix: ConcurrentHashMap.contains(x) is the legacy Hashtable method
    // that searches *values*, not keys, so the cache never registered a hit
    // and the synchronized reflective extraction ran on every request.
    // containsKey is the correct membership test. The check-then-put is not
    // atomic, but a duplicate put is harmless since extract is deterministic.
    // NOTE(review): the cache is keyed by TypeTag only, so the
    // `ignoreNotQueryable` value of the *first* call for a given T wins for
    // all later calls — confirm this is intended.
    if ( !cache.containsKey( ttag ) ) {
      cache.put( ttag, extract( ignoreNotQueryable, ttag ) )
    }
    val fields = cache.get( ttag )
    params.filter( param => fields.exists( _.name == param._1 ) )
      .map( param => fields.find( _.name == param._1 ).get.`type` match {
        case tp if tp == classOf[Boolean] => ( param._1, param._2.toBoolean )
        case tp if tp == classOf[Int] => ( param._1, param._2.toInt )
        case tp if tp == classOf[Long] => ( param._1, param._2.toLong )
        case tp if tp == classOf[Float] => ( param._1, param._2.toFloat )
        case tp if tp == classOf[Double] => ( param._1, param._2.toDouble )
        case _ => param
      } )
  }
  // Extracts the primary-constructor fields of T, skipping @NotQueryable
  // fields unless ignoreNotQueryable is set. Globally synchronized because
  // Scala 2.10 runtime reflection is not thread safe.
  private def extract[T]( ignoreNotQueryable : Boolean, ttag : TypeTag[T] ) : Seq[CaseClassField] = QueryableSupport.synchronized {
    val cto = ttag.tpe.member( nme.CONSTRUCTOR ).asMethod
    cto.paramss.head.collect{
      case p : TermSymbol if ignoreNotQueryable || !p.annotations.exists( _.tpe =:= typeOf[NotQueryable] ) => p
    }.map( param => CaseClassField( param.name.toString, mirror.runtimeClass( param.typeSignature.typeSymbol.asClass ) ) ).toSeq
  }
}
object QueryableSupport {
} | Jarlakxen/scalatra-rest | src/main/scala/com/github/jarlakxen/scalatra/rest/queryable/QueryableSupport.scala | Scala | gpl-2.0 | 1,913 |
/*
* Copyright (C) 2017
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package code
package model
import net.liftweb.common._
import net.liftweb.mapper._
import net.liftweb.util._
import net.liftweb.http._
/**
 * A bill sponsor (member of Congress) persisted via Lift Mapper.
 * Most columns mirror the upstream legislator data feed, which is why a
 * number of external-identifier fields (govtrack, opensecrets, etc.)
 * coexist on the same row.
 */
class Sponsor extends LongKeyedMapper[Sponsor] with IdPK {
  def getSingleton = Sponsor
  object district extends MappedInt(this)
  object name extends MappedString(this, 255)
  object state extends MappedString(this, 4)
  // THOMAS identifier; indexed because lookups join on it.
  object thomas_id extends MappedString(this, 15) {
    override def dbIndexed_? = true
  }
  object title extends MappedString(this, 30)
  object sponsor_type extends MappedString(this, 25)
  object last_name extends MappedString(this, 255)
  object first_name extends MappedString(this, 255)
  object birthday extends MappedDate(this)
  object gender extends MappedGender(this)
  object `type` extends MappedString(this, 30)
  object party extends MappedString(this, 100)
  object url extends MappedString(this, 255)
  object address extends MappedString(this, 255)
  object phone extends MappedString(this, 25)
  object contact_form extends MappedString(this, 255)
  object rss_url extends MappedString(this, 255)
  object twitter extends MappedString(this, 100)
  object facebook extends MappedString(this, 255)
  object facebook_id extends MappedInt(this)
  object youtube extends MappedString(this, 100)
  object youtube_id extends MappedString(this, 255)
  object bioguide_id extends MappedString(this, 100)
  object opensecrets_id extends MappedString(this, 100)
  object lis_id extends MappedString(this, 100)
  object cspan_id extends MappedInt(this)
  object govtrack_id extends MappedInt(this)
  object votesmart_id extends MappedInt(this)
  object ballotpedia_id extends MappedString(this, 100)
  object washington_post_id extends MappedString(this, 100)
  object icpsr_id extends MappedInt(this)
  object wikipedia_id extends MappedString(this, 100)
  object term_start extends MappedDate(this)
  object term_end extends MappedDate(this)
  object religion extends MappedString(this, 100)
  // Number of bills in the configured congress (settings.congress, default
  // "115") for which this member is the primary sponsor (not a cosponsor).
  def countProposedBills = BillSponsor.count(By(BillSponsor.sponsor, this.id.get),
    By(BillSponsor.congress, Props.get("settings.congress").getOrElse("115").toInt),
    By(BillSponsor.sponsorship, "sponsor"))
  // Kept for reference: optional filters to exclude resolutions from the count.
  /*
   *
   NotBy(BillSponsor.bill_type, "hconres"),
   NotBy(BillSponsor.bill_type, "hjres"),
   NotBy(BillSponsor.bill_type, "hres"),
   NotBy(BillSponsor.bill_type, "sconres"),
   NotBy(BillSponsor.bill_type, "sres"),
   NotBy(BillSponsor.bill_type, "sjres"),
  */
}
/** Meta/companion mapper for [[Sponsor]]; backed by the `sponsor` table. */
object Sponsor extends Sponsor with LongKeyedMetaMapper[Sponsor] {
  override def dbTableName = "sponsor"
} | EasterTheBunny/ourdistrict | src/main/scala/code/model/Sponsor.scala | Scala | gpl-3.0 | 3,337 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.storage.kv.inmemory
import java.io.File
import org.apache.samza.container.SamzaContainerContext
import org.apache.samza.metrics.MetricsRegistry
import org.apache.samza.storage.kv.{KeyValueStoreMetrics, BaseKeyValueStorageEngineFactory, KeyValueStore}
import org.apache.samza.system.SystemStreamPartition
/**
 * Factory producing the raw in-memory byte-array store that backs a Samza
 * key-value storage engine. The store is purely in-memory: `storeDir`,
 * the changelog partition and the container context are accepted to satisfy
 * the factory contract but are not needed to build the store itself.
 */
class InMemoryKeyValueStorageEngineFactory[K, V] extends BaseKeyValueStorageEngineFactory[K, V] {
  override def getKVStore(storeName: String,
                          storeDir: File,
                          registry: MetricsRegistry,
                          changeLogSystemStreamPartition: SystemStreamPartition,
                          containerContext: SamzaContainerContext): KeyValueStore[Array[Byte], Array[Byte]] =
    // Wire the per-store metrics straight into a fresh in-memory store.
    new InMemoryKeyValueStore(new KeyValueStoreMetrics(storeName, registry))
}
| vjagadish/samza-clone | samza-kv-inmemory/src/main/scala/org/apache/samza/storage/kv/inmemory/InMemoryKeyValueStorageEngineFactory.scala | Scala | apache-2.0 | 1,716 |
package todomvc
import japgolly.scalajs.react.ScalazReact._
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
import org.scalajs.dom.ext.KeyCode
import scalaz.effect.IO
import scalaz.syntax.semigroup._
import scalaz.syntax.std.option._
import scalaz.std.anyVal.unitInstance
/**
 * React component for a single row of the TodoMVC list. All state changes
 * that affect other components are delegated upwards through the IO
 * callbacks in [[CTodoItem.Props]]; the only local state is the text of the
 * in-place edit field.
 */
object CTodoItem {
  // Callbacks are injected by the parent; `isEditing` toggles the edit field.
  case class Props private[CTodoItem] (
    onToggle: IO[Unit],
    onDelete: IO[Unit],
    onStartEditing: IO[Unit],
    onUpdateTitle: Title => IO[Unit],
    onCancelEditing: IO[Unit],
    todo: Todo,
    isEditing: Boolean
  )
  // Local component state: the (possibly unsaved) contents of the edit field.
  case class State(editText: UnfinishedTitle)
  case class Backend($: BackendScope[Props, State]) {
    // Submitting an empty/invalid title deletes the todo; otherwise the
    // validated title is propagated to the parent.
    def editFieldSubmit: IO[Unit] =
      $.state.editText.validated.fold($.props.onDelete)($.props.onUpdateTitle)
    /**
     * It's OK to make these into `val`s as long as they don't touch state.
     */
    // Restores the edit field to the todo's current title.
    val resetText: IO[Unit] =
      $.modStateIO(_.copy(editText = $.props.todo.title.editable))
    // Escape cancels the edit (resetting the text), Enter submits; other
    // keys are left to the default input behaviour (None = no IO action).
    val editFieldKeyDown: ReactKeyboardEvent => Option[IO[Unit]] =
      e => e.nativeEvent.keyCode match {
        case KeyCode.Escape => (resetText |+| $.props.onCancelEditing).some
        case KeyCode.Enter  => editFieldSubmit.some
        case _              => None
      }
    // Mirrors every keystroke of the edit field into component state.
    val editFieldChanged: ReactEventI => IO[Unit] =
      e => $.modStateIO(_.copy(editText = UnfinishedTitle(e.target.value)))
    def render: ReactElement = {
      <.li(
        // CSS classes drive TodoMVC's standard completed/editing styling.
        ^.classSet(
          "completed" -> $.props.todo.isCompleted,
          "editing"   -> $.props.isEditing
        ),
        <.div(
          ^.className := "view",
          <.input(
            ^.className := "toggle",
            ^.`type`    := "checkbox",
            ^.checked   := $.props.todo.isCompleted,
            ^.onChange ~~> $.props.onToggle
          ),
          // Double-clicking the label switches the row into edit mode.
          <.label($.props.todo.title.value, ^.onDoubleClick ~~> $.props.onStartEditing),
          <.button(^.className := "destroy", ^.onClick ~~> $.props.onDelete)
        ),
        // The edit field; losing focus submits, like the reference TodoMVC.
        <.input(
          ^.className := "edit",
          ^.onBlur   ~~> editFieldSubmit,
          ^.onChange ~~> editFieldChanged,
          ^.onKeyDown ~~>? editFieldKeyDown,
          ^.value     := $.state.editText.value
        )
      )
    }
  }
  private val component = ReactComponentB[Props]("CTodoItem")
    .initialStateP(p => State(p.todo.title.editable))
    .backend(Backend)
    .render(_.backend.render)
    .build
  // Keyed by the todo's id so React can reconcile list reorderings.
  def apply(onToggle: IO[Unit],
            onDelete: IO[Unit],
            onStartEditing: IO[Unit],
            onUpdateTitle: Title => IO[Unit],
            onCancelEditing: IO[Unit],
            todo: Todo,
            isEditing: Boolean) =
    component.withKey(todo.id.id.toString)(
      Props(
        onToggle = onToggle,
        onDelete = onDelete,
        onStartEditing = onStartEditing,
        onUpdateTitle = onUpdateTitle,
        onCancelEditing = onCancelEditing,
        todo = todo,
        isEditing = isEditing
      )
    )
}
| dchambers/todomvc | examples/scalajs-react/src/main/scala/todomvc/CTodoItem.scala | Scala | mit | 3,072 |
package com.bayesianwitch.injera.counting
import scala.collection.mutable.HashMap
/** Factory for [[SimpleCounter]] instances. */
object SimpleCounter extends IterableAddableCounterFactory[SimpleCounter] {
  /** Creates an empty counter. */
  def apply[T](): SimpleCounter[T] = new SimpleCounter[T]()
  /** The "zero" of any counter is simply a fresh, empty one. */
  def newZero[T](old: SimpleCounter[T]): SimpleCounter[T] = new SimpleCounter[T]()
}
/**
 * A mutable, single-threaded counter keyed by values of type T.
 * NOT THREAD SAFE: callers must synchronise externally if sharing.
 */
class SimpleCounter[T] extends ZeroableCounter[T] with IterableCounter[T] with AddableCounter[T] {
  private var counts = new HashMap[T,Long]()
  /** Iterator over all keys that currently have a recorded count. */
  def keys = counts.keysIterator
  /** Increments the count for `t` by one and returns the new count. */
  def inc(t: T): Long = add(t, 1L)
  /** Adds `n` to the count for `t` and returns the new count. */
  def add(t: T, n: Long): Long = {
    val newCount = get(t) + n
    counts.put(t, newCount)
    newCount
  }
  /** Current count for `t`, or 0 if `t` has never been counted. */
  def get(t: T): Long = counts.getOrElse(t, 0L)
  /**
   * Returns an immutable snapshot of all counts and resets this counter to
   * empty. The snapshot is detached, so it survives subsequent mutation.
   */
  def zero: Map[T,Long] = {
    val result = counts.toMap
    counts = new HashMap[T,Long]()
    result
  }
}
| bayesianwitch/injera | src/main/scala/injera/counting/SimpleCounter.scala | Scala | gpl-3.0 | 775 |
package helpers.disposal_of_vehicle
import controllers.MicroServiceError.MicroServiceErrorRefererCacheKey
import org.joda.time.{LocalDate, DateTime}
import org.openqa.selenium.{Cookie, WebDriver}
import models.BusinessChooseYourAddressFormModel
import models.BusinessChooseYourAddressFormModel.BusinessChooseYourAddressCacheKey
import models.EnterAddressManuallyFormModel
import models.DisposeCacheKeyPrefix.CookiePrefix
import models.DisposeFormModel
import models.DisposeFormModel.DisposeFormRegistrationNumberCacheKey
import models.DisposeFormModel.DisposeFormTimestampIdCacheKey
import models.DisposeFormModel.DisposeFormTransactionIdCacheKey
import models.DisposeFormModel.DisposeOccurredCacheKey
import models.DisposeFormModel.PreventGoingToDisposePageCacheKey
import models.DisposeFormModel.DisposeFormModelCacheKey
import models.PrivateDisposeFormModel
import models.EnterAddressManuallyFormModel.EnterAddressManuallyCacheKey
import models.VehicleLookupFormModel
import models.VehicleLookupFormModel.VehicleLookupFormModelCacheKey
import play.api.Play
import play.api.Play.current
import play.api.libs.json.{Json, Writes}
import uk.gov.dvla.vehicles.presentation.common
import common.model.AddressModel
import common.model.BruteForcePreventionModel
import common.model.BruteForcePreventionModel.bruteForcePreventionViewModelCacheKey
import common.model.MicroserviceResponseModel
import common.model.MicroserviceResponseModel.MsResponseCacheKey
import common.model.SetupTradeDetailsFormModel
import common.model.SetupTradeDetailsFormModel.setupTradeDetailsCacheKey
import common.model.TraderDetailsModel
import common.model.TraderDetailsModel.traderDetailsCacheKey
import common.model.VehicleAndKeeperDetailsModel
import common.model.VehicleAndKeeperDetailsModel.vehicleAndKeeperLookupDetailsCacheKey
import common.clientsidesession.TrackingId
import common.controllers.AlternateLanguages.{CyId, EnId}
import common.views.models.{AddressAndPostcodeViewModel, AddressLinesViewModel}
import common.webserviceclients.common.MicroserviceResponse
import webserviceclients.fakes.FakeAddressLookupService.BuildingNameOrNumberValid
import webserviceclients.fakes.FakeAddressLookupService.Line2Valid
import webserviceclients.fakes.FakeAddressLookupService.Line3Valid
import webserviceclients.fakes.FakeAddressLookupService.PostTownValid
import webserviceclients.fakes.FakeAddressLookupService.PostcodeValid
import webserviceclients.fakes.FakeAddressLookupService.TraderBusinessNameValid
import webserviceclients.fakes.FakeAddressLookupService.addressWithoutUprn
import webserviceclients.fakes.FakeDateServiceImpl.DateOfDisposalDayValid
import webserviceclients.fakes.FakeDateServiceImpl.DateOfDisposalMonthValid
import webserviceclients.fakes.FakeDateServiceImpl.DateOfDisposalYearValid
import webserviceclients.fakes.FakeDisposeWebServiceImpl.TransactionIdValid
import webserviceclients.fakes.FakeVehicleAndKeeperLookupWebService.ReferenceNumberValid
import webserviceclients.fakes.FakeVehicleAndKeeperLookupWebService.RegistrationNumberValid
import webserviceclients.fakes.FakeVehicleAndKeeperLookupWebService.VehicleMakeValid
import webserviceclients.fakes.FakeVehicleAndKeeperLookupWebService.VehicleModelValid
import webserviceclients.fakes.brute_force_protection.FakeBruteForcePreventionWebServiceImpl.MaxAttempts
import webserviceclients.fakes.FakeDisposeWebServiceImpl
import webserviceclients.fakes.FakeAddressLookupWebServiceImpl.selectedAddress
/**
 * Test helper for UI specs: seeds the browser (via Selenium's cookie API)
 * with the JSON-serialised models the application would normally have
 * written itself, so a spec can start on a page deep in the disposal flow
 * without replaying every preceding step. Every builder returns `this`,
 * allowing calls to be chained fluently.
 */
object CookieFactoryForUISpecs {
  // Serialises `value` to JSON and stores it as a browser cookie named `key`.
  private def addCookie[A](key: String, value: A)(implicit tjs: Writes[A], webDriver: WebDriver): Unit = {
    val valueAsString = Json.toJson(value).toString()
    val manage = webDriver.manage()
    val cookie = new Cookie(key, valueAsString)
    manage.addCookie(cookie)
  }
  /** Sets the Play language cookie to Welsh. */
  def withLanguageCy()(implicit webDriver: WebDriver) = {
    val key = Play.langCookieName
    val value = CyId
    addCookie(key, value)
    this
  }
  /** Sets the Play language cookie to English. */
  def withLanguageEn()(implicit webDriver: WebDriver) = {
    val key = Play.langCookieName
    val value = EnId
    addCookie(key, value)
    this
  }
  def withIdentifier(id: String)(implicit webDriver: WebDriver) = {
    addCookie(models.IdentifierCacheKey, id)
    this
  }
  /** Simulates completion of the "set up trade details" page. */
  def setupTradeDetails(traderPostcode: String = PostcodeValid)(implicit webDriver: WebDriver) = {
    val key = setupTradeDetailsCacheKey
    val value = SetupTradeDetailsFormModel(traderBusinessName = TraderBusinessNameValid,
      traderPostcode = traderPostcode, traderEmail = None)
    addCookie(key, value)
    this
  }
  /** Simulates the trader having chosen an address from the lookup results. */
  def businessChooseYourAddress(addressSelected: String = selectedAddress)(implicit webDriver: WebDriver) = {
    val key = BusinessChooseYourAddressCacheKey
    val value = BusinessChooseYourAddressFormModel(addressSelected)
    addCookie(key, value)
    this
  }
  /** Simulates the trader having entered an address manually. */
  def enterAddressManually()(implicit webDriver: WebDriver) = {
    val key = EnterAddressManuallyCacheKey
    val value = EnterAddressManuallyFormModel(addressAndPostcodeModel = AddressAndPostcodeViewModel(
      addressLinesModel = AddressLinesViewModel(
        buildingNameOrNumber = BuildingNameOrNumberValid,
        line2 = Some(Line2Valid),
        line3 = Some(Line3Valid),
        postTown = PostTownValid
      ),
      postCode = PostcodeValid)
    )
    addCookie(key, value)
    this
  }
  /** Seeds the resolved trader (dealer) details. */
  def dealerDetails(address: AddressModel = addressWithoutUprn)(implicit webDriver: WebDriver) = {
    val key = traderDetailsCacheKey
    val value = TraderDetailsModel(
      traderName = TraderBusinessNameValid,
      traderAddress = address,
      traderEmail = None
    )
    addCookie(key, value)
    this
  }
  /** Seeds the brute-force-prevention state (vehicle lookup attempt counting). */
  def bruteForcePreventionViewModel(permitted: Boolean = true,
                                    attempts: Int = 0,
                                    maxAttempts: Int = MaxAttempts,
                                    dateTimeISOChronology: String = org.joda.time.DateTime.now().toString)
                                   (implicit webDriver: WebDriver) = {
    val key = bruteForcePreventionViewModelCacheKey
    val value = BruteForcePreventionModel(
      permitted,
      attempts,
      maxAttempts,
      dateTimeISOChronology
    )
    addCookie(key, value)
    this
  }
  /** Seeds the vehicle/keeper details as returned by the lookup microservice.
    * When `disposeFlag` is set, a keeperEndDate of "now" is included too. */
  def vehicleAndKeeperDetailsModel(registrationNumber: String = RegistrationNumberValid,
                                   vehicleMake: Option[String] = Some(VehicleMakeValid),
                                   vehicleModel: Option[String] = Some(VehicleModelValid),
                                   title: Option[String] = None,
                                   firstName: Option[String] = None,
                                   lastName: Option[String] = None,
                                   address: Option[AddressModel] = None,
                                   disposeFlag: Boolean = false,
                                   suppressedV5CFlag: Boolean = false)(implicit webDriver: WebDriver) = {
    val key = vehicleAndKeeperLookupDetailsCacheKey
    val value = VehicleAndKeeperDetailsModel(
      registrationNumber = registrationNumber,
      make = vehicleMake,
      model = vehicleModel,
      title = title,
      firstName = firstName,
      lastName = lastName,
      address = address,
      disposeFlag = Some(disposeFlag),
      keeperEndDate = if (disposeFlag) Some(new DateTime()) else None,
      keeperChangeDate = None,
      suppressedV5Flag = Some(suppressedV5CFlag)
    )
    addCookie(key, value)
    this
  }
  /** Seeds the raw vehicle-lookup form input (doc ref number + registration). */
  def vehicleLookupFormModel(referenceNumber: String = ReferenceNumberValid,
                             registrationNumber: String = RegistrationNumberValid)
                            (implicit webDriver: WebDriver) = {
    val key = VehicleLookupFormModelCacheKey
    val value = VehicleLookupFormModel(referenceNumber = referenceNumber,
      registrationNumber = registrationNumber)
    addCookie(key, value)
    this
  }
  /** Seeds a microservice failure response for the vehicle lookup. */
  def vehicleLookupResponse(responseMessage: String = "disposal_vehiclelookupfailure")
                           (implicit webDriver: WebDriver) = {
    val key = MsResponseCacheKey
    val value = MicroserviceResponseModel(MicroserviceResponse("", responseMessage))
    addCookie(key, value)
    this
  }
  /** Seeds a completed (trade) dispose form with consents given. */
  def disposeFormModel()(implicit webDriver: WebDriver) = {
    val key = DisposeFormModelCacheKey
    val value = DisposeFormModel(mileage = None,
      dateOfDisposal = new LocalDate(),
      consent = FakeDisposeWebServiceImpl.ConsentValid,
      lossOfRegistrationConsent = FakeDisposeWebServiceImpl.ConsentValid,
      email = None)
    addCookie(key, value)
    this
  }
  /** Seeds a completed private-keeper dispose form with consents given. */
  def privateDisposeFormModel()(implicit webDriver: WebDriver) = {
    val key = models.PrivateDisposeFormModel.PrivateDisposeFormModelCacheKey
    val value = PrivateDisposeFormModel(mileage = None,
      dateOfDisposal = new LocalDate(),
      email = None,
      consent = FakeDisposeWebServiceImpl.ConsentValid,
      lossOfRegistrationConsent = FakeDisposeWebServiceImpl.ConsentValid)
    addCookie(key, value)
    this
  }
  /** Seeds the transaction id returned by a (fake) successful disposal. */
  def disposeTransactionId(transactionId: TrackingId = TransactionIdValid)(implicit webDriver: WebDriver) = {
    val key = DisposeFormTransactionIdCacheKey
    val value = transactionId.value
    addCookie(key, value)
    this
  }
  /** Seeds the disposal timestamp using the fake date service's fixed date. */
  def disposeFormTimestamp()(implicit webDriver: WebDriver) = {
    val key = DisposeFormTimestampIdCacheKey
    val value = new DateTime(DateOfDisposalYearValid.toInt,
      DateOfDisposalMonthValid.toInt,
      DateOfDisposalDayValid.toInt,
      0,
      0
    ).toString
    addCookie(key, value)
    this
  }
  def vehicleRegistrationNumber()(implicit webDriver: WebDriver) = {
    val key = DisposeFormRegistrationNumberCacheKey
    val value = RegistrationNumberValid
    addCookie(key, value)
    this
  }
  // Marks the flow so the dispose page redirects to `url` instead of loading.
  def preventGoingToDisposePage(url: String)(implicit webDriver: WebDriver) = {
    val key = PreventGoingToDisposePageCacheKey
    val value = url
    addCookie(key, value)
    this
  }
  // Flag-only cookie: its presence (value is irrelevant) records a disposal.
  def disposeOccurred(implicit webDriver: WebDriver) = {
    val key = DisposeOccurredCacheKey
    addCookie(key, "")
    this
  }
  // Records which page referred the user to the microservice-error page.
  def microServiceError(origin: String)(implicit webDriver: WebDriver) = {
    val key = MicroServiceErrorRefererCacheKey
    val value = origin
    addCookie(key, value)
    this
  }
}
| dvla/vehicles-online | test/helpers/disposal_of_vehicle/CookieFactoryForUISpecs.scala | Scala | mit | 10,213 |
/**
 * An immutable rational number n/d, stored in lowest terms with a strictly
 * positive denominator. Mixing in [[Ordered]] provides <, >, <=, >= from
 * the single `compare` implementation below.
 */
class Rational(n: Int, d: Int) extends Ordered[Rational] {
  require(d != 0)
  private val g = gcd(n.abs, d.abs)
  // Bug fix: normalise the sign into the numerator so `denom` is always
  // positive. Previously Rational(1, -2) kept denom == -2, which rendered
  // as "1/-2" and inverted the result of `compare` (the cross
  // multiplication only preserves ordering for positive denominators).
  val numer = (if (d < 0) -n else n) / g
  val denom = d.abs / g
  /** Auxiliary constructor for whole numbers. */
  def this(n: Int) = this(n, 1)
  override def toString = s"$numer/$denom"
  def +(that: Rational): Rational = {
    val newNumer = (numer * that.denom) + (that.numer * denom)
    val newDenom = denom * that.denom
    new Rational(newNumer, newDenom)
  }
  def +(i: Int): Rational = this + new Rational(i)
  def -(that: Rational): Rational = {
    val newNumer = (numer * that.denom) - (that.numer * denom)
    val newDenom = denom * that.denom
    new Rational(newNumer, newDenom)
  }
  def -(i: Int): Rational = this - new Rational(i)
  def *(that: Rational): Rational = new Rational(numer * that.numer, denom * that.denom)
  def *(i: Int): Rational = this * new Rational(i)
  def /(that: Rational): Rational = new Rational(numer * that.denom, denom * that.numer)
  def /(i: Int): Rational = this / new Rational(i)
  /** The larger of `this` and `that`. */
  def max(that: Rational): Rational = if (this < that) that else this
  private def gcd(a: Int, b: Int): Int = if (b == 0) a else gcd(b, a % b)
  // Required by Ordered; sound because `denom` is guaranteed positive.
  override def compare(that: Rational): Int =
    this.numer * that.denom - that.numer * this.denom
}
package com.github.gdefacci.di.macrodef
import scala.reflect.macros.blackbox
/**
 * Macro-side mixin that walks a DI "module" expression and collects the
 * providers (bindings, decorators, polymorphic factories) it declares into
 * a Providers map of dependency-graph nodes/references.
 */
trait ModuleDagNodeOrRefMixin[C <: blackbox.Context] { self: DagNodes[C] with DagNodeOrRefFactory[C] with Unifier[C] with ProvidersMixin[C] =>
  import context.universe._
  // Binds a module (or module-container) expression to a fresh stable name so
  // its members can be referenced from generated code; `dag` is the alias node.
  private class ExprAlias(module: context.Tree, val typ: Type, val parent: Option[Dag[DagNodeOrRef]]) {
    def this(module: context.Tree, parent: Option[Dag[DagNodeOrRef]]) = this(module, module.tpe, parent)
    val termName = TermName(context.freshName(typ.typeSymbol.name.decodedName.toString))
    val dag = alias(termName, module, typ, ApplicationScope, parent)
  }
  def emptyProviders = ProvidersMap.empty[DagNodeOrRef, DagNodeDagFactory, Ref, Type, Decorator, PolyDecorator]
  class ModuleDagNodeOrRef(membersSelect: MembersSelect[context.type]) {
    /** Entry point: collects all providers declared by `module`. */
    def apply(module: context.Expr[_]): Providers[DagNodeOrRef] = {
      createDagNodeOrRefProviders(new ExprAlias(module.tree, None))
    }
    // Dispatches on whether the expression is a container of modules or a
    // single module.
    private def createDagNodeOrRefProviders(moduleOrModuleContainerAlias: ExprAlias): Providers[DagNodeOrRef] = {
      val exprTyp = moduleOrModuleContainerAlias.typ
      if (membersSelect.isModuleContainerInstance(exprTyp)) {
        moduleContainerDagNodeOrRefProviders(moduleOrModuleContainerAlias)
      } else {
        moduleDagNodeOrRefProviders(moduleOrModuleContainerAlias)
      }
    }
    // Returns `seq` without the element at position `i` (used to drop the
    // "self" parameter of a decorator from its input dependencies).
    def skipIndex[T](i:Int, seq:Seq[T]):Seq[T] = {
      seq.zipWithIndex flatMap {
        case (x, idx) if (idx == i) => Nil
        case (x, _) => List(x)
      }
    }
    // Walks every binding member of a single module and accumulates the
    // corresponding provider entries.
    private def moduleDagNodeOrRefProviders(exprAlias: ExprAlias): Providers[DagNodeOrRef] = {
      val exprNm = exprAlias.termName
      val exprDag = exprAlias.dag
      val acc = emptyProviders
      membersSelect.getBindings(exprAlias.typ).foreach {
        case membersSelect.MethodBinding(member) =>
          val dg = methodDag(exprDag, exprNm, member)
          acc.members += dg
        case membersSelect.DecoratorBinding(member, selfIndex) =>
          // Inputs exclude the decorated ("self") parameter itself.
          val inpDags = skipIndex(selfIndex, paramListsDags(member.paramLists))
          val dec = Decorator(inpDags :+ exprDag, exprNm, member, selfIndex)
          val scope = scopeProvider(member)
          if (scope != DefaultScope) context.abort(member.pos, "decorators cant have scope annotation")
          acc.decoratorsBuffer += (member.returnType -> dec)
        case membersSelect.BindInstance(member, abstractType, concreteType) =>
          val scope = scopeProvider(member)
          val ref = Ref(scope, concreteType, member.pos)
          acc.members += Dag(ref)
          // A binding of a type to itself is exposed as a top-level ref.
          if (abstractType == concreteType) {
            acc.topLevelRefsSet += ref
          }
        case membersSelect.ModuleContainerBinding(member, typ) =>
          val memAlias = new ExprAlias(q"${exprAlias.termName}.${member.name.toTermName}", typ, Some(exprAlias.dag))
          val prvdrs = moduleContainerDagNodeOrRefProviders(memAlias)
          acc ++= prvdrs
        case membersSelect.ObjectBinding(moduleSymbol) =>
          val typ = moduleSymbol.asModule.moduleClass.asType.toType
          val memAlias = new ExprAlias(q"${exprAlias.termName}.${moduleSymbol.name}", typ, Some(exprAlias.dag))
          acc.members += memAlias.dag
        case membersSelect.PolyMethodBinding(member, polyType) =>
          val knds = scopeProvider(member)
          acc.polyMembers += new PolyDagNodeFactory(knds, Some(exprNm -> exprDag), member, polyType)
        case membersSelect.PolyDecoratorBinding(member, polyType, selfIndex) =>
          val inpDags = skipIndex(selfIndex, paramListsDags(member.paramLists))
          val dec = PolyDecorator(inpDags :+ exprDag, exprNm, new PolyDagNodeFactory(DefaultScope, Some(exprNm -> exprDag), member, polyType), selfIndex)
          val scope = scopeProvider(member)
          if (scope != DefaultScope) context.abort(member.pos, "decorators cant have scope annotation")
          acc.polyDecoratorsBuffer += dec
      }
      // The module alias itself is also a provider node.
      acc.members += exprDag
      acc
    }
    // Recurses into every value member of a module container, aliasing each
    // member and merging its providers.
    private def moduleContainerDagNodeOrRefProviders(moduleContainerAlias: ExprAlias): Providers[DagNodeOrRef] = {
      val mappings = emptyProviders
      membersSelect.getValues(moduleContainerAlias.typ).map { member =>
        val typ = if (member.isModule) member.asModule.moduleClass.asType.toType
        else if (member.isMethod) member.asMethod.returnType
        else context.abort(member.pos, "unrecognized member " + member)
        val memAlias = new ExprAlias(q"${moduleContainerAlias.termName}.${member.name.toTermName}", typ, Some(moduleContainerAlias.dag))
        mappings ++= createDagNodeOrRefProviders(memAlias)
      }
      mappings
    }
  }
} | gdefacci/di | macros/src/main/scala/com/github/gdefacci/di/macrodef/ModuleDagNodeOrRefMixin.scala | Scala | mit | 4,703 |
/*
* Copyright © 2015-2019 the contributors (see Contributors.md).
*
* This file is part of Knora.
*
* Knora is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Knora is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with Knora. If not, see <http://www.gnu.org/licenses/>.
*/
package org.knora.webapi.responders.v1
import java.util.UUID
import akka.http.scaladsl.util.FastFuture
import akka.pattern._
import org.knora.webapi._
import org.knora.webapi.messages.admin.responder.permissionsmessages.{PermissionDataGetADM, PermissionsDataADM}
import org.knora.webapi.messages.store.triplestoremessages._
import org.knora.webapi.messages.v1.responder.projectmessages.{ProjectInfoByIRIGetV1, ProjectInfoV1}
import org.knora.webapi.messages.v1.responder.usermessages.UserProfileTypeV1.UserProfileType
import org.knora.webapi.messages.v1.responder.usermessages._
import org.knora.webapi.responders.Responder.handleUnexpectedMessage
import org.knora.webapi.responders.{Responder, ResponderData}
import org.knora.webapi.util.CacheUtil
import scala.concurrent.Future
/**
* Provides information about Knora users to other responders.
*/
class UsersResponderV1(responderData: ResponderData) extends Responder(responderData) {
// The IRI used to lock user creation and update
val USERS_GLOBAL_LOCK_IRI = "http://rdfh.ch/users"
val USER_PROFILE_CACHE_NAME = "userProfileCache"
    /**
     * Receives a message of type [[UsersResponderRequestV1]], and returns an appropriate response message.
     * Pure dispatch: each request type is delegated to its dedicated private method;
     * anything unexpected is reported via [[handleUnexpectedMessage]].
     */
    def receive(msg: UsersResponderRequestV1) = msg match {
        case UsersGetV1(userProfile) => usersGetV1(userProfile)
        case UsersGetRequestV1(userProfileV1) => usersGetRequestV1(userProfileV1)
        case UserDataByIriGetV1(userIri, short) => userDataByIriGetV1(userIri, short)
        case UserProfileByIRIGetV1(userIri, profileType) => userProfileByIRIGetV1(userIri, profileType)
        case UserProfileByIRIGetRequestV1(userIri, profileType, userProfile) => userProfileByIRIGetRequestV1(userIri, profileType, userProfile)
        case UserProfileByEmailGetV1(email, profileType) => userProfileByEmailGetV1(email, profileType)
        case UserProfileByEmailGetRequestV1(email, profileType, userProfile) => userProfileByEmailGetRequestV1(email, profileType, userProfile)
        case UserProjectMembershipsGetRequestV1(userIri, userProfile, apiRequestID) => userProjectMembershipsGetRequestV1(userIri, userProfile, apiRequestID)
        case UserProjectAdminMembershipsGetRequestV1(userIri, userProfile, apiRequestID) => userProjectAdminMembershipsGetRequestV1(userIri, userProfile, apiRequestID)
        case UserGroupMembershipsGetRequestV1(userIri, userProfile, apiRequestID) => userGroupMembershipsGetRequestV1(userIri, userProfile, apiRequestID)
        case other => handleUnexpectedMessage(other, log, this.getClass.getName)
    }
    /**
     * Gets all the users and returns them as a sequence of [[UserDataV1]].
     *
     * @param userProfileV1 the profile of the requesting user; must carry SystemAdmin permission.
     * @return all the users as a sequence of [[UserDataV1]].
     * @throws ForbiddenException if the requesting user is not a system admin.
     */
    private def usersGetV1(userProfileV1: UserProfileV1): Future[Seq[UserDataV1]] = {
        //log.debug("usersGetV1")

        for {
            // Permission check happens inside the Future so a failure becomes a failed Future.
            _ <- Future(
                if (!userProfileV1.permissionData.isSystemAdmin) {
                    throw ForbiddenException("SystemAdmin permissions are required.")
                }
            )

            sparqlQueryString <- Future(queries.sparql.v1.txt.getUsers(
                triplestore = settings.triplestoreType
            ).toString())

            usersResponse <- (storeManager ? SparqlSelectRequest(sparqlQueryString)).mapTo[SparqlSelectResponse]

            usersResponseRows: Seq[VariableResultsRow] = usersResponse.results.bindings

            // Group the flat (subject, predicate, object) rows by user IRI, then
            // flatten each group into a predicate -> object map for that user.
            usersWithProperties: Map[String, Map[String, String]] = usersResponseRows.groupBy(_.rowMap("s")).map {
                case (userIri: IRI, rows: Seq[VariableResultsRow]) => (userIri, rows.map(row => (row.rowMap("p"), row.rowMap("o"))).toMap)
            }

            users = usersWithProperties.map {
                case (userIri: IRI, propsMap: Map[String, String]) =>

                    UserDataV1(
                        // Fall back to the configured default language if the user has none.
                        lang = propsMap.get(OntologyConstants.KnoraAdmin.PreferredLanguage) match {
                            case Some(langList) => langList
                            case None => settings.fallbackLanguage
                        },
                        user_id = Some(userIri),
                        email = propsMap.get(OntologyConstants.KnoraAdmin.Email),
                        firstname = propsMap.get(OntologyConstants.KnoraAdmin.GivenName),
                        lastname = propsMap.get(OntologyConstants.KnoraAdmin.FamilyName),
                        status = propsMap.get(OntologyConstants.KnoraAdmin.Status).map(_.toBoolean)
                    )
            }.toSeq

        } yield users
    }
/**
 * Retrieves all users and wraps them in a [[UsersGetResponseV1]].
 *
 * @param userProfileV1 the profile of the requesting user.
 * @return all the users as a [[UsersGetResponseV1]].
 * @throws NotFoundException if no users exist at all.
 */
private def usersGetRequestV1(userProfileV1: UserProfileV1): Future[UsersGetResponseV1] = {
  usersGetV1(userProfileV1).map { users =>
    // An empty user list is reported as "not found" rather than as an empty response.
    if (users.nonEmpty) UsersGetResponseV1(users = users)
    else throw NotFoundException(s"No users found")
  }
}
/**
 * Retrieves basic information about a Knora user as a [[UserDataV1]].
 *
 * @param userIri the IRI of the user.
 * @param short if true, sensitive fields (token, password) are omitted.
 * @return a [[UserDataV1]] describing the user, if found.
 */
private def userDataByIriGetV1(userIri: IRI, short: Boolean): Future[Option[UserDataV1]] = {
  for {
    query <- Future(
      queries.sparql.v1.txt.getUserByIri(
        triplestore = settings.triplestoreType,
        userIri = userIri
      ).toString()
    )
    response <- (storeManager ? SparqlSelectRequest(query)).mapTo[SparqlSelectResponse]
    maybeUserData <- userDataQueryResponse2UserDataV1(response, short)
  } yield maybeUserData
}
/**
 * Retrieves a user's [[UserProfileV1]] by IRI. The cache is consulted first;
 * on a miss the profile is loaded from the triplestore and then written to
 * the cache.
 *
 * @param userIri the IRI of the user.
 * @param profileType the type of the requested profile (restricted or full).
 * @return a [[UserProfileV1]] describing the user, if found.
 */
private def userProfileByIRIGetV1(userIri: IRI, profileType: UserProfileType): Future[Option[UserProfileV1]] = {
  CacheUtil.get[UserProfileV1](USER_PROFILE_CACHE_NAME, userIri) match {
    case Some(userProfile) =>
      // Cache hit: no triplestore round trip needed.
      log.debug(s"userProfileByIRIGetV1 - cache hit: $userProfile")
      FastFuture.successful(Some(userProfile.ofType(profileType)))

    case None =>
      // Cache miss: load from the triplestore and populate the cache.
      for {
        query <- Future(
          queries.sparql.v1.txt.getUserByIri(
            triplestore = settings.triplestoreType,
            userIri = userIri
          ).toString()
        )
        response <- (storeManager ? SparqlSelectRequest(query)).mapTo[SparqlSelectResponse]
        maybeProfile <- userDataQueryResponse2UserProfileV1(response)
        _ = maybeProfile.foreach(writeUserProfileV1ToCache)
      } yield maybeProfile.map(_.ofType(profileType))
  }
}
/**
 * Retrieves a user's profile by IRI, wrapped in a [[UserProfileResponseV1]].
 *
 * The requesting user must be a system administrator or the user in question.
 *
 * @param userIRI the IRI of the user.
 * @param profileType the type of the requested profile (restricted or full).
 * @param userProfile the requesting user's profile.
 * @return a [[UserProfileResponseV1]].
 * @throws NotFoundException if no user with the given IRI exists.
 */
private def userProfileByIRIGetRequestV1(userIRI: IRI, profileType: UserProfileType, userProfile: UserProfileV1): Future[UserProfileResponseV1] = {
  for {
    // A user may always look up their own profile; otherwise system-admin rights are required.
    _ <- Future(
      if (!userProfile.permissionData.isSystemAdmin && !userProfile.userData.user_id.contains(userIRI)) {
        throw ForbiddenException("SystemAdmin permissions are required.")
      }
    )
    maybeProfile <- userProfileByIRIGetV1(userIRI, profileType)
  } yield maybeProfile match {
    case Some(profile) => UserProfileResponseV1(profile)
    case None => throw NotFoundException(s"User '$userIRI' not found")
  }
}
/**
 * Gets information about a Knora user, and returns it in a [[UserProfileV1]].
 * If possible, tries to retrieve the user profile from cache (keyed by email).
 * If not, it retrieves it from the triplestore and writes it to the cache.
 *
 * @param email the email of the user.
 * @param profileType the type of the requested profile (restricted or full).
 * @return a [[UserProfileV1]] describing the user, if found.
 */
private def userProfileByEmailGetV1(email: String, profileType: UserProfileType): Future[Option[UserProfileV1]] = {
  // log.debug(s"userProfileByEmailGetV1: username = '{}', type = '{}'", email, profileType)
  CacheUtil.get[UserProfileV1](USER_PROFILE_CACHE_NAME, email) match {
    case Some(userProfile) =>
      // Found a user profile in the cache.
      // Fixed: the log message previously named the IRI-based method
      // ("userProfileByIRIGetV1"), which made cache-hit logs misleading.
      log.debug(s"userProfileByEmailGetV1 - cache hit: $userProfile")
      FastFuture.successful(Some(userProfile.ofType(profileType)))
    case None =>
      // Cache miss: load from the triplestore and populate the cache.
      for {
        sparqlQueryString <- Future(queries.sparql.v1.txt.getUserByEmail(
          triplestore = settings.triplestoreType,
          email = email
        ).toString())
        //_ = log.debug(s"userProfileByEmailGetV1 - sparqlQueryString: $sparqlQueryString")
        userDataQueryResponse <- (storeManager ? SparqlSelectRequest(sparqlQueryString)).mapTo[SparqlSelectResponse]
        maybeUserProfileV1 <- userDataQueryResponse2UserProfileV1(userDataQueryResponse)
        _ = if (maybeUserProfileV1.nonEmpty) {
          writeUserProfileV1ToCache(maybeUserProfileV1.get)
        }
        result = maybeUserProfileV1.map(_.ofType(profileType))
      } yield result
  }
}
/**
 * Retrieves a user's profile by email, wrapped in a [[UserProfileResponseV1]].
 *
 * @param email the email of the user.
 * @param profileType the type of the requested profile (restricted or full).
 * @param userProfile the requesting user's profile.
 * @return a [[UserProfileResponseV1]].
 * @throws NotFoundException if the user with the supplied email is not found.
 */
private def userProfileByEmailGetRequestV1(email: String, profileType: UserProfileType, userProfile: UserProfileV1): Future[UserProfileResponseV1] = {
  userProfileByEmailGetV1(email, profileType).map {
    case Some(up: UserProfileV1) => UserProfileResponseV1(up)
    case None => throw NotFoundException(s"User '$email' not found")
  }
}
/**
 * Returns the user's project memberships as the IRIs of the projects the
 * user is a member of.
 *
 * @param userIri the user's IRI.
 * @param userProfileV1 the user profile of the requesting user.
 * @param apiRequestID the unique api request ID.
 * @return a [[UserProjectMembershipsGetResponseV1]].
 */
def userProjectMembershipsGetRequestV1(userIri: IRI, userProfileV1: UserProfileV1, apiRequestID: UUID): Future[UserProjectMembershipsGetResponseV1] = {
  for {
    query <- Future(
      queries.sparql.v1.txt.getUserByIri(
        triplestore = settings.triplestoreType,
        userIri = userIri
      ).toString()
    )
    response <- (storeManager ? SparqlSelectRequest(query)).mapTo[SparqlSelectResponse]

    // Group the result rows by predicate: predicate IRI -> all object values.
    predicateToObjects: Map[String, Seq[String]] = response.results.bindings.groupBy(_.rowMap("p")).map {
      case (predicate, rows) => predicate -> rows.map(_.rowMap("o"))
    }

    // The projects the user is a member of (empty if none).
    projectIris: Seq[IRI] = predicateToObjects.getOrElse(OntologyConstants.KnoraAdmin.IsInProject, Seq.empty[IRI])
  } yield UserProjectMembershipsGetResponseV1(projects = projectIris)
}
/**
 * Returns the IRIs of the projects in which the user is a member of the
 * project admin group.
 *
 * @param userIri the user's IRI.
 * @param userProfileV1 the user profile of the requesting user.
 * @param apiRequestID the unique api request ID.
 * @return a [[UserProjectAdminMembershipsGetResponseV1]].
 */
def userProjectAdminMembershipsGetRequestV1(userIri: IRI, userProfileV1: UserProfileV1, apiRequestID: UUID): Future[UserProjectAdminMembershipsGetResponseV1] = {
  for {
    query <- Future(
      queries.sparql.v1.txt.getUserByIri(
        triplestore = settings.triplestoreType,
        userIri = userIri
      ).toString()
    )
    response <- (storeManager ? SparqlSelectRequest(query)).mapTo[SparqlSelectResponse]

    // Group the result rows by predicate: predicate IRI -> all object values.
    predicateToObjects: Map[String, Seq[String]] = response.results.bindings.groupBy(_.rowMap("p")).map {
      case (predicate, rows) => predicate -> rows.map(_.rowMap("o"))
    }

    // The projects for which the user belongs to the project admin group (empty if none).
    projectIris: Seq[IRI] = predicateToObjects.getOrElse(OntologyConstants.KnoraAdmin.IsInProjectAdminGroup, Seq.empty[IRI])
  } yield UserProjectAdminMembershipsGetResponseV1(projects = projectIris)
}
/**
 * Returns the user's custom group memberships (i.e. excluding the implicit
 * ProjectMember and ProjectAdmin groups).
 *
 * @param userIri the user's IRI.
 * @param userProfileV1 the user profile of the requesting user.
 * @param apiRequestID the unique api request ID.
 * @return a [[UserGroupMembershipsGetResponseV1]].
 */
def userGroupMembershipsGetRequestV1(userIri: IRI, userProfileV1: UserProfileV1, apiRequestID: UUID): Future[UserGroupMembershipsGetResponseV1] = {
  for {
    query <- Future(
      queries.sparql.v1.txt.getUserByIri(
        triplestore = settings.triplestoreType,
        userIri = userIri
      ).toString()
    )
    response <- (storeManager ? SparqlSelectRequest(query)).mapTo[SparqlSelectResponse]

    // Group the result rows by predicate: predicate IRI -> all object values.
    predicateToObjects: Map[String, Seq[String]] = response.results.bindings.groupBy(_.rowMap("p")).map {
      case (predicate, rows) => predicate -> rows.map(_.rowMap("o"))
    }

    // The groups the user is a member of (empty if none).
    groupIris: Seq[IRI] = predicateToObjects.getOrElse(OntologyConstants.KnoraAdmin.IsInGroup, Seq.empty[IRI])
  } yield UserGroupMembershipsGetResponseV1(groups = groupIris)
}
////////////////////
// Helper Methods //
////////////////////
/**
 * Helper method used to create a [[UserDataV1]] from the [[SparqlSelectResponse]]
 * containing user data.
 *
 * @param userDataQueryResponse a [[SparqlSelectResponse]] containing user data.
 * @param short denotes if all information should be returned. If short == true,
 *              then no token and password should be returned.
 * @return a [[UserDataV1]] containing the user's basic data, or [[None]] if the
 *         response contains no rows.
 */
private def userDataQueryResponse2UserDataV1(userDataQueryResponse: SparqlSelectResponse, short: Boolean): Future[Option[UserDataV1]] = {
  val rows = userDataQueryResponse.results.bindings

  if (rows.isEmpty) {
    FastFuture.successful(None)
  } else {
    val returnedUserIri = userDataQueryResponse.getFirstRow.rowMap("s")

    // Group the result rows by predicate: predicate IRI -> all object values.
    val grouped: Map[String, Seq[String]] = rows.groupBy(_.rowMap("p")).map {
      case (predicate, predicateRows) => predicate -> predicateRows.map(_.rowMap("o"))
    }

    // First object value for a predicate, if any value is present at all.
    def firstValue(predicate: String): Option[String] = grouped.get(predicate).map(_.head)

    val userData = UserDataV1(
      lang = firstValue(OntologyConstants.KnoraAdmin.PreferredLanguage).getOrElse(settings.fallbackLanguage),
      user_id = Some(returnedUserIri),
      email = firstValue(OntologyConstants.KnoraAdmin.Email),
      firstname = firstValue(OntologyConstants.KnoraAdmin.GivenName),
      lastname = firstValue(OntologyConstants.KnoraAdmin.FamilyName),
      // The password is suppressed for "short" (restricted) requests.
      password = if (short) None else firstValue(OntologyConstants.KnoraAdmin.Password),
      status = firstValue(OntologyConstants.KnoraAdmin.Status).map(_.toBoolean)
    )

    FastFuture.successful(Some(userData))
  }
}
/**
 * Helper method used to create a [[UserProfileV1]] from the [[SparqlSelectResponse]] containing user data.
 *
 * Builds the basic [[UserDataV1]] from the grouped query rows, then asks the
 * permissions responder for the user's permission data and the projects
 * responder for the info of each project the user is a member of, and finally
 * assembles the full profile from those parts.
 *
 * @param userDataQueryResponse a [[SparqlSelectResponse]] containing user data.
 * @return a [[UserProfileV1]] containing the user's data, or [[None]] if the response contains no rows.
 */
private def userDataQueryResponse2UserProfileV1(userDataQueryResponse: SparqlSelectResponse): Future[Option[UserProfileV1]] = {
  // log.debug("userDataQueryResponse2UserProfileV1 - userDataQueryResponse: {}", MessageUtil.toSource(userDataQueryResponse))
  if (userDataQueryResponse.results.bindings.nonEmpty) {
    // Every row shares the same subject, so the first row yields the user's IRI.
    val returnedUserIri = userDataQueryResponse.getFirstRow.rowMap("s")
    // Group the rows by predicate: predicate IRI -> all object values.
    val groupedUserData: Map[String, Seq[String]] = userDataQueryResponse.results.bindings.groupBy(_.rowMap("p")).map {
      case (predicate, rows) => predicate -> rows.map(_.rowMap("o"))
    }
    // log.debug("userDataQueryResponse2UserProfileV1 - groupedUserData: {}", MessageUtil.toSource(groupedUserData))
    val userDataV1 = UserDataV1(
      lang = groupedUserData.get(OntologyConstants.KnoraAdmin.PreferredLanguage) match {
        case Some(langList) => langList.head
        case None => settings.fallbackLanguage
      },
      user_id = Some(returnedUserIri),
      email = groupedUserData.get(OntologyConstants.KnoraAdmin.Email).map(_.head),
      firstname = groupedUserData.get(OntologyConstants.KnoraAdmin.GivenName).map(_.head),
      lastname = groupedUserData.get(OntologyConstants.KnoraAdmin.FamilyName).map(_.head),
      password = groupedUserData.get(OntologyConstants.KnoraAdmin.Password).map(_.head),
      status = groupedUserData.get(OntologyConstants.KnoraAdmin.Status).map(_.head.toBoolean)
    )
    // log.debug("userDataQueryResponse2UserProfileV1 - userDataV1: {}", MessageUtil.toSource(userDataV1))
    /* the projects the user is member of */
    val projectIris: Seq[IRI] = groupedUserData.get(OntologyConstants.KnoraAdmin.IsInProject) match {
      case Some(projects) => projects
      case None => Seq.empty[IRI]
    }
    // log.debug(s"userDataQueryResponse2UserProfileV1 - projectIris: ${MessageUtil.toSource(projectIris)}")
    /* the groups the user is member of (only explicit groups) */
    val groupIris = groupedUserData.get(OntologyConstants.KnoraAdmin.IsInGroup) match {
      case Some(groups) => groups
      case None => Seq.empty[IRI]
    }
    // log.debug(s"userDataQueryResponse2UserProfileV1 - groupIris: ${MessageUtil.toSource(groupIris)}")
    /* the projects for which the user is implicitly considered a member of the 'http://www.knora.org/ontology/knora-base#ProjectAdmin' group */
    val isInProjectAdminGroups = groupedUserData.getOrElse(OntologyConstants.KnoraAdmin.IsInProjectAdminGroup, Vector.empty[IRI])
    /* is the user implicitly considered a member of the 'http://www.knora.org/ontology/knora-base#SystemAdmin' group */
    val isInSystemAdminGroup = groupedUserData.get(OntologyConstants.KnoraAdmin.IsInSystemAdminGroup).exists(p => p.head.toBoolean)
    for {
      /* get the user's permission profile from the permissions responder */
      // NOTE(review): permission data is requested on behalf of the system user,
      // not the requesting user — presumably intentional; confirm with the
      // permissions responder's contract.
      permissionData <- (responderManager ? PermissionDataGetADM(
        projectIris = projectIris,
        groupIris = groupIris,
        isInProjectAdminGroups = isInProjectAdminGroups,
        isInSystemAdminGroup = isInSystemAdminGroup,
        requestingUser = KnoraSystemInstances.Users.SystemUser
      )).mapTo[PermissionsDataADM]
      // Fetch project info for every project membership; projects that cannot
      // be resolved come back as None and are dropped below.
      maybeProjectInfoFutures: Seq[Future[Option[ProjectInfoV1]]] = projectIris.map {
        projectIri => (responderManager ? ProjectInfoByIRIGetV1(iri = projectIri, userProfileV1 = None)).mapTo[Option[ProjectInfoV1]]
      }
      maybeProjectInfos: Seq[Option[ProjectInfoV1]] <- Future.sequence(maybeProjectInfoFutures)
      projectInfos = maybeProjectInfos.flatten
      projectInfoMap: Map[IRI, ProjectInfoV1] = projectInfos.map(projectInfo => projectInfo.id -> projectInfo).toMap
      /* construct the user profile from the different parts */
      up = UserProfileV1(
        userData = userDataV1,
        groups = groupIris,
        projects_info = projectInfoMap,
        sessionId = None,
        permissionData = permissionData
      )
      // _ = log.debug("Retrieved UserProfileV1: {}", up.toString)
      result: Option[UserProfileV1] = Some(up)
    } yield result
  } else {
    FastFuture.successful(None)
  }
}
/**
 * Helper method for checking if a user exists.
 *
 * @param userIri the IRI of the user.
 * @return `true` if a user with the given IRI exists.
 */
def userExists(userIri: IRI): Future[Boolean] = {
  Future(queries.sparql.v1.txt.checkUserExists(userIri = userIri).toString).flatMap { askString =>
    (storeManager ? SparqlAskRequest(askString)).mapTo[SparqlAskResponse].map(_.result)
  }
}
/**
 * Helper method for checking if a project exists.
 *
 * @param projectIri the IRI of the project.
 * @return `true` if a project with the given IRI exists.
 */
def projectExists(projectIri: IRI): Future[Boolean] = {
  Future(queries.sparql.admin.txt.checkProjectExistsByIri(projectIri = projectIri).toString).flatMap { askString =>
    (storeManager ? SparqlAskRequest(askString)).mapTo[SparqlAskResponse].map(_.result)
  }
}
/**
 * Helper method for checking if a group exists.
 *
 * @param groupIri the IRI of the group.
 * @return `true` if a group with the given IRI exists.
 */
def groupExists(groupIri: IRI): Future[Boolean] = {
  Future(queries.sparql.admin.txt.checkGroupExistsByIri(groupIri = groupIri).toString).flatMap { askString =>
    (storeManager ? SparqlAskRequest(askString)).mapTo[SparqlAskResponse].map(_.result)
  }
}
/**
 * Writes the user profile to the cache, keyed both by the user's IRI and by
 * the user's email.
 *
 * @param userProfile a [[UserProfileV1]].
 * @return true if writing was successful.
 * @throws ApplicationCacheException if the profile lacks an IRI or email, or
 *         if a cache write cannot be read back.
 */
private def writeUserProfileV1ToCache(userProfile: UserProfileV1): Boolean = {
  val iri = userProfile.userData.user_id.getOrElse(
    throw ApplicationCacheException("A user profile without an IRI is invalid. Not writing to cache.")
  )

  val email = userProfile.userData.email.getOrElse(
    throw ApplicationCacheException("A user profile without an email is invalid. Not writing to cache.")
  )

  // Store the profile under the given key and verify the write took effect.
  def cacheUnder(key: String): Unit = {
    CacheUtil.put(USER_PROFILE_CACHE_NAME, key, userProfile)
    if (CacheUtil.get(USER_PROFILE_CACHE_NAME, key).isEmpty) {
      throw ApplicationCacheException("Writing the user's profile to cache was not successful.")
    }
  }

  cacheUnder(iri)
  cacheUnder(email)
  true
}
/**
 * Removes the user profile from the cache, for each cache key that is given.
 *
 * @param userIri the user's IRI under which a profile could be cached.
 * @param email the user's email under which a profile could be cached.
 */
private def invalidateCachedUserProfileV1(userIri: Option[IRI] = None, email: Option[String] = None): Unit = {
  userIri.foreach(iri => CacheUtil.remove(USER_PROFILE_CACHE_NAME, iri))
  email.foreach(mail => CacheUtil.remove(USER_PROFILE_CACHE_NAME, mail))
}
}
| musicEnfanthen/Knora | webapi/src/main/scala/org/knora/webapi/responders/v1/UsersResponderV1.scala | Scala | agpl-3.0 | 29,382 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js sbt plugin **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.jsenv.phantomjs
import org.scalajs.jsenv._
import org.scalajs.jsenv.Utils.OptDeadline
import org.scalajs.core.ir.Utils.{escapeJS, fixFileURI}
import org.scalajs.core.tools.io._
import org.scalajs.core.tools.classpath._
import org.scalajs.core.tools.logging._
import java.io.{ Console => _, _ }
import java.net._
import scala.io.Source
import scala.collection.mutable
import scala.annotation.tailrec
import scala.concurrent.{ExecutionContext, TimeoutException}
import scala.concurrent.duration.Duration
class PhantomJSEnv(
phantomjsPath: String = "phantomjs",
addArgs: Seq[String] = Seq.empty,
addEnv: Map[String, String] = Map.empty,
val autoExit: Boolean = true,
jettyClassLoader: ClassLoader = null
) extends ExternalJSEnv(addArgs, addEnv) with ComJSEnv {
import PhantomJSEnv._
protected def vmName: String = "PhantomJS"
protected def executable: String = phantomjsPath
override def jsRunner(classpath: CompleteClasspath, code: VirtualJSFile,
logger: Logger, console: JSConsole): JSRunner = {
new PhantomRunner(classpath, code, logger, console)
}
override def asyncRunner(classpath: CompleteClasspath, code: VirtualJSFile,
logger: Logger, console: JSConsole): AsyncJSRunner = {
new AsyncPhantomRunner(classpath, code, logger, console)
}
override def comRunner(classpath: CompleteClasspath, code: VirtualJSFile,
logger: Logger, console: JSConsole): ComJSRunner = {
new ComPhantomRunner(classpath, code, logger, console)
}
protected class PhantomRunner(classpath: CompleteClasspath,
code: VirtualJSFile, logger: Logger, console: JSConsole
) extends ExtRunner(classpath, code, logger, console)
with AbstractPhantomRunner
protected class AsyncPhantomRunner(classpath: CompleteClasspath,
code: VirtualJSFile, logger: Logger, console: JSConsole
) extends AsyncExtRunner(classpath, code, logger, console)
with AbstractPhantomRunner
protected class ComPhantomRunner(classpath: CompleteClasspath,
code: VirtualJSFile, logger: Logger, console: JSConsole
) extends AsyncPhantomRunner(classpath, code, logger, console)
with ComJSRunner with WebsocketListener {
private def loadMgr() = {
val loader =
if (jettyClassLoader != null) jettyClassLoader
else getClass().getClassLoader()
val clazz = loader.loadClass(
"org.scalajs.jsenv.phantomjs.JettyWebsocketManager")
val ctors = clazz.getConstructors()
assert(ctors.length == 1, "JettyWebsocketManager may only have one ctor")
val mgr = ctors.head.newInstance(this)
mgr.asInstanceOf[WebsocketManager]
}
val mgr: WebsocketManager = loadMgr()
protected var mgrIsRunning: Boolean = false
def onRunning(): Unit = synchronized {
mgrIsRunning = true
notifyAll()
}
def onOpen(): Unit = synchronized(notifyAll())
def onClose(): Unit = synchronized(notifyAll())
future.onComplete(_ => synchronized(notifyAll()))(ExecutionContext.global)
def onMessage(msg: String): Unit = synchronized {
recvBuf.enqueue(msg)
notifyAll()
}
def log(msg: String): Unit = logger.debug(s"PhantomJS WS Jetty: $msg")
private[this] val recvBuf = mutable.Queue.empty[String]
private[this] val fragmentsBuf = new StringBuilder
mgr.start()
private def comSetup = {
def maybeExit(code: Int) =
if (autoExit)
s"window.callPhantom({ action: 'exit', returnValue: $code });"
else
""
/* The WebSocket server starts asynchronously. We must wait for it to
* be fully operational before a) retrieving the port it is running on
* and b) feeding the connecting JS script to the VM.
*/
synchronized {
while (!mgrIsRunning)
wait(10000)
if (!mgrIsRunning)
throw new TimeoutException(
"The PhantomJS WebSocket server startup timed out")
}
val serverPort = mgr.localPort
assert(serverPort > 0,
s"Manager running with a non-positive port number: $serverPort")
val code = s"""
|(function() {
| var MaxPayloadSize = $MaxCharPayloadSize;
|
| // The socket for communication
| var websocket = null;
|
| // Buffer for messages sent before socket is open
| var outMsgBuf = null;
|
| function sendImpl(msg) {
| var frags = (msg.length / MaxPayloadSize) | 0;
|
| for (var i = 0; i < frags; ++i) {
| var payload = msg.substring(
| i * MaxPayloadSize, (i + 1) * MaxPayloadSize);
| websocket.send("1" + payload);
| }
|
| websocket.send("0" + msg.substring(frags * MaxPayloadSize));
| }
|
| function recvImpl(recvCB) {
| var recvBuf = "";
|
| return function(evt) {
| var newData = recvBuf + evt.data.substring(1);
| if (evt.data.charAt(0) == "0") {
| recvBuf = "";
| recvCB(newData);
| } else if (evt.data.charAt(0) == "1") {
| recvBuf = newData;
| } else {
| throw new Error("Bad fragmentation flag in " + evt.data);
| }
| };
| }
|
| window.scalajsCom = {
| init: function(recvCB) {
| if (websocket !== null) throw new Error("Com already open");
|
| outMsgBuf = [];
|
| websocket = new WebSocket("ws://localhost:$serverPort");
|
| websocket.onopen = function(evt) {
| for (var i = 0; i < outMsgBuf.length; ++i)
| sendImpl(outMsgBuf[i]);
| outMsgBuf = null;
| };
| websocket.onclose = function(evt) {
| websocket = null;
| if (outMsgBuf !== null)
| throw new Error("WebSocket closed before being opened: " + evt);
| ${maybeExit(0)}
| };
| websocket.onmessage = recvImpl(recvCB);
| websocket.onerror = function(evt) {
| websocket = null;
| throw new Error("Websocket failed: " + evt);
| };
|
| // Take over responsibility to auto exit
| window.callPhantom({
| action: 'setAutoExit',
| autoExit: false
| });
| },
| send: function(msg) {
| if (websocket === null)
| return; // we are closed already. ignore message
|
| if (outMsgBuf !== null)
| outMsgBuf.push(msg);
| else
| sendImpl(msg);
| },
| close: function() {
| if (websocket === null)
| return; // we are closed already. all is well.
|
| if (outMsgBuf !== null)
| // Reschedule ourselves to give onopen a chance to kick in
| window.setTimeout(window.scalajsCom.close, 10);
| else
| websocket.close();
| }
| }
|}).call(this);""".stripMargin
new MemVirtualJSFile("comSetup.js").withContent(code)
}
def send(msg: String): Unit = synchronized {
if (awaitConnection()) {
val fragParts = msg.length / MaxCharPayloadSize
for (i <- 0 until fragParts) {
val payload = msg.substring(
i * MaxCharPayloadSize, (i + 1) * MaxCharPayloadSize)
mgr.sendMessage("1" + payload)
}
mgr.sendMessage("0" + msg.substring(fragParts * MaxCharPayloadSize))
}
}
def receive(timeout: Duration): String = synchronized {
if (recvBuf.isEmpty && !awaitConnection())
throw new ComJSEnv.ComClosedException("Phantom.js isn't connected")
val deadline = OptDeadline(timeout)
@tailrec
def loop(): String = {
/* The fragments are accumulated in an instance-wide buffer in case
* receiving a non-first fragment times out.
*/
val frag = receiveFrag(deadline)
fragmentsBuf ++= frag.substring(1)
if (frag(0) == '0') {
val result = fragmentsBuf.result()
fragmentsBuf.clear()
result
} else if (frag(0) == '1') {
loop()
} else {
throw new AssertionError("Bad fragmentation flag in " + frag)
}
}
try {
loop()
} catch {
case e: Throwable if !e.isInstanceOf[TimeoutException] =>
fragmentsBuf.clear() // the protocol is broken, so discard the buffer
throw e
}
}
private def receiveFrag(deadline: OptDeadline): String = {
while (recvBuf.isEmpty && !mgr.isClosed && !deadline.isOverdue)
wait(deadline.millisLeft)
if (recvBuf.isEmpty) {
if (mgr.isClosed)
throw new ComJSEnv.ComClosedException
else
throw new TimeoutException("Timeout expired")
}
recvBuf.dequeue()
}
def close(): Unit = mgr.stop()
/** Waits until the JS VM has established a connection, or the VM
* terminated. Returns true if a connection was established.
*/
private def awaitConnection(): Boolean = {
while (!mgr.isConnected && !mgr.isClosed && isRunning)
wait(10000)
if (!mgr.isConnected && !mgr.isClosed && isRunning)
throw new TimeoutException(
"The PhantomJS WebSocket client took too long to connect")
mgr.isConnected
}
override protected def initFiles(): Seq[VirtualJSFile] =
super.initFiles :+ comSetup
}
protected trait AbstractPhantomRunner extends AbstractExtRunner {
override protected def getVMArgs() =
// Add launcher file to arguments
additionalArgs :+ createTmpLauncherFile().getAbsolutePath
/** In phantom.js, we include JS using HTML */
override protected def writeJSFile(file: VirtualJSFile, writer: Writer) = {
file match {
case file: FileVirtualJSFile =>
val fname = htmlEscape(fixFileURI(file.file.toURI).toASCIIString)
writer.write(
s"""<script type="text/javascript" src="$fname"></script>""" + "\\n")
case _ =>
writer.write("""<script type="text/javascript">""" + "\\n")
writer.write(s"// Virtual File: ${file.path}\\n")
writer.write(file.content)
writer.write("</script>\\n")
}
}
/**
* PhantomJS doesn't support Function.prototype.bind. We polyfill it.
* https://github.com/ariya/phantomjs/issues/10522
*/
override protected def initFiles(): Seq[VirtualJSFile] = Seq(
new MemVirtualJSFile("bindPolyfill.js").withContent(
"""
|// Polyfill for Function.bind from Facebook react:
|// https://github.com/facebook/react/blob/3dc10749080a460e48bee46d769763ec7191ac76/src/test/phantomjs-shims.js
|// Originally licensed under Apache 2.0
|(function() {
|
| var Ap = Array.prototype;
| var slice = Ap.slice;
| var Fp = Function.prototype;
|
| if (!Fp.bind) {
| // PhantomJS doesn't support Function.prototype.bind natively, so
| // polyfill it whenever this module is required.
| Fp.bind = function(context) {
| var func = this;
| var args = slice.call(arguments, 1);
|
| function bound() {
| var invokedAsConstructor = func.prototype && (this instanceof func);
| return func.apply(
| // Ignore the context parameter when invoking the bound function
| // as a constructor. Note that this includes not only constructor
| // invocations using the new keyword but also calls to base class
| // constructors such as BaseClass.call(this, ...) or super(...).
| !invokedAsConstructor && context || this,
| args.concat(slice.call(arguments))
| );
| }
|
| // The bound function must share the .prototype of the unbound
| // function so that any object created by one constructor will count
| // as an instance of both constructors.
| bound.prototype = func.prototype;
|
| return bound;
| };
| }
|
|})();
|""".stripMargin
),
new MemVirtualJSFile("scalaJSEnvInfo.js").withContent(
"""
|__ScalaJSEnv = {
| exitFunction: function(status) {
| window.callPhantom({
| action: 'exit',
| returnValue: status | 0
| });
| }
|};
""".stripMargin
)
)
protected def writeWebpageLauncher(out: Writer): Unit = {
out.write("<html>\\n<head>\\n<title>Phantom.js Launcher</title>\\n")
sendJS(getLibJSFiles(), out)
writeCodeLauncher(code, out)
out.write("</head>\\n<body></body>\\n</html>\\n")
}
protected def createTmpLauncherFile(): File = {
val webF = createTmpWebpage()
val launcherTmpF = File.createTempFile("phantomjs-launcher", ".js")
launcherTmpF.deleteOnExit()
val out = new FileWriter(launcherTmpF)
try {
out.write(
s"""// Scala.js Phantom.js launcher
|var page = require('webpage').create();
|var url = "${escapeJS(fixFileURI(webF.toURI).toASCIIString)}";
|var autoExit = $autoExit;
|page.onConsoleMessage = function(msg) {
| console.log(msg);
|};
|page.onError = function(msg, trace) {
| console.error(msg);
| if (trace && trace.length) {
| console.error('');
| trace.forEach(function(t) {
| console.error(' ' + t.file + ':' + t.line + (t.function ? ' (in function "' + t.function +'")' : ''));
| });
| }
|
| phantom.exit(2);
|};
|page.onCallback = function(data) {
| if (!data.action) {
| console.error('Called callback without action');
| phantom.exit(3);
| } else if (data.action === 'exit') {
| phantom.exit(data.returnValue || 0);
| } else if (data.action === 'setAutoExit') {
| if (typeof(data.autoExit) === 'boolean')
| autoExit = data.autoExit;
| else
| autoExit = true;
| } else {
| console.error('Unknown callback action ' + data.action);
| phantom.exit(4);
| }
|};
|page.open(url, function (status) {
| if (autoExit || status !== 'success')
| phantom.exit(status !== 'success');
|});
|""".stripMargin)
} finally {
out.close()
}
logger.debug(
"PhantomJS using launcher at: " + launcherTmpF.getAbsolutePath())
launcherTmpF
}
protected def createTmpWebpage(): File = {
val webTmpF = File.createTempFile("phantomjs-launcher-webpage", ".html")
webTmpF.deleteOnExit()
val out = new BufferedWriter(new FileWriter(webTmpF))
try {
writeWebpageLauncher(out)
} finally {
out.close()
}
logger.debug(
"PhantomJS using webpage launcher at: " + webTmpF.getAbsolutePath())
webTmpF
}
  /**
   * Emits a script tag that runs `code` once the page's 'load' event fires,
   * so the library scripts written earlier in the head are available first.
   * A comment records the origin path of the virtual file for debugging.
   */
  protected def writeCodeLauncher(code: VirtualJSFile, out: Writer): Unit = {
    out.write("""<script type="text/javascript">""" + "\\n")
    out.write("// Phantom.js code launcher\\n")
    out.write(s"// Origin: ${code.path}\\n")
    out.write("window.addEventListener('load', function() {\\n")
    out.write(code.content)
    out.write("}, false);\\n")
    out.write("</script>\\n")
  }
}
protected def htmlEscape(str: String): String = str.flatMap {
case '<' => "<"
case '>' => ">"
case '"' => """
case '&' => "&"
case c => c :: Nil
}
}
object PhantomJSEnv {
  // Hard cap on the size of one message, in bytes.
  private final val MaxByteMessageSize = 32768 // 32 KB
  // Same cap expressed in chars, assuming the worst case of 2 bytes per char.
  private final val MaxCharMessageSize = MaxByteMessageSize / 2 // 2B per char
  // Usable payload per message: one char is reserved for the fragmentation flag.
  private final val MaxCharPayloadSize = MaxCharMessageSize - 1 // frag flag
}
| matthughes/scala-js | js-envs/src/main/scala/org/scalajs/jsenv/phantomjs/PhantomJSEnv.scala | Scala | bsd-3-clause | 17,409 |
package com.chitingraphics.stingray.utils
import com.chitingraphics.stingray.utils.Ar.RealType
import scala.math._
/**
 * Immutable, rectangular matrix of [[Ar.RealType]] values backed by nested
 * lists (row-major). Every operation returns a new instance; the receiver is
 * never mutated. Construction fails if the data is empty or ragged.
 *
 * @param arr row-major matrix data; must be non-empty and rectangular
 */
case class Ar(val arr: List[List[Ar.RealType]]) {

  // Dimensions are computed and validated exactly once, at construction.
  private val (nRows, nCols): (Int, Int) = calculateDimensions()

  /** Fails unless `that` has exactly the same dimensions as this matrix. */
  private def verifySameDimensions(that: Ar): Unit = {
    val (nRows, nCols) = this.dims
    val (mRows, mCols) = that.dims
    require(nRows == mRows && nCols == mCols, "Dimensions must match")
  }

  /** Fails unless (i, j) addresses a cell inside the matrix. */
  private def verifyIndices(i: Int, j: Int): Unit = {
    require(i >= 0 && i < this.nRows, "`i` must be an index")
    require(j >= 0 && j < this.nCols, "`j` must be an index")
  }

  /** Computes (rows, cols) and validates non-emptiness and rectangularity. */
  private def calculateDimensions(): (Int, Int) = {
    val nRows = arr.length
    require(nRows > 0, "Array must have a value")
    val nCols = arr(0).length
    require(nCols > 0, "Array must have a value")
    for (row <- arr) {
      require(row.length == nCols, "Matrix must be rectangular")
    }
    (nRows, nCols)
  }

  /** (number of rows, number of columns). */
  def dims: (Int, Int) = (nRows, nCols)

  /** Element-wise combination of two same-sized matrices with `f`. */
  def zipWith(that: Ar)(f: (Ar.RealType, Ar.RealType) => Ar.RealType): Ar = {
    verifySameDimensions(that)
    // Build the result rows directly. The previous implementation called
    // `set` once per cell, rebuilding the entire matrix each time (O(n^2)
    // work per element); the output is identical.
    new Ar(arr.zip(that.arr).map { case (r1, r2) =>
      r1.zip(r2).map { case (a, b) => f(a, b) }
    })
  }

  /** Applies `f` to every element. */
  def map(f: (Ar.RealType) => Ar.RealType): Ar =
    new Ar(arr.map(_.map(f)))

  /** Element at row `i`, column `j` (column defaults to 0 for vectors). */
  def apply(i: Int, j: Int = 0): Ar.RealType = {
    verifyIndices(i, j)
    this.arr(i)(j)
  }

  /**
   * Returns a copy with the cell at `index` replaced by `value`.
   * NOTE: matching the original behavior, an out-of-range index is silently
   * ignored and an unchanged copy is returned (no exception).
   */
  def set(index: (Int, Int), value: Ar.RealType): Ar = {
    val (x, y) = index
    new Ar(arr.zipWithIndex.map { case (row, i) =>
      if (i == x && y >= 0 && y < nCols) row.updated(y, value) else row
    })
  }

  /** Sum of squared elements (squared Frobenius norm). */
  def normSquared(): Ar.RealType =
    arr.flatten.map(x => x * x).sum

  /** Frobenius norm. */
  def norm(): Ar.RealType = sqrt(this.normSquared())

  /** Transpose. */
  def trans(): Ar = new Ar(arr.transpose)

  def det(): Ar.RealType = ???
  def inverse(): Ar = ???
  def dot(that: Ar): Ar.RealType = ???
  def cross(that: Ar): Ar = ???

  def unary_+(): Ar = this.map(x => x)
  def unary_-(): Ar = this.map(x => -x)

  def +(that: Ar): Ar = this.zipWith(that)(_ + _)
  def -(that: Ar): Ar = this.zipWith(that)(_ - _)
  def *(a: RealType): Ar = this.map(_ * a)
  def /(a: RealType): Ar = this.map(_ / a)

  /**
   * Matrix product. FIX: the result was previously allocated with *this*
   * matrix's dimensions (nRows x nCols); for non-square operands the columns
   * beyond nCols were silently dropped by `set`. The product of an (n x m)
   * and an (m x p) matrix is (n x p).
   */
  def *(that: Ar): Ar = {
    require(this.nCols == that.nRows, "The Columns of A should match the rows of B in A * B")
    val product = List.tabulate(this.nRows, that.nCols) { (i, j) =>
      (0 until this.nCols).map(k => this(i, k) * that(k, j)).sum
    }
    new Ar(product)
  }

  def *(ray: Ray) = ray.transformBy(this)

  /** Element-wise equality. FIX: previously a stub that always returned true. */
  def ==(that: Ar): Boolean = this.arr == that.arr
  def !=(that: Ar): Boolean = !(this == that)

  /** Fixed-width (7.3f) bracketed rows, one per line. */
  override def toString: String = {
    val rows = arr.map { row =>
      row.map(v => f" $v%7.3f ").mkString("[", "", "]\\n")
    }
    rows.mkString("\\n", "", "")
  }
}
/** Factory methods and implicit helpers for [[Ar]]. */
object Ar {

  /** Scalar type of all matrix elements. */
  type RealType = Double

  /**
   * Builds a `rows` x `cols` matrix from `vals`, given in row-major order.
   * The value count must match the dimensions exactly.
   */
  def apply(rows: Int, cols: Int = 1)(vals: RealType*): Ar = {
    require(rows > 0, "Number of rows must be greater than zero")
    require(cols > 0, "Number of columns must be greater than zero")
    require(rows * cols == vals.length, "Incorrect argument list size. Values does not match dimensions")
    new Ar(List.tabulate(rows, cols)((i, j) => vals(i * cols + j)))
  }

  /** Matrix filled with ones. */
  def ones(rows: Int, cols: Int = 1): Ar =
    Ar(rows, cols)(Seq.fill[RealType](rows * cols)(1): _*)

  /** Matrix filled with zeros. */
  def zeros(rows: Int, cols: Int = 1): Ar =
    Ar(rows, cols)(Seq.fill[RealType](rows * cols)(0): _*)

  def random(rows: Int, cols: Int): Ar = ???

  /** Identity matrix of size `dims` x `dims`. */
  def eye(dims: Int): Ar =
    (0 until dims).foldLeft(zeros(dims, dims)) { (m, i) => m.set((i, i), 1) }

  // Hack for getting `2.0 * matrix` to work: wrap the left-hand scalar so
  // that its `*` can dispatch to the matrix's scalar multiplication.
  case class LeftScalar(val value: Ar.RealType) {
    def *(ar: Ar): Ar = ar * value
  }

  implicit def realToLeftScalar(real: Ar.RealType): LeftScalar = LeftScalar(real)
  implicit def intToLeftScalar(int: Int): LeftScalar = realToLeftScalar(int)
}
| acycliczebra/StingRay | src/main/scala-2.11/com/chitingraphics/stingray/utils/Ar.scala | Scala | mit | 4,596 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.errors
import java.io.{FileNotFoundException, IOException}
import java.lang.reflect.InvocationTargetException
import java.net.{URISyntaxException, URL}
import java.sql.{SQLException, SQLFeatureNotSupportedException}
import java.text.{ParseException => JavaParseException}
import java.time.{DateTimeException, LocalDate}
import java.time.format.DateTimeParseException
import java.time.temporal.ChronoField
import java.util.ConcurrentModificationException
import java.util.concurrent.TimeoutException
import com.fasterxml.jackson.core.{JsonParser, JsonToken}
import org.apache.hadoop.fs.{FileAlreadyExistsException, FileStatus, Path}
import org.apache.hadoop.fs.permission.FsPermission
import org.codehaus.commons.compiler.CompileException
import org.codehaus.janino.InternalCompilerException
import org.apache.spark.{Partition, SparkArithmeticException, SparkArrayIndexOutOfBoundsException, SparkClassNotFoundException, SparkConcurrentModificationException, SparkDateTimeException, SparkException, SparkFileAlreadyExistsException, SparkFileNotFoundException, SparkIllegalArgumentException, SparkIllegalStateException, SparkIndexOutOfBoundsException, SparkNoSuchElementException, SparkNoSuchMethodException, SparkNumberFormatException, SparkRuntimeException, SparkSecurityException, SparkSQLException, SparkSQLFeatureNotSupportedException, SparkUnsupportedOperationException, SparkUpgradeException}
import org.apache.spark.executor.CommitDeniedException
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.memory.SparkOutOfMemoryError
import org.apache.spark.sql.catalyst.ScalaReflection.Schema
import org.apache.spark.sql.catalyst.WalkedTypePath
import org.apache.spark.sql.catalyst.analysis.UnresolvedGenerator
import org.apache.spark.sql.catalyst.catalog.{CatalogDatabase, CatalogTable}
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, Expression, UnevaluableAggregate}
import org.apache.spark.sql.catalyst.parser.ParseException
import org.apache.spark.sql.catalyst.plans.JoinType
import org.apache.spark.sql.catalyst.plans.logical.{DomainJoin, LogicalPlan}
import org.apache.spark.sql.catalyst.plans.logical.statsEstimation.ValueInterval
import org.apache.spark.sql.catalyst.trees.TreeNode
import org.apache.spark.sql.catalyst.util.{sideBySide, BadRecordException, FailFastMode}
import org.apache.spark.sql.connector.catalog.{CatalogNotFoundException, Identifier, Table, TableProvider}
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
import org.apache.spark.sql.connector.expressions.Transform
import org.apache.spark.sql.execution.QueryExecutionException
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.StaticSQLConf.GLOBAL_TEMP_DATABASE
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.array.ByteArrayMethods
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.CircularBuffer
/**
* Object for grouping error messages from (most) exceptions thrown during query execution.
* This does not include exceptions thrown during the eager execution of commands, which are
* grouped into [[QueryCompilationErrors]].
*/
object QueryExecutionErrors {
  /** Exception for an unsupported column change (error class UNSUPPORTED_CHANGE_COLUMN). */
  def columnChangeUnsupportedError(): Throwable = {
    new SparkUnsupportedOperationException(errorClass = "UNSUPPORTED_CHANGE_COLUMN",
      messageParameters = Array.empty)
  }
  /** Internal error: a logical hint operator survived past analysis, which should never happen. */
  def logicalHintOperatorNotRemovedDuringAnalysisError(): Throwable = {
    new SparkIllegalStateException(errorClass = "INTERNAL_ERROR",
      messageParameters = Array(
        "Internal error: logical hint operator should have been removed during analysis"))
  }
def cannotEvaluateExpressionError(expression: Expression): Throwable = {
new SparkUnsupportedOperationException(errorClass = "INTERNAL_ERROR",
messageParameters = Array(s"Cannot evaluate expression: $expression"))
}
def cannotGenerateCodeForExpressionError(expression: Expression): Throwable = {
new SparkUnsupportedOperationException(errorClass = "INTERNAL_ERROR",
messageParameters = Array(s"Cannot generate code for expression: $expression"))
}
def cannotTerminateGeneratorError(generator: UnresolvedGenerator): Throwable = {
new SparkUnsupportedOperationException(errorClass = "INTERNAL_ERROR",
messageParameters = Array(s"Cannot terminate expression: $generator"))
}
  /**
   * Overflow while casting value `t` to `targetType`. The message embeds the
   * ANSI-mode config key so users know how to bypass the strict behavior.
   */
  def castingCauseOverflowError(t: Any, targetType: String): ArithmeticException = {
    new SparkArithmeticException(errorClass = "CAST_CAUSES_OVERFLOW",
      messageParameters = Array(t.toString, targetType, SQLConf.ANSI_ENABLED.key))
  }
  /**
   * Decimal `value` does not fit in (precision, scale); the message includes
   * the value's debug string and the ANSI-mode config key for bypassing.
   */
  def cannotChangeDecimalPrecisionError(
      value: Decimal, decimalPrecision: Int, decimalScale: Int): ArithmeticException = {
    new SparkArithmeticException(errorClass = "CANNOT_CHANGE_DECIMAL_PRECISION",
      messageParameters = Array(value.toDebugString,
        decimalPrecision.toString, decimalScale.toString, SQLConf.ANSI_ENABLED.key))
  }
  /**
   * Re-wraps a NumberFormatException, appending hints for `try_cast` and the
   * ANSI-mode config key. The original exception's message is preserved.
   */
  def invalidInputSyntaxForNumericError(e: NumberFormatException): NumberFormatException = {
    new NumberFormatException(s"${e.getMessage}. To return NULL instead, use 'try_cast'. " +
      s"If necessary set ${SQLConf.ANSI_ENABLED.key} to false to bypass this error.")
  }
def invalidInputSyntaxForNumericError(s: UTF8String): NumberFormatException = {
new SparkNumberFormatException(errorClass = "INVALID_INPUT_SYNTAX_FOR_NUMERIC_TYPE",
messageParameters = Array(s.toString, SQLConf.ANSI_ENABLED.key))
}
def cannotCastFromNullTypeError(to: DataType): Throwable = {
new SparkException(errorClass = "CANNOT_CAST_DATATYPE",
messageParameters = Array(NullType.typeName, to.typeName), null)
}
def cannotCastError(from: DataType, to: DataType): Throwable = {
new SparkException(errorClass = "CANNOT_CAST_DATATYPE",
messageParameters = Array(from.typeName, to.typeName), null)
}
def cannotParseDecimalError(): Throwable = {
new SparkIllegalStateException(errorClass = "CANNOT_PARSE_DECIMAL",
messageParameters = Array.empty)
}
def simpleStringWithNodeIdUnsupportedError(nodeName: String): Throwable = {
new SparkUnsupportedOperationException(errorClass = "UNSUPPORTED_SIMPLE_STRING_WITH_NODE_ID",
messageParameters = Array(nodeName))
}
def evaluateUnevaluableAggregateUnsupportedError(
methodName: String, unEvaluable: UnevaluableAggregate): Throwable = {
new SparkUnsupportedOperationException(errorClass = "INTERNAL_ERROR",
messageParameters = Array(s"Cannot evaluate expression: $methodName: $unEvaluable"))
}
def dataTypeUnsupportedError(dt: DataType): Throwable = {
new SparkException(errorClass = "UNSUPPORTED_DATATYPE",
messageParameters = Array(dt.typeName), null)
}
def dataTypeUnsupportedError(dataType: String, failure: String): Throwable = {
new SparkIllegalArgumentException(errorClass = "UNSUPPORTED_DATATYPE",
messageParameters = Array(dataType + failure))
}
def failedExecuteUserDefinedFunctionError(funcCls: String, inputTypes: String,
outputType: String, e: Throwable): Throwable = {
new SparkException(errorClass = "FAILED_EXECUTE_UDF",
messageParameters = Array(funcCls, inputTypes, outputType), e)
}
  /** Division by zero under ANSI mode; the message carries the ANSI config key. */
  def divideByZeroError(): ArithmeticException = {
    new SparkArithmeticException(
      errorClass = "DIVIDE_BY_ZERO", messageParameters = Array(SQLConf.ANSI_ENABLED.key))
  }
  /** Out-of-bounds array index; suggests the strict-index-operator config key in the message. */
  def invalidArrayIndexError(index: Int, numElements: Int): ArrayIndexOutOfBoundsException = {
    invalidArrayIndexErrorInternal(index, numElements, SQLConf.ANSI_STRICT_INDEX_OPERATOR.key)
  }
  /** Same as invalidArrayIndexError but suggests the general ANSI-mode config key instead. */
  def invalidInputIndexError(index: Int, numElements: Int): ArrayIndexOutOfBoundsException = {
    invalidArrayIndexErrorInternal(index, numElements, SQLConf.ANSI_ENABLED.key)
  }
  /** Shared builder for INVALID_ARRAY_INDEX; `key` is the config key named in the message. */
  private def invalidArrayIndexErrorInternal(
      index: Int,
      numElements: Int,
      key: String): ArrayIndexOutOfBoundsException = {
    new SparkArrayIndexOutOfBoundsException(errorClass = "INVALID_ARRAY_INDEX",
      messageParameters = Array(index.toString, numElements.toString, key))
  }
  /** Out-of-bounds index passed to `element_at`; suggests the ANSI-mode config key. */
  def invalidElementAtIndexError(
      index: Int,
      numElements: Int): ArrayIndexOutOfBoundsException = {
    new SparkArrayIndexOutOfBoundsException(errorClass = "INVALID_ARRAY_INDEX_IN_ELEMENT_AT",
      messageParameters = Array(index.toString, numElements.toString, SQLConf.ANSI_ENABLED.key))
  }
def mapKeyNotExistError(key: Any, isElementAtFunction: Boolean): NoSuchElementException = {
if (isElementAtFunction) {
new SparkNoSuchElementException(errorClass = "MAP_KEY_DOES_NOT_EXIST_IN_ELEMENT_AT",
messageParameters = Array(key.toString, SQLConf.ANSI_ENABLED.key))
} else {
new SparkNoSuchElementException(errorClass = "MAP_KEY_DOES_NOT_EXIST",
messageParameters = Array(key.toString, SQLConf.ANSI_STRICT_INDEX_OPERATOR.key))
}
}
def rowFromCSVParserNotExpectedError(): Throwable = {
new SparkIllegalArgumentException(errorClass = "ROW_FROM_CSV_PARSER_NOT_EXPECTED",
messageParameters = Array.empty)
}
def inputTypeUnsupportedError(dataType: DataType): Throwable = {
new IllegalArgumentException(s"Unsupported input type ${dataType.catalogString}")
}
def invalidFractionOfSecondError(): DateTimeException = {
new SparkDateTimeException(errorClass = "INVALID_FRACTION_OF_SECOND",
Array(SQLConf.ANSI_ENABLED.key))
}
  /** Re-wraps a DateTimeParseException, appending the ANSI-mode bypass hint; preserves parse position info. */
  def ansiDateTimeParseError(e: DateTimeParseException): DateTimeParseException = {
    val newMessage = s"${e.getMessage}. " +
      s"If necessary set ${SQLConf.ANSI_ENABLED.key} to false to bypass this error."
    new DateTimeParseException(newMessage, e.getParsedString, e.getErrorIndex, e.getCause)
  }
  /** Re-wraps a DateTimeException, appending the ANSI-mode bypass hint; keeps the original cause. */
  def ansiDateTimeError(e: DateTimeException): DateTimeException = {
    val newMessage = s"${e.getMessage}. " +
      s"If necessary set ${SQLConf.ANSI_ENABLED.key} to false to bypass this error."
    new DateTimeException(newMessage, e.getCause)
  }
  /** Re-wraps a java.text.ParseException, appending the ANSI-mode bypass hint; keeps the error offset. */
  def ansiParseError(e: JavaParseException): JavaParseException = {
    val newMessage = s"${e.getMessage}. " +
      s"If necessary set ${SQLConf.ANSI_ENABLED.key} to false to bypass this error."
    new JavaParseException(newMessage, e.getErrorOffset)
  }
def ansiIllegalArgumentError(message: String): IllegalArgumentException = {
val newMessage = s"$message. If necessary set ${SQLConf.ANSI_ENABLED.key} " +
s"to false to bypass this error."
new IllegalArgumentException(newMessage)
}
  /** Convenience overload: extracts the message from an existing exception. */
  def ansiIllegalArgumentError(e: IllegalArgumentException): IllegalArgumentException = {
    ansiIllegalArgumentError(e.getMessage)
  }
def overflowInSumOfDecimalError(): ArithmeticException = {
arithmeticOverflowError("Overflow in sum of decimals")
}
def overflowInIntegralDivideError(): ArithmeticException = {
arithmeticOverflowError("Overflow in integral divide", "try_divide")
}
def mapSizeExceedArraySizeWhenZipMapError(size: Int): RuntimeException = {
new RuntimeException(s"Unsuccessful try to zip maps with $size " +
"unique keys due to exceeding the array size limit " +
s"${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.")
}
def copyNullFieldNotAllowedError(): Throwable = {
new IllegalStateException("Do not attempt to copy a null field")
}
def literalTypeUnsupportedError(v: Any): RuntimeException = {
new SparkRuntimeException("UNSUPPORTED_LITERAL_TYPE",
Array(v.getClass.toString, v.toString))
}
def noDefaultForDataTypeError(dataType: DataType): RuntimeException = {
new RuntimeException(s"no default for type $dataType")
}
def doGenCodeOfAliasShouldNotBeCalledError(): Throwable = {
new IllegalStateException("Alias.doGenCode should not be called.")
}
def orderedOperationUnsupportedByDataTypeError(dataType: DataType): Throwable = {
new IllegalArgumentException(s"Type $dataType does not support ordered operations")
}
def regexGroupIndexLessThanZeroError(): Throwable = {
new IllegalArgumentException("The specified group index cannot be less than zero")
}
def regexGroupIndexExceedGroupCountError(
groupCount: Int, groupIndex: Int): Throwable = {
new IllegalArgumentException(
s"Regex group count is $groupCount, but the specified group index is $groupIndex")
}
def invalidUrlError(url: UTF8String, e: URISyntaxException): Throwable = {
new IllegalArgumentException(s"Find an invalid url string ${url.toString}. " +
s"If necessary set ${SQLConf.ANSI_ENABLED.key} to false to bypass this error.", e)
}
def dataTypeOperationUnsupportedError(): Throwable = {
new UnsupportedOperationException("dataType")
}
def mergeUnsupportedByWindowFunctionError(): Throwable = {
new UnsupportedOperationException("Window Functions do not support merging.")
}
def dataTypeUnexpectedError(dataType: DataType): Throwable = {
new UnsupportedOperationException(s"Unexpected data type ${dataType.catalogString}")
}
def typeUnsupportedError(dataType: DataType): Throwable = {
new IllegalArgumentException(s"Unexpected type $dataType")
}
def negativeValueUnexpectedError(frequencyExpression : Expression): Throwable = {
new SparkException(s"Negative values found in ${frequencyExpression.sql}")
}
def addNewFunctionMismatchedWithFunctionError(funcName: String): Throwable = {
new IllegalArgumentException(s"$funcName is not matched at addNewFunction")
}
def cannotGenerateCodeForUncomparableTypeError(
codeType: String, dataType: DataType): Throwable = {
new IllegalArgumentException(
s"cannot generate $codeType code for un-comparable type: ${dataType.catalogString}")
}
def cannotGenerateCodeForUnsupportedTypeError(dataType: DataType): Throwable = {
new IllegalArgumentException(s"cannot generate code for unsupported type: $dataType")
}
def cannotInterpolateClassIntoCodeBlockError(arg: Any): Throwable = {
new IllegalArgumentException(
s"Can not interpolate ${arg.getClass.getName} into code block.")
}
def customCollectionClsNotResolvedError(): Throwable = {
new UnsupportedOperationException("not resolved")
}
def classUnsupportedByMapObjectsError(cls: Class[_]): RuntimeException = {
new RuntimeException(s"class `${cls.getName}` is not supported by `MapObjects` as " +
"resulting collection.")
}
def nullAsMapKeyNotAllowedError(): RuntimeException = {
new RuntimeException("Cannot use null as map key!")
}
def methodNotDeclaredError(name: String): Throwable = {
new SparkNoSuchMethodException(errorClass = "INTERNAL_ERROR",
messageParameters = Array(
s"""A method named "$name" is not declared in any enclosing class nor any supertype"""))
}
def constructorNotFoundError(cls: String): Throwable = {
new RuntimeException(s"Couldn't find a valid constructor on $cls")
}
def primaryConstructorNotFoundError(cls: Class[_]): Throwable = {
new RuntimeException(s"Couldn't find a primary constructor on $cls")
}
def unsupportedNaturalJoinTypeError(joinType: JoinType): Throwable = {
new RuntimeException("Unsupported natural join type " + joinType)
}
def notExpectedUnresolvedEncoderError(attr: AttributeReference): Throwable = {
new RuntimeException(s"Unresolved encoder expected, but $attr was found.")
}
def unsupportedEncoderError(): Throwable = {
new RuntimeException("Only expression encoders are supported for now.")
}
def notOverrideExpectedMethodsError(className: String, m1: String, m2: String): Throwable = {
new RuntimeException(s"$className must override either $m1 or $m2")
}
def failToConvertValueToJsonError(value: AnyRef, cls: Class[_], dataType: DataType): Throwable = {
new RuntimeException(s"Failed to convert value $value (class of $cls) " +
s"with the type of $dataType to JSON.")
}
def unexpectedOperatorInCorrelatedSubquery(op: LogicalPlan, pos: String = ""): Throwable = {
new RuntimeException(s"Unexpected operator $op in correlated subquery" + pos)
}
def unreachableError(err: String = ""): Throwable = {
new RuntimeException("This line should be unreachable" + err)
}
def unsupportedRoundingMode(roundMode: BigDecimal.RoundingMode.Value): Throwable = {
new RuntimeException(s"Not supported rounding mode: $roundMode")
}
def resolveCannotHandleNestedSchema(plan: LogicalPlan): Throwable = {
new RuntimeException(s"Can not handle nested schema yet... plan $plan")
}
def inputExternalRowCannotBeNullError(): RuntimeException = {
new RuntimeException("The input external row cannot be null.")
}
def fieldCannotBeNullMsg(index: Int, fieldName: String): String = {
s"The ${index}th field '$fieldName' of input row cannot be null."
}
def fieldCannotBeNullError(index: Int, fieldName: String): RuntimeException = {
new RuntimeException(fieldCannotBeNullMsg(index, fieldName))
}
def unableToCreateDatabaseAsFailedToCreateDirectoryError(
dbDefinition: CatalogDatabase, e: IOException): Throwable = {
new SparkException(s"Unable to create database ${dbDefinition.name} as failed " +
s"to create its directory ${dbDefinition.locationUri}", e)
}
def unableToDropDatabaseAsFailedToDeleteDirectoryError(
dbDefinition: CatalogDatabase, e: IOException): Throwable = {
new SparkException(s"Unable to drop database ${dbDefinition.name} as failed " +
s"to delete its directory ${dbDefinition.locationUri}", e)
}
def unableToCreateTableAsFailedToCreateDirectoryError(
table: String, defaultTableLocation: Path, e: IOException): Throwable = {
new SparkException(s"Unable to create table $table as failed " +
s"to create its directory $defaultTableLocation", e)
}
def unableToDeletePartitionPathError(partitionPath: Path, e: IOException): Throwable = {
new SparkException(s"Unable to delete partition path $partitionPath", e)
}
def unableToDropTableAsFailedToDeleteDirectoryError(
table: String, dir: Path, e: IOException): Throwable = {
new SparkException(s"Unable to drop table $table as failed " +
s"to delete its directory $dir", e)
}
def unableToRenameTableAsFailedToRenameDirectoryError(
oldName: String, newName: String, oldDir: Path, e: IOException): Throwable = {
new SparkException(s"Unable to rename table $oldName to $newName as failed " +
s"to rename its directory $oldDir", e)
}
def unableToCreatePartitionPathError(partitionPath: Path, e: IOException): Throwable = {
new SparkException(s"Unable to create partition path $partitionPath", e)
}
def unableToRenamePartitionPathError(oldPartPath: Path, e: IOException): Throwable = {
new SparkException(s"Unable to rename partition path $oldPartPath", e)
}
def methodNotImplementedError(methodName: String): Throwable = {
new UnsupportedOperationException(s"$methodName is not implemented")
}
def tableStatsNotSpecifiedError(): Throwable = {
new IllegalStateException("table stats must be specified.")
}
  /** Re-wraps an ArithmeticException, appending the ANSI-mode bypass hint to its message. */
  def arithmeticOverflowError(e: ArithmeticException): ArithmeticException = {
    new ArithmeticException(s"${e.getMessage}. If necessary set ${SQLConf.ANSI_ENABLED.key} " +
      s"to false to bypass this error.")
  }
  /**
   * Builds an overflow error from `message`. If `hint` names an alternative
   * function (e.g. "try_divide"), a "To return NULL instead" suggestion is
   * inserted before the ANSI-mode bypass hint.
   */
  def arithmeticOverflowError(message: String, hint: String = ""): ArithmeticException = {
    val alternative = if (hint.nonEmpty) s" To return NULL instead, use '$hint'." else ""
    new ArithmeticException(s"$message.$alternative If necessary set " +
      s"${SQLConf.ANSI_ENABLED.key} to false (except for ANSI interval type) to bypass this error.")
  }
def unaryMinusCauseOverflowError(originValue: AnyVal): ArithmeticException = {
arithmeticOverflowError(s"- $originValue caused overflow")
}
def binaryArithmeticCauseOverflowError(
eval1: Short, symbol: String, eval2: Short): ArithmeticException = {
arithmeticOverflowError(s"$eval1 $symbol $eval2 caused overflow")
}
def failedSplitSubExpressionMsg(length: Int): String = {
"Failed to split subexpression code into small functions because " +
s"the parameter length of at least one split function went over the JVM limit: $length"
}
def failedSplitSubExpressionError(length: Int): Throwable = {
new IllegalStateException(failedSplitSubExpressionMsg(length))
}
def failedToCompileMsg(e: Exception): String = {
s"failed to compile: $e"
}
def internalCompilerError(e: InternalCompilerException): Throwable = {
new InternalCompilerException(failedToCompileMsg(e), e)
}
def compilerError(e: CompileException): Throwable = {
new CompileException(failedToCompileMsg(e), e.getLocation)
}
def unsupportedTableChangeError(e: IllegalArgumentException): Throwable = {
new SparkException(s"Unsupported table change: ${e.getMessage}", e)
}
def notADatasourceRDDPartitionError(split: Partition): Throwable = {
new SparkException(s"[BUG] Not a DataSourceRDDPartition: $split")
}
def dataPathNotSpecifiedError(): Throwable = {
new IllegalArgumentException("'path' is not specified")
}
def createStreamingSourceNotSpecifySchemaError(): Throwable = {
new IllegalArgumentException(
s"""
|Schema must be specified when creating a streaming source DataFrame. If some
|files already exist in the directory, then depending on the file format you
|may be able to create a static DataFrame on that directory with
|'spark.read.load(directory)' and infer schema from it.
""".stripMargin)
}
def streamedOperatorUnsupportedByDataSourceError(
className: String, operator: String): Throwable = {
new UnsupportedOperationException(
s"Data source $className does not support streamed $operator")
}
def multiplePathsSpecifiedError(allPaths: Seq[String]): Throwable = {
new IllegalArgumentException("Expected exactly one path to be specified, but " +
s"got: ${allPaths.mkString(", ")}")
}
def failedToFindDataSourceError(provider: String, error: Throwable): Throwable = {
new ClassNotFoundException(
s"""
|Failed to find data source: $provider. Please find packages at
|http://spark.apache.org/third-party-projects.html
""".stripMargin, error)
}
def removedClassInSpark2Error(className: String, e: Throwable): Throwable = {
new ClassNotFoundException(s"$className was removed in Spark 2.0. " +
"Please check if your library is compatible with Spark 2.0", e)
}
def incompatibleDataSourceRegisterError(e: Throwable): Throwable = {
new SparkClassNotFoundException("INCOMPATIBLE_DATASOURCE_REGISTER", Array(e.getMessage), e)
}
def unrecognizedFileFormatError(format: String): Throwable = {
new IllegalStateException(s"unrecognized format $format")
}
def sparkUpgradeInReadingDatesError(
format: String, config: String, option: String): SparkUpgradeException = {
new SparkUpgradeException("3.0",
s"""
|reading dates before 1582-10-15 or timestamps before 1900-01-01T00:00:00Z from $format
|files can be ambiguous, as the files may be written by Spark 2.x or legacy versions of
|Hive, which uses a legacy hybrid calendar that is different from Spark 3.0+'s Proleptic
|Gregorian calendar. See more details in SPARK-31404. You can set the SQL config
|'$config' or the datasource option '$option' to 'LEGACY' to rebase the datetime values
|w.r.t. the calendar difference during reading. To read the datetime values as it is,
|set the SQL config '$config' or the datasource option '$option' to 'CORRECTED'.
""".stripMargin, null)
}
def sparkUpgradeInWritingDatesError(format: String, config: String): SparkUpgradeException = {
new SparkUpgradeException("3.0",
s"""
|writing dates before 1582-10-15 or timestamps before 1900-01-01T00:00:00Z into $format
|files can be dangerous, as the files may be read by Spark 2.x or legacy versions of Hive
|later, which uses a legacy hybrid calendar that is different from Spark 3.0+'s Proleptic
|Gregorian calendar. See more details in SPARK-31404. You can set $config to 'LEGACY' to
|rebase the datetime values w.r.t. the calendar difference during writing, to get maximum
|interoperability. Or set $config to 'CORRECTED' to write the datetime values as it is,
|if you are 100% sure that the written files will only be read by Spark 3.0+ or other
|systems that use Proleptic Gregorian calendar.
""".stripMargin, null)
}
def buildReaderUnsupportedForFileFormatError(format: String): Throwable = {
new UnsupportedOperationException(s"buildReader is not supported for $format")
}
def jobAbortedError(cause: Throwable): Throwable = {
new SparkException("Job aborted.", cause)
}
def taskFailedWhileWritingRowsError(cause: Throwable): Throwable = {
new SparkException("Task failed while writing rows.", cause)
}
def readCurrentFileNotFoundError(e: FileNotFoundException): Throwable = {
new FileNotFoundException(
s"""
|${e.getMessage}\\n
|It is possible the underlying files have been updated. You can explicitly invalidate
|the cache in Spark by running 'REFRESH TABLE tableName' command in SQL or by
|recreating the Dataset/DataFrame involved.
""".stripMargin)
}
def unsupportedSaveModeError(saveMode: String, pathExists: Boolean): Throwable = {
new IllegalStateException(s"unsupported save mode $saveMode ($pathExists)")
}
def cannotClearOutputDirectoryError(staticPrefixPath: Path): Throwable = {
new IOException(s"Unable to clear output directory $staticPrefixPath prior to writing to it")
}
def cannotClearPartitionDirectoryError(path: Path): Throwable = {
new IOException(s"Unable to clear partition directory $path prior to writing to it")
}
def failedToCastValueToDataTypeForPartitionColumnError(
value: String, dataType: DataType, columnName: String): Throwable = {
new RuntimeException(s"Failed to cast value `$value` to " +
s"`$dataType` for partition column `$columnName`")
}
def endOfStreamError(): Throwable = {
new NoSuchElementException("End of stream")
}
def fallbackV1RelationReportsInconsistentSchemaError(
v2Schema: StructType, v1Schema: StructType): Throwable = {
new IllegalArgumentException(
"The fallback v1 relation reports inconsistent schema:\\n" +
"Schema of v2 scan: " + v2Schema + "\\n" +
"Schema of v1 relation: " + v1Schema)
}
def noRecordsFromEmptyDataReaderError(): Throwable = {
new IOException("No records should be returned from EmptyDataReader")
}
def fileNotFoundError(e: FileNotFoundException): Throwable = {
new FileNotFoundException(
e.getMessage + "\\n" +
"It is possible the underlying files have been updated. " +
"You can explicitly invalidate the cache in Spark by " +
"recreating the Dataset/DataFrame involved.")
}
def unsupportedSchemaColumnConvertError(
filePath: String,
column: String,
logicalType: String,
physicalType: String,
e: Exception): Throwable = {
val message = "Parquet column cannot be converted in " +
s"file $filePath. Column: $column, " +
s"Expected: $logicalType, Found: $physicalType"
new QueryExecutionException(message, e)
}
  /**
   * Wraps a file-read failure for `path`.
   * NOTE(review): the message ends with a trailing "Details: " — presumably
   * the detail text is expected to come from the wrapped cause when the
   * exception is rendered; confirm QueryExecutionException appends it.
   */
  def cannotReadFilesError(
      e: Throwable,
      path: String): Throwable = {
    val message = s"Encountered error while reading file $path. Details: "
    new QueryExecutionException(message, e)
  }
def cannotCreateColumnarReaderError(): Throwable = {
new UnsupportedOperationException("Cannot create columnar reader.")
}
def invalidNamespaceNameError(namespace: Array[String]): Throwable = {
new IllegalArgumentException(s"Invalid namespace name: ${namespace.quoted}")
}
def unsupportedPartitionTransformError(transform: Transform): Throwable = {
new UnsupportedOperationException(
s"SessionCatalog does not support partition transform: $transform")
}
def missingDatabaseLocationError(): Throwable = {
new IllegalArgumentException("Missing database location")
}
def cannotRemoveReservedPropertyError(property: String): Throwable = {
new UnsupportedOperationException(s"Cannot remove reserved property: $property")
}
def namespaceNotEmptyError(namespace: Array[String]): Throwable = {
new IllegalStateException(s"Namespace ${namespace.quoted} is not empty")
}
def writingJobFailedError(cause: Throwable): Throwable = {
new SparkException("Writing job failed.", cause)
}
def writingJobAbortedError(e: Throwable): Throwable = {
new SparkException(
errorClass = "WRITING_JOB_ABORTED",
messageParameters = Array.empty,
cause = e)
}
def commitDeniedError(
partId: Int, taskId: Long, attemptId: Int, stageId: Int, stageAttempt: Int): Throwable = {
val message = s"Commit denied for partition $partId (task $taskId, attempt $attemptId, " +
s"stage $stageId.$stageAttempt)"
new CommitDeniedException(message, stageId, partId, attemptId)
}
def unsupportedTableWritesError(ident: Identifier): Throwable = {
new SparkException(
s"Table implementation does not support writes: ${ident.quoted}")
}
def cannotCreateJDBCTableWithPartitionsError(): Throwable = {
new UnsupportedOperationException("Cannot create JDBC table with partition")
}
def unsupportedUserSpecifiedSchemaError(): Throwable = {
new UnsupportedOperationException("user-specified schema")
}
def writeUnsupportedForBinaryFileDataSourceError(): Throwable = {
new UnsupportedOperationException("Write is not supported for binary file data source")
}
def fileLengthExceedsMaxLengthError(status: FileStatus, maxLength: Int): Throwable = {
new SparkException(
s"The length of ${status.getPath} is ${status.getLen}, " +
s"which exceeds the max length allowed: ${maxLength}.")
}
def unsupportedFieldNameError(fieldName: String): Throwable = {
new RuntimeException(s"Unsupported field name: ${fieldName}")
}
def cannotSpecifyBothJdbcTableNameAndQueryError(
jdbcTableName: String, jdbcQueryString: String): Throwable = {
new IllegalArgumentException(
s"Both '$jdbcTableName' and '$jdbcQueryString' can not be specified at the same time.")
}
def missingJdbcTableNameAndQueryError(
jdbcTableName: String, jdbcQueryString: String): Throwable = {
new IllegalArgumentException(
s"Option '$jdbcTableName' or '$jdbcQueryString' is required."
)
}
def emptyOptionError(optionName: String): Throwable = {
new IllegalArgumentException(s"Option `$optionName` can not be empty.")
}
def invalidJdbcTxnIsolationLevelError(jdbcTxnIsolationLevel: String, value: String): Throwable = {
new IllegalArgumentException(
s"Invalid value `$value` for parameter `$jdbcTxnIsolationLevel`. This can be " +
"`NONE`, `READ_UNCOMMITTED`, `READ_COMMITTED`, `REPEATABLE_READ` or `SERIALIZABLE`.")
}
def cannotGetJdbcTypeError(dt: DataType): Throwable = {
new IllegalArgumentException(s"Can't get JDBC type for ${dt.catalogString}")
}
def unrecognizedSqlTypeError(sqlType: Int): Throwable = {
new SparkSQLException(errorClass = "UNRECOGNIZED_SQL_TYPE", Array(sqlType.toString))
}
def unsupportedJdbcTypeError(content: String): Throwable = {
new SQLException(s"Unsupported type $content")
}
def unsupportedArrayElementTypeBasedOnBinaryError(dt: DataType): Throwable = {
new IllegalArgumentException(s"Unsupported array element " +
s"type ${dt.catalogString} based on binary")
}
def nestedArraysUnsupportedError(): Throwable = {
new IllegalArgumentException("Nested arrays unsupported")
}
def cannotTranslateNonNullValueForFieldError(pos: Int): Throwable = {
new IllegalArgumentException(s"Can't translate non-null value for field $pos")
}
def invalidJdbcNumPartitionsError(n: Int, jdbcNumPartitions: String): Throwable = {
new IllegalArgumentException(
s"Invalid value `$n` for parameter `$jdbcNumPartitions` in table writing " +
"via JDBC. The minimum value is 1.")
}
def transactionUnsupportedByJdbcServerError(): Throwable = {
new SparkSQLFeatureNotSupportedException(errorClass = "UNSUPPORTED_TRANSACTION_BY_JDBC_SERVER",
Array.empty)
}
def dataTypeUnsupportedYetError(dataType: DataType): Throwable = {
new UnsupportedOperationException(s"$dataType is not supported yet.")
}
def unsupportedOperationForDataTypeError(dataType: DataType): Throwable = {
new UnsupportedOperationException(s"DataType: ${dataType.catalogString}")
}
def inputFilterNotFullyConvertibleError(owner: String): Throwable = {
new SparkException(s"The input filter of $owner should be fully convertible.")
}
def cannotReadFooterForFileError(file: Path, e: IOException): Throwable = {
new SparkException(s"Could not read footer for file: $file", e)
}
def cannotReadFooterForFileError(file: FileStatus, e: RuntimeException): Throwable = {
new IOException(s"Could not read footer for file: $file", e)
}
def foundDuplicateFieldInCaseInsensitiveModeError(
requiredFieldName: String, matchedOrcFields: String): Throwable = {
new RuntimeException(
s"""
|Found duplicate field(s) "$requiredFieldName": $matchedOrcFields
|in case-insensitive mode
""".stripMargin.replaceAll("\\n", " "))
}
def failedToMergeIncompatibleSchemasError(
left: StructType, right: StructType, e: Throwable): Throwable = {
new SparkException(s"Failed to merge incompatible schemas $left and $right", e)
}
def ddlUnsupportedTemporarilyError(ddl: String): Throwable = {
new UnsupportedOperationException(s"$ddl is not supported temporarily.")
}
def operatingOnCanonicalizationPlanError(): Throwable = {
new IllegalStateException("operating on canonicalization plan")
}
def executeBroadcastTimeoutError(timeout: Long, ex: Option[TimeoutException]): Throwable = {
new SparkException(
s"""
|Could not execute broadcast in $timeout secs. You can increase the timeout
|for broadcasts via ${SQLConf.BROADCAST_TIMEOUT.key} or disable broadcast join
|by setting ${SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key} to -1
""".stripMargin.replaceAll("\\n", " "), ex.getOrElse(null))
}
def cannotCompareCostWithTargetCostError(cost: String): Throwable = {
new IllegalArgumentException(s"Could not compare cost with $cost")
}
def unsupportedDataTypeError(dt: String): Throwable = {
new UnsupportedOperationException(s"Unsupported data type: ${dt}")
}
def notSupportTypeError(dataType: DataType): Throwable = {
new Exception(s"not support type: $dataType")
}
def notSupportNonPrimitiveTypeError(): Throwable = {
new RuntimeException("Not support non-primitive type now")
}
def unsupportedTypeError(dataType: DataType): Throwable = {
new Exception(s"Unsupported type: ${dataType.catalogString}")
}
def useDictionaryEncodingWhenDictionaryOverflowError(): Throwable = {
new IllegalStateException(
"Dictionary encoding should not be used because of dictionary overflow.")
}
def endOfIteratorError(): Throwable = {
new NoSuchElementException("End of the iterator")
}
def cannotAllocateMemoryToGrowBytesToBytesMapError(): Throwable = {
new IOException("Could not allocate memory to grow BytesToBytesMap")
}
def cannotAcquireMemoryToBuildLongHashedRelationError(size: Long, got: Long): Throwable = {
new SparkException(s"Can't acquire $size bytes memory to build hash relation, " +
s"got $got bytes")
}
def cannotAcquireMemoryToBuildUnsafeHashedRelationError(): Throwable = {
new SparkOutOfMemoryError("There is not enough memory to build hash map")
}
def rowLargerThan256MUnsupportedError(): Throwable = {
new UnsupportedOperationException("Does not support row that is larger than 256M")
}
def cannotBuildHashedRelationWithUniqueKeysExceededError(): Throwable = {
new UnsupportedOperationException(
"Cannot build HashedRelation with more than 1/3 billions unique keys")
}
def cannotBuildHashedRelationLargerThan8GError(): Throwable = {
new UnsupportedOperationException(
"Can not build a HashedRelation that is larger than 8G")
}
def failedToPushRowIntoRowQueueError(rowQueue: String): Throwable = {
new SparkException(s"failed to push a row into $rowQueue")
}
def unexpectedWindowFunctionFrameError(frame: String): Throwable = {
new RuntimeException(s"Unexpected window function frame $frame.")
}
def cannotParseStatisticAsPercentileError(
stats: String, e: NumberFormatException): Throwable = {
new IllegalArgumentException(s"Unable to parse $stats as a percentile", e)
}
def statisticNotRecognizedError(stats: String): Throwable = {
new IllegalArgumentException(s"$stats is not a recognised statistic")
}
def unknownColumnError(unknownColumn: String): Throwable = {
new IllegalArgumentException(s"Unknown column: $unknownColumn")
}
def unexpectedAccumulableUpdateValueError(o: Any): Throwable = {
new IllegalArgumentException(s"Unexpected: $o")
}
def unscaledValueTooLargeForPrecisionError(): Throwable = {
new ArithmeticException("Unscaled value too large for precision. " +
s"If necessary set ${SQLConf.ANSI_ENABLED.key} to false to bypass this error.")
}
def decimalPrecisionExceedsMaxPrecisionError(precision: Int, maxPrecision: Int): Throwable = {
new ArithmeticException(
s"Decimal precision $precision exceeds max precision $maxPrecision")
}
def outOfDecimalTypeRangeError(str: UTF8String): Throwable = {
new ArithmeticException(s"out of decimal type range: $str")
}
def unsupportedArrayTypeError(clazz: Class[_]): Throwable = {
new RuntimeException(s"Do not support array of type $clazz.")
}
def unsupportedJavaTypeError(clazz: Class[_]): Throwable = {
new RuntimeException(s"Do not support type $clazz.")
}
def failedParsingStructTypeError(raw: String): Throwable = {
new RuntimeException(s"Failed parsing ${StructType.simpleString}: $raw")
}
def failedMergingFieldsError(leftName: String, rightName: String, e: Throwable): Throwable = {
new SparkException(s"Failed to merge fields '$leftName' and '$rightName'. ${e.getMessage}")
}
def cannotMergeDecimalTypesWithIncompatiblePrecisionAndScaleError(
leftPrecision: Int, rightPrecision: Int, leftScale: Int, rightScale: Int): Throwable = {
new SparkException("Failed to merge decimal types with incompatible " +
s"precision $leftPrecision and $rightPrecision & scale $leftScale and $rightScale")
}
def cannotMergeDecimalTypesWithIncompatiblePrecisionError(
leftPrecision: Int, rightPrecision: Int): Throwable = {
new SparkException("Failed to merge decimal types with incompatible " +
s"precision $leftPrecision and $rightPrecision")
}
def cannotMergeDecimalTypesWithIncompatibleScaleError(
leftScale: Int, rightScale: Int): Throwable = {
new SparkException("Failed to merge decimal types with incompatible " +
s"scale $leftScale and $rightScale")
}
def cannotMergeIncompatibleDataTypesError(left: DataType, right: DataType): Throwable = {
new SparkException(s"Failed to merge incompatible data types ${left.catalogString}" +
s" and ${right.catalogString}")
}
def exceedMapSizeLimitError(size: Int): Throwable = {
new RuntimeException(s"Unsuccessful attempt to build maps with $size elements " +
s"due to exceeding the map size limit ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.")
}
def duplicateMapKeyFoundError(key: Any): Throwable = {
new RuntimeException(s"Duplicate map key $key was found, please check the input " +
"data. If you want to remove the duplicated keys, you can set " +
s"${SQLConf.MAP_KEY_DEDUP_POLICY.key} to ${SQLConf.MapKeyDedupPolicy.LAST_WIN} so that " +
"the key inserted at last takes precedence.")
}
def mapDataKeyArrayLengthDiffersFromValueArrayLengthError(): Throwable = {
new RuntimeException("The key array and value array of MapData must have the same length.")
}
def fieldDiffersFromDerivedLocalDateError(
field: ChronoField, actual: Int, expected: Int, candidate: LocalDate): Throwable = {
new DateTimeException(s"Conflict found: Field $field $actual differs from" +
s" $field $expected derived from $candidate")
}
def failToParseDateTimeInNewParserError(s: String, e: Throwable): Throwable = {
new SparkUpgradeException("3.0", s"Fail to parse '$s' in the new parser. You can " +
s"set ${SQLConf.LEGACY_TIME_PARSER_POLICY.key} to LEGACY to restore the behavior " +
s"before Spark 3.0, or set to CORRECTED and treat it as an invalid datetime string.", e)
}
def failToFormatDateTimeInNewFormatterError(
resultCandidate: String, e: Throwable): Throwable = {
new SparkUpgradeException("3.0",
s"""
|Fail to format it to '$resultCandidate' in the new formatter. You can set
|${SQLConf.LEGACY_TIME_PARSER_POLICY.key} to LEGACY to restore the behavior before
|Spark 3.0, or set to CORRECTED and treat it as an invalid datetime string.
""".stripMargin.replaceAll("\\n", " "), e)
}
def failToRecognizePatternAfterUpgradeError(pattern: String, e: Throwable): Throwable = {
new SparkUpgradeException("3.0", s"Fail to recognize '$pattern' pattern in the" +
s" DateTimeFormatter. 1) You can set ${SQLConf.LEGACY_TIME_PARSER_POLICY.key} to LEGACY" +
s" to restore the behavior before Spark 3.0. 2) You can form a valid datetime pattern" +
s" with the guide from https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html",
e)
}
def failToRecognizePatternError(pattern: String, e: Throwable): Throwable = {
new RuntimeException(s"Fail to recognize '$pattern' pattern in the" +
" DateTimeFormatter. You can form a valid datetime pattern" +
" with the guide from https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html",
e)
}
def cannotCastToDateTimeError(value: Any, to: DataType): Throwable = {
new DateTimeException(s"Cannot cast $value to $to. To return NULL instead, use 'try_cast'. " +
s"If necessary set ${SQLConf.ANSI_ENABLED.key} to false to bypass this error.")
}
def registeringStreamingQueryListenerError(e: Exception): Throwable = {
new SparkException("Exception when registering StreamingQueryListener", e)
}
def concurrentQueryInstanceError(): Throwable = {
new SparkConcurrentModificationException("CONCURRENT_QUERY", Array.empty)
}
def cannotParseJsonArraysAsStructsError(): Throwable = {
new RuntimeException("Parsing JSON arrays as structs is forbidden.")
}
def cannotParseStringAsDataTypeError(parser: JsonParser, token: JsonToken, dataType: DataType)
: Throwable = {
new RuntimeException(
s"Cannot parse field name ${parser.getCurrentName}, " +
s"field value ${parser.getText}, " +
s"[$token] as target spark data type [$dataType].")
}
def cannotParseStringAsDataTypeError(pattern: String, value: String, dataType: DataType)
: Throwable = {
new RuntimeException(
s"Cannot parse field value ${value} for pattern ${pattern} " +
s"as target spark data type [$dataType].")
}
def failToParseEmptyStringForDataTypeError(dataType: DataType): Throwable = {
new RuntimeException(
s"Failed to parse an empty string for data type ${dataType.catalogString}")
}
def failToParseValueForDataTypeError(parser: JsonParser, token: JsonToken, dataType: DataType)
: Throwable = {
new RuntimeException(
s"Failed to parse field name ${parser.getCurrentName}, " +
s"field value ${parser.getText}, " +
s"[$token] to target spark data type [$dataType].")
}
def rootConverterReturnNullError(): Throwable = {
new RuntimeException("Root converter returned null")
}
def cannotHaveCircularReferencesInBeanClassError(clazz: Class[_]): Throwable = {
new UnsupportedOperationException(
"Cannot have circular references in bean class, but got the circular reference " +
s"of class $clazz")
}
def cannotHaveCircularReferencesInClassError(t: String): Throwable = {
new UnsupportedOperationException(
s"cannot have circular references in class, but got the circular reference of class $t")
}
def cannotUseInvalidJavaIdentifierAsFieldNameError(
fieldName: String, walkedTypePath: WalkedTypePath): Throwable = {
new UnsupportedOperationException(s"`$fieldName` is not a valid identifier of " +
s"Java and cannot be used as field name\\n$walkedTypePath")
}
def cannotFindEncoderForTypeError(
tpe: String, walkedTypePath: WalkedTypePath): Throwable = {
new UnsupportedOperationException(s"No Encoder found for $tpe\\n$walkedTypePath")
}
def attributesForTypeUnsupportedError(schema: Schema): Throwable = {
new UnsupportedOperationException(s"Attributes for type $schema is not supported")
}
def schemaForTypeUnsupportedError(tpe: String): Throwable = {
new UnsupportedOperationException(s"Schema for type $tpe is not supported")
}
def cannotFindConstructorForTypeError(tpe: String): Throwable = {
new UnsupportedOperationException(
s"""
|Unable to find constructor for $tpe.
|This could happen if $tpe is an interface, or a trait without companion object
|constructor.
""".stripMargin.replaceAll("\\n", " "))
}
def paramExceedOneCharError(paramName: String): Throwable = {
new RuntimeException(s"$paramName cannot be more than one character")
}
def paramIsNotIntegerError(paramName: String, value: String): Throwable = {
new RuntimeException(s"$paramName should be an integer. Found $value")
}
def paramIsNotBooleanValueError(paramName: String): Throwable = {
new Exception(s"$paramName flag can be true or false")
}
def foundNullValueForNotNullableFieldError(name: String): Throwable = {
new RuntimeException(s"null value found but field $name is not nullable.")
}
def malformedCSVRecordError(): Throwable = {
new RuntimeException("Malformed CSV record")
}
def elementsOfTupleExceedLimitError(): Throwable = {
new UnsupportedOperationException("Due to Scala's limited support of tuple, " +
"tuple with more than 22 elements are not supported.")
}
def expressionDecodingError(e: Exception, expressions: Seq[Expression]): Throwable = {
new RuntimeException(s"Error while decoding: $e\\n" +
s"${expressions.map(_.simpleString(SQLConf.get.maxToStringFields)).mkString("\\n")}", e)
}
def expressionEncodingError(e: Exception, expressions: Seq[Expression]): Throwable = {
new RuntimeException(s"Error while encoding: $e\\n" +
s"${expressions.map(_.simpleString(SQLConf.get.maxToStringFields)).mkString("\\n")}", e)
}
def classHasUnexpectedSerializerError(clsName: String, objSerializer: Expression): Throwable = {
new RuntimeException(s"class $clsName has unexpected serializer: $objSerializer")
}
def cannotGetOuterPointerForInnerClassError(innerCls: Class[_]): Throwable = {
new RuntimeException(s"Failed to get outer pointer for ${innerCls.getName}")
}
def userDefinedTypeNotAnnotatedAndRegisteredError(udt: UserDefinedType[_]): Throwable = {
new SparkException(s"${udt.userClass.getName} is not annotated with " +
"SQLUserDefinedType nor registered with UDTRegistration.}")
}
def invalidInputSyntaxForBooleanError(s: UTF8String): UnsupportedOperationException = {
new UnsupportedOperationException(s"invalid input syntax for type boolean: $s. " +
s"To return NULL instead, use 'try_cast'. If necessary set ${SQLConf.ANSI_ENABLED.key} " +
"to false to bypass this error.")
}
def unsupportedOperandTypeForSizeFunctionError(dataType: DataType): Throwable = {
new UnsupportedOperationException(
s"The size function doesn't support the operand type ${dataType.getClass.getCanonicalName}")
}
def unexpectedValueForStartInFunctionError(prettyName: String): RuntimeException = {
new RuntimeException(
s"Unexpected value for start in function $prettyName: SQL array indices start at 1.")
}
def unexpectedValueForLengthInFunctionError(prettyName: String): RuntimeException = {
new RuntimeException(s"Unexpected value for length in function $prettyName: " +
"length must be greater than or equal to 0.")
}
def sqlArrayIndexNotStartAtOneError(): ArrayIndexOutOfBoundsException = {
new ArrayIndexOutOfBoundsException("SQL array indices start at 1")
}
def concatArraysWithElementsExceedLimitError(numberOfElements: Long): Throwable = {
new RuntimeException(
s"""
|Unsuccessful try to concat arrays with $numberOfElements
|elements due to exceeding the array size limit
|${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.
""".stripMargin.replaceAll("\\n", " "))
}
def flattenArraysWithElementsExceedLimitError(numberOfElements: Long): Throwable = {
new RuntimeException(
s"""
|Unsuccessful try to flatten an array of arrays with $numberOfElements
|elements due to exceeding the array size limit
|${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.
""".stripMargin.replaceAll("\\n", " "))
}
def createArrayWithElementsExceedLimitError(count: Any): RuntimeException = {
new RuntimeException(
s"""
|Unsuccessful try to create array with $count elements
|due to exceeding the array size limit
|${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.
""".stripMargin.replaceAll("\\n", " "))
}
def unionArrayWithElementsExceedLimitError(length: Int): Throwable = {
new RuntimeException(
s"""
|Unsuccessful try to union arrays with $length
|elements due to exceeding the array size limit
|${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.
""".stripMargin.replaceAll("\\n", " "))
}
def initialTypeNotTargetDataTypeError(dataType: DataType, target: String): Throwable = {
new UnsupportedOperationException(s"Initial type ${dataType.catalogString} must be a $target")
}
def initialTypeNotTargetDataTypesError(dataType: DataType): Throwable = {
new UnsupportedOperationException(
s"Initial type ${dataType.catalogString} must be " +
s"an ${ArrayType.simpleString}, a ${StructType.simpleString} or a ${MapType.simpleString}")
}
def cannotConvertColumnToJSONError(name: String, dataType: DataType): Throwable = {
new UnsupportedOperationException(
s"Unable to convert column $name of type ${dataType.catalogString} to JSON.")
}
def malformedRecordsDetectedInSchemaInferenceError(e: Throwable): Throwable = {
new SparkException("Malformed records are detected in schema inference. " +
s"Parse Mode: ${FailFastMode.name}.", e)
}
def malformedJSONError(): Throwable = {
new SparkException("Malformed JSON")
}
def malformedRecordsDetectedInSchemaInferenceError(dataType: DataType): Throwable = {
new SparkException(
s"""
|Malformed records are detected in schema inference.
|Parse Mode: ${FailFastMode.name}. Reasons: Failed to infer a common schema.
|Struct types are expected, but `${dataType.catalogString}` was found.
""".stripMargin.replaceAll("\\n", " "))
}
def cannotRewriteDomainJoinWithConditionsError(
conditions: Seq[Expression], d: DomainJoin): Throwable = {
new IllegalStateException(
s"Unable to rewrite domain join with conditions: $conditions\\n$d")
}
def decorrelateInnerQueryThroughPlanUnsupportedError(plan: LogicalPlan): Throwable = {
new UnsupportedOperationException(
s"Decorrelate inner query through ${plan.nodeName} is not supported.")
}
def methodCalledInAnalyzerNotAllowedError(): Throwable = {
new RuntimeException("This method should not be called in the analyzer")
}
def cannotSafelyMergeSerdePropertiesError(
props1: Map[String, String],
props2: Map[String, String],
conflictKeys: Set[String]): Throwable = {
new UnsupportedOperationException(
s"""
|Cannot safely merge SERDEPROPERTIES:
|${props1.map { case (k, v) => s"$k=$v" }.mkString("{", ",", "}")}
|${props2.map { case (k, v) => s"$k=$v" }.mkString("{", ",", "}")}
|The conflict keys: ${conflictKeys.mkString(", ")}
|""".stripMargin)
}
def pairUnsupportedAtFunctionError(
r1: ValueInterval, r2: ValueInterval, function: String): Throwable = {
new UnsupportedOperationException(s"Not supported pair: $r1, $r2 at $function()")
}
def onceStrategyIdempotenceIsBrokenForBatchError[TreeType <: TreeNode[_]](
batchName: String, plan: TreeType, reOptimized: TreeType): Throwable = {
new RuntimeException(
s"""
|Once strategy's idempotence is broken for batch $batchName
|${sideBySide(plan.treeString, reOptimized.treeString).mkString("\\n")}
""".stripMargin)
}
def structuralIntegrityOfInputPlanIsBrokenInClassError(className: String): Throwable = {
new RuntimeException("The structural integrity of the input plan is broken in " +
s"$className.")
}
def structuralIntegrityIsBrokenAfterApplyingRuleError(
ruleName: String, batchName: String): Throwable = {
new RuntimeException(s"After applying rule $ruleName in batch $batchName, " +
"the structural integrity of the plan is broken.")
}
def ruleIdNotFoundForRuleError(ruleName: String): Throwable = {
new NoSuchElementException(s"Rule id not found for $ruleName")
}
def cannotCreateArrayWithElementsExceedLimitError(
numElements: Long, additionalErrorMessage: String): Throwable = {
new RuntimeException(
s"""
|Cannot create array with $numElements
|elements of data due to exceeding the limit
|${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH} elements for ArrayData.
|$additionalErrorMessage
""".stripMargin.replaceAll("\\n", " "))
}
def indexOutOfBoundsOfArrayDataError(idx: Int): Throwable = {
new SparkIndexOutOfBoundsException(errorClass = "INDEX_OUT_OF_BOUNDS", Array(idx.toString))
}
def malformedRecordsDetectedInRecordParsingError(e: BadRecordException): Throwable = {
new SparkException("Malformed records are detected in record parsing. " +
s"Parse Mode: ${FailFastMode.name}. To process malformed records as null " +
"result, try setting the option 'mode' as 'PERMISSIVE'.", e)
}
def remoteOperationsUnsupportedError(): Throwable = {
new RuntimeException("Remote operations not supported")
}
def invalidKerberosConfigForHiveServer2Error(): Throwable = {
new IOException(
"HiveServer2 Kerberos principal or keytab is not correctly configured")
}
def parentSparkUIToAttachTabNotFoundError(): Throwable = {
new SparkException("Parent SparkUI to attach this tab to not found!")
}
def inferSchemaUnsupportedForHiveError(): Throwable = {
new UnsupportedOperationException("inferSchema is not supported for hive data source.")
}
def requestedPartitionsMismatchTablePartitionsError(
table: CatalogTable, partition: Map[String, Option[String]]): Throwable = {
new SparkException(
s"""
|Requested partitioning does not match the ${table.identifier.table} table:
|Requested partitions: ${partition.keys.mkString(",")}
|Table partitions: ${table.partitionColumnNames.mkString(",")}
""".stripMargin)
}
def dynamicPartitionKeyNotAmongWrittenPartitionPathsError(key: String): Throwable = {
new SparkException(s"Dynamic partition key $key is not among written partition paths.")
}
def cannotRemovePartitionDirError(partitionPath: Path): Throwable = {
new RuntimeException(s"Cannot remove partition directory '$partitionPath'")
}
def cannotCreateStagingDirError(message: String, e: IOException): Throwable = {
new RuntimeException(s"Cannot create staging directory: $message", e)
}
def serDeInterfaceNotFoundError(e: NoClassDefFoundError): Throwable = {
new ClassNotFoundException("The SerDe interface removed since Hive 2.3(HIVE-15167)." +
" Please migrate your custom SerDes to Hive 2.3. See HIVE-15167 for more details.", e)
}
def convertHiveTableToCatalogTableError(
e: SparkException, dbName: String, tableName: String): Throwable = {
new SparkException(s"${e.getMessage}, db: $dbName, table: $tableName", e)
}
def cannotRecognizeHiveTypeError(
e: ParseException, fieldType: String, fieldName: String): Throwable = {
new SparkException(
s"Cannot recognize hive type string: $fieldType, column: $fieldName", e)
}
def getTablesByTypeUnsupportedByHiveVersionError(): Throwable = {
new UnsupportedOperationException("Hive 2.2 and lower versions don't support " +
"getTablesByType. Please use Hive 2.3 or higher version.")
}
def dropTableWithPurgeUnsupportedError(): Throwable = {
new UnsupportedOperationException("DROP TABLE ... PURGE")
}
def alterTableWithDropPartitionAndPurgeUnsupportedError(): Throwable = {
new UnsupportedOperationException("ALTER TABLE ... DROP PARTITION ... PURGE")
}
def invalidPartitionFilterError(): Throwable = {
new UnsupportedOperationException(
"""Partition filter cannot have both `"` and `'` characters""")
}
def getPartitionMetadataByFilterError(e: InvocationTargetException): Throwable = {
new RuntimeException(
s"""
|Caught Hive MetaException attempting to get partition metadata by filter
|from Hive. You can set the Spark configuration setting
|${SQLConf.HIVE_METASTORE_PARTITION_PRUNING_FALLBACK_ON_EXCEPTION.key} to true to work
|around this problem, however this will result in degraded performance. Please
|report a bug: https://issues.apache.org/jira/browse/SPARK
""".stripMargin.replaceAll("\\n", " "), e)
}
def unsupportedHiveMetastoreVersionError(version: String, key: String): Throwable = {
new UnsupportedOperationException(s"Unsupported Hive Metastore version ($version). " +
s"Please set $key with a valid version.")
}
def loadHiveClientCausesNoClassDefFoundError(
cnf: NoClassDefFoundError,
execJars: Seq[URL],
key: String,
e: InvocationTargetException): Throwable = {
new ClassNotFoundException(
s"""
|$cnf when creating Hive client using classpath: ${execJars.mkString(", ")}\\n
|Please make sure that jars for your version of hive and hadoop are included in the
|paths passed to $key.
""".stripMargin.replaceAll("\\n", " "), e)
}
def cannotFetchTablesOfDatabaseError(dbName: String, e: Exception): Throwable = {
new SparkException(s"Unable to fetch tables of db $dbName", e)
}
def illegalLocationClauseForViewPartitionError(): Throwable = {
new SparkException("LOCATION clause illegal for view partition")
}
def renamePathAsExistsPathError(srcPath: Path, dstPath: Path): Throwable = {
new SparkFileAlreadyExistsException(errorClass = "FAILED_RENAME_PATH",
Array(srcPath.toString, dstPath.toString))
}
def renameAsExistsPathError(dstPath: Path): Throwable = {
new FileAlreadyExistsException(s"Failed to rename as $dstPath already exists")
}
def renameSrcPathNotFoundError(srcPath: Path): Throwable = {
new SparkFileNotFoundException(errorClass = "RENAME_SRC_PATH_NOT_FOUND",
Array(srcPath.toString))
}
def failedRenameTempFileError(srcPath: Path, dstPath: Path): Throwable = {
new IOException(s"Failed to rename temp file $srcPath to $dstPath as rename returned false")
}
  /**
   * Error raised when a streaming query's "_spark_metadata" directory is found
   * at a legacy (incorrectly escaped) location as well as the correct one.
   * The long message walks the user through moving the data and names the
   * SQL conf that disables this check. Kept as a single multi-line literal;
   * do not reflow — it is user-facing text.
   *
   * @param metadataPath the correctly escaped "_spark_metadata" path
   * @param legacyMetadataPath the legacy, incorrectly escaped path
   */
  def legacyMetadataPathExistsError(metadataPath: Path, legacyMetadataPath: Path): Throwable = {
    new SparkException(
      s"""
         |Error: we detected a possible problem with the location of your "_spark_metadata"
         |directory and you likely need to move it before restarting this query.
         |
         |Earlier version of Spark incorrectly escaped paths when writing out the
         |"_spark_metadata" directory for structured streaming. While this was corrected in
         |Spark 3.0, it appears that your query was started using an earlier version that
         |incorrectly handled the "_spark_metadata" path.
         |
         |Correct "_spark_metadata" Directory: $metadataPath
         |Incorrect "_spark_metadata" Directory: $legacyMetadataPath
         |
         |Please move the data from the incorrect directory to the correct one, delete the
         |incorrect directory, and then restart this query. If you believe you are receiving
         |this message in error, you can disable it with the SQL conf
         |${SQLConf.STREAMING_CHECKPOINT_ESCAPED_PATH_CHECK_ENABLED.key}.
       """.stripMargin)
  }
def partitionColumnNotFoundInSchemaError(col: String, schema: StructType): Throwable = {
new RuntimeException(s"Partition column $col not found in schema $schema")
}
def stateNotDefinedOrAlreadyRemovedError(): Throwable = {
new NoSuchElementException("State is either not defined or has already been removed")
}
def cannotSetTimeoutDurationError(): Throwable = {
new UnsupportedOperationException(
"Cannot set timeout duration without enabling processing time timeout in " +
"[map|flatMap]GroupsWithState")
}
def cannotGetEventTimeWatermarkError(): Throwable = {
new UnsupportedOperationException(
"Cannot get event time watermark timestamp without setting watermark before " +
"[map|flatMap]GroupsWithState")
}
def cannotSetTimeoutTimestampError(): Throwable = {
new UnsupportedOperationException(
"Cannot set timeout timestamp without enabling event time timeout in " +
"[map|flatMapGroupsWithState")
}
// Error when a streaming batch's metadata file cannot be located.
def batchMetadataFileNotFoundError(batchMetadataFile: Path): Throwable = {
new FileNotFoundException(s"Unable to find batch $batchMetadataFile")
}
// Error when multiple streaming queries write to the same path at the same time.
def multiStreamingQueriesUsingPathConcurrentlyError(
path: String, e: FileAlreadyExistsException): Throwable = {
new ConcurrentModificationException(
s"Multiple streaming queries are concurrently using $path", e)
}
// Error when a commit protocol is asked to add files via an absolute path.
def addFilesWithAbsolutePathUnsupportedError(commitProtocol: String): Throwable = {
new UnsupportedOperationException(
s"$commitProtocol does not support adding files with an absolute path")
}
// Error when a data source does not support micro-batch processing.
def microBatchUnsupportedByDataSourceError(srcName: String): Throwable = {
new UnsupportedOperationException(
s"Data source $srcName does not support microbatch processing.")
}
// Error when StreamingRelationExec.execute() is invoked.
def cannotExecuteStreamingRelationExecError(): Throwable = {
new UnsupportedOperationException("StreamingRelationExec cannot be executed")
}
// Error for an invalid (or missing) streaming output mode.
def invalidStreamingOutputModeError(outputMode: Option[OutputMode]): Throwable = {
new UnsupportedOperationException(s"Invalid output mode: $outputMode")
}
// Error when no plugin class is configured under spark.sql.catalog.<name>.
def catalogPluginClassNotFoundError(name: String): Throwable = {
new CatalogNotFoundException(
s"Catalog '$name' plugin class not found: spark.sql.catalog.$name is not defined")
}
// Error when the configured plugin class does not implement CatalogPlugin.
def catalogPluginClassNotImplementedError(name: String, pluginClassName: String): Throwable = {
new SparkException(
s"Plugin class for catalog '$name' does not implement CatalogPlugin: $pluginClassName")
}
// Error when the configured catalog plugin class cannot be found/loaded.
def catalogPluginClassNotFoundForCatalogError(
name: String,
pluginClassName: String): Throwable = {
new SparkException(s"Cannot find catalog plugin class for catalog '$name': $pluginClassName")
}
/**
 * Error when a catalog plugin class has no public no-arg constructor.
 *
 * Fix: removed a stray trailing ")" that unbalanced the original message text
 * ("... $pluginClassName)").
 */
def catalogFailToFindPublicNoArgConstructorError(
    name: String,
    pluginClassName: String,
    e: Exception): Throwable = {
  new SparkException(
    s"Failed to find public no-arg constructor for catalog '$name': $pluginClassName", e)
}
/**
 * Error when invoking a catalog plugin's public no-arg constructor fails.
 *
 * Fix: removed a stray trailing ")" that unbalanced the original message text
 * ("... $pluginClassName)").
 */
def catalogFailToCallPublicNoArgConstructorError(
    name: String,
    pluginClassName: String,
    e: Exception): Throwable = {
  new SparkException(
    s"Failed to call public no-arg constructor for catalog '$name': $pluginClassName", e)
}
// Error when the configured catalog plugin class is abstract.
// NOTE(review): passes e.getCause (not e) as the cause -- presumably unwrapping a
// reflective InvocationTargetException; confirm against the call site.
def cannotInstantiateAbstractCatalogPluginClassError(
name: String,
pluginClassName: String,
e: Exception): Throwable = {
new SparkException("Cannot instantiate abstract catalog plugin class for " +
s"catalog '$name': $pluginClassName", e.getCause)
}
// Error when the catalog plugin's constructor throws during instantiation.
// NOTE(review): also forwards e.getCause rather than e -- same assumption as above.
def failedToInstantiateConstructorForCatalogError(
name: String,
pluginClassName: String,
e: Exception): Throwable = {
new SparkException("Failed during instantiating constructor for catalog " +
s"'$name': $pluginClassName", e.getCause)
}
// Generic no-such-element error without a message.
def noSuchElementExceptionError(): Throwable = {
new NoSuchElementException
}
// No-such-element error carrying the missing key as the message.
def noSuchElementExceptionError(key: String): Throwable = {
new NoSuchElementException(key)
}
// Error when a mutation is attempted on a ReadOnlySQLConf.
def cannotMutateReadOnlySQLConfError(): Throwable = {
new UnsupportedOperationException("Cannot mutate ReadOnlySQLConf.")
}
// Error when clone/copy is attempted on a ReadOnlySQLConf.
def cannotCloneOrCopyReadOnlySQLConfError(): Throwable = {
new UnsupportedOperationException("Cannot clone/copy ReadOnlySQLConf.")
}
// Error when SQLConf is accessed from the scheduler event loop thread.
def cannotGetSQLConfInSchedulerEventLoopThreadError(): Throwable = {
new RuntimeException("Cannot get SQLConf inside scheduler event loop thread.")
}
// Generic unsupported-operation error without a message.
def unsupportedOperationExceptionError(): Throwable = {
new UnsupportedOperationException
}
// Error when a null literal cannot be cast to the named type.
def nullLiteralsCannotBeCastedError(name: String): Throwable = {
new UnsupportedOperationException(s"null literals can't be casted to $name")
}
// Error when a class is not a registered UserDefinedType.
def notUserDefinedTypeError(name: String, userClass: String): Throwable = {
new SparkException(s"$name is not an UserDefinedType. Please make sure registering " +
s"an UserDefinedType for ${userClass}")
}
// Error when a UserDefinedType class fails to load.
def cannotLoadUserDefinedTypeError(name: String, userClass: String): Throwable = {
new SparkException(s"Can not load in UserDefinedType ${name} for user class ${userClass}.")
}
// Error when a timestamp operation is missing the required timeZoneId.
def timeZoneIdNotSpecifiedForTimestampTypeError(): Throwable = {
new UnsupportedOperationException(
s"${TimestampType.catalogString} must supply timeZoneId parameter")
}
// Error when a non-public class is used where only public classes are supported.
def notPublicClassError(name: String): Throwable = {
new UnsupportedOperationException(
s"$name is not a public class. Only public classes are supported.")
}
// Error for unsupported primitive types.
def primitiveTypesNotSupportedError(): Throwable = {
new UnsupportedOperationException("Primitive types are not supported.")
}
// Error when fieldIndex is called on a Row that has no schema.
def fieldIndexOnRowWithoutSchemaError(): Throwable = {
new UnsupportedOperationException("fieldIndex on a Row without schema is undefined.")
}
// Error when a null value is encountered at the given index.
def valueIsNullError(index: Int): Throwable = {
new NullPointerException(s"Value at index $index is null")
}
// Error when the data source's providing class is not a FileFormat.
def onlySupportDataSourcesProvidingFileFormatError(providingClass: String): Throwable = {
new SparkException(s"Only Data Sources providing FileFormat are supported: $providingClass")
}
// Error (with error class) when restoring a path's original permission fails.
def failToSetOriginalPermissionBackError(
permission: FsPermission,
path: Path,
e: Throwable): Throwable = {
new SparkSecurityException(errorClass = "FAILED_SET_ORIGINAL_PERMISSION_BACK",
Array(permission.toString, path.toString, e.getMessage))
}
// Error when restoring a path's original ACL entries fails.
def failToSetOriginalACLBackError(aclEntries: String, path: Path, e: Throwable): Throwable = {
new SecurityException(s"Failed to set original ACL $aclEntries back to " +
s"the created path: $path. Exception: ${e.getMessage}")
}
// Error aggregating multiple stage-materialization failures.
def multiFailuresInStageMaterializationError(error: Throwable): Throwable = {
new SparkException("Multiple failures in stage materialization.", error)
}
// Error for an unknown compression scheme type ID.
def unrecognizedCompressionSchemaTypeIDError(typeId: Int): Throwable = {
new UnsupportedOperationException(s"Unrecognized compression scheme type ID: $typeId")
}
// JDBC error: getParentLogger is not implemented by the named class.
def getParentLoggerNotImplementedError(className: String): Throwable = {
new SQLFeatureNotSupportedException(s"$className.getParentLogger is not yet implemented.")
}
// Error when a Parquet converter cannot be built for a decimal column lacking
// decimal metadata; the message advises reading the column as BINARY.
def cannotCreateParquetConverterForTypeError(t: DecimalType, parquetType: String): Throwable = {
new RuntimeException(
s"""
|Unable to create Parquet converter for ${t.typeName}
|whose Parquet type is $parquetType without decimal metadata. Please read this
|column/field as Spark BINARY type.
""".stripMargin.replaceAll("\\n", " "))
}
// Error when a decimal column's Parquet physical type is not one of the
// supported backing types.
def cannotCreateParquetConverterForDecimalTypeError(
t: DecimalType, parquetType: String): Throwable = {
new RuntimeException(
s"""
|Unable to create Parquet converter for decimal type ${t.json} whose Parquet type is
|$parquetType. Parquet DECIMAL type can only be backed by INT32, INT64,
|FIXED_LEN_BYTE_ARRAY, or BINARY.
""".stripMargin.replaceAll("\\n", " "))
}
// Error when no Parquet converter exists for the given Catalyst data type.
def cannotCreateParquetConverterForDataTypeError(
t: DataType, parquetType: String): Throwable = {
new RuntimeException(s"Unable to create Parquet converter for data type ${t.json} " +
s"whose Parquet type is $parquetType")
}
// Error: a non-atomic partition table cannot add several partitions at once.
def cannotAddMultiPartitionsOnNonatomicPartitionTableError(tableName: String): Throwable = {
new UnsupportedOperationException(
s"Nonatomic partition table $tableName can not add multiple partitions.")
}
// Error when a TableProvider does not accept a user-specified schema.
def userSpecifiedSchemaUnsupportedByDataSourceError(provider: TableProvider): Throwable = {
new UnsupportedOperationException(
s"${provider.getClass.getSimpleName} source does not support user-specified schema.")
}
// Error: a non-atomic partition table cannot drop several partitions at once.
def cannotDropMultiPartitionsOnNonatomicPartitionTableError(tableName: String): Throwable = {
new UnsupportedOperationException(
s"Nonatomic partition table $tableName can not drop multiple partitions.")
}
// Error when truncating multiple partitions is not supported by the table.
def truncateMultiPartitionUnsupportedError(tableName: String): Throwable = {
new UnsupportedOperationException(
s"The table $tableName does not support truncation of multiple partition.")
}
// Error when a table does not support overwrite-by-expression.
def overwriteTableByUnsupportedExpressionError(table: Table): Throwable = {
new SparkException(s"Table does not support overwrite by expression: $table")
}
// Error when a table does not support dynamic partition overwrite.
def dynamicPartitionOverwriteUnsupportedByTableError(table: Table): Throwable = {
new SparkException(s"Table does not support dynamic partition overwrite: $table")
}
// Error wrapping a schema-merge failure; includes the schema's tree rendering.
def failedMergingSchemaError(schema: StructType, e: SparkException): Throwable = {
new SparkException(s"Failed merging schema:\\n${schema.treeString}", e)
}
// Error when the broadcast row-count limit is exceeded.
def cannotBroadcastTableOverMaxTableRowsError(
maxBroadcastTableRows: Long, numRows: Long): Throwable = {
new SparkException(
s"Cannot broadcast the table over $maxBroadcastTableRows rows: $numRows rows")
}
// Error when the broadcast byte-size limit is exceeded (sizes reported in GB).
def cannotBroadcastTableOverMaxTableBytesError(
maxBroadcastTableBytes: Long, dataSize: Long): Throwable = {
new SparkException("Cannot broadcast the table that is larger than" +
s" ${maxBroadcastTableBytes >> 30}GB: ${dataSize >> 30} GB")
}
// OOM raised while building/broadcasting a table; the message suggests disabling
// broadcast joins or raising driver memory. Preserves the original cause chain.
def notEnoughMemoryToBuildAndBroadcastTableError(oe: OutOfMemoryError): Throwable = {
new OutOfMemoryError("Not enough memory to build and broadcast the table to all " +
"worker nodes. As a workaround, you can either disable broadcast by setting " +
s"${SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key} to -1 or increase the spark " +
s"driver memory by setting ${SparkLauncher.DRIVER_MEMORY} to a higher value.")
.initCause(oe.getCause)
}
// Error when a physical operator does not support the execute() code path.
def executeCodePathUnsupportedError(execName: String): Throwable = {
new UnsupportedOperationException(s"$execName does not support the execute() code path.")
}
// Error when two incompatible classes cannot be merged.
def cannotMergeClassWithOtherClassError(className: String, otherClass: String): Throwable = {
new UnsupportedOperationException(
s"Cannot merge $className with $otherClass")
}
// Error when a data source does not support continuous processing.
def continuousProcessingUnsupportedByDataSourceError(sourceName: String): Throwable = {
new UnsupportedOperationException(
s"Data source $sourceName does not support continuous processing.")
}
// Error wrapping a data-read failure with its root cause.
def failedToReadDataError(failureReason: Throwable): Throwable = {
new SparkException("Data read failed", failureReason)
}
// Error wrapping an epoch-marker generation failure (continuous processing).
def failedToGenerateEpochMarkerError(failureReason: Throwable): Throwable = {
new SparkException("Epoch marker generation failed", failureReason)
}
// Error when a foreach writer is aborted because its task failed.
def foreachWriterAbortedDueToTaskFailureError(): Throwable = {
new SparkException("Foreach writer has been aborted due to a task failure")
}
// Arithmetic error for integer overflow, with caller-supplied detail.
def integerOverflowError(message: String): Throwable = {
new ArithmeticException(s"Integer overflow. $message")
}
// I/O error when a state-store delta file contains an invalid key size.
def failedToReadDeltaFileError(fileToRead: Path, clazz: String, keySize: Int): Throwable = {
new IOException(
s"Error reading delta file $fileToRead of $clazz: key size cannot be $keySize")
}
// I/O error when reading a state-store snapshot file fails.
def failedToReadSnapshotFileError(fileToRead: Path, clazz: String, message: String): Throwable = {
new IOException(s"Error reading snapshot file $fileToRead of $clazz: $message")
}
// Error when purging would break internal state.
def cannotPurgeAsBreakInternalStateError(): Throwable = {
new UnsupportedOperationException("Cannot purge as it might break internal state.")
}
// Error when source-file cleanup is requested while reading a FileStreamSink output.
def cleanUpSourceFilesUnsupportedError(): Throwable = {
new UnsupportedOperationException("Clean up source files is not supported when" +
" reading from the output directory of FileStreamSink.")
}
// Error when the no-arg latestOffset() is called instead of latestOffset(Offset, ReadLimit).
def latestOffsetNotCalledError(): Throwable = {
new UnsupportedOperationException(
"latestOffset(Offset, ReadLimit) should be called instead of this method")
}
// Error raised when a pre-Spark-3.0 incorrectly-escaped checkpoint directory is
// detected; the message instructs the user to relocate the checkpoint or disable
// the check via the named SQL conf.
def legacyCheckpointDirectoryExistsError(
checkpointPath: Path, legacyCheckpointDir: String): Throwable = {
new SparkException(
s"""
|Error: we detected a possible problem with the location of your checkpoint and you
|likely need to move it before restarting this query.
|
|Earlier version of Spark incorrectly escaped paths when writing out checkpoints for
|structured streaming. While this was corrected in Spark 3.0, it appears that your
|query was started using an earlier version that incorrectly handled the checkpoint
|path.
|
|Correct Checkpoint Directory: $checkpointPath
|Incorrect Checkpoint Directory: $legacyCheckpointDir
|
|Please move the data from the incorrect directory to the correct one, delete the
|incorrect directory, and then restart this query. If you believe you are receiving
|this message in error, you can disable it with the SQL conf
|${SQLConf.STREAMING_CHECKPOINT_ESCAPED_PATH_CHECK_ENABLED.key}.
""".stripMargin)
}
// Error when a script-transform subprocess exits non-zero; includes captured stderr.
def subprocessExitedError(
exitCode: Int, stderrBuffer: CircularBuffer, cause: Throwable): Throwable = {
new SparkException(s"Subprocess exited with status $exitCode. " +
s"Error: ${stderrBuffer.toString}", cause)
}
// Error when a serde-less node cannot emit the requested output data type.
def outputDataTypeUnsupportedByNodeWithoutSerdeError(
nodeName: String, dt: DataType): Throwable = {
new SparkException(s"$nodeName without serde does not support " +
s"${dt.getClass.getSimpleName} as output data type")
}
// Error for an out-of-range startIndex when iterating an array.
def invalidStartIndexError(numRows: Int, startIndex: Int): Throwable = {
new ArrayIndexOutOfBoundsException(
"Invalid `startIndex` provided for generating iterator over the array. " +
s"Total elements: $numRows, requested `startIndex`: $startIndex")
}
// Error when the backing row array is modified while being iterated.
def concurrentModificationOnExternalAppendOnlyUnsafeRowArrayError(
className: String): Throwable = {
new ConcurrentModificationException(
s"The backing $className has been modified since the creation of this Iterator")
}
// Error when a node does not implement doExecuteBroadcast.
def doExecuteBroadcastNotImplementedError(nodeName: String): Throwable = {
new UnsupportedOperationException(s"$nodeName does not implement doExecuteBroadcast")
}
// Error when a user database name collides with the global temp database.
def databaseNameConflictWithSystemPreservedDatabaseError(globalTempDB: String): Throwable = {
new SparkException(
s"""
|$globalTempDB is a system preserved database, please rename your existing database
|to resolve the name conflict, or set a different value for
|${GLOBAL_TEMP_DATABASE.key}, and launch your Spark application again.
""".stripMargin.split("\\n").mkString(" "))
}
/** Error for an unsupported `COMMENT ON TABLE` operation. */
def commentOnTableUnsupportedError(): Throwable =
  new SQLFeatureNotSupportedException("comment on table is not supported")

/** Error for an unsupported UpdateColumnNullability operation. */
def unsupportedUpdateColumnNullabilityError(): Throwable =
  new SQLFeatureNotSupportedException("UpdateColumnNullability is not supported")

/** Error when renaming a column against a MySQL server older than 8.0. */
def renameColumnUnsupportedForOlderMySQLError(): Throwable = {
  val msg = "Rename column is only supported for MySQL version 8.0 and above."
  new SQLFeatureNotSupportedException(msg)
}

/**
 * Wraps a query-execution failure, appending the cause's message when it has one.
 */
def failedToExecuteQueryError(e: Throwable): QueryExecutionException = {
  val detail = Option(e.getMessage).fold("")(m => s": $m")
  new QueryExecutionException("Hit an error when executing a query" + detail, e)
}

/** Error for an unsupported nested-field reference. */
def nestedFieldUnsupportedError(colName: String): Throwable =
  new UnsupportedOperationException(s"Nested field $colName is not supported.")
// Error when a Dataset transformation/action is invoked off the driver
// (e.g. nested inside another transformation); see SPARK-28702.
def transformationsAndActionsNotInvokedByDriverError(): Throwable = {
new SparkException(
"""
|Dataset transformations and actions can only be invoked by the driver, not inside of
|other Dataset transformations; for example, dataset1.map(x => dataset2.values.count()
|* x) is invalid because the values transformation and count action cannot be
|performed inside of the dataset1.map transformation. For more information,
|see SPARK-28702.
""".stripMargin.split("\\n").mkString(" "))
}
// Error when pivot is applied more than once.
def repeatedPivotsUnsupportedError(): Throwable = {
new UnsupportedOperationException("repeated pivots are not supported")
}
// Error when pivot is used anywhere other than directly after a groupBy.
def pivotNotAfterGroupByUnsupportedError(): Throwable = {
new UnsupportedOperationException("pivot is only supported after a groupBy")
}
// Error for an AES key whose length is not 16, 24 or 32 bytes.
def invalidAesKeyLengthError(actualLength: Int): RuntimeException = {
new RuntimeException("The key length of aes_encrypt/aes_decrypt should be " +
s"one of 16, 24 or 32 bytes, but got: $actualLength")
}
// Error for an unsupported AES mode/padding combination.
def aesModeUnsupportedError(mode: String, padding: String): RuntimeException = {
new UnsupportedOperationException(
s"The AES mode $mode with the padding $padding is not supported")
}
// Error when a Hive table uses ANSI interval types.
def hiveTableWithAnsiIntervalsError(tableName: String): Throwable = {
new UnsupportedOperationException(s"Hive table $tableName with ANSI intervals is not supported")
}
// Error when an ORC timestamp cannot be read as timestamp_ntz.
def cannotConvertOrcTimestampToTimestampNTZError(): Throwable = {
new RuntimeException("Unable to convert timestamp of Orc to data type 'timestamp_ntz'")
}
// Error when a dynamic-partition write creates more partitions than allowed;
// the message names the conf key to raise.
def writePartitionExceedConfigSizeWhenDynamicPartitionError(
numWrittenParts: Int,
maxDynamicPartitions: Int,
maxDynamicPartitionsKey: String): Throwable = {
new SparkException(
s"Number of dynamic partitions created is $numWrittenParts" +
s", which is more than $maxDynamicPartitions" +
s". To solve this try to set $maxDynamicPartitionsKey" +
s" to at least $numWrittenParts.")
}
// Error for an invalid number format string (to_number/to_char style).
def invalidNumberFormatError(format: String): Throwable = {
new IllegalArgumentException(
s"Format '$format' used for parsing string to number or " +
"formatting number to string is invalid")
}
}
| holdenk/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala | Scala | apache-2.0 | 80,349 |
package com.atomist.rug.runtime.plans
import com.atomist.param.SimpleParameterValues
import com.atomist.project.archive.RugResolver
import com.atomist.project.edit._
import com.atomist.project.generate.ProjectGenerator
import com.atomist.rug.runtime._
import com.atomist.rug.runtime.js.RugContext
import com.atomist.rug.spi.Handlers.Instruction._
import com.atomist.rug.spi.Handlers.Status.{Failure, Success}
import com.atomist.rug.spi.Handlers.{Instruction, Response, Status}
import com.atomist.rug.spi.{Body, RugFunctionRegistry}
import com.atomist.rug.{BadPlanException, BadRugFunctionResponseException}
import org.slf4j.{Logger, LoggerFactory}
import scala.util.{Try, Failure => ScalaFailure, Success => ScalaSuccess}
/**
* Run instructions synchronously in this JVM
*
* TODO - ensure we blow up if there are rugs or instructions with duplicate names
* and don't have different GA's
*/
// Runs plan instructions synchronously in the current JVM.
// Execute instructions dispatch to a RugFunction from the registry; all other
// instructions are resolved to a Rug via the optional RugResolver and dispatched
// by rug type (generator / editor / command handler / response handler).
class LocalInstructionRunner(currentRug: Rug,
projectManagement: ProjectManagement,
rugContext: RugContext,
secretResolver: SecretResolver,
rugFunctionRegistry: RugFunctionRegistry = DefaultRugFunctionRegistry,
rugResolver: Option[RugResolver] = None,
loggerOption: Option[Logger] = None)
extends InstructionRunner {
// Falls back to a logger named after this class when none is supplied.
private val logger: Logger = loggerOption.getOrElse(LoggerFactory getLogger getClass.getName)
// Invokes `action` with the instruction's project name; fails fast with a
// BadPlanException when the instruction carries no project name.
private def doWithProjectName(instruction: Instruction, action: (String) => Response) = {
instruction.detail.projectName match {
case Some(projectName) => action(projectName)
case _ => throw new BadPlanException(s"Project name required for $instruction.")
}
}
// Executes a single instruction, returning a Response describing the outcome.
// `callbackInput` carries the previous Response for ResponseHandler instructions.
override def run(instruction: Instruction, callbackInput: Option[Response]): Response = {
val parameters = SimpleParameterValues(instruction.detail.parameters)
instruction match {
case Execute(detail) =>
rugFunctionRegistry.find(detail.name) match {
case Some(fn) =>
// Substitute secret tokens in the parameters, then resolve the
// function's declared secrets and merge them in.
val replaced = secretResolver.replaceSecretTokens(detail.parameters)
val resolved = SimpleParameterValues(replaced ++ secretResolver.resolveSecrets(fn.secrets))
Try {
fn.run(resolved)
} match {
case ScalaSuccess(response) =>
//ensure the body is String or byte[]!
val thedata = response.body match {
case Some(Body(Some(str), None)) => Some(str)
case Some(Body(None, Some(bytes))) => Some(bytes)
case Some(Body(_, _)) => throw new BadRugFunctionResponseException(s"Function `${fn.name}` should return a string body or a byte array, but not both")
case _ => None
}
Response(response.status, response.msg, response.code, thedata)
case ScalaFailure(throwaball) =>
// Function threw: log and surface the failure, attaching the Throwable.
val msg = s"Rug Function ${detail.name} threw exception: ${throwaball.getMessage}"
logger.warn(msg, throwaball)
Response(Status.Failure, Some(msg), None, Some(throwaball))
}
case _ =>
val msg = s"Cannot find Rug Function ${detail.name}"
logger.warn(msg)
Response(Status.Failure, Some(msg), None, None)
}
case _ =>
// Non-Execute instructions require a resolver to look up the target rug.
rugResolver match {
case Some(resolver) =>
resolver.resolve(currentRug, extractName(instruction.detail)) match {
case Some(rug: ProjectGenerator) =>
doWithProjectName(instruction, (projectName: String) => {
val as = projectManagement.generate(rug, parameters, projectName)
if (as != null) Response(Success)
else Response(Failure, Some(s"failed to run generator ${rug.name} to create $projectName"))
})
case Some(rug: ProjectEditor) =>
doWithProjectName(instruction, (projectName: String) => {
projectManagement.edit(rug, parameters, projectName, instruction.detail.editorTarget) match {
case _: SuccessfulModification => Response(Success)
case success: NoModificationNeeded => Response(Success, Some(success.comment))
case failure: FailedModificationAttempt => Response(Failure, Some(failure.failureExplanation))
}
})
case Some(rug: CommandHandler) =>
val planOption = rug.handle(rugContext, parameters)
Response(Success, None, None, planOption)
case Some(rug: ResponseHandler) =>
// Response handlers need the previous instruction's Response.
callbackInput match {
case Some(response) =>
val planOption = rug.handle(rugContext, response, parameters)
Response(Success, None, None, planOption)
case c =>
throw new BadPlanException(s"Callback input was not recognized: $c")
}
case Some(rug) => throw new BadPlanException(s"Unrecognized rug type: $rug")
case None => throw new BadPlanException(s"Could not find rug with name: ${instruction.detail.name}")
}
case _ => throw new IllegalArgumentException(s"Could not find rug with name: ${instruction.detail.name} because no RugResolver supplied")
}
}
}
/**
* Convert Instruction.Detail name/coords to a string for resolver.
* Produces "group:artifact:name" when coordinates are present, otherwise the bare name.
*/
def extractName(detail: Instruction.Detail): String = {
detail.coordinates match {
case Some(coords) =>
s"${coords.group}:${coords.artifact}:${detail.name}"
case _ => detail.name
}
}
}
| atomist/rug | src/main/scala/com/atomist/rug/runtime/plans/LocalInstructionRunner.scala | Scala | gpl-3.0 | 5,762 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.manual
import java.io._
import java.util.Random
import org.apache.flink.api.common.ExecutionConfig
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.common.typeutils.CompositeType
import org.apache.flink.api.java.typeutils.runtime.RuntimeSerializerFactory
import org.apache.flink.api.scala._
import org.apache.flink.runtime.io.disk.iomanager.IOManagerAsync
import org.apache.flink.runtime.memory.MemoryManager
import org.apache.flink.runtime.operators.sort.UnilateralSortMerger
import org.apache.flink.runtime.operators.testutils.DummyInvokable
import org.apache.flink.util.{MutableObjectIterator, TestLogger}
import org.junit.Assert._
/**
* This test is written as a manual test.
*/
class MassiveCaseClassSortingITCase extends TestLogger {
// Fixed seed so the generated input is reproducible across runs.
val SEED : Long = 347569784659278346L
// Generates NUM_STRINGS tuple lines, sorts them externally with the system
// `sort` (LC_ALL=C for byte order), sorts the same data with Flink's
// UnilateralSortMerger, and verifies both orderings agree record by record.
// NOTE(review): the catch block below only logs failures instead of failing
// the test -- confirm this is intended for a manual test.
def testStringTuplesSorting() {
val NUM_STRINGS = 3000000
var input: File = null
var sorted: File = null
try {
input = generateFileWithStringTuples(NUM_STRINGS,
"http://some-uri.com/that/is/a/common/prefix/to/all")
sorted = File.createTempFile("sorted_strings", "txt")
// Reference ordering: shell out to POSIX sort with C collation.
val command = Array("/bin/bash", "-c", "export LC_ALL=\\"C\\" && cat \\""
+ input.getAbsolutePath + "\\" | sort > \\"" + sorted.getAbsolutePath + "\\"")
var p: Process = null
try {
p = Runtime.getRuntime.exec(command)
val retCode = p.waitFor()
if (retCode != 0) {
throw new Exception("Command failed with return code " + retCode)
}
p = null
}
finally {
if (p != null) {
p.destroy()
}
}
var sorter: UnilateralSortMerger[StringTuple] = null
var reader: BufferedReader = null
var verifyReader: BufferedReader = null
try {
reader = new BufferedReader(new FileReader(input))
val inputIterator = new StringTupleReader(reader)
// Build serializer/comparator for StringTuple from its Scala type info,
// sorting ascending on the two key fields.
val typeInfo = implicitly[TypeInformation[StringTuple]]
.asInstanceOf[CompositeType[StringTuple]]
val serializer = typeInfo.createSerializer(new ExecutionConfig)
val comparator = typeInfo.createComparator(
Array(0, 1),
Array(true, true),
0,
new ExecutionConfig)
val mm = new MemoryManager(1024 * 1024, 1)
val ioMan = new IOManagerAsync()
sorter = new UnilateralSortMerger[StringTuple](mm, ioMan, inputIterator,
new DummyInvokable(),
new RuntimeSerializerFactory[StringTuple](serializer, classOf[StringTuple]),
comparator, 1.0, 4, 0.8f, true /*use large record handler*/, false)
val sortedData = sorter.getIterator
reader.close()
// Compare Flink's sorted stream against the shell-sorted reference file.
verifyReader = new BufferedReader(new FileReader(sorted))
val verifyIterator = new StringTupleReader(verifyReader)
var num = 0
var hasMore = true
while (hasMore) {
val next = verifyIterator.next(null)
if (next != null ) {
num += 1
val nextFromFlinkSort = sortedData.next(null)
assertNotNull(nextFromFlinkSort)
assertEquals(next.key1, nextFromFlinkSort.key1)
assertEquals(next.key2, nextFromFlinkSort.key2)
// assert array equals does not work here
assertEquals(next.value.length, nextFromFlinkSort.value.length)
for (i <- 0 until next.value.length) {
assertEquals(next.value(i), nextFromFlinkSort.value(i))
}
}
else {
hasMore = false
}
}
// Flink's iterator must be exhausted and yield exactly NUM_STRINGS records.
assertNull(sortedData.next(null))
assertEquals(NUM_STRINGS, num)
}
finally {
if (reader != null) {
reader.close()
}
if (verifyReader != null) {
verifyReader.close()
}
if (sorter != null) {
sorter.close()
}
}
}
catch {
case e: Exception => {
System.err.println(e.getMessage)
e.printStackTrace()
e.getMessage
}
}
finally {
if (input != null) {
input.delete()
}
if (sorted != null) {
sorted.delete()
}
}
}
// Writes `numStrings` lines to a temp file; each line has 2-6 space-separated
// components, every component starting with `prefix` followed by 10-29 random
// printable characters. Returns the temp file.
private def generateFileWithStringTuples(numStrings: Int, prefix: String): File = {
val rnd = new Random(SEED)
val bld = new StringBuilder()
val f = File.createTempFile("strings", "txt")
var wrt: BufferedWriter = null
try {
wrt = new BufferedWriter(new FileWriter(f))
for (i <- 0 until numStrings) {
bld.setLength(0)
val numComps = rnd.nextInt(5) + 2
for (z <- 0 until numComps) {
if (z > 0) {
bld.append(' ')
}
bld.append(prefix)
val len = rnd.nextInt(20) + 10
for (k <- 0 until len) {
val c = (rnd.nextInt(80) + 40).toChar
bld.append(c)
}
}
val str = bld.toString
wrt.write(str)
wrt.newLine()
}
}
finally {
// NOTE(review): if the FileWriter constructor throws, `wrt` is null here and
// this close() would NPE, masking the original exception -- confirm.
wrt.close()
}
f
}
}
/** Manual entry point: runs the string-tuple sorting test once and exits. */
object MassiveCaseClassSortingITCase {
  def main(args: Array[String]): Unit = {
    val testCase = new MassiveCaseClassSortingITCase()
    testCase.testStringTuplesSorting()
  }
}
case class StringTuple(key1: String, key2: String, value: Array[String])
/**
 * Reads whitespace-separated string tuples, one per line, from `reader`.
 * The first two tokens become the keys; `value` holds the full token array
 * (keys included). Returns null at end of input.
 *
 * Fix: the two `next` overloads previously had byte-for-byte duplicated bodies;
 * the reuse variant now delegates to `next()` (the `reuse` argument was never
 * used in the original either).
 */
class StringTupleReader(val reader: BufferedReader) extends MutableObjectIterator[StringTuple] {

  override def next(reuse: StringTuple): StringTuple = next()

  override def next(): StringTuple = {
    val line = reader.readLine()
    if (line == null) {
      null
    } else {
      val parts = line.split(" ")
      StringTuple(parts(0), parts(1), parts)
    }
  }
}
| ueshin/apache-flink | flink-tests/src/test/scala/org/apache/flink/api/scala/manual/MassiveCaseClassSortingITCase.scala | Scala | apache-2.0 | 6,906 |
/* ------------------- sse-jmx ------------------- *\\
* Licensed under the Apache License, Version 2.0. *
* Author: Spiros Tzavellas *
\\* ----------------------------------------------- */
package com.tzavellas.sse.jmx
import java.util.concurrent.ConcurrentHashMap
import javax.management.{ObjectName, InstanceNotFoundException}
/**
* Tracks MBean registrations.
*/
/**
 * Tracks MBean registrations made through this instance so that they can later
 * be removed in bulk via [[unregisterAll]].
 */
trait MBeanRegistrationTracker extends MBeanRegistrationSupport {

  // Used as a concurrent set: only the keys matter, the Unit values are ignored.
  private val registered = new ConcurrentHashMap[ObjectName, Unit]

  abstract override def registerMBean(mbean: AnyRef, name: ObjectName, behavior: IfAlreadyExists.Enum = IfAlreadyExists.Fail): Unit = {
    super.registerMBean(mbean, name, behavior)
    registered.putIfAbsent(name, ())
  }

  abstract override def unregisterMBean(name: ObjectName, ignore: Boolean = false): Unit = {
    // Fix: forward the caller's `ignore` flag to the superclass. The previous
    // call (`super.unregisterMBean(name)`) silently dropped it, always using
    // the superclass default of false.
    super.unregisterMBean(name, ignore)
    registered.remove(name)
  }

  /**
   * Unregister all the MBeans that have been registered using this instance.
   * MBeans that are already gone from the server are skipped silently.
   *
   * @see [[registerMBean]]
   */
  def unregisterAll(): Unit = {
    val names = registered.keySet.iterator
    while(names.hasNext) {
      try {
        unregisterMBean(names.next)
      } catch {
        case e: InstanceNotFoundException => // already unregistered elsewhere
      }
      names.remove()
    }
  }
}
| sptz45/sse-jmx | src/main/scala/com/tzavellas/sse/jmx/MBeanRegistrationTracker.scala | Scala | apache-2.0 | 1,288 |
/*
* Copyright 2016 Uncharted Software Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package software.uncharted.salt.core.generation.output
import scala.collection.mutable.Map
import software.uncharted.salt.core.generation.Series
/**
* Container for the output of a tile job, where every "tile" has
* within it the results of the individual Series, grouped by
* coordinate. SeriesData is retrieved from a Tile by using
* SeriesData.apply(Tile).
*/
// Holds the per-series results for one tile coordinate. Constructed only inside
// the salt package; consumers retrieve results via SeriesData.apply(Tile), as
// described in the class comment above.
class Tile[TC] private[salt] (
// The coordinate of this tile, of generic coordinate type TC.
val coords: TC,
// Per-series results keyed by series identifier string.
private[salt] val seriesData: Map[String,SeriesData[TC,_,_,_]]
) extends Serializable
| unchartedsoftware/salt | src/main/scala/software/uncharted/salt/core/generation/output/Tile.scala | Scala | apache-2.0 | 1,115 |
package eu.inn.metrics.modules
import com.codahale.metrics.MetricRegistry
import eu.inn.metrics.{MetricsTracker, MetricsTrackerImpl}
import scaldi.Module
// Scaldi DI module wiring the metrics registry and tracker implementations.
class MetricsModule extends Module {
// NOTE(review): binding MetricRegistry to injected[MetricRegistry] looks
// self-referential -- confirm the registry is actually provided by a parent
// or composed module at injection time.
bind [MetricRegistry] to injected[MetricRegistry]
bind [MetricsTracker] to injected[MetricsTrackerImpl]
}
| InnovaCo/service-metrics | src/main/scala/eu/inn/metrics/modules/MetricsModule.scala | Scala | bsd-3-clause | 303 |
package se.lu.nateko.cp.data.formats.wdcgg
import org.slf4j.LoggerFactory
import se.lu.nateko.cp.data.api.WdcggParsingException
import se.lu.nateko.cp.data.formats._
import scala.collection.immutable.ListMap
object WdcggParser {
// Parsed WDCGG file header: declared header/total line counts, the data column
// names, the measured parameter name, the UTC offset from the TIME ZONE entry,
// and all header key/value pairs in file order.
case class Header(
headerLength: Int,
totLength: Int,
columnNames: Array[String],
parameter: String,
offsetFromUtc: Int,
kvPairs: ListMap[String, String]
){
// Number of data rows: total declared lines minus header lines.
def nRows = totLength - headerLength
}
// Line-by-line parsing state threaded through parseLine: the header parsed so
// far, the current 1-based line number, the cells of the last data row, the
// value format resolved per column, and any fatal parse error.
case class Accumulator(
header: Header,
lineNumber: Int,
cells: Array[String],
formats: Array[Option[ValueFormat]],
error: Option[Throwable]
) extends ParsingAccumulator{
def incrementLine = copy(lineNumber = lineNumber + 1)
// True once the header length is known and the current line is past the header.
def isOnData = (header.headerLength > 0 && lineNumber > header.headerLength)
// Convenience copy that updates only fields of the nested Header.
def changeHeader(
headerLength: Int = header.headerLength,
totLength: Int = header.totLength,
columnNames: Array[String] = header.columnNames,
parameter: String = header.parameter,
offsetFromUtc: Int = header.offsetFromUtc,
kvPairs: ListMap[String, String] = header.kvPairs
): Accumulator =
copy(header = header.copy(headerLength, totLength, columnNames, parameter, offsetFromUtc, kvPairs))
}
// Canonical header key names used throughout the parser.
val ParamKey = "PARAMETER"
val CountryKey = "COUNTRY/TERRITORY"
val SamplingTypeKey = "SAMPLING TYPE"
val MeasUnitKey = "MEASUREMENT UNIT"
// Header lines start with "Cnn"; key/value lines are "Cnn KEY: VALUE".
private val headerPattern = """C\\d\\d(.*)""".r
private val headerKvPattern = """C\\d\\d ([^:]+): ?(.*)""".r
private val totLinesPattern = """C\\d\\d TOTAL LINES: (\\d+)""".r
private val headLinesPattern = """C\\d\\d HEADER LINES: (\\d+)""".r
// Data rows are split on runs of whitespace.
private val wsPattern = "\\\\s+".r
private val TimeZoneKey = "TIME ZONE"
private val logger = LoggerFactory.getLogger(getClass)
// Initial (empty) accumulator to start a parse with.
def seed = Accumulator(Header(0, 0, Array.empty, "", 0, ListMap.empty), 0, Array.empty, Array.empty, None)
// Consumes one input line and returns the updated accumulator. Behavior by state:
//  - a recorded error freezes the accumulator;
//  - past the header: split the line into data cells;
//  - on the last header line: expect the column-name row (falling back to a
//    standard column set when it is absent) and resolve per-column formats;
//  - otherwise: parse header key/value pairs, with special handling for the
//    HEADER LINES / TOTAL LINES counters, TIME ZONE and PARAMETER entries, and
//    continuation lines that extend the previous key's value.
def parseLine(columnsMeta: ColumnsMeta)(acc: Accumulator, line: String): Accumulator = {
if(acc.error.isDefined) acc
else if(acc.header.headerLength > 0 && acc.lineNumber >= acc.header.headerLength)
acc.copy(cells = wsPattern.split(line), lineNumber = acc.lineNumber + 1)
else if(acc.lineNumber == acc.header.headerLength - 1) {
val paramName = acc.header.parameter
val colNamesAttempt = wsPattern.split(line)
if(colNamesAttempt.length > 1 && colNamesAttempt(1) == "DATE"){
if(colNamesAttempt.contains(paramName)) {
//the correct column names line is present
val colNames = mapColNames(colNamesAttempt.drop(1), paramName)
val formats = colNames.map(columnsMeta.matchColumn)
acc.changeHeader(columnNames = colNames).copy(formats = formats).incrementLine
} else acc.copy(error = Some(new WdcggParsingException(
s"Unsupported WDCGG file format; column names row was: $line"
)))
}else{
// Column-name row missing: shrink the header by one line and substitute the
// standard WDCGG column layout; the current line is treated as data cells.
val fileName = acc.header.kvPairs.getOrElse("FILE NAME", "(unknown file!)")
logger.warn(s"File $fileName is missing the column names row; amending it with standard column names")
acc.changeHeader(
headerLength = acc.header.headerLength - 1,
columnNames = Array("DATE", "TIME", "DATE", "TIME", ParamKey, "ND", "SD", "F", "CS", "REM")
).copy(cells = colNamesAttempt).incrementLine
}
}
else (line match {
case headLinesPattern(n) =>
acc.changeHeader(headerLength = n.toInt)
case totLinesPattern(n) =>
acc.changeHeader(totLength = n.toInt)
case headerKvPattern(key, value) =>
val withSpecialKvs =
if(key == TimeZoneKey)
acc.changeHeader(offsetFromUtc = parseUtcOffset(value))
else if(key == ParamKey)
acc.changeHeader(parameter = value)
else acc
val harmonizedKey = keyRenamings.getOrElse(key, key)
val updatedKvs = acc.header.kvPairs + makeKv(harmonizedKey, value)
withSpecialKvs.changeHeader(kvPairs = updatedKvs)
case headerPattern(value) =>
// Continuation line: append its text to the most recent key's value.
val (lastKey, currentValue) = acc.header.kvPairs.last
val newKvs = acc.header.kvPairs + ((lastKey, currentValue + value))
acc.changeHeader(kvPairs = newKvs)
}).incrementLine
}
/** Parses the header's time zone declaration into a whole-hour offset from UTC.
  * Labels like "Other", "Local time" and "UTC" are stripped before reading the number;
  * e.g. "Local time UTC+9" yields 9, while "UTC" or an empty value yields 0.
  */
private def parseUtcOffset(offset: String): Int = {
  val withoutLabels = offset
    .stripPrefix("Other")
    .stripPrefix("Local time")
    .trim
  val hours = withoutLabels.stripPrefix("UTC").trim
  //TODO Check if absent time zone info does imply UTC
  if (hours.isEmpty) 0 else hours.toInt
}
/** Replaces the file-specific parameter column name with the generic ParamKey label,
  * leaving all other column names intact. */
private def mapColNames(origColNames: Array[String], paramColName: String) = {
  for (name <- origColNames) yield {
    if (name == paramColName) ParamKey else name
  }
}
//Harmonizes header keys; "COUNTRY/TERITORY" [sic] is kept as spelled — presumably matching the upstream files' own spelling (TODO confirm)
private val keyRenamings = Map("COUNTRY/TERITORY" -> CountryKey)
/** Canonical replacements for country names whose WDCGG spelling deviates
  * from the naming used elsewhere in the portal. */
private val countryRenamings: Map[String, String] = Map(
  ("Hong Kong", "Hong Kong, China"),
  ("Korea, Republic Of", "Republic of Korea"),
  ("N/a", "N/A"),
  ("Netherlands (the)", "Netherlands"),
  ("United Kingdom", "United Kingdom of Great Britain and Northern Ireland"),
  ("United States", "United States of America")
)
/** Produces a harmonized key/value pair for the header metadata map:
  * country names are title-cased and mapped to canonical spellings,
  * the sampling type "Remote sensing" is lower-cased,
  * and measurement units are normalized (mole fractions in dry air become "ppm"). */
private def makeKv(harmonizedKey: String, value: String): (String, String) = {
  val harmonizedValue = harmonizedKey match {
    case CountryKey =>
      val titleCased = value.split(' ').map(_.toLowerCase.capitalize).mkString(" ")
      countryRenamings.getOrElse(titleCased, titleCased).trim
    case SamplingTypeKey =>
      if (value == "Remote sensing") "remote sensing" else value
    case MeasUnitKey =>
      if (value.endsWith("in dry air)")) "ppm"
      else if (value == "Bq/m³") "Bq/m3"
      else value
    case _ =>
      value
  }
  harmonizedKey -> harmonizedValue
}
//Matches WDCGG floating-point null sentinels such as "-999.99" or "-9.9" (unanchored substring match).
//Fixed: the literals were double-escaped ("\\\\-..."), producing a pattern that matched
//literal backslashes and could never match the actual sentinel values.
private val floatNullRegex = "\\-9+\\.9*".r
//Captures the hour and minute components of an HH:MM time-of-day string
private val timeRegex = "(\\d\\d):(\\d\\d)".r
//Impossible month-day combinations used by WDCGG to encode null dates (compared against positions 5+ of yyyy-MM-dd)
private val nullDates = Set("99-99", "02-30", "04-31", "06-31", "09-31", "11-31")
/**
 * Tells whether a raw cell value encodes a null/missing datum for the given value format.
 * WDCGG uses sentinel values (-9999, -999.99, "99:99", impossible dates) rather than empty cells.
 */
def isNull(value: String, format: ValueFormat): Boolean = format match {
case IntValue => value == "-9999"
//unanchored substring match against the -9…9.9…9 sentinel pattern
case FloatValue | DoubleValue => floatNullRegex.findFirstIn(value).isDefined
case Utf16CharValue => value.isEmpty
case StringValue => value == null
//assumes a yyyy-MM-dd value: positions 5+ are the MM-dd part; impossible month-days mark nulls
//NOTE(review): throws StringIndexOutOfBoundsException on values shorter than 5 chars — confirm inputs are always full dates
case Iso8601Date => nullDates.contains(value.substring(5))
case Iso8601DateTime | EtcDate => throw new Exception("Did not expect these value types (Iso8601DateTime | EtcDate) in WDCGG data")
//"99:99" is the null sentinel; hours 25/26 are treated as null rather than amendable
case Iso8601TimeOfDay => value == "99:99" || value.startsWith("25:") || value.startsWith("26:")
case IsoLikeLocalDateTime => throw new Exception("Did not expect this value type (IsoLikeLocalDateTime) in WDCGG data")
case EtcLocalDateTime => throw new Exception("Did not expect this value type (EtcLocalDateTime) in WDCGG data")
}
/** Repairs slightly malformed time-of-day values prior to parsing:
  * a minute component of exactly 60 rolls over to the next full hour,
  * and minute components above 60 are truncated to the full hour.
  * All other values — and all other formats — pass through unchanged. */
def amend(value: String, format: ValueFormat): String = format match {
  case Iso8601TimeOfDay =>
    value match {
      case timeRegex(hh, mm) =>
        val minutes = mm.toInt
        if (minutes == 60) "%02d:00".format(hh.toInt + 1)
        else if (minutes > 60) s"$hh:00"
        else value
      case _ => value
    }
  case _ => value
}
}
| ICOS-Carbon-Portal/data | src/main/scala/se/lu/nateko/cp/data/formats/wdcgg/WdcggParser.scala | Scala | gpl-3.0 | 6,708 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.