| code (string, 5–1M chars) | repo_name (string, 5–109 chars) | path (string, 6–208 chars) | language (1 class) | license (15 classes) | size (int64, 5–1M) |
|---|---|---|---|---|---|
/* Copyright (c) 2008 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.gdata
package calendar
import com.google.gdata.data.util.DateTime
/**
* A calendar query. It adds methods for specifying whether recurring events should
* be expanded, which events should be returned, the timezone for the returned times, etc.
*
* @author Iulian Dragos
* @see http://code.google.com/apis/calendar/reference.html#Parameters
*/
class CalendarQuery extends Query {
/**
* Specifies how to sort events in the search result set. Valid values for this
* parameter are 'lastmodified' and 'starttime'. Default is 'lastmodified'.
*/
def orderBy(ordering: String): this.type =
addParam("orderby", ordering)
/** Request all future events. */
def futureEvents(b: Boolean): this.type =
addParam("futureevents", String.valueOf(b))
/**
* Interval for which recurring events are expanded. The beginning of the interval
* is inclusive, the end is exclusive.
*/
def expandRecurrence(start: DateTime, end: DateTime): this.type = {
expandStart(start)
expandEnd(end)
}
/** Start date for recurrent event expansion (inclusive). */
def expandStart(start: DateTime): this.type =
addParam("recurrence-expansion-start", start.toString)
/** End date for recurrent event expansion (exclusive). */
def expandEnd(end: DateTime): this.type =
addParam("recurrence-expansion-end", end.toString)
/** Indicates whether recurring events should be expanded or represented as a single event. */
def singleEvents(b: Boolean): this.type =
addParam("singleevents", String.valueOf(b))
/**
* Specifies direction of sorting. Valid values are 'ascend', 'ascending', 'a' and
* 'descend', 'descending', 'd'.
*/
def order(direction: String): this.type =
addParam("sortorder", direction)
/**
* Together with start-max creates a timespan such that only events that are
* within the timespan are returned. If not specified, default start-min is
* 1970-01-01. This bound is inclusive.
*/
def startMin(start: DateTime): this.type =
addParam("start-min", start.toString)
/**
* Together with start-min creates a timespan such that only events that are
* within the timespan are returned. If not specified, default start-max is
* 2031-01-01. This bound is exclusive.
*/
def startMax(end: DateTime): this.type =
addParam("start-max", end.toString)
/**
* Return events that start in the given interval. The interval is inclusive at the
* beginning and exclusive at the end.
*/
def startBetween(start: DateTime, end: DateTime): this.type = {
startMin(start)
startMax(end)
}
/**
* The current timezone. If not specified, times are returned in UTC.
* Replace all spaces with underscores (e.g. "ctz=America/Los_Angeles").
*/
def timeZone(tz: String): this.type =
addParam("ctz", tz)
}
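// Usage sketch (added for illustration; not part of the original file). The builder methods
// above all return this.type, so parameters can be chained. Assumes CalendarQuery has the
// no-argument constructor shown above and that `start` and `end` are
// com.google.gdata.data.util.DateTime values obtained elsewhere.
object CalendarQueryExample {
  /** Expanded single events starting in [start, end), ordered by start time, in Pacific time. */
  def recurringEventsQuery(start: DateTime, end: DateTime): CalendarQuery =
    new CalendarQuery()
      .orderBy("starttime")
      .singleEvents(true)
      .startBetween(start, end)
      .timeZone("America/Los_Angeles")
}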
| mjanson/gdata-scala-client | src/com/google/gdata/calendar/CalendarQuery.scala | Scala | apache-2.0 | 3,477 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.python
import java.io._
import java.net._
import java.nio.charset.StandardCharsets
import java.nio.charset.StandardCharsets.UTF_8
import java.nio.file.{Files => JavaFiles, Path}
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicBoolean
import scala.collection.JavaConverters._
import scala.util.control.NonFatal
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.{BUFFER_SIZE, EXECUTOR_CORES}
import org.apache.spark.internal.config.Python._
import org.apache.spark.resource.ResourceProfile.{EXECUTOR_CORES_LOCAL_PROPERTY, PYSPARK_MEMORY_LOCAL_PROPERTY}
import org.apache.spark.security.SocketAuthHelper
import org.apache.spark.util._
/**
* Enumerate the type of command that will be sent to the Python worker
*/
private[spark] object PythonEvalType {
val NON_UDF = 0
val SQL_BATCHED_UDF = 100
val SQL_SCALAR_PANDAS_UDF = 200
val SQL_GROUPED_MAP_PANDAS_UDF = 201
val SQL_GROUPED_AGG_PANDAS_UDF = 202
val SQL_WINDOW_AGG_PANDAS_UDF = 203
val SQL_SCALAR_PANDAS_ITER_UDF = 204
val SQL_MAP_PANDAS_ITER_UDF = 205
val SQL_COGROUPED_MAP_PANDAS_UDF = 206
val SQL_MAP_ARROW_ITER_UDF = 207
def toString(pythonEvalType: Int): String = pythonEvalType match {
case NON_UDF => "NON_UDF"
case SQL_BATCHED_UDF => "SQL_BATCHED_UDF"
case SQL_SCALAR_PANDAS_UDF => "SQL_SCALAR_PANDAS_UDF"
case SQL_GROUPED_MAP_PANDAS_UDF => "SQL_GROUPED_MAP_PANDAS_UDF"
case SQL_GROUPED_AGG_PANDAS_UDF => "SQL_GROUPED_AGG_PANDAS_UDF"
case SQL_WINDOW_AGG_PANDAS_UDF => "SQL_WINDOW_AGG_PANDAS_UDF"
case SQL_SCALAR_PANDAS_ITER_UDF => "SQL_SCALAR_PANDAS_ITER_UDF"
case SQL_MAP_PANDAS_ITER_UDF => "SQL_MAP_PANDAS_ITER_UDF"
case SQL_COGROUPED_MAP_PANDAS_UDF => "SQL_COGROUPED_MAP_PANDAS_UDF"
case SQL_MAP_ARROW_ITER_UDF => "SQL_MAP_ARROW_ITER_UDF"
}
}
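// Illustration (added; not part of the original file): toString above maps an eval-type code to
// its name, and codes without a case fall through to a scala.MatchError. Since the object is
// private[spark], the call below only compiles inside the org.apache.spark package:
//   PythonEvalType.toString(PythonEvalType.SQL_SCALAR_PANDAS_UDF)  // "SQL_SCALAR_PANDAS_UDF"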
private object BasePythonRunner {
private lazy val faultHandlerLogDir = Utils.createTempDir(namePrefix = "faulthandler")
private def faultHandlerLogPath(pid: Int): Path = {
new File(faultHandlerLogDir, pid.toString).toPath
}
}
/**
* A helper class to run Python mapPartition/UDFs in Spark.
*
* funcs is a list of independent Python functions, each of which is a list of chained Python
* functions (from bottom to top).
*/
private[spark] abstract class BasePythonRunner[IN, OUT](
funcs: Seq[ChainedPythonFunctions],
evalType: Int,
argOffsets: Array[Array[Int]])
extends Logging {
require(funcs.length == argOffsets.length, "argOffsets should have the same length as funcs")
private val conf = SparkEnv.get.conf
protected val bufferSize: Int = conf.get(BUFFER_SIZE)
protected val authSocketTimeout = conf.get(PYTHON_AUTH_SOCKET_TIMEOUT)
private val reuseWorker = conf.get(PYTHON_WORKER_REUSE)
private val faultHandlerEnabled = conf.get(PYTHON_WORKER_FAULTHANLDER_ENABLED)
protected val simplifiedTraceback: Boolean = false
// All the Python functions should have the same exec, version and envvars.
protected val envVars: java.util.Map[String, String] = funcs.head.funcs.head.envVars
protected val pythonExec: String = funcs.head.funcs.head.pythonExec
protected val pythonVer: String = funcs.head.funcs.head.pythonVer
// TODO: support accumulator in multiple UDF
protected val accumulator: PythonAccumulatorV2 = funcs.head.funcs.head.accumulator
// Python accumulator is always set in production except in tests. See SPARK-27893
private val maybeAccumulator: Option[PythonAccumulatorV2] = Option(accumulator)
// Expose a ServerSocket to support method calls via socket from Python side.
private[spark] var serverSocket: Option[ServerSocket] = None
// Authentication helper used when serving method calls via socket from Python side.
private lazy val authHelper = new SocketAuthHelper(conf)
// each python worker gets an equal part of the allocation. the worker pool will grow to the
// number of concurrent tasks, which is determined by the number of cores in this executor.
private def getWorkerMemoryMb(mem: Option[Long], cores: Int): Option[Long] = {
mem.map(_ / cores)
}
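// Worked example (added for illustration): with an 8192 MB pyspark memory allotment and
// 4 executor cores, getWorkerMemoryMb(Some(8192L), 4) yields Some(2048L), i.e. 2 GiB per worker.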
def compute(
inputIterator: Iterator[IN],
partitionIndex: Int,
context: TaskContext): Iterator[OUT] = {
val startTime = System.currentTimeMillis
val env = SparkEnv.get
// Get the executor cores and pyspark memory, they are passed via the local properties when
// the user specified them in a ResourceProfile.
val execCoresProp = Option(context.getLocalProperty(EXECUTOR_CORES_LOCAL_PROPERTY))
val memoryMb = Option(context.getLocalProperty(PYSPARK_MEMORY_LOCAL_PROPERTY)).map(_.toLong)
val localdir = env.blockManager.diskBlockManager.localDirs.map(f => f.getPath()).mkString(",")
// if OMP_NUM_THREADS is not explicitly set, override it with the number of cores
if (conf.getOption("spark.executorEnv.OMP_NUM_THREADS").isEmpty) {
// SPARK-28843: limit the OpenMP thread pool to the number of cores assigned to this executor
// this avoids high memory consumption with pandas/numpy because of a large OpenMP thread pool
// see https://github.com/numpy/numpy/issues/10455
execCoresProp.foreach(envVars.put("OMP_NUM_THREADS", _))
}
envVars.put("SPARK_LOCAL_DIRS", localdir) // it's also used in monitor thread
if (reuseWorker) {
envVars.put("SPARK_REUSE_WORKER", "1")
}
if (simplifiedTraceback) {
envVars.put("SPARK_SIMPLIFIED_TRACEBACK", "1")
}
// SPARK-30299 this could be wrong with standalone mode when executor
// cores might not be correct because it defaults to all cores on the box.
val execCores = execCoresProp.map(_.toInt).getOrElse(conf.get(EXECUTOR_CORES))
val workerMemoryMb = getWorkerMemoryMb(memoryMb, execCores)
if (workerMemoryMb.isDefined) {
envVars.put("PYSPARK_EXECUTOR_MEMORY_MB", workerMemoryMb.get.toString)
}
envVars.put("SPARK_AUTH_SOCKET_TIMEOUT", authSocketTimeout.toString)
envVars.put("SPARK_BUFFER_SIZE", bufferSize.toString)
if (faultHandlerEnabled) {
envVars.put("PYTHON_FAULTHANDLER_DIR", BasePythonRunner.faultHandlerLogDir.toString)
}
val (worker: Socket, pid: Option[Int]) = env.createPythonWorker(
pythonExec, envVars.asScala.toMap)
// Whether the worker has been released into the idle pool or closed. Any code that tries to
// release or close a worker should use `releasedOrClosed.compareAndSet` to flip the state, so
// that there is only one winner that releases or closes the worker.
val releasedOrClosed = new AtomicBoolean(false)
// Start a thread to feed the process input from our parent's iterator
val writerThread = newWriterThread(env, worker, inputIterator, partitionIndex, context)
context.addTaskCompletionListener[Unit] { _ =>
writerThread.shutdownOnTaskCompletion()
if (!reuseWorker || releasedOrClosed.compareAndSet(false, true)) {
try {
worker.close()
} catch {
case e: Exception =>
logWarning("Failed to close worker socket", e)
}
}
}
writerThread.start()
if (reuseWorker) {
val key = (worker, context.taskAttemptId)
// SPARK-35009: avoid creating multiple monitor threads for the same python worker
// and task context
if (PythonRunner.runningMonitorThreads.add(key)) {
new MonitorThread(SparkEnv.get, worker, context).start()
}
} else {
new MonitorThread(SparkEnv.get, worker, context).start()
}
// Return an iterator that reads lines from the process's stdout
val stream = new DataInputStream(new BufferedInputStream(worker.getInputStream, bufferSize))
val stdoutIterator = newReaderIterator(
stream, writerThread, startTime, env, worker, pid, releasedOrClosed, context)
new InterruptibleIterator(context, stdoutIterator)
}
protected def newWriterThread(
env: SparkEnv,
worker: Socket,
inputIterator: Iterator[IN],
partitionIndex: Int,
context: TaskContext): WriterThread
protected def newReaderIterator(
stream: DataInputStream,
writerThread: WriterThread,
startTime: Long,
env: SparkEnv,
worker: Socket,
pid: Option[Int],
releasedOrClosed: AtomicBoolean,
context: TaskContext): Iterator[OUT]
/**
* The thread responsible for writing the data from the PythonRDD's parent iterator to the
* Python process.
*/
abstract class WriterThread(
env: SparkEnv,
worker: Socket,
inputIterator: Iterator[IN],
partitionIndex: Int,
context: TaskContext)
extends Thread(s"stdout writer for $pythonExec") {
@volatile private var _exception: Throwable = null
private val pythonIncludes = funcs.flatMap(_.funcs.flatMap(_.pythonIncludes.asScala)).toSet
private val broadcastVars = funcs.flatMap(_.funcs.flatMap(_.broadcastVars.asScala))
setDaemon(true)
/** Contains the throwable thrown while writing the parent iterator to the Python process. */
def exception: Option[Throwable] = Option(_exception)
/**
* Terminates the writer thread and waits for it to exit, ignoring any exceptions that may occur
* due to cleanup.
*/
def shutdownOnTaskCompletion(): Unit = {
assert(context.isCompleted)
this.interrupt()
// Task completion listeners that run after this method returns may invalidate
// `inputIterator`. For example, when `inputIterator` was generated by the off-heap vectorized
// reader, a task completion listener will free the underlying off-heap buffers. If the writer
// thread is still running when `inputIterator` is invalidated, it can cause a use-after-free
// bug that crashes the executor (SPARK-33277). Therefore this method must wait for the writer
// thread to exit before returning.
this.join()
}
/**
* Writes a command section to the stream connected to the Python worker.
*/
protected def writeCommand(dataOut: DataOutputStream): Unit
/**
* Writes input data to the stream connected to the Python worker.
*/
protected def writeIteratorToStream(dataOut: DataOutputStream): Unit
override def run(): Unit = Utils.logUncaughtExceptions {
try {
TaskContext.setTaskContext(context)
val stream = new BufferedOutputStream(worker.getOutputStream, bufferSize)
val dataOut = new DataOutputStream(stream)
// Partition index
dataOut.writeInt(partitionIndex)
// Python version of driver
PythonRDD.writeUTF(pythonVer, dataOut)
// Init a ServerSocket to accept method calls from Python side.
val isBarrier = context.isInstanceOf[BarrierTaskContext]
if (isBarrier) {
serverSocket = Some(new ServerSocket(/* port */ 0,
/* backlog */ 1,
InetAddress.getByName("localhost")))
// A call to accept() for ServerSocket shall block infinitely.
serverSocket.foreach(_.setSoTimeout(0))
new Thread("accept-connections") {
setDaemon(true)
override def run(): Unit = {
while (!serverSocket.get.isClosed()) {
var sock: Socket = null
try {
sock = serverSocket.get.accept()
// Wait for function call from python side.
sock.setSoTimeout(10000)
authHelper.authClient(sock)
val input = new DataInputStream(sock.getInputStream())
val requestMethod = input.readInt()
// The BarrierTaskContext function may wait infinitely, socket shall not timeout
// before the function finishes.
sock.setSoTimeout(0)
requestMethod match {
case BarrierTaskContextMessageProtocol.BARRIER_FUNCTION =>
barrierAndServe(requestMethod, sock)
case BarrierTaskContextMessageProtocol.ALL_GATHER_FUNCTION =>
val length = input.readInt()
val message = new Array[Byte](length)
input.readFully(message)
barrierAndServe(requestMethod, sock, new String(message, UTF_8))
case _ =>
val out = new DataOutputStream(new BufferedOutputStream(
sock.getOutputStream))
writeUTF(BarrierTaskContextMessageProtocol.ERROR_UNRECOGNIZED_FUNCTION, out)
}
} catch {
case e: SocketException if e.getMessage.contains("Socket closed") =>
// It is possible that the ServerSocket is not closed, but the native socket
// has already been closed, we shall catch and silently ignore this case.
} finally {
if (sock != null) {
sock.close()
}
}
}
}
}.start()
}
val secret = if (isBarrier) {
authHelper.secret
} else {
""
}
// Close ServerSocket on task completion.
serverSocket.foreach { server =>
context.addTaskCompletionListener[Unit](_ => server.close())
}
val boundPort: Int = serverSocket.map(_.getLocalPort).getOrElse(0)
if (boundPort == -1) {
val message = "ServerSocket failed to bind to Java side."
logError(message)
throw new SparkException(message)
} else if (isBarrier) {
logDebug(s"Started ServerSocket on port $boundPort.")
}
// Write out the TaskContextInfo
dataOut.writeBoolean(isBarrier)
dataOut.writeInt(boundPort)
val secretBytes = secret.getBytes(UTF_8)
dataOut.writeInt(secretBytes.length)
dataOut.write(secretBytes, 0, secretBytes.length)
dataOut.writeInt(context.stageId())
dataOut.writeInt(context.partitionId())
dataOut.writeInt(context.attemptNumber())
dataOut.writeLong(context.taskAttemptId())
dataOut.writeInt(context.cpus())
val resources = context.resources()
dataOut.writeInt(resources.size)
resources.foreach { case (k, v) =>
PythonRDD.writeUTF(k, dataOut)
PythonRDD.writeUTF(v.name, dataOut)
dataOut.writeInt(v.addresses.size)
v.addresses.foreach { case addr =>
PythonRDD.writeUTF(addr, dataOut)
}
}
val localProps = context.getLocalProperties.asScala
dataOut.writeInt(localProps.size)
localProps.foreach { case (k, v) =>
PythonRDD.writeUTF(k, dataOut)
PythonRDD.writeUTF(v, dataOut)
}
// sparkFilesDir
PythonRDD.writeUTF(SparkFiles.getRootDirectory(), dataOut)
// Python includes (*.zip and *.egg files)
dataOut.writeInt(pythonIncludes.size)
for (include <- pythonIncludes) {
PythonRDD.writeUTF(include, dataOut)
}
// Broadcast variables
val oldBids = PythonRDD.getWorkerBroadcasts(worker)
val newBids = broadcastVars.map(_.id).toSet
// number of different broadcasts
val toRemove = oldBids.diff(newBids)
val addedBids = newBids.diff(oldBids)
val cnt = toRemove.size + addedBids.size
val needsDecryptionServer = env.serializerManager.encryptionEnabled && addedBids.nonEmpty
dataOut.writeBoolean(needsDecryptionServer)
dataOut.writeInt(cnt)
def sendBidsToRemove(): Unit = {
for (bid <- toRemove) {
// remove the broadcast from worker
dataOut.writeLong(-bid - 1) // bid >= 0
oldBids.remove(bid)
}
}
if (needsDecryptionServer) {
// if there is encryption, we set up a server which reads the encrypted files and sends
// the decrypted data to python
val idsAndFiles = broadcastVars.flatMap { broadcast =>
if (!oldBids.contains(broadcast.id)) {
Some((broadcast.id, broadcast.value.path))
} else {
None
}
}
val server = new EncryptedPythonBroadcastServer(env, idsAndFiles)
dataOut.writeInt(server.port)
logTrace(s"broadcast decryption server setup on ${server.port}")
PythonRDD.writeUTF(server.secret, dataOut)
sendBidsToRemove()
idsAndFiles.foreach { case (id, _) =>
// send new broadcast
dataOut.writeLong(id)
oldBids.add(id)
}
dataOut.flush()
logTrace("waiting for python to read decrypted broadcast data from server")
server.waitTillBroadcastDataSent()
logTrace("done sending decrypted data to python")
} else {
sendBidsToRemove()
for (broadcast <- broadcastVars) {
if (!oldBids.contains(broadcast.id)) {
// send new broadcast
dataOut.writeLong(broadcast.id)
PythonRDD.writeUTF(broadcast.value.path, dataOut)
oldBids.add(broadcast.id)
}
}
}
dataOut.flush()
dataOut.writeInt(evalType)
writeCommand(dataOut)
writeIteratorToStream(dataOut)
dataOut.writeInt(SpecialLengths.END_OF_STREAM)
dataOut.flush()
} catch {
case t: Throwable if (NonFatal(t) || t.isInstanceOf[Exception]) =>
if (context.isCompleted || context.isInterrupted) {
logDebug("Exception/NonFatal Error thrown after task completion (likely due to " +
"cleanup)", t)
if (!worker.isClosed) {
Utils.tryLog(worker.shutdownOutput())
}
} else {
// We must avoid throwing exceptions/NonFatals here, because the thread uncaught
// exception handler will kill the whole executor (see
// org.apache.spark.executor.Executor).
_exception = t
if (!worker.isClosed) {
Utils.tryLog(worker.shutdownOutput())
}
}
}
}
/**
* Gateway to call BarrierTaskContext methods.
*/
def barrierAndServe(requestMethod: Int, sock: Socket, message: String = ""): Unit = {
require(
serverSocket.isDefined,
"No available ServerSocket to redirect the BarrierTaskContext method call."
)
val out = new DataOutputStream(new BufferedOutputStream(sock.getOutputStream))
try {
val messages = requestMethod match {
case BarrierTaskContextMessageProtocol.BARRIER_FUNCTION =>
context.asInstanceOf[BarrierTaskContext].barrier()
Array(BarrierTaskContextMessageProtocol.BARRIER_RESULT_SUCCESS)
case BarrierTaskContextMessageProtocol.ALL_GATHER_FUNCTION =>
context.asInstanceOf[BarrierTaskContext].allGather(message)
}
out.writeInt(messages.length)
messages.foreach(writeUTF(_, out))
} catch {
case e: SparkException =>
writeUTF(e.getMessage, out)
} finally {
out.close()
}
}
def writeUTF(str: String, dataOut: DataOutputStream): Unit = {
val bytes = str.getBytes(UTF_8)
dataOut.writeInt(bytes.length)
dataOut.write(bytes)
}
}
abstract class ReaderIterator(
stream: DataInputStream,
writerThread: WriterThread,
startTime: Long,
env: SparkEnv,
worker: Socket,
pid: Option[Int],
releasedOrClosed: AtomicBoolean,
context: TaskContext)
extends Iterator[OUT] {
private var nextObj: OUT = _
private var eos = false
override def hasNext: Boolean = nextObj != null || {
if (!eos) {
nextObj = read()
hasNext
} else {
false
}
}
override def next(): OUT = {
if (hasNext) {
val obj = nextObj
nextObj = null.asInstanceOf[OUT]
obj
} else {
Iterator.empty.next()
}
}
/**
* Reads the next object from the stream.
* When the stream reaches the end of the data, it needs to process the following sections,
* and then returns null.
*/
protected def read(): OUT
protected def handleTimingData(): Unit = {
// Timing data from worker
val bootTime = stream.readLong()
val initTime = stream.readLong()
val finishTime = stream.readLong()
val boot = bootTime - startTime
val init = initTime - bootTime
val finish = finishTime - initTime
val total = finishTime - startTime
logInfo("Times: total = %s, boot = %s, init = %s, finish = %s".format(total, boot,
init, finish))
val memoryBytesSpilled = stream.readLong()
val diskBytesSpilled = stream.readLong()
context.taskMetrics.incMemoryBytesSpilled(memoryBytesSpilled)
context.taskMetrics.incDiskBytesSpilled(diskBytesSpilled)
}
protected def handlePythonException(): PythonException = {
// Signals that an exception has been thrown in python
val exLength = stream.readInt()
val obj = new Array[Byte](exLength)
stream.readFully(obj)
new PythonException(new String(obj, StandardCharsets.UTF_8),
writerThread.exception.getOrElse(null))
}
protected def handleEndOfDataSection(): Unit = {
// We've finished the data section of the output, but we can still
// read some accumulator updates:
val numAccumulatorUpdates = stream.readInt()
(1 to numAccumulatorUpdates).foreach { _ =>
val updateLen = stream.readInt()
val update = new Array[Byte](updateLen)
stream.readFully(update)
maybeAccumulator.foreach(_.add(update))
}
// Check whether the worker is ready to be re-used.
if (stream.readInt() == SpecialLengths.END_OF_STREAM) {
if (reuseWorker && releasedOrClosed.compareAndSet(false, true)) {
env.releasePythonWorker(pythonExec, envVars.asScala.toMap, worker)
}
}
eos = true
}
protected val handleException: PartialFunction[Throwable, OUT] = {
case e: Exception if context.isInterrupted =>
logDebug("Exception thrown after task interruption", e)
throw new TaskKilledException(context.getKillReason().getOrElse("unknown reason"))
case e: Exception if writerThread.exception.isDefined =>
logError("Python worker exited unexpectedly (crashed)", e)
logError("This may have been caused by a prior exception:", writerThread.exception.get)
throw writerThread.exception.get
case eof: EOFException if faultHandlerEnabled && pid.isDefined &&
JavaFiles.exists(BasePythonRunner.faultHandlerLogPath(pid.get)) =>
val path = BasePythonRunner.faultHandlerLogPath(pid.get)
val error = String.join("\n", JavaFiles.readAllLines(path)) + "\n"
JavaFiles.deleteIfExists(path)
throw new SparkException(s"Python worker exited unexpectedly (crashed): $error", eof)
case eof: EOFException =>
throw new SparkException("Python worker exited unexpectedly (crashed)", eof)
}
}
/**
* It is necessary to have a monitor thread for python workers if the user cancels with
* interrupts disabled. In that case we will need to explicitly kill the worker, otherwise the
* threads can block indefinitely.
*/
class MonitorThread(env: SparkEnv, worker: Socket, context: TaskContext)
extends Thread(s"Worker Monitor for $pythonExec") {
/** How long to wait before killing the python worker if a task cannot be interrupted. */
private val taskKillTimeout = env.conf.get(PYTHON_TASK_KILL_TIMEOUT)
setDaemon(true)
private def monitorWorker(): Unit = {
// Kill the worker if it is interrupted, checking until task completion.
// TODO: This has a race condition if interruption occurs, as completed may still become true.
while (!context.isInterrupted && !context.isCompleted) {
Thread.sleep(2000)
}
if (!context.isCompleted) {
Thread.sleep(taskKillTimeout)
if (!context.isCompleted) {
try {
// Mimic the task name used in `Executor` to help the user find out the task to blame.
val taskName = s"${context.partitionId}.${context.attemptNumber} " +
s"in stage ${context.stageId} (TID ${context.taskAttemptId})"
logWarning(s"Incomplete task $taskName interrupted: Attempting to kill Python Worker")
env.destroyPythonWorker(pythonExec, envVars.asScala.toMap, worker)
} catch {
case e: Exception =>
logError("Exception when trying to kill worker", e)
}
}
}
}
override def run(): Unit = {
try {
monitorWorker()
} finally {
if (reuseWorker) {
val key = (worker, context.taskAttemptId)
PythonRunner.runningMonitorThreads.remove(key)
}
}
}
}
}
private[spark] object PythonRunner {
// already running worker monitor threads, keyed by (worker, task attempt ID) pairs
val runningMonitorThreads = ConcurrentHashMap.newKeySet[(Socket, Long)]()
def apply(func: PythonFunction): PythonRunner = {
new PythonRunner(Seq(ChainedPythonFunctions(Seq(func))))
}
}
/**
* A helper class to run Python mapPartition in Spark.
*/
private[spark] class PythonRunner(funcs: Seq[ChainedPythonFunctions])
extends BasePythonRunner[Array[Byte], Array[Byte]](
funcs, PythonEvalType.NON_UDF, Array(Array(0))) {
protected override def newWriterThread(
env: SparkEnv,
worker: Socket,
inputIterator: Iterator[Array[Byte]],
partitionIndex: Int,
context: TaskContext): WriterThread = {
new WriterThread(env, worker, inputIterator, partitionIndex, context) {
protected override def writeCommand(dataOut: DataOutputStream): Unit = {
val command = funcs.head.funcs.head.command
dataOut.writeInt(command.length)
dataOut.write(command.toArray)
}
protected override def writeIteratorToStream(dataOut: DataOutputStream): Unit = {
PythonRDD.writeIteratorToStream(inputIterator, dataOut)
dataOut.writeInt(SpecialLengths.END_OF_DATA_SECTION)
}
}
}
protected override def newReaderIterator(
stream: DataInputStream,
writerThread: WriterThread,
startTime: Long,
env: SparkEnv,
worker: Socket,
pid: Option[Int],
releasedOrClosed: AtomicBoolean,
context: TaskContext): Iterator[Array[Byte]] = {
new ReaderIterator(
stream, writerThread, startTime, env, worker, pid, releasedOrClosed, context) {
protected override def read(): Array[Byte] = {
if (writerThread.exception.isDefined) {
throw writerThread.exception.get
}
try {
stream.readInt() match {
case length if length > 0 =>
val obj = new Array[Byte](length)
stream.readFully(obj)
obj
case 0 => Array.emptyByteArray
case SpecialLengths.TIMING_DATA =>
handleTimingData()
read()
case SpecialLengths.PYTHON_EXCEPTION_THROWN =>
throw handlePythonException()
case SpecialLengths.END_OF_DATA_SECTION =>
handleEndOfDataSection()
null
}
} catch handleException
}
}
}
}
private[spark] object SpecialLengths {
val END_OF_DATA_SECTION = -1
val PYTHON_EXCEPTION_THROWN = -2
val TIMING_DATA = -3
val END_OF_STREAM = -4
val NULL = -5
val START_ARROW_STREAM = -6
}
private[spark] object BarrierTaskContextMessageProtocol {
val BARRIER_FUNCTION = 1
val ALL_GATHER_FUNCTION = 2
val BARRIER_RESULT_SUCCESS = "success"
val ERROR_UNRECOGNIZED_FUNCTION = "Not recognized function call from python side."
}
| ueshin/apache-spark | core/src/main/scala/org/apache/spark/api/python/PythonRunner.scala | Scala | apache-2.0 | 28,873 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.api.services
import slamdata.Predef.{ -> => _, _ }
import quasar.api._
import quasar.api.ApiError._
import quasar.api.ToQResponse._
import quasar.api.ToQResponse.ops._
import quasar.contrib.pathy._
import quasar.effect.Failure
import quasar.fp._, numeric._
import quasar.fs.mount.module.Module
import quasar.sql.fixParser
import org.http4s.dsl._
import org.http4s.headers.Accept
import pathy.Path._
import scalaz._, Scalaz._
import scalaz.concurrent.Task
object invoke {
def service[S[_]](
implicit
I: Module.Ops[S],
S0: Task :<: S,
S1: Failure[Module.Error, ?] :<: S
): QHttpService[S] = QHttpService {
case req @ GET -> AsPath(path) :? Offset(offsetParam) +& Limit(limitParam) =>
respond {
(offsetOrInvalid(offsetParam) |@| limitOrInvalid(limitParam)) { (offset, limit) =>
refineType(path).fold(
dir => apiError(BadRequest withReason "Path must be a file").toResponse[S].point[Free[S, ?]],
file => {
val requestedFormat = MessageFormat.fromAccept(req.headers.get(Accept))
val relevantParams = req.params - "offset" - "limit"
invoke[S](requestedFormat, file, relevantParams, offset, limit)
})
}.sequence
}
}
////
private def invoke[S[_]](
format: MessageFormat,
filePath: AFile,
args: Map[String, String],
offset: Natural,
limit: Option[Positive]
)(implicit
I: Module.Ops[S],
S0: Failure[Module.Error, ?] :<: S,
S1: Task :<: S
): Free[S, QResponse[S]] =
args.traverse(fixParser.parseExpr).fold(
parseError => parseError.toResponse[S].point[Free[S, ?]],
parsedArgs => formattedDataResponse(format, I.invokeFunction(filePath, parsedArgs, offset, limit)))
}
| drostron/quasar | web/src/main/scala/quasar/api/services/invoke.scala | Scala | apache-2.0 | 2,381 |
package xi.armatweet.nlp
import edu.stanford.nlp.ie.util.RelationTriple
import edu.stanford.nlp.ling.{CoreAnnotations, IndexedWord}
import edu.stanford.nlp.semgraph.SemanticGraph
import edu.stanford.nlp.trees.UniversalEnglishGrammaticalRelations
import net.sf.extjwnl.data.{POS, Synset}
import net.sf.extjwnl.dictionary.Dictionary
import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer
/**
* Created by alberto on 20/05/16.
*/
class VerbLinker {
private val dict = Dictionary.getDefaultResourceInstance
private val NtRE = "n['`’]t$".r
private val WordRE = "[\\w '`’]+".r
def bestSynsetFor(verb: String): Option[(String, Synset)] = {
val wnWord_tmp = dict.getIndexWord(POS.VERB, verb)
if (wnWord_tmp == null) {
None
} else {
wnWord_tmp.sortSenses()
val bestSense = wnWord_tmp.getSenses
if (bestSense.isEmpty)
None
else {
Some((wnWord_tmp.getLemma, bestSense(0)))
}
} //if
} //bestSynsetFor
def linkVerb(tweetWithPOS: Array[(String, String)], idx: Int): Option[(String, Synset)] = {
val v = tweetWithPOS(idx)._1
val baseVerb = NtRE.replaceAllIn(v, "")
val currentBest = bestSynsetFor(baseVerb)
val bestSynset = currentBest match {
//composite verb?
case Some((verbMorphy, oneWordSset)) if (idx + 1 < tweetWithPOS.length && tweetWithPOS(idx + 1)._2 == "T") =>
val compositeVerb = verbMorphy + "_" + tweetWithPOS(idx + 1)._1
val compositeSynset = bestSynsetFor(compositeVerb)
if (compositeSynset.nonEmpty)
compositeSynset
else {
Some((verbMorphy, oneWordSset))
}
// not composite this time
case Some(verbLinked) =>
Some(verbLinked)
case None => None
} //case
bestSynset
} //linkVerb
def isAuxiliaryVerb(iw: IndexedWord, semanticGraph: SemanticGraph): Boolean = {
val parent = semanticGraph.getParent(iw)
if (parent == null)
false
else {
val relation = semanticGraph.getEdge(parent, iw).getRelation
relation == UniversalEnglishGrammaticalRelations.AUX_MODIFIER ||
relation == UniversalEnglishGrammaticalRelations.AUX_PASSIVE_MODIFIER ||
relation == UniversalEnglishGrammaticalRelations.COPULA
}
} //isAuxiliaryVerb
/**
*
* @param verb a non-auxiliary verb (see isAuxiliaryVerb)
* @param semanticGraph
* @return an entry ((offset start, offset end), option(synset)).
* IMPORTANT: the offsets refer to the cleaned version of the tweet
* and need to be converted.
*/
def linkVerb_coreNLP(verb: IndexedWord, semanticGraph: SemanticGraph) = {
val phrasalParticles = semanticGraph.getChildrenWithReln(verb,
UniversalEnglishGrammaticalRelations.PHRASAL_VERB_PARTICLE)
// also check adverbial modifiers for phrasal verbs
// phrasalParticles = if (phrasalParticles.nonEmpty) phrasalParticles
// else
// semanticGraph.getChildrenWithReln(verb, UniversalEnglishGrammaticalRelations.ADVERBIAL_MODIFIER)
val verbMorphy = verb.lemma()
val phrasalLinked = phrasalParticles.map { phrasal =>
val offset = if (phrasal.index() - verb.index() == 1)
(verb.beginPosition(), phrasal.endPosition())
else
(verb.beginPosition(), verb.endPosition())
val phrasalVerb = verbMorphy + "_" + phrasal.lemma()
// println(s"trying to link '$phrasalVerb'")
val bestSynset = bestSynsetFor(phrasalVerb)
// if (bestSynset.nonEmpty) println("success")
// else println("failed")
(offset, bestSynset)
}.filter(_._2.nonEmpty)
// if some phrasal version of the verb was linked, return it (there should be only one)
// otherwise, link the base verb.
if (phrasalLinked.nonEmpty) {
phrasalLinked.head
} else {
((verb.beginPosition(), verb.endPosition()), bestSynsetFor(verbMorphy))
} //if
} //linkVerb_coreNLP
/**
* offsets are relative to the original version of the tweet.
*
* @param tweet
* @param semanticGraph
* @return
*/
def linkVerbs_offsets_coreNLP(tweet: Tweet, semanticGraph: SemanticGraph): IndexedSeq[Option[(Int, Int, String, Synset)]] = {
val allVerbs = semanticGraph.getAllNodesByPartOfSpeechPattern("V.*")
// don't link auxiliary verbs
val toLink = allVerbs.filterNot(iw => isAuxiliaryVerb(iw, semanticGraph))
val linked = toLink
.map { verb => linkVerb_coreNLP(verb, semanticGraph) }
.filter(_._2.nonEmpty)
val withOffsetConverted = linked.map { case ((offB, offE), Some((str, synset))) =>
// println(s"cleaned tweet offsets: $offB, $offE")
val convertedOffset = tweet.toOriginalStringOffset(offB, offE)
convertedOffset match {
case Some((b, e)) => Some(b, e, str, synset)
case None => Some(-1, -1, str, synset)
}
}
withOffsetConverted.toIndexedSeq
} //linkVerbs_offsets_coreNLP
/**
* Links a relation returned by the stanford OpenIE annotator
*
* @param triple
*/
def linkRelation(triple: RelationTriple): Option[(String, Synset)] = {
val verbs = triple.relation
.map { cr => (cr, cr.get(classOf[CoreAnnotations.PartOfSpeechAnnotation])) }
.filter(_._2.startsWith("V"))
if (verbs.isEmpty) return None
assert(verbs.length < 2)
//assume there is only one verb
val verb = verbs.head._1
val baseVerb = if (verb.lemma() == null) NtRE.replaceAllIn(verb.originalText(), "") else verb.lemma()
if (triple.asDependencyTree().isPresent) {
// use the dependency graph to extract phrasal verbs (e.g. "passed away")
val dependencies = triple.asDependencyTree().get()
val relationIndexes = triple.relation.map(_.index()).toSet
// do lemmatization
// val currentBest = bestSynsetFor(baseVerb)
val verbNode = dependencies.getNodeByIndex(verb.index())
val phrasalVerbs = dependencies.getChildren(verbNode)
.filter(w => relationIndexes.contains(w.index()))
.map { w => baseVerb + "_" + w.originalText() }
val phrasalSynsets = phrasalVerbs.map { pv => bestSynsetFor(pv) }.filter(_.nonEmpty)
if (phrasalSynsets.nonEmpty) phrasalSynsets.head
else bestSynsetFor(baseVerb)
} else {
// just link the first verb
bestSynsetFor(baseVerb)
} //if
} //linkRelation
def rdfWordnetId(synset: Synset): String = {
f"<http://wordnet-rdf.princeton.edu/wn31/2${
synset.getOffset
}%08d-v>"
} //rdfWordnetId
val RDFSynsetRE = "<?http://wordnet-rdf.princeton.edu/wn31/2(\\d+)-v>?".r
def rdfIdToSynset(wnetUri: String): Synset = {
wnetUri match {
case RDFSynsetRE(offset) =>
dict.getSynsetAt(POS.VERB, offset.toLong)
}
}
/**
*
* @param tweetWithPOS
* @return a list of pairs (lemma_verb, wordnet RDF synset id), where
* lemma_verb is the lemmatised version of the surface
* form of the verb.
*/
def linkVerbsInTweet(tweetWithPOS: Array[(String, String)]) = {
val allVerbs = ListBuffer[(String, String)]()
val verbsIdx = tweetWithPOS
.zipWithIndex.filter {
case ((text, pos), idx) => pos == "V" && (text match {
case WordRE() => true
case _ => false
})
}
for (((v, pos), idx) <- verbsIdx) {
val synset = linkVerb(tweetWithPOS, idx)
synset match {
case Some((processedVerb, synset)) =>
allVerbs += processedVerb -> rdfWordnetId(synset)
case None => // do nothing
}
} //for
allVerbs.result()
} //linkVerbsInTweet
}
//xi.armatweet.nlp.VerbLinker
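// Usage sketch (added for illustration; not part of the original file). Assumes extJWNL's default
// WordNet resource is on the classpath, as Dictionary.getDefaultResourceInstance requires; the
// verb "run" is illustrative only.
object VerbLinkerExample {
  def main(args: Array[String]): Unit = {
    val linker = new VerbLinker
    linker.bestSynsetFor("run") match {
      case Some((lemma, synset)) => println(s"$lemma -> ${linker.rdfWordnetId(synset)}")
      case None => println("no synset found for 'run'")
    }
  }
}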
| eXascaleInfolab/2016-armatweet | NLP/src/main/scala/xi/armatweet/nlp/VerbLinker.scala | Scala | mit | 7,675 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.nio.ByteBuffer
import java.lang.{Long => JLong, Short => JShort}
import kafka.admin.AdminUtils
import kafka.api._
import kafka.cluster.Partition
import kafka.common._
import kafka.controller.KafkaController
import kafka.coordinator.{GroupCoordinator, JoinGroupResult}
import kafka.log._
import kafka.message.MessageSet
import kafka.network._
import kafka.network.RequestChannel.{Session, Response}
import kafka.security.auth.{Authorizer, ClusterAction, Group, Create, Describe, Operation, Read, Resource, Topic, Write}
import kafka.utils.{Logging, SystemTime, ZKGroupTopicDirs, ZkUtils}
import org.apache.kafka.common.errors.{InvalidTopicException, NotLeaderForPartitionException, UnknownTopicOrPartitionException,
ClusterAuthorizationException}
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.protocol.{ApiKeys, Errors, SecurityProtocol}
import org.apache.kafka.common.requests.{ListOffsetRequest, ListOffsetResponse, GroupCoordinatorRequest, GroupCoordinatorResponse, ListGroupsResponse,
DescribeGroupsRequest, DescribeGroupsResponse, HeartbeatRequest, HeartbeatResponse, JoinGroupRequest, JoinGroupResponse,
LeaveGroupRequest, LeaveGroupResponse, ResponseHeader, ResponseSend, SyncGroupRequest, SyncGroupResponse, LeaderAndIsrRequest, LeaderAndIsrResponse,
StopReplicaRequest, StopReplicaResponse}
import org.apache.kafka.common.utils.Utils
import org.apache.kafka.common.{TopicPartition, Node}
import scala.collection._
import scala.collection.JavaConverters._
/**
* Logic to handle the various Kafka requests
*/
class KafkaApis(val requestChannel: RequestChannel,
val replicaManager: ReplicaManager,
val coordinator: GroupCoordinator,
val controller: KafkaController,
val zkUtils: ZkUtils,
val brokerId: Int,
val config: KafkaConfig,
val metadataCache: MetadataCache,
val metrics: Metrics,
val authorizer: Option[Authorizer]) extends Logging {
this.logIdent = "[KafkaApi-%d] ".format(brokerId)
// Store all the quota managers for each type of request
val quotaManagers: Map[Short, ClientQuotaManager] = instantiateQuotaManagers(config)
/**
* Top-level method that handles all requests and multiplexes to the right api
*/
def handle(request: RequestChannel.Request) {
try {
trace("Handling request:%s from connection %s;securityProtocol:%s,principal:%s".
format(request.requestObj, request.connectionId, request.securityProtocol, request.session.principal))
ApiKeys.forId(request.requestId) match {
case ApiKeys.PRODUCE => handleProducerRequest(request)
case ApiKeys.FETCH => handleFetchRequest(request)
case ApiKeys.LIST_OFFSETS => handleOffsetRequest(request)
case ApiKeys.METADATA => handleTopicMetadataRequest(request)
case ApiKeys.LEADER_AND_ISR => handleLeaderAndIsrRequest(request)
case ApiKeys.STOP_REPLICA => handleStopReplicaRequest(request)
case ApiKeys.UPDATE_METADATA_KEY => handleUpdateMetadataRequest(request)
case ApiKeys.CONTROLLED_SHUTDOWN_KEY => handleControlledShutdownRequest(request)
case ApiKeys.OFFSET_COMMIT => handleOffsetCommitRequest(request)
case ApiKeys.OFFSET_FETCH => handleOffsetFetchRequest(request)
case ApiKeys.GROUP_COORDINATOR => handleGroupCoordinatorRequest(request)
case ApiKeys.JOIN_GROUP => handleJoinGroupRequest(request)
case ApiKeys.HEARTBEAT => handleHeartbeatRequest(request)
case ApiKeys.LEAVE_GROUP => handleLeaveGroupRequest(request)
case ApiKeys.SYNC_GROUP => handleSyncGroupRequest(request)
case ApiKeys.DESCRIBE_GROUPS => handleDescribeGroupRequest(request)
case ApiKeys.LIST_GROUPS => handleListGroupsRequest(request)
case requestId => throw new KafkaException("Unknown api code " + requestId)
}
} catch {
case e: Throwable =>
if (request.requestObj != null) {
request.requestObj.handleError(e, requestChannel, request)
error("Error when handling request %s".format(request.requestObj), e)
} else {
val response = request.body.getErrorResponse(request.header.apiVersion, e)
val respHeader = new ResponseHeader(request.header.correlationId)
/* If request doesn't have a default error response, we just close the connection.
For example, when produce request has acks set to 0 */
if (response == null)
requestChannel.closeConnection(request.processor, request)
else
requestChannel.sendResponse(new Response(request, new ResponseSend(request.connectionId, respHeader, response)))
error("Error when handling request %s".format(request.body), e)
}
} finally
request.apiLocalCompleteTimeMs = SystemTime.milliseconds
}
def handleLeaderAndIsrRequest(request: RequestChannel.Request) {
// ensureTopicExists is only for client facing requests
// We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they
// stop serving data to clients for the topic being deleted
val correlationId = request.header.correlationId
val leaderAndIsrRequest = request.body.asInstanceOf[LeaderAndIsrRequest]
try {
def onLeadershipChange(updatedLeaders: Iterable[Partition], updatedFollowers: Iterable[Partition]) {
// for each new leader or follower, call coordinator to handle consumer group migration.
// this callback is invoked under the replica state change lock to ensure proper order of
// leadership changes
updatedLeaders.foreach { partition =>
if (partition.topic == GroupCoordinator.GroupMetadataTopicName)
coordinator.handleGroupImmigration(partition.partitionId)
}
updatedFollowers.foreach { partition =>
if (partition.topic == GroupCoordinator.GroupMetadataTopicName)
coordinator.handleGroupEmigration(partition.partitionId)
}
}
val responseHeader = new ResponseHeader(correlationId)
val leaderAndIsrResponse =
if (authorize(request.session, ClusterAction, Resource.ClusterResource)) {
val result = replicaManager.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest, metadataCache, onLeadershipChange)
new LeaderAndIsrResponse(result.errorCode, result.responseMap.mapValues(new JShort(_)).asJava)
} else {
val result = leaderAndIsrRequest.partitionStates.asScala.keys.map((_, new JShort(Errors.CLUSTER_AUTHORIZATION_FAILED.code))).toMap
new LeaderAndIsrResponse(Errors.CLUSTER_AUTHORIZATION_FAILED.code, result.asJava)
}
requestChannel.sendResponse(new Response(request, new ResponseSend(request.connectionId, responseHeader, leaderAndIsrResponse)))
} catch {
case e: KafkaStorageException =>
fatal("Disk error during leadership change.", e)
Runtime.getRuntime.halt(1)
}
}
def handleStopReplicaRequest(request: RequestChannel.Request) {
// ensureTopicExists is only for client facing requests
// We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they
// stop serving data to clients for the topic being deleted
val stopReplicaRequest = request.body.asInstanceOf[StopReplicaRequest]
val responseHeader = new ResponseHeader(request.header.correlationId)
val response =
if (authorize(request.session, ClusterAction, Resource.ClusterResource)) {
val (result, error) = replicaManager.stopReplicas(stopReplicaRequest)
new StopReplicaResponse(error, result.asInstanceOf[Map[TopicPartition, JShort]].asJava)
} else {
val result = stopReplicaRequest.partitions.asScala.map((_, new JShort(Errors.CLUSTER_AUTHORIZATION_FAILED.code))).toMap
new StopReplicaResponse(Errors.CLUSTER_AUTHORIZATION_FAILED.code, result.asJava)
}
requestChannel.sendResponse(new RequestChannel.Response(request, new ResponseSend(request.connectionId, responseHeader, response)))
replicaManager.replicaFetcherManager.shutdownIdleFetcherThreads()
}
def handleUpdateMetadataRequest(request: RequestChannel.Request) {
val updateMetadataRequest = request.requestObj.asInstanceOf[UpdateMetadataRequest]
authorizeClusterAction(request)
replicaManager.maybeUpdateMetadataCache(updateMetadataRequest, metadataCache)
val updateMetadataResponse = new UpdateMetadataResponse(updateMetadataRequest.correlationId)
requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, updateMetadataResponse)))
}
def handleControlledShutdownRequest(request: RequestChannel.Request) {
// ensureTopicExists is only for client facing requests
// We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they
// stop serving data to clients for the topic being deleted
val controlledShutdownRequest = request.requestObj.asInstanceOf[ControlledShutdownRequest]
authorizeClusterAction(request)
val partitionsRemaining = controller.shutdownBroker(controlledShutdownRequest.brokerId)
val controlledShutdownResponse = new ControlledShutdownResponse(controlledShutdownRequest.correlationId,
Errors.NONE.code, partitionsRemaining)
requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, controlledShutdownResponse)))
}
/**
* Handle an offset commit request
*/
def handleOffsetCommitRequest(request: RequestChannel.Request) {
val offsetCommitRequest = request.requestObj.asInstanceOf[OffsetCommitRequest]
// reject the request immediately if not authorized to the group
if (!authorize(request.session, Read, new Resource(Group, offsetCommitRequest.groupId))) {
val errors = offsetCommitRequest.requestInfo.mapValues(_ => Errors.GROUP_AUTHORIZATION_FAILED.code)
val response = OffsetCommitResponse(errors, offsetCommitRequest.correlationId)
requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, response)))
return
}
// filter out non-existent topics
val invalidRequestsInfo = offsetCommitRequest.requestInfo.filter { case (topicAndPartition, offsetMetadata) =>
!metadataCache.contains(topicAndPartition.topic)
}
val filteredRequestInfo = (offsetCommitRequest.requestInfo -- invalidRequestsInfo.keys)
val (authorizedRequestInfo, unauthorizedRequestInfo) = filteredRequestInfo.partition {
case (topicAndPartition, offsetMetadata) =>
authorize(request.session, Read, new Resource(Topic, topicAndPartition.topic))
}
// the callback for sending an offset commit response
def sendResponseCallback(commitStatus: immutable.Map[TopicAndPartition, Short]) {
val mergedCommitStatus = commitStatus ++ unauthorizedRequestInfo.mapValues(_ => Errors.TOPIC_AUTHORIZATION_FAILED.code)
mergedCommitStatus.foreach { case (topicAndPartition, errorCode) =>
if (errorCode != Errors.NONE.code) {
debug("Offset commit request with correlation id %d from client %s on partition %s failed due to %s"
.format(offsetCommitRequest.correlationId, offsetCommitRequest.clientId,
topicAndPartition, Errors.forCode(errorCode).exceptionName))
}
}
val combinedCommitStatus = mergedCommitStatus ++ invalidRequestsInfo.map(_._1 -> Errors.UNKNOWN_TOPIC_OR_PARTITION.code)
val response = OffsetCommitResponse(combinedCommitStatus, offsetCommitRequest.correlationId)
requestChannel.sendResponse(new RequestChannel.Response(request, new RequestOrResponseSend(request.connectionId, response)))
}
if (authorizedRequestInfo.isEmpty)
sendResponseCallback(Map.empty)
else if (offsetCommitRequest.versionId == 0) {
// for version 0 always store offsets to ZK
val responseInfo = authorizedRequestInfo.map {
case (topicAndPartition, metaAndError) => {
val topicDirs = new ZKGroupTopicDirs(offsetCommitRequest.groupId, topicAndPartition.topic)
try {
if (metadataCache.getTopicMetadata(Set(topicAndPartition.topic), request.securityProtocol).size <= 0) {
(topicAndPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION.code)
} else if (metaAndError.metadata != null && metaAndError.metadata.length > config.offsetMetadataMaxSize) {
(topicAndPartition, Errors.OFFSET_METADATA_TOO_LARGE.code)
} else {
zkUtils.updatePersistentPath(topicDirs.consumerOffsetDir + "/" +
topicAndPartition.partition, metaAndError.offset.toString)
(topicAndPartition, Errors.NONE.code)
}
} catch {
case e: Throwable => (topicAndPartition, Errors.forException(e).code)
}
}
}
sendResponseCallback(responseInfo)
} else {
// for version 1 and beyond store offsets in offset manager
// compute the retention time based on the request version:
// if it is v1 or not specified by user, we can use the default retention
val offsetRetention =
if (offsetCommitRequest.versionId <= 1 ||
offsetCommitRequest.retentionMs == org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_RETENTION_TIME) {
coordinator.offsetConfig.offsetsRetentionMs
} else {
offsetCommitRequest.retentionMs
}
// commit timestamp is always set to now.
// "default" expiration timestamp is now + retention (and retention may be overridden if v2)
// expire timestamp is computed differently for v1 and v2.
// - If v1 and no explicit commit timestamp is provided we use default expiration timestamp.
// - If v1 and explicit commit timestamp is provided we calculate retention from that explicit commit timestamp
// - If v2 we use the default expiration timestamp
val currentTimestamp = SystemTime.milliseconds
val defaultExpireTimestamp = offsetRetention + currentTimestamp
val offsetData = authorizedRequestInfo.mapValues(offsetAndMetadata =>
offsetAndMetadata.copy(
commitTimestamp = currentTimestamp,
expireTimestamp = {
if (offsetAndMetadata.commitTimestamp == org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_TIMESTAMP)
defaultExpireTimestamp
else
offsetRetention + offsetAndMetadata.commitTimestamp
}
)
)
// call coordinator to handle commit offset
coordinator.handleCommitOffsets(
offsetCommitRequest.groupId,
offsetCommitRequest.memberId,
offsetCommitRequest.groupGenerationId,
offsetData,
sendResponseCallback)
}
}
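/** Returns true if the session may perform the operation on the resource; when no
 * Authorizer is configured, every operation is permitted (getOrElse(true)). */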
private def authorize(session: Session, operation: Operation, resource: Resource): Boolean =
authorizer.map(_.authorize(session, operation, resource)).getOrElse(true)
/**
* Handle a produce request
*/
def handleProducerRequest(request: RequestChannel.Request) {
val produceRequest = request.requestObj.asInstanceOf[ProducerRequest]
val numBytesAppended = produceRequest.sizeInBytes
val (authorizedRequestInfo, unauthorizedRequestInfo) = produceRequest.data.partition {
case (topicAndPartition, _) => authorize(request.session, Write, new Resource(Topic, topicAndPartition.topic))
}
// the callback for sending a produce response
def sendResponseCallback(responseStatus: Map[TopicAndPartition, ProducerResponseStatus]) {
val mergedResponseStatus = responseStatus ++ unauthorizedRequestInfo.mapValues(_ => ProducerResponseStatus(Errors.TOPIC_AUTHORIZATION_FAILED.code, -1))
var errorInResponse = false
mergedResponseStatus.foreach { case (topicAndPartition, status) =>
if (status.error != Errors.NONE.code) {
errorInResponse = true
debug("Produce request with correlation id %d from client %s on partition %s failed due to %s".format(
produceRequest.correlationId,
produceRequest.clientId,
topicAndPartition,
Errors.forCode(status.error).exceptionName))
}
}
def produceResponseCallback(delayTimeMs: Int) {
if (produceRequest.requiredAcks == 0) {
// no operation needed if producer request.required.acks = 0; however, if there is any error in handling
// the request, since no response is expected by the producer, the server will close the socket so that
// the producer client will know that some error has happened and will refresh its metadata
if (errorInResponse) {
val exceptionsSummary = mergedResponseStatus.map { case (topicAndPartition, status) =>
topicAndPartition -> Errors.forCode(status.error).exceptionName
}.mkString(", ")
info(
s"Closing connection due to error during produce request with correlation id ${produceRequest.correlationId} " +
s"from client id ${produceRequest.clientId} with ack=0\n" +
s"Topic and partition to exceptions: $exceptionsSummary"
)
requestChannel.closeConnection(request.processor, request)
} else {
requestChannel.noOperation(request.processor, request)
}
} else {
val response = ProducerResponse(produceRequest.correlationId,
mergedResponseStatus,
produceRequest.versionId,
delayTimeMs)
requestChannel.sendResponse(new RequestChannel.Response(request,
new RequestOrResponseSend(request.connectionId,
response)))
}
}
// When this callback is triggered, the remote API call has completed
request.apiRemoteCompleteTimeMs = SystemTime.milliseconds
quotaManagers(ApiKeys.PRODUCE.id).recordAndMaybeThrottle(produceRequest.clientId,
numBytesAppended,
produceResponseCallback)
}
if (authorizedRequestInfo.isEmpty)
sendResponseCallback(Map.empty)
else {
val internalTopicsAllowed = produceRequest.clientId == AdminUtils.AdminClientId
// call the replica manager to append messages to the replicas
replicaManager.appendMessages(
produceRequest.ackTimeoutMs.toLong,
produceRequest.requiredAcks,
internalTopicsAllowed,
authorizedRequestInfo,
sendResponseCallback)
// if the request is put into the purgatory, it will have a held reference and
// hence cannot be garbage collected; we therefore clear its data here in order
// to let GC reclaim its memory, since it has already been appended to the log
produceRequest.emptyData()
}
}
/**
* Handle a fetch request
*/
def handleFetchRequest(request: RequestChannel.Request) {
val fetchRequest = request.requestObj.asInstanceOf[FetchRequest]
val (authorizedRequestInfo, unauthorizedRequestInfo) = fetchRequest.requestInfo.partition {
case (topicAndPartition, _) => authorize(request.session, Read, new Resource(Topic, topicAndPartition.topic))
}
val unauthorizedResponseStatus = unauthorizedRequestInfo.mapValues(_ => FetchResponsePartitionData(Errors.TOPIC_AUTHORIZATION_FAILED.code, -1, MessageSet.Empty))
// the callback for sending a fetch response
def sendResponseCallback(responsePartitionData: Map[TopicAndPartition, FetchResponsePartitionData]) {
val mergedResponseStatus = responsePartitionData ++ unauthorizedResponseStatus
mergedResponseStatus.foreach { case (topicAndPartition, data) =>
if (data.error != Errors.NONE.code) {
debug("Fetch request with correlation id %d from client %s on partition %s failed due to %s"
.format(fetchRequest.correlationId, fetchRequest.clientId,
topicAndPartition, Errors.forCode(data.error).exceptionName))
}
// record the bytes out metrics only when the response is being sent
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).bytesOutRate.mark(data.messages.sizeInBytes)
BrokerTopicStats.getBrokerAllTopicsStats().bytesOutRate.mark(data.messages.sizeInBytes)
}
def fetchResponseCallback(delayTimeMs: Int) {
val response = FetchResponse(fetchRequest.correlationId, mergedResponseStatus, fetchRequest.versionId, delayTimeMs)
requestChannel.sendResponse(new RequestChannel.Response(request, new FetchResponseSend(request.connectionId, response)))
}
// When this callback is triggered, the remote API call has completed
request.apiRemoteCompleteTimeMs = SystemTime.milliseconds
// Do not throttle replication traffic
if (fetchRequest.isFromFollower) {
fetchResponseCallback(0)
} else {
quotaManagers(ApiKeys.FETCH.id).recordAndMaybeThrottle(fetchRequest.clientId,
FetchResponse.responseSize(responsePartitionData
.groupBy(_._1.topic),
fetchRequest.versionId),
fetchResponseCallback)
}
}
if (authorizedRequestInfo.isEmpty)
sendResponseCallback(Map.empty)
else {
// call the replica manager to fetch messages from the local replica
replicaManager.fetchMessages(
fetchRequest.maxWait.toLong,
fetchRequest.replicaId,
fetchRequest.minBytes,
authorizedRequestInfo,
sendResponseCallback)
}
}
/**
* Handle an offset request
*/
def handleOffsetRequest(request: RequestChannel.Request) {
val correlationId = request.header.correlationId
val clientId = request.header.clientId
val offsetRequest = request.body.asInstanceOf[ListOffsetRequest]
val (authorizedRequestInfo, unauthorizedRequestInfo) = offsetRequest.offsetData.asScala.partition {
case (topicPartition, _) => authorize(request.session, Describe, new Resource(Topic, topicPartition.topic))
}
val unauthorizedResponseStatus = unauthorizedRequestInfo.mapValues(_ =>
new ListOffsetResponse.PartitionData(Errors.TOPIC_AUTHORIZATION_FAILED.code, List[JLong]().asJava)
)
val responseMap = authorizedRequestInfo.map(elem => {
val (topicPartition, partitionData) = elem
try {
// ensure leader exists
val localReplica = if (offsetRequest.replicaId != ListOffsetRequest.DEBUGGING_REPLICA_ID)
replicaManager.getLeaderReplicaIfLocal(topicPartition.topic, topicPartition.partition)
else
replicaManager.getReplicaOrException(topicPartition.topic, topicPartition.partition)
val offsets = {
val allOffsets = fetchOffsets(replicaManager.logManager,
topicPartition,
partitionData.timestamp,
partitionData.maxNumOffsets)
if (offsetRequest.replicaId != ListOffsetRequest.CONSUMER_REPLICA_ID) {
allOffsets
} else {
val hw = localReplica.highWatermark.messageOffset
if (allOffsets.exists(_ > hw))
hw +: allOffsets.dropWhile(_ > hw)
else
allOffsets
}
}
(topicPartition, new ListOffsetResponse.PartitionData(Errors.NONE.code, offsets.map(new JLong(_)).asJava))
} catch {
// NOTE: UnknownTopicOrPartitionException and NotLeaderForPartitionException are special cased since these error messages
// are typically transient and there is no value in logging the entire stack trace for the same
case utpe: UnknownTopicOrPartitionException =>
debug("Offset request with correlation id %d from client %s on partition %s failed due to %s".format(
correlationId, clientId, topicPartition, utpe.getMessage))
(topicPartition, new ListOffsetResponse.PartitionData(Errors.forException(utpe).code, List[JLong]().asJava))
case nle: NotLeaderForPartitionException =>
debug("Offset request with correlation id %d from client %s on partition %s failed due to %s".format(
          correlationId, clientId, topicPartition, nle.getMessage))
(topicPartition, new ListOffsetResponse.PartitionData(Errors.forException(nle).code, List[JLong]().asJava))
case e: Throwable =>
error("Error while responding to offset request", e)
(topicPartition, new ListOffsetResponse.PartitionData(Errors.forException(e).code, List[JLong]().asJava))
}
})
val mergedResponseMap = responseMap ++ unauthorizedResponseStatus
val responseHeader = new ResponseHeader(correlationId)
val response = new ListOffsetResponse(mergedResponseMap.asJava)
requestChannel.sendResponse(new RequestChannel.Response(request, new ResponseSend(request.connectionId, responseHeader, response)))
}
def fetchOffsets(logManager: LogManager, topicPartition: TopicPartition, timestamp: Long, maxNumOffsets: Int): Seq[Long] = {
logManager.getLog(TopicAndPartition(topicPartition.topic, topicPartition.partition)) match {
case Some(log) =>
fetchOffsetsBefore(log, timestamp, maxNumOffsets)
case None =>
if (timestamp == ListOffsetRequest.LATEST_TIMESTAMP || timestamp == ListOffsetRequest.EARLIEST_TIMESTAMP)
Seq(0L)
else
Nil
}
}
private def fetchOffsetsBefore(log: Log, timestamp: Long, maxNumOffsets: Int): Seq[Long] = {
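    // Build a (baseOffset, lastModifiedTime) pair for every log segment (plus the log
    // end offset when the active segment is non-empty), then scan backwards for the
    // newest entry whose timestamp is at or before the requested timestamp.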
val segsArray = log.logSegments.toArray
    val offsetTimeArray: Array[(Long, Long)] =
      if (segsArray.last.size > 0)
        new Array[(Long, Long)](segsArray.length + 1)
      else
        new Array[(Long, Long)](segsArray.length)
for(i <- 0 until segsArray.length)
offsetTimeArray(i) = (segsArray(i).baseOffset, segsArray(i).lastModified)
if (segsArray.last.size > 0)
offsetTimeArray(segsArray.length) = (log.logEndOffset, SystemTime.milliseconds)
var startIndex = -1
timestamp match {
case ListOffsetRequest.LATEST_TIMESTAMP =>
startIndex = offsetTimeArray.length - 1
case ListOffsetRequest.EARLIEST_TIMESTAMP =>
startIndex = 0
case _ =>
var isFound = false
debug("Offset time array = " + offsetTimeArray.foreach(o => "%d, %d".format(o._1, o._2)))
startIndex = offsetTimeArray.length - 1
while (startIndex >= 0 && !isFound) {
if (offsetTimeArray(startIndex)._2 <= timestamp)
isFound = true
else
          startIndex -= 1
}
}
val retSize = maxNumOffsets.min(startIndex + 1)
val ret = new Array[Long](retSize)
for(j <- 0 until retSize) {
ret(j) = offsetTimeArray(startIndex)._1
startIndex -= 1
}
// ensure that the returned seq is in descending order of offsets
ret.toSeq.sortBy(- _)
}
private def getTopicMetadata(topics: Set[String], securityProtocol: SecurityProtocol): Seq[TopicMetadata] = {
val topicResponses = metadataCache.getTopicMetadata(topics, securityProtocol)
if (topics.size > 0 && topicResponses.size != topics.size) {
val nonExistentTopics = topics -- topicResponses.map(_.topic).toSet
val responsesForNonExistentTopics = nonExistentTopics.map { topic =>
if (topic == GroupCoordinator.GroupMetadataTopicName || config.autoCreateTopicsEnable) {
try {
if (topic == GroupCoordinator.GroupMetadataTopicName) {
val aliveBrokers = metadataCache.getAliveBrokers
val offsetsTopicReplicationFactor =
if (aliveBrokers.length > 0)
Math.min(config.offsetsTopicReplicationFactor.toInt, aliveBrokers.length)
else
config.offsetsTopicReplicationFactor.toInt
AdminUtils.createTopic(zkUtils, topic, config.offsetsTopicPartitions,
offsetsTopicReplicationFactor,
coordinator.offsetsTopicConfigs)
info("Auto creation of topic %s with %d partitions and replication factor %d is successful!"
.format(topic, config.offsetsTopicPartitions, offsetsTopicReplicationFactor))
}
else {
AdminUtils.createTopic(zkUtils, topic, config.numPartitions, config.defaultReplicationFactor)
info("Auto creation of topic %s with %d partitions and replication factor %d is successful!"
.format(topic, config.numPartitions, config.defaultReplicationFactor))
}
new TopicMetadata(topic, Seq.empty[PartitionMetadata], Errors.LEADER_NOT_AVAILABLE.code)
} catch {
case e: TopicExistsException => // let it go, possibly another broker created this topic
new TopicMetadata(topic, Seq.empty[PartitionMetadata], Errors.LEADER_NOT_AVAILABLE.code)
case itex: InvalidTopicException =>
new TopicMetadata(topic, Seq.empty[PartitionMetadata], Errors.INVALID_TOPIC_EXCEPTION.code)
}
} else {
new TopicMetadata(topic, Seq.empty[PartitionMetadata], Errors.UNKNOWN_TOPIC_OR_PARTITION.code)
}
}
topicResponses.appendAll(responsesForNonExistentTopics)
}
topicResponses
}
/**
* Handle a topic metadata request
*/
def handleTopicMetadataRequest(request: RequestChannel.Request) {
val metadataRequest = request.requestObj.asInstanceOf[TopicMetadataRequest]
    // if topics is empty, fetch metadata for all topics but filter out the topic responses that are not authorized
val topics = if (metadataRequest.topics.isEmpty) {
val topicResponses = metadataCache.getTopicMetadata(metadataRequest.topics.toSet, request.securityProtocol)
topicResponses.map(_.topic).filter(topic => authorize(request.session, Describe, new Resource(Topic, topic))).toSet
} else {
metadataRequest.topics.toSet
}
    // when topics is empty this is a duplicate authorization check, but since it is just a cache lookup it should not matter
var (authorizedTopics, unauthorizedTopics) = topics.partition(topic => authorize(request.session, Describe, new Resource(Topic, topic)))
if (!authorizedTopics.isEmpty) {
val topicResponses = metadataCache.getTopicMetadata(authorizedTopics, request.securityProtocol)
if (config.autoCreateTopicsEnable && topicResponses.size != authorizedTopics.size) {
val nonExistentTopics: Set[String] = topics -- topicResponses.map(_.topic).toSet
authorizer.foreach {
az => if (!az.authorize(request.session, Create, Resource.ClusterResource)) {
authorizedTopics --= nonExistentTopics
unauthorizedTopics ++= nonExistentTopics
}
}
}
}
val unauthorizedTopicMetaData = unauthorizedTopics.map(topic => new TopicMetadata(topic, Seq.empty[PartitionMetadata], Errors.TOPIC_AUTHORIZATION_FAILED.code))
val topicMetadata = if (authorizedTopics.isEmpty) Seq.empty[TopicMetadata] else getTopicMetadata(authorizedTopics, request.securityProtocol)
val brokers = metadataCache.getAliveBrokers
trace("Sending topic metadata %s and brokers %s for correlation id %d to client %s".format(topicMetadata.mkString(","), brokers.mkString(","), metadataRequest.correlationId, metadataRequest.clientId))
val response = new TopicMetadataResponse(brokers.map(_.getBrokerEndPoint(request.securityProtocol)), topicMetadata ++ unauthorizedTopicMetaData, metadataRequest.correlationId)
requestChannel.sendResponse(new RequestChannel.Response(request, new RequestOrResponseSend(request.connectionId, response)))
}
/*
* Handle an offset fetch request
*/
def handleOffsetFetchRequest(request: RequestChannel.Request) {
val offsetFetchRequest = request.requestObj.asInstanceOf[OffsetFetchRequest]
// reject the request immediately if not authorized to the group
if (!authorize(request.session, Read, new Resource(Group, offsetFetchRequest.groupId))) {
val authorizationError = OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata, Errors.GROUP_AUTHORIZATION_FAILED.code)
val response = OffsetFetchResponse(offsetFetchRequest.requestInfo.map{ _ -> authorizationError}.toMap)
requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, response)))
return
}
val (authorizedTopicPartitions, unauthorizedTopicPartitions) = offsetFetchRequest.requestInfo.partition { topicAndPartition =>
authorize(request.session, Describe, new Resource(Topic, topicAndPartition.topic))
}
val authorizationError = OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata, Errors.TOPIC_AUTHORIZATION_FAILED.code)
val unauthorizedStatus = unauthorizedTopicPartitions.map(topicAndPartition => (topicAndPartition, authorizationError)).toMap
val response = if (offsetFetchRequest.versionId == 0) {
// version 0 reads offsets from ZK
val responseInfo = authorizedTopicPartitions.map( topicAndPartition => {
val topicDirs = new ZKGroupTopicDirs(offsetFetchRequest.groupId, topicAndPartition.topic)
try {
if (metadataCache.getTopicMetadata(Set(topicAndPartition.topic), request.securityProtocol).size <= 0) {
(topicAndPartition, OffsetMetadataAndError.UnknownTopicOrPartition)
} else {
val payloadOpt = zkUtils.readDataMaybeNull(topicDirs.consumerOffsetDir + "/" + topicAndPartition.partition)._1
payloadOpt match {
case Some(payload) => (topicAndPartition, OffsetMetadataAndError(payload.toLong))
case None => (topicAndPartition, OffsetMetadataAndError.UnknownTopicOrPartition)
}
}
} catch {
case e: Throwable =>
(topicAndPartition, OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata,
Errors.forException(e).code))
}
})
OffsetFetchResponse(collection.immutable.Map(responseInfo: _*) ++ unauthorizedStatus, offsetFetchRequest.correlationId)
} else {
// version 1 reads offsets from Kafka;
val offsets = coordinator.handleFetchOffsets(offsetFetchRequest.groupId, authorizedTopicPartitions).toMap
// Note that we do not need to filter the partitions in the
// metadata cache as the topic partitions will be filtered
// in coordinator's offset manager through the offset cache
OffsetFetchResponse(offsets ++ unauthorizedStatus, offsetFetchRequest.correlationId)
}
trace("Sending offset fetch response %s for correlation id %d to client %s."
.format(response, offsetFetchRequest.correlationId, offsetFetchRequest.clientId))
requestChannel.sendResponse(new RequestChannel.Response(request, new RequestOrResponseSend(request.connectionId, response)))
}
def handleGroupCoordinatorRequest(request: RequestChannel.Request) {
val groupCoordinatorRequest = request.body.asInstanceOf[GroupCoordinatorRequest]
val responseHeader = new ResponseHeader(request.header.correlationId)
if (!authorize(request.session, Describe, new Resource(Group, groupCoordinatorRequest.groupId))) {
val responseBody = new GroupCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED.code, Node.noNode)
requestChannel.sendResponse(new RequestChannel.Response(request, new ResponseSend(request.connectionId, responseHeader, responseBody)))
} else {
val partition = coordinator.partitionFor(groupCoordinatorRequest.groupId)
// get metadata (and create the topic if necessary)
val offsetsTopicMetadata = getTopicMetadata(Set(GroupCoordinator.GroupMetadataTopicName), request.securityProtocol).head
val coordinatorEndpoint = offsetsTopicMetadata.partitionsMetadata.find(_.partitionId == partition).flatMap {
partitionMetadata => partitionMetadata.leader
}
val responseBody = coordinatorEndpoint match {
case None =>
new GroupCoordinatorResponse(Errors.GROUP_COORDINATOR_NOT_AVAILABLE.code, Node.noNode())
case Some(endpoint) =>
new GroupCoordinatorResponse(Errors.NONE.code, new Node(endpoint.id, endpoint.host, endpoint.port))
}
trace("Sending consumer metadata %s for correlation id %d to client %s."
.format(responseBody, request.header.correlationId, request.header.clientId))
requestChannel.sendResponse(new RequestChannel.Response(request, new ResponseSend(request.connectionId, responseHeader, responseBody)))
}
}
def handleDescribeGroupRequest(request: RequestChannel.Request) {
import JavaConverters._
val describeRequest = request.body.asInstanceOf[DescribeGroupsRequest]
val responseHeader = new ResponseHeader(request.header.correlationId)
val groups = describeRequest.groupIds().asScala.map {
case groupId =>
if (!authorize(request.session, Describe, new Resource(Group, groupId))) {
groupId -> DescribeGroupsResponse.GroupMetadata.forError(Errors.GROUP_AUTHORIZATION_FAILED)
} else {
val (error, summary) = coordinator.handleDescribeGroup(groupId)
val members = summary.members.map { member =>
val metadata = ByteBuffer.wrap(member.metadata)
val assignment = ByteBuffer.wrap(member.assignment)
new DescribeGroupsResponse.GroupMember(member.memberId, member.clientId, member.clientHost, metadata, assignment)
}
groupId -> new DescribeGroupsResponse.GroupMetadata(error.code, summary.state, summary.protocolType,
summary.protocol, members.asJava)
}
}.toMap
val responseBody = new DescribeGroupsResponse(groups.asJava)
requestChannel.sendResponse(new RequestChannel.Response(request, new ResponseSend(request.connectionId, responseHeader, responseBody)))
}
def handleListGroupsRequest(request: RequestChannel.Request) {
import JavaConverters._
val responseHeader = new ResponseHeader(request.header.correlationId)
val responseBody = if (!authorize(request.session, Describe, Resource.ClusterResource)) {
ListGroupsResponse.fromError(Errors.CLUSTER_AUTHORIZATION_FAILED)
} else {
val (error, groups) = coordinator.handleListGroups()
val allGroups = groups.map{ group => new ListGroupsResponse.Group(group.groupId, group.protocolType) }
new ListGroupsResponse(error.code, allGroups.asJava)
}
requestChannel.sendResponse(new RequestChannel.Response(request, new ResponseSend(request.connectionId, responseHeader, responseBody)))
}
def handleJoinGroupRequest(request: RequestChannel.Request) {
import JavaConversions._
val joinGroupRequest = request.body.asInstanceOf[JoinGroupRequest]
val responseHeader = new ResponseHeader(request.header.correlationId)
// the callback for sending a join-group response
def sendResponseCallback(joinResult: JoinGroupResult) {
val members = joinResult.members map { case (memberId, metadataArray) => (memberId, ByteBuffer.wrap(metadataArray)) }
val responseBody = new JoinGroupResponse(joinResult.errorCode, joinResult.generationId, joinResult.subProtocol,
joinResult.memberId, joinResult.leaderId, members)
trace("Sending join group response %s for correlation id %d to client %s."
.format(responseBody, request.header.correlationId, request.header.clientId))
requestChannel.sendResponse(new RequestChannel.Response(request, new ResponseSend(request.connectionId, responseHeader, responseBody)))
}
if (!authorize(request.session, Read, new Resource(Group, joinGroupRequest.groupId()))) {
val responseBody = new JoinGroupResponse(
Errors.GROUP_AUTHORIZATION_FAILED.code,
JoinGroupResponse.UNKNOWN_GENERATION_ID,
JoinGroupResponse.UNKNOWN_PROTOCOL,
JoinGroupResponse.UNKNOWN_MEMBER_ID, // memberId
JoinGroupResponse.UNKNOWN_MEMBER_ID, // leaderId
Map.empty[String, ByteBuffer])
requestChannel.sendResponse(new RequestChannel.Response(request, new ResponseSend(request.connectionId, responseHeader, responseBody)))
} else {
// let the coordinator to handle join-group
val protocols = joinGroupRequest.groupProtocols().map(protocol =>
(protocol.name, Utils.toArray(protocol.metadata))).toList
coordinator.handleJoinGroup(
joinGroupRequest.groupId,
joinGroupRequest.memberId,
request.header.clientId,
request.session.clientAddress.toString,
joinGroupRequest.sessionTimeout,
joinGroupRequest.protocolType,
protocols,
sendResponseCallback)
}
}
def handleSyncGroupRequest(request: RequestChannel.Request) {
import JavaConversions._
val syncGroupRequest = request.body.asInstanceOf[SyncGroupRequest]
def sendResponseCallback(memberState: Array[Byte], errorCode: Short) {
val responseBody = new SyncGroupResponse(errorCode, ByteBuffer.wrap(memberState))
val responseHeader = new ResponseHeader(request.header.correlationId)
requestChannel.sendResponse(new Response(request, new ResponseSend(request.connectionId, responseHeader, responseBody)))
}
if (!authorize(request.session, Read, new Resource(Group, syncGroupRequest.groupId()))) {
sendResponseCallback(Array[Byte](), Errors.GROUP_AUTHORIZATION_FAILED.code)
} else {
coordinator.handleSyncGroup(
syncGroupRequest.groupId(),
syncGroupRequest.generationId(),
syncGroupRequest.memberId(),
syncGroupRequest.groupAssignment().mapValues(Utils.toArray(_)),
sendResponseCallback
)
}
}
def handleHeartbeatRequest(request: RequestChannel.Request) {
val heartbeatRequest = request.body.asInstanceOf[HeartbeatRequest]
val respHeader = new ResponseHeader(request.header.correlationId)
// the callback for sending a heartbeat response
def sendResponseCallback(errorCode: Short) {
val response = new HeartbeatResponse(errorCode)
trace("Sending heartbeat response %s for correlation id %d to client %s."
.format(response, request.header.correlationId, request.header.clientId))
requestChannel.sendResponse(new RequestChannel.Response(request, new ResponseSend(request.connectionId, respHeader, response)))
}
if (!authorize(request.session, Read, new Resource(Group, heartbeatRequest.groupId))) {
val heartbeatResponse = new HeartbeatResponse(Errors.GROUP_AUTHORIZATION_FAILED.code)
requestChannel.sendResponse(new Response(request, new ResponseSend(request.connectionId, respHeader, heartbeatResponse)))
}
else {
// let the coordinator to handle heartbeat
coordinator.handleHeartbeat(
heartbeatRequest.groupId(),
heartbeatRequest.memberId(),
heartbeatRequest.groupGenerationId(),
sendResponseCallback)
}
}
/*
* Returns a Map of all quota managers configured. The request Api key is the key for the Map
*/
private def instantiateQuotaManagers(cfg: KafkaConfig): Map[Short, ClientQuotaManager] = {
val producerQuotaManagerCfg = ClientQuotaManagerConfig(
quotaBytesPerSecondDefault = cfg.producerQuotaBytesPerSecondDefault,
numQuotaSamples = cfg.numQuotaSamples,
quotaWindowSizeSeconds = cfg.quotaWindowSizeSeconds
)
val consumerQuotaManagerCfg = ClientQuotaManagerConfig(
quotaBytesPerSecondDefault = cfg.consumerQuotaBytesPerSecondDefault,
numQuotaSamples = cfg.numQuotaSamples,
quotaWindowSizeSeconds = cfg.quotaWindowSizeSeconds
)
val quotaManagers = Map[Short, ClientQuotaManager](
ApiKeys.PRODUCE.id ->
new ClientQuotaManager(producerQuotaManagerCfg, metrics, ApiKeys.PRODUCE.name, new org.apache.kafka.common.utils.SystemTime),
ApiKeys.FETCH.id ->
new ClientQuotaManager(consumerQuotaManagerCfg, metrics, ApiKeys.FETCH.name, new org.apache.kafka.common.utils.SystemTime)
)
quotaManagers
}
def handleLeaveGroupRequest(request: RequestChannel.Request) {
val leaveGroupRequest = request.body.asInstanceOf[LeaveGroupRequest]
val respHeader = new ResponseHeader(request.header.correlationId)
// the callback for sending a leave-group response
def sendResponseCallback(errorCode: Short) {
val response = new LeaveGroupResponse(errorCode)
trace("Sending leave group response %s for correlation id %d to client %s."
.format(response, request.header.correlationId, request.header.clientId))
requestChannel.sendResponse(new RequestChannel.Response(request, new ResponseSend(request.connectionId, respHeader, response)))
}
if (!authorize(request.session, Read, new Resource(Group, leaveGroupRequest.groupId))) {
val leaveGroupResponse = new LeaveGroupResponse(Errors.GROUP_AUTHORIZATION_FAILED.code)
requestChannel.sendResponse(new Response(request, new ResponseSend(request.connectionId, respHeader, leaveGroupResponse)))
} else {
// let the coordinator to handle leave-group
coordinator.handleLeaveGroup(
leaveGroupRequest.groupId(),
leaveGroupRequest.memberId(),
sendResponseCallback)
}
}
def close() {
quotaManagers.foreach { case (apiKey, quotaManager) =>
quotaManager.shutdown()
}
info("Shutdown complete.")
}
def authorizeClusterAction(request: RequestChannel.Request): Unit = {
if (!authorize(request.session, ClusterAction, Resource.ClusterResource))
throw new ClusterAuthorizationException(s"Request $request is not authorized.")
}
}
|
Mszak/kafka
|
core/src/main/scala/kafka/server/KafkaApis.scala
|
Scala
|
apache-2.0
| 47,260
|
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.request.builder.ws
import io.gatling.core.CoreComponents
import io.gatling.core.session.Session
import io.gatling.http.protocol.HttpComponents
import io.gatling.http.request.builder.{ CommonAttributes, RequestExpressionBuilder }
class WsRequestExpressionBuilder(commonAttributes: CommonAttributes, coreComponents: CoreComponents, httpComponents: HttpComponents)
extends RequestExpressionBuilder(commonAttributes, coreComponents, httpComponents) {
override protected def baseUrl: Session => Option[String] = httpCaches.wsBaseUrl
}
|
wiacekm/gatling
|
gatling-http/src/main/scala/io/gatling/http/request/builder/ws/WsRequestExpressionBuilder.scala
|
Scala
|
apache-2.0
| 1,179
|
package io.github.shogowada.scala.jsonrpc.server
import io.github.shogowada.scala.jsonrpc.Models._
import io.github.shogowada.scala.jsonrpc.serializers.UpickleJSONSerializer
import io.github.shogowada.scala.jsonrpc.{BaseSpec, Constants, api}
import org.scalatest.Assertion
import scala.collection.mutable.ListBuffer
import scala.concurrent.{ExecutionContext, Future}
trait FakeAPI {
def foo(bar: String, baz: Int): Future[String]
@api.JSONRPCMethod(name = "bar")
def bar: Future[String]
def notify(message: String): Unit
}
class FakeAPIImpl extends FakeAPI {
import scala.concurrent.ExecutionContext.Implicits.global
val notifiedMessages = ListBuffer.empty[String]
override def foo(bar: String, baz: Int): Future[String] = {
Future(s"$bar$baz")
}
override def bar = Future("bar")
override def notify(message: String): Unit = {
notifiedMessages += message
}
}
class JSONRPCServerTest extends BaseSpec {
override def newInstance = new JSONRPCServerTest
override implicit def executionContext = ExecutionContext.Implicits.global
"given I have an API bound" - {
val api = new FakeAPIImpl
val jsonSerializer = UpickleJSONSerializer()
val target = JSONRPCServer(jsonSerializer)
target.bindAPI[FakeAPI](api)
def responseShouldEqual[T]
(
futureMaybeJSON: Future[Option[String]],
deserializer: (String) => Option[T],
expected: T
): Future[Assertion] = {
futureMaybeJSON
.map((maybeJSON: Option[String]) => {
maybeJSON.flatMap(json => deserializer(json))
})
.map((maybeActual: Option[T]) => maybeActual should equal(Some(expected)))
}
def responseShouldEqualError
(
futureMaybeJSON: Future[Option[String]],
expected: JSONRPCErrorResponse[String]
): Future[Assertion] = {
responseShouldEqual(
futureMaybeJSON,
(json) => jsonSerializer.deserialize[JSONRPCErrorResponse[String]](json),
expected
)
}
"when I received request for API method" - {
val requestId = Left("request ID")
val request: JSONRPCRequest[(String, Int)] = JSONRPCRequest(
jsonrpc = Constants.JSONRPC,
id = requestId,
method = classOf[FakeAPI].getName + ".foo",
params = ("bar", 1)
)
val requestJSON: String = jsonSerializer.serialize(request).get
val futureMaybeResponseJSON: Future[Option[String]] = target.receive(requestJSON)
"then it should return the response" in {
responseShouldEqual(
futureMaybeResponseJSON,
(json) => jsonSerializer.deserialize[JSONRPCResultResponse[String]](json),
JSONRPCResultResponse(
jsonrpc = Constants.JSONRPC,
id = requestId,
result = "bar1"
)
)
}
}
"when I received request for user-named API method" - {
val id = Left("id")
val request = JSONRPCRequest[Unit](
jsonrpc = Constants.JSONRPC,
id = id,
method = "bar",
params = ()
)
val requestJSON = jsonSerializer.serialize(request).get
val futureMaybeResponseJSON = target.receive(requestJSON)
"then it should return the response" in {
responseShouldEqual(
futureMaybeResponseJSON,
(json) => jsonSerializer.deserialize[JSONRPCResultResponse[String]](json),
JSONRPCResultResponse(
jsonrpc = Constants.JSONRPC,
id = id,
result = "bar"
)
)
}
}
"when I received notification method" - {
val message = "Hello, World!"
val notification = JSONRPCNotification[Tuple1[String]](
jsonrpc = Constants.JSONRPC,
method = classOf[FakeAPI].getName + ".notify",
params = Tuple1(message)
)
val notificationJSON = jsonSerializer.serialize(notification).get
val futureMaybeResponseJSON = target.receive(notificationJSON)
"then it should notify the server" in {
futureMaybeResponseJSON
.map(maybeResponse => api.notifiedMessages should equal(List(message)))
}
"then it should not return the response" in {
futureMaybeResponseJSON
.map(maybeResponse => maybeResponse should equal(None))
}
}
"when I receive request with unknown method" - {
val id = Left("id")
val request = JSONRPCRequest[(String, String)](
jsonrpc = Constants.JSONRPC,
id = id,
method = "unknown",
params = ("foo", "bar")
)
val requestJSON = jsonSerializer.serialize(request).get
val futureMaybeResponseJSON = target.receive(requestJSON)
"then it should respond method not found error" in {
responseShouldEqualError(
futureMaybeResponseJSON,
JSONRPCErrorResponse(
jsonrpc = Constants.JSONRPC,
id = id,
error = JSONRPCErrors.methodNotFound
)
)
}
}
"when I receive JSON without method name" - {
val id = Left("id")
val requestJSON = """{"jsonrpc":"2.0","id":"id"}"""
val futureMaybeResponseJSON = target.receive(requestJSON)
"then it should respond JSON parse error" in {
responseShouldEqualError(
futureMaybeResponseJSON,
JSONRPCErrorResponse(
jsonrpc = Constants.JSONRPC,
id = id,
error = JSONRPCErrors.parseError
)
)
}
}
"when I receive request with mismatching JSON-RPC version" - {
val id = Left("id")
val requestJSON = jsonSerializer.serialize(
JSONRPCRequest(
jsonrpc = "1.0",
id = id,
method = "foo",
params = ("bar", "baz")
)
).get
val futureMaybeResponseJSON = target.receive(requestJSON)
"then it should respond invalid request" in {
responseShouldEqualError(
futureMaybeResponseJSON,
JSONRPCErrorResponse(
jsonrpc = Constants.JSONRPC,
id = id,
error = JSONRPCErrors.invalidRequest
)
)
}
}
"when I receive request with invalid params" - {
val id = Left("id")
val request: JSONRPCRequest[Tuple1[String]] = JSONRPCRequest(
jsonrpc = Constants.JSONRPC,
id = id,
method = classOf[FakeAPI].getName + ".foo",
params = Tuple1("bar")
)
val requestJSON = jsonSerializer.serialize(request).get
val futureMaybeResponseJSON = target.receive(requestJSON)
"then it should respond invalid params" in {
responseShouldEqualError(
futureMaybeResponseJSON,
JSONRPCErrorResponse(
jsonrpc = Constants.JSONRPC,
id = id,
error = JSONRPCErrors.invalidParams
)
)
}
}
}
}
|
shogowada/scala-json-rpc
|
shared/src/test/scala/io/github/shogowada/scala/jsonrpc/server/JSONRPCServerTest.scala
|
Scala
|
mit
| 6,892
|
/*
* Copyright 2013 James Shade
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.shade.time
trait Clock {
def now: Instant
}
object SystemClock extends Clock {
override def now = Instant(System.currentTimeMillis)
}
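// Illustrative sketch (not part of the original source): since `now` is Clock's only
// member, tests can substitute a deterministic implementation, e.g.
//   object FixedClock extends Clock { override def now = Instant(0L) }
// assuming Instant accepts a single epoch-millisecond Long, as SystemClock uses above.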
|
jamesshade/time
|
src/main/scala/org/shade/time/Clock.scala
|
Scala
|
apache-2.0
| 757
|
package scala.c.engine
import org.eclipse.cdt.core.dom.ast.{IASTCaseStatement, IASTDeclarationStatement, IASTEqualsInitializer, _}
import scala.collection.mutable.ListBuffer
import java.nio.ByteBuffer
import java.nio.ByteOrder
import org.eclipse.cdt.core.dom.ast.IASTBinaryExpression.op_assign
import org.eclipse.cdt.internal.core.dom.parser.c._
import scala.c.engine.ast.{Declarator, Expressions}
object Interpreter {
implicit class CounterSC(val sc: StringContext) extends AnyVal {
// Define functions that we want to use with string interpolation syntax
def c(args: Any*)(implicit state: State): Unit = {
Gcc.runCode(sc.parts.iterator.next, state, Iterator())
}
def func(args: Any*)(implicit state: State): Unit = {
Gcc.runGlobalCode(sc.parts.iterator.next, state, List())
}
}
}
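// Illustrative usage sketch (assumption, not part of the original source): with an
// implicit State in scope, the interpolators above allow C snippets to be executed
// inline, e.g.
//   import Interpreter._
//   implicit val state: State = new State(ThirtyTwoBits)
//   c"""printf("hello\n");"""                          // run through Gcc.runCode
//   func"""int add(int a, int b) { return a + b; }"""  // run through Gcc.runGlobalCode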
class Memory(size: Int) {
import org.eclipse.cdt.core.dom.ast.IBasicType.Kind._
var insertIndex = 0
// turing tape
val tape = ByteBuffer.allocate(size)
tape.order(ByteOrder.LITTLE_ENDIAN)
def clearMemory(startingAddress: Int, numBytes: Int) = {
var address = startingAddress
for (i <- 0 until numBytes) {
tape.put(address, 0.toByte)
address += 1
}
}
// use Address type to prevent messing up argument order
def writeToMemory(newVal: AnyVal, address: Int, theType: IType, bitOffset: Int = 0, sizeInBits: Int = 0): Unit = {
TypeHelper.stripSyntheticTypeInfo(theType) match {
case basic: IBasicType if basic.getKind == eInt && basic.isShort =>
newVal match {
case int: Int => tape.putShort(address, int.asInstanceOf[Short])
case short: Short => tape.putShort(address, short)
}
case basic: IBasicType if basic.getKind == eInt && basic.isLongLong =>
newVal match {
case long: Long => tape.putLong(address, long)
}
case basic: IBasicType if basic.getKind == eInt && basic.isLong =>
newVal match {
case int: Int => tape.putInt(address, int)
case long: Long => tape.putInt(address, long.toInt)
}
case basic: IBasicType if basic.getKind == eInt || basic.getKind == eVoid =>
newVal match {
case int: Int =>
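            // Bit-field write: keep the bits below the field and above it from the
            // current 32-bit word, then splice the new value in at bitOffset.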
val x = if (bitOffset != 0) {
val currentVal = tape.getInt(address)
val right = currentVal << (32 - bitOffset) >>> (32 - bitOffset)
val left = currentVal >>> (sizeInBits + bitOffset) << (sizeInBits + bitOffset)
val newVal = int << bitOffset
left + newVal + right
} else {
int
}
tape.putInt(address, x)
case long: Long => tape.putInt(address, long.toInt)
}
case basic: IBasicType if basic.getKind == eDouble =>
newVal match {
case double: Double => tape.putDouble(address, double)
}
case basic: IBasicType if basic.getKind == eFloat =>
newVal match {
case float: Float => tape.putFloat(address, float)
}
case basic: IBasicType if basic.getKind == eChar =>
newVal match {
          case char: Char => tape.put(address, char.toByte)
case int: Int => tape.put(address, int.toByte)
}
case basic: IBasicType if basic.getKind == eBoolean =>
tape.putInt(address, newVal.asInstanceOf[Int])
case _: IFunctionType =>
writePointerToMemory(newVal, address)
case _: CStructure =>
writePointerToMemory(newVal, address)
case _: IPointerType =>
writePointerToMemory(newVal, address)
case _: IArrayType =>
writePointerToMemory(newVal, address)
}
}
private def writePointerToMemory(newVal: AnyVal, address: Int) = {
newVal match {
case int: Int => tape.putInt(address, int)
case long: Long => tape.putInt(address, long.toInt)
}
}
def readFromMemory(address: Int, theType: IType, bitOffset: Int = 0, sizeInBits: Int = 0): RValue = {
val result: AnyVal = theType match {
case basic: IBasicType =>
if (basic.getKind == eInt && basic.isShort) {
var result = tape.getShort(address)
if (sizeInBits != 0) {
result = (result << (16 - sizeInBits - bitOffset) >>> (16 - sizeInBits)).toShort
}
result
} else if (basic.getKind == eInt && basic.isLongLong) {
var result = tape.getLong(address)
if (sizeInBits != 0) {
result = result << (64 - sizeInBits - bitOffset) >>> (64 - sizeInBits)
}
result
} else if (basic.getKind == eInt && basic.isLong) {
var result = tape.getInt(address)
if (sizeInBits != 0) {
result = result << (32 - sizeInBits - bitOffset) >>> (32 - sizeInBits)
}
result
} else if (basic.getKind == eInt) {
var result = tape.getInt(address)
if (sizeInBits != 0) {
result = result << (32 - sizeInBits - bitOffset) >>> (32 - sizeInBits)
}
result
} else if (basic.getKind == eBoolean) {
var result = tape.getInt(address)
if (sizeInBits != 0) {
result = result << (32 - sizeInBits - bitOffset) >> (32 - sizeInBits)
}
result
} else if (basic.getKind == eDouble) {
tape.getDouble(address)
} else if (basic.getKind == eFloat) {
tape.getFloat(address)
} else if (basic.getKind == eChar) {
tape.get(address) // a C 'char' is a Java 'byte'
}
case ptr: IPointerType => tape.getInt(address)
case fcn: IFunctionType => tape.getInt(address)
case struct: CStructure => tape.getInt(address)
case typedef: CTypedef => readFromMemory(address, typedef.getType).value
}
TypeHelper.castSign(theType, result)
}
}
case class ReturnFromFunction() extends Exception("returning")
case class CachedRValue(expr2: IASTExpression) {
var cachedValue: RValue = null
}
case class JmpIfNotEqual(expr: IASTExpression, relativeJump: Int)
case class JmpToLabelIfNotZero(expr: IASTExpression, label: Label)
case class JmpToLabelIfZero(expr: IASTExpression, label: Label)
case class JmpToLabelIfEqual(expr1: IASTExpression, expr2: CachedRValue, label: Label)
case class Jmp(relativeJump: Int)
case class JmpLabel(label: Label)
case class JmpName(label: String) {
var destAddress = 0
}
abstract class Label {
var address = 0
}
case class PushVariableStack()
case class PopVariableStack()
case class GotoLabel(name: String) extends Label
case class BreakLabel() extends Label
case class ContinueLabel() extends Label
case class GenericLabel() extends Label
case class CaseLabel(caseStatement: IASTCaseStatement) extends Label
case class DefaultLabel(default: IASTDefaultStatement) extends Label
object State {
def flattenNode(tUnit: IASTNode)(implicit state: State): List[Any] = {
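    // Lowers the AST into a flat instruction list: if/for/while/do/switch statements
    // become labels plus conditional and unconditional jumps, and compound statements
    // are bracketed by PushVariableStack/PopVariableStack markers.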
def recurse(node: IASTNode): List[Any] = {
node match {
case null => List()
case ifStatement: IASTIfStatement =>
val contents = PushVariableStack() +: recurse(ifStatement.getThenClause) :+ PopVariableStack()
val elseContents = PushVariableStack() +: List(Option(ifStatement.getElseClause)).flatten.flatMap(recurse) :+ PopVariableStack()
val jmp = if (ifStatement.getElseClause != null) {
List(Jmp(elseContents.size))
} else {
List()
}
          // the conditional jump must skip the then-branch plus the trailing Jmp over the else-branch (when present)
JmpIfNotEqual(ifStatement.getConditionExpression, (contents ++ jmp).size) +: ((contents ++ jmp) ++ elseContents)
case forStatement: IASTForStatement =>
val breakLabel = BreakLabel()
state.breakLabelStack = breakLabel +: state.breakLabelStack
val continueLabel = ContinueLabel()
state.continueLabelStack = continueLabel +: state.continueLabelStack
val init = List(forStatement.getInitializerStatement)
val contents = recurse(forStatement.getBody)
val iter = forStatement.getIterationExpression
val beginLabel = new GotoLabel("")
state.breakLabelStack = state.breakLabelStack.tail
state.continueLabelStack = state.continueLabelStack.tail
val execution = contents ++ List(continueLabel, iter)
val result = (if (forStatement.getConditionExpression != null) {
init ++ (beginLabel +: JmpToLabelIfNotZero(forStatement.getConditionExpression, breakLabel) +: execution :+ JmpLabel(beginLabel)) :+ breakLabel
} else {
init ++ (beginLabel +: execution :+ JmpLabel(beginLabel)) :+ breakLabel
})
PushVariableStack() +: result :+ PopVariableStack()
case whileStatement: IASTWhileStatement =>
val breakLabel = BreakLabel()
state.breakLabelStack = breakLabel +: state.breakLabelStack
val continueLabel = ContinueLabel()
state.continueLabelStack = continueLabel +: state.continueLabelStack
val contents = recurse(whileStatement.getBody)
val begin = new GotoLabel("")
val end = new GotoLabel("")
state.breakLabelStack = state.breakLabelStack.tail
state.continueLabelStack = state.continueLabelStack.tail
val result = List(JmpLabel(end), begin) ++ contents ++ List(end, continueLabel, JmpToLabelIfZero(whileStatement.getCondition, begin)) :+ breakLabel
PushVariableStack() +: result :+ PopVariableStack()
case doWhileStatement: IASTDoStatement =>
val breakLabel = BreakLabel()
state.breakLabelStack = breakLabel +: state.breakLabelStack
val continueLabel = ContinueLabel()
state.continueLabelStack = continueLabel +: state.continueLabelStack
val contents = recurse(doWhileStatement.getBody)
val begin = new GenericLabel()
state.breakLabelStack = state.breakLabelStack.tail
state.continueLabelStack = state.continueLabelStack.tail
val result = List(begin) ++ contents ++ List(continueLabel, JmpToLabelIfZero(doWhileStatement.getCondition, begin)) :+ breakLabel
PushVariableStack() +: result :+ PopVariableStack()
case switch: IASTSwitchStatement =>
val breakLabel = BreakLabel()
state.breakLabelStack = breakLabel +: state.breakLabelStack
val descendants = recurse(switch.getBody)
def getParentSwitchBody(node: IASTNode): IASTStatement = {
if (node.getParent.isInstanceOf[IASTSwitchStatement]) {
node.getParent.asInstanceOf[IASTSwitchStatement].getBody
} else {
getParentSwitchBody(node.getParent)
}
}
val cached = CachedRValue(switch.getControllerExpression)
val jumpTable = descendants.flatMap{
case x @ CaseLabel(caseStatement) if (switch.getBody == getParentSwitchBody(caseStatement)) =>
List(JmpToLabelIfEqual(caseStatement.getExpression, cached, x))
case x @ DefaultLabel(default) if (switch.getBody == getParentSwitchBody(default)) =>
List(JmpLabel(x))
case _ =>
List()
} :+ JmpLabel(breakLabel)
state.breakLabelStack = state.breakLabelStack.tail
val result = cached +: (jumpTable ++ descendants :+ breakLabel)
PushVariableStack() +: result :+ PopVariableStack()
case x: IASTCaseStatement =>
List(CaseLabel(x))
case x: IASTDefaultStatement =>
List(DefaultLabel(x))
case _: IASTContinueStatement =>
List(JmpLabel(state.continueLabelStack.head))
case _: IASTBreakStatement =>
List(JmpLabel(state.breakLabelStack.head))
case _: IASTElaboratedTypeSpecifier =>
List()
case goto: IASTGotoStatement =>
List(JmpName(goto.getName.toString))
case fcn: IASTFunctionDefinition =>
List(fcn)
case compound: IASTCompoundStatement =>
val isTypicalCompound = compound.getParent() match {
case x: IASTSwitchStatement => true
case x: CASTFunctionDefinition => true
case x: CASTForStatement => true
case x: CASTIfStatement => true
case x: CASTDoStatement => true
case x: CASTWhileStatement => true
case _ => false
}
if (isTypicalCompound) {
compound.getStatements.flatMap(recurse).toList
} else {
PushVariableStack() +: compound.getStatements.flatMap(recurse).toList :+ PopVariableStack()
}
case decl: IASTDeclarationStatement =>
decl.getChildren.toList.flatMap(recurse)
case decl: CASTSimpleDeclaration =>
List(decl)
case _: IASTSimpleDeclSpecifier =>
List()
case _: CASTTypedefNameSpecifier =>
List()
case decl: IASTDeclarator =>
List(decl)
case label: IASTLabelStatement =>
GotoLabel(label.getName.toString) +: recurse(label.getNestedStatement)
case exprState: CASTExpressionStatement =>
List(exprState.getExpression)
case _ =>
//println("SPLITTING: " + node.getClass.getSimpleName + " : " + node.getRawSignature)
node +: node.getChildren.toList
}
}
tUnit.getChildren.flatMap{recurse}.toList
}
}
abstract sealed trait NumBits
case object ThirtyTwoBits extends NumBits
case object SixtyFourBits extends NumBits
class State(pointerSize: NumBits) {
object Stack extends Memory(500000)
private var heapInsertIndex = 250000
var functionContexts = List[FunctionScope]()
def context: FunctionScope = functionContexts.head
val functionList = new ListBuffer[Function]()
val functionPointers = scala.collection.mutable.LinkedHashMap[String, Variable]()
val stdout = new ListBuffer[Char]()
private var functionCount = 0
private var breakLabelStack = List[Label]()
private var continueLabelStack = List[Label]()
val declarations = new ListBuffer[CStructure]()
val pointerType = pointerSize match {
case ThirtyTwoBits => TypeHelper.intType
case SixtyFourBits => new CBasicType(IBasicType.Kind.eInt, IBasicType.IS_LONG_LONG)
}
def pushScope(scope: FunctionScope): Unit = {
functionContexts = scope +: functionContexts
}
def getFunctionScope = {
functionContexts.collect{case fcnScope: FunctionScope => fcnScope}.head
}
def popFunctionContext = {
Stack.insertIndex = functionContexts.head.startingStackAddr
functionContexts = functionContexts.tail
}
def hasFunction(name: String): Boolean = functionList.exists{fcn => fcn.name == name}
def getFunctionByIndex(index: Int): Function = functionList.find{fcn => fcn.index == index}.get
Functions.scalaFunctions.foreach{fcn =>
addScalaFunctionDef(fcn)
}
pushScope(new FunctionScope(List(), null, null) {})
def init(codes: Seq[String], includePaths: List[String]): IASTNode = {
val tUnit = Utils.getTranslationUnit(codes, includePaths)
val fcns = tUnit.getChildren.collect{case x:IASTFunctionDefinition => x}
.filter(fcn => fcn.getDeclSpecifier.getStorageClass != IASTDeclSpecifier.sc_extern)
fcns.foreach { fcnDef =>
addFunctionDef(fcnDef, fcnDef.getDeclarator.getName.toString == "main")
}
functionContexts = List[FunctionScope]()
declarations ++= tUnit.getDeclarations.collect{case simp: CASTSimpleDeclaration => simp.getDeclSpecifier}
.collect{case comp: CASTCompositeTypeSpecifier => comp}
.map{x => x.getName.resolveBinding().asInstanceOf[CStructure]}
tUnit
}
private def addScalaFunctionDef(fcn: Function) = {
fcn.index = functionCount
functionList += fcn
val fcnType = new CFunctionType(new CBasicType(IBasicType.Kind.eVoid, 0), null)
val newVar = Variable(fcn.name, State.this, fcnType)
Stack.writeToMemory(functionCount, newVar.address, fcnType)
functionPointers += fcn.name -> newVar
functionCount += 1
}
private def addStaticFunctionVars(node: IASTNode)(implicit state: State): List[Variable] = {
node match {
case decl: IASTDeclarator =>
val nameBinding = decl.getName.resolveBinding()
nameBinding match {
case vari: IVariable =>
if (vari.isStatic) {
val theType = TypeHelper.stripSyntheticTypeInfo(nameBinding.asInstanceOf[IVariable].getType)
val variable = Variable(decl.getName.toString, state, vari.getType)
if (decl.getInitializer != null) {
val initVals = Declarator.getRValues(decl.getInitializer.asInstanceOf[IASTEqualsInitializer].getInitializerClause, theType)
Declarator.assign(variable, initVals, null, op_assign)
}
variable.isInitialized = true
List(variable)
} else {
List()
}
case _ => List()
}
case x => x.getChildren.toList.flatMap{x => addStaticFunctionVars(x)}
}
}
private def addFunctionDef(fcnDef: IASTFunctionDefinition, isMain: Boolean) = {
val name = fcnDef.getDeclarator.getName
val fcnType = fcnDef.getDeclarator.getName.resolveBinding().asInstanceOf[IFunction].getType
functionList += new Function(name.toString, true) {
index = functionCount
node = fcnDef
override val staticVars = addStaticFunctionVars(fcnDef)(State.this)
def parameters = fcnType.getParameterTypes.toList
def run(formattedOutputParams: Array[RValue], state: State): Option[RValue] = {
None
}
}
if (!isMain) {
val newVar = Variable(name.toString, State.this, fcnType)
Stack.writeToMemory(functionCount, newVar.address, fcnType)
functionPointers += name.toString -> newVar
}
functionCount += 1
}
def callFunctionFromScala(name: String, args: Array[RValue]): Seq[IASTNode] = {
functionList.find(_.name == name).map { fcn =>
// this is a function simulated in scala
fcn.run(args.reverse, this).foreach(context.pushOntoStack)
}
Seq()
}
def callTheFunction(name: String, call: IASTFunctionCallExpression, scope: Option[FunctionScope], isApi: Boolean = false)(implicit state: State): Option[ValueType] = {
functionList.find(_.name == name).flatMap{ function =>
if (!function.isNative) {
// this is a function simulated in scala
val stackPos = Stack.insertIndex
val args = call.getArguments.map{x => Expressions.evaluate(x)}
val resolvedArgs: Array[RValue] = args.flatten.map{ TypeHelper.resolve }
val returnVal = function.run(resolvedArgs.reverse, this)
Stack.insertIndex = stackPos // pop the stack
returnVal.map{ rVal =>
rVal match {
case file @ FileRValue(_) => println("RETURNING FILE: "); file
case rValue => RValue(rValue.value, TypeHelper.unsignedIntType)
}
}
} else {
if (function.name == "main" && isApi) {
scope.get.init(function.node, this, !scope.isDefined)
functionContexts = List(scope.get)
context.run(this)
None
} else {
val newScope = scope.getOrElse {
val expressionType = call.getExpressionType
new FunctionScope(function.staticVars, functionContexts.headOption.getOrElse(null), expressionType)
}
newScope.init(function.node, this, !scope.isDefined)
val args: List[ValueType] = call.getArguments.map { x => Expressions.evaluate(x).head }.toList
val resolvedArgs = args.map{ TypeHelper.resolve }
// printf assumes all floating point numbers are doubles
// and shorts are 4 bytes
val promoted = resolvedArgs.map{arg =>
if (arg.theType.isInstanceOf[IBasicType] && arg.theType.asInstanceOf[IBasicType].getKind == IBasicType.Kind.eFloat) {
TypeHelper.cast(TypeHelper.doubleType, arg.value)
} else if (arg.theType.isInstanceOf[IBasicType] && arg.theType.asInstanceOf[IBasicType].isShort) {
TypeHelper.cast(TypeHelper.intType, arg.value)
} else if (arg.theType.isInstanceOf[IBasicType] && arg.theType.asInstanceOf[IBasicType].getKind == IBasicType.Kind.eChar) {
TypeHelper.cast(TypeHelper.intType, arg.value)
} else {
arg
}
}
newScope.pushOntoStack(promoted)
newScope.pushOntoStack(RValue(resolvedArgs.size, TypeHelper.unsignedIntType))
functionContexts = newScope +: functionContexts
newScope.run(this)
newScope.getReturnValue.map{ retVal =>
val valuesToPush: Option[Array[Byte]] = retVal match {
case structure @ LValue(_, _: CStructure) =>
Some(structure.toByteArray)
case _ => None
}
popFunctionContext
valuesToPush.foreach { byteArray =>
val newAddr = state.allocateSpace(byteArray.size)
state.writeDataBlock(byteArray, newAddr)
}
retVal
}.orElse{
popFunctionContext
None
}
}
}
}
}
def allocateSpace(numBytes: Int): Int = {
val result = Stack.insertIndex
Stack.insertIndex += Math.max(0, numBytes)
result
}
def allocateHeapSpace(numBytes: Int): Int = {
val result = heapInsertIndex
heapInsertIndex += Math.max(0, numBytes)
result
}
def copy(dst: Int, src: Int, numBytes: Int) = {
if (numBytes != 0) {
for (i <- (0 until numBytes)) {
Stack.tape.put(dst + i, Stack.tape.get(src + i))
}
}
}
def set(dst: Int, value: Byte, numBytes: Int) = {
if (numBytes != 0) {
for (i <- (0 until numBytes)) {
Stack.tape.put(dst + i, value)
}
}
}
def readPtrVal(address: Int): Int = {
Stack.readFromMemory(address, pointerType).value.asInstanceOf[Int]
}
def getString(str: String): RValue = {
val theStr = Utils.stripQuotes(str)
val withNull = (theStr.toCharArray() :+ 0.toChar).map(_.toByte) // terminating null char
val strAddr = allocateSpace(withNull.size)
writeDataBlock(withNull, strAddr)(this)
RValue(strAddr, pointerType)
}
def createStringArrayVariable(varName: String, str: String): Variable = {
val theStr = Utils.stripQuotes(str)
    val translateLineFeed = theStr.replace("\\n", 10.asInstanceOf[Char].toString)
val withNull = (translateLineFeed.toCharArray() :+ 0.toChar)
.map { char => RValue(char.toByte, TypeHelper.charType)}.toList // terminating null char
val inferredArrayType = new CArrayType(TypeHelper.charType)
inferredArrayType.setModifier(new CASTArrayModifier(new CASTLiteralExpression(IASTLiteralExpression.lk_integer_constant, str.size.toString.toCharArray)))
val theArrayPtr = context.addArrayVariable(varName, inferredArrayType, withNull)
theArrayPtr
}
def writeDataBlock(array: List[RValue], startingAddress: Int)(implicit state: State): Unit = {
var address = startingAddress
array.foreach {
case RValue(newVal, theType) =>
Stack.writeToMemory(newVal, address, theType)
address += TypeHelper.sizeof(theType)(state)
}
}
def writeDataBlock(array: Array[Byte], startingAddress: Int)(implicit state: State): Unit = {
var address = startingAddress
array.foreach { byte =>
Stack.tape.put(address, byte)
address += 1
}
}
def readDataBlock(startingAddress: Int, length: Int)(implicit state: State): Array[Byte] = {
(0 until length).map { index =>
Stack.tape.get(startingAddress + index)
}.toArray
}
}
|
bdwashbu/cEngine
|
src/scala/c/engine/cEngine.scala
|
Scala
|
apache-2.0
| 23,838
|
package org.tlc.template.ui.layouts
import android.widget._
import macroid.ActivityContext
import macroid.FullDsl._
/**
* Author: @aguestuser
* Date: 4/22/15
* License: GPLv2 (https://www.gnu.org/licenses/gpl-2.0.html)
*/
// layouts are composable!
object MainLayouts {
def layout1(implicit ctx: ActivityContext) =
l[LinearLayout]( // `l` aliases `layout`
w[TextView], // `w` aliases `widget`
w[ImageView],
w[Button]
)
def layout2(implicit ctx: ActivityContext) =
l[FrameLayout](
w[ProgressBar]
)
def comboLayout(implicit ctx: ActivityContext) =
l[FrameLayout](
layout1,
layout2
)
}
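// Illustrative sketch (assumption, not part of the original source): inside an
// Activity with an implicit ActivityContext, the composed layout could be rendered
// via macroid's getUi, e.g. setContentView(getUi(comboLayout)).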
|
aguestuser/macroid-template
|
src/main/scala/org/tlc/template/ui/layouts/MainLayouts.scala
|
Scala
|
gpl-3.0
| 674
|
package grid.engine
import cats.Eq
import enumeratum.values._
object Nagios extends Nagios
trait Nagios {
sealed abstract class Severity(val value: Int) extends IntEnumEntry {
def exit(): Nothing
def prefix: String
final def println(msg: String): this.type = {
Console.out.println(s"$prefix $msg")
this
}
}
object Severity extends IntEnum[Severity] {
val values = findValues
case object OK extends Severity(0) {
def prefix = "OK"
      def exit(): Nothing =
Nagios.exit.ok
}
case object WARNING extends Severity(1) {
def prefix = "WARNING"
def exit(): Nothing =
Nagios.exit.warning
}
case object CRITICAL extends Severity(2) {
def prefix = "CRITICAL"
def exit(): Nothing =
Nagios.exit.critical
}
}
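  // Illustrative usage sketch (not part of the original source): report and exit
  // following the Nagios plugin convention, e.g.
  //   Severity.CRITICAL.println("queue stuck").exit()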
object exit {
def ok: Nothing =
sys exit 0
def warning: Nothing =
sys exit 1
def critical: Nothing =
sys exit 2
def unknown: Nothing =
sys exit 3
}
// --------------------------------------------------------------------------
// for command line arguments
// --------------------------------------------------------------------------
sealed trait Output
object Output {
case object CLI extends Output
case object Nagios extends Output
implicit val eq: Eq[Output] = Eq.fromUniversalEquals
implicit val OutputRead: scopt.Read[Output] =
scopt.Read reads {
_.toLowerCase match {
case "nagios" => Output.Nagios
case "cli" => Output.CLI
}
}
}
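  // Illustrative sketch (assumption, not part of the original source): with the
  // implicit scopt.Read above in scope, an OptionParser can accept the flag
  // directly, e.g. opt[Output]("output").text("either 'cli' or 'nagios'").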
}
|
idiv-biodiversity/grid-engine-tools
|
src/main/scala/Nagios.scala
|
Scala
|
unlicense
| 1,596
|
import scala.util.control.NonFatal
trait NonFatalTests {
//NonFatals
val nonFatals: Seq[Throwable] =
Seq(new RuntimeException,
new Exception,
new Throwable,
new NotImplementedError)
//Fatals
val fatals: Seq[Throwable] =
Seq(new InterruptedException,
new StackOverflowError,
new OutOfMemoryError,
new LinkageError,
new VirtualMachineError {},
new scala.util.control.ControlThrowable {})
def testFatalsUsingApply(): Unit = {
fatals foreach { t => assert(NonFatal(t) == false) }
}
def testNonFatalsUsingApply(): Unit = {
nonFatals foreach { t => assert(NonFatal(t) == true) }
}
def testFatalsUsingUnapply(): Unit = {
fatals foreach { t => assert(NonFatal.unapply(t).isEmpty) }
}
def testNonFatalsUsingUnapply(): Unit = {
nonFatals foreach { t => assert(NonFatal.unapply(t).isDefined) }
}
testFatalsUsingApply()
testNonFatalsUsingApply()
testFatalsUsingUnapply()
testNonFatalsUsingUnapply()
}
object Test
extends App
with NonFatalTests {
System.exit(0)
}
|
scala/scala
|
test/files/jvm/non-fatal-tests.scala
|
Scala
|
apache-2.0
| 1,103
|
package org.jetbrains.plugins.scala.lang.completion
import com.intellij.codeInsight.completion._
import com.intellij.codeInsight.lookup._
import com.intellij.openapi.util.Iconable._
import com.intellij.patterns.PlatformPatterns
import com.intellij.psi._
import com.intellij.psi.filters._
import com.intellij.psi.filters.position.{FilterPattern, LeftNeighbour}
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.util.ProcessingContext
import org.jetbrains.plugins.scala.lang.completion.filters.modifiers.ModifiersFilter
import org.jetbrains.plugins.scala.lang.psi.TypeAdjuster
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.{ScModifierListOwner, ScTypedDefinition}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.overrideImplement._
/**
 * Created by kate on 3/1/16.
 * Contributes override elements. May be invoked on the override keyword (ove<caret>)
 * or after an override element definition (override def <caret>).
*/
class ScalaOverrideContributor extends ScalaCompletionContributor {
private def registerOverrideCompletion(filter: ElementFilter, keyword: String) {
extend(CompletionType.BASIC, PlatformPatterns.psiElement.
and(new FilterPattern(new AndFilter(new NotFilter(new LeftNeighbour(new TextContainFilter("override"))),
new AndFilter(new NotFilter(new LeftNeighbour(new TextFilter("."))), filter)))),
new CompletionProvider[CompletionParameters] {
def addCompletions(parameters: CompletionParameters, context: ProcessingContext, resultSet: CompletionResultSet) {
addCompletionsOnOverrideKeyWord(resultSet, parameters)
}
})
}
extend(CompletionType.BASIC, PlatformPatterns.psiElement(), new CompletionProvider[CompletionParameters] {
def addCompletions(parameters: CompletionParameters, context: ProcessingContext, resultSet: CompletionResultSet) {
addCompletionsAfterOverride(resultSet, parameters)
}
})
class MyElementRenderer(member: ScalaNamedMember) extends LookupElementRenderer[LookupElementDecorator[LookupElement]] {
def renderElement(element: LookupElementDecorator[LookupElement], presentation: LookupElementPresentation) = {
element.getDelegate.renderElement(presentation)
val (resultText, tailText) = member match {
case mm: ScMethodMember =>
(mm.text + " = {...}", mm.scType.presentableText)
case mVal: ScValueMember =>
(mVal.getText, mVal.scType.presentableText)
case mVar: ScVariableMember =>
(mVar.getText, mVar.scType.presentableText)
case ta: ScAliasMember =>
val aliasType = ta.getElement match {
case tad: ScTypeAliasDefinition =>
tad.aliasedTypeElement.calcType.presentableText
case _ => ""
}
(ta.getText, aliasType)
}
presentation.setTypeText(tailText)
presentation.setItemText(resultText)
}
}
private def addCompletionsOnOverrideKeyWord(resultSet: CompletionResultSet, parameters: CompletionParameters): Unit = {
val position = positionFromParameters(parameters)
val clazz = PsiTreeUtil.getParentOfType(position, classOf[ScTemplateDefinition], false)
if (clazz == null) return
val classMembers = ScalaOIUtil.getMembersToOverride(clazz, withSelfType = true)
if (classMembers.isEmpty) return
handleMembers(classMembers, clazz, (classMember, clazz) => createText(classMember, clazz, full = true), resultSet) { classMember =>
new MyInsertHandler()
}
}
private def addCompletionsAfterOverride(resultSet: CompletionResultSet, parameters: CompletionParameters): Unit = {
val position = positionFromParameters(parameters)
val clazz = PsiTreeUtil.getParentOfType(position, classOf[ScTemplateDefinition], /*strict = */ false)
if (clazz == null) return
val mlo = Option(PsiTreeUtil.getContextOfType(position, classOf[ScModifierListOwner]))
    if (!mlo.exists(_.hasModifierProperty("override"))) return
val classMembers = ScalaOIUtil.getMembersToOverride(clazz, withSelfType = true)
if (classMembers.isEmpty) return
def membersToRender = position.getContext match {
case m: PsiMethod => classMembers.filter(_.isInstanceOf[ScMethodMember])
case typedDefinition: ScTypedDefinition if typedDefinition.isVal =>
classMembers.filter(_.isInstanceOf[ScValueMember])
case typedDefinition: ScTypedDefinition if typedDefinition.isVar =>
classMembers.filter(_.isInstanceOf[ScVariableMember])
case typeAlis: ScTypeAlias => classMembers.filter(_.isInstanceOf[ScAliasMember])
case _ => classMembers
}
handleMembers(membersToRender, clazz, (classMember, clazz) => createText(classMember, clazz), resultSet) { classMember =>
new MyInsertHandler()
}
}
class MyInsertHandler() extends InsertHandler[LookupElement] {
def handleInsert(context: InsertionContext, item: LookupElement) = {
def makeInsertion(): Unit = {
val elementOption = Option(PsiTreeUtil.getContextOfType(context.getFile.findElementAt(context.getStartOffset),
classOf[ScModifierListOwner]))
elementOption.foreach { element =>
TypeAdjuster.markToAdjust(element)
ScalaGenerationInfo.positionCaret(context.getEditor, element.asInstanceOf[PsiMember])
context.commitDocument()
}
}
makeInsertion()
}
}
private def createText(classMember: ClassMember, td: ScTemplateDefinition, full: Boolean = false): String = {
val needsInferType = ScalaGenerationInfo.needsInferType
val text: String = classMember match {
case mm: ScMethodMember =>
val mBody = ScalaGenerationInfo.getMethodBody(mm, td, isImplement = false)
val fun = if (full)
ScalaPsiElementFactory.createOverrideImplementMethod(mm.sign, mm.getElement.getManager,
needsOverrideModifier = false, needsInferType = needsInferType: Boolean, mBody)
else ScalaPsiElementFactory.createMethodFromSignature(mm.sign, mm.getElement.getManager,
needsInferType = needsInferType, mBody)
fun.getText
case tm: ScAliasMember =>
ScalaPsiElementFactory.getOverrideImplementTypeSign(tm.getElement,
tm.substitutor, "this.type", needsOverride = false)
case value: ScValueMember =>
ScalaPsiElementFactory.getOverrideImplementVariableSign(value.element, value.substitutor, "_",
needsOverride = false, isVal = true, needsInferType = needsInferType)
case variable: ScVariableMember =>
ScalaPsiElementFactory.getOverrideImplementVariableSign(variable.element, variable.substitutor, "_",
needsOverride = false, isVal = false, needsInferType = needsInferType)
case _ => " "
}
if (!full) text.indexOf(" ", 1) match { //remove val, var, def or type
case -1 => text
case part => text.substring(part + 1)
} else "override " + text
}
private def handleMembers(classMembers: Iterable[ClassMember], td: ScTemplateDefinition,
name: (ClassMember, ScTemplateDefinition) => String,
resultSet: CompletionResultSet)
(insertionHandler: ClassMember => InsertHandler[LookupElement]): Unit = {
classMembers.foreach {
case mm: ScalaNamedMember =>
val lookupItem = LookupElementBuilder.create(mm.getElement, name(mm, td))
.withIcon(mm.getPsiElement.getIcon(ICON_FLAG_VISIBILITY | ICON_FLAG_READ_STATUS))
.withInsertHandler(insertionHandler(mm))
val renderingDecorator = LookupElementDecorator.withRenderer(lookupItem, new MyElementRenderer(mm))
resultSet.consume(renderingDecorator)
case _ =>
}
}
registerOverrideCompletion(new ModifiersFilter, "override")
}
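// --- Editor's note (hedged sketch, not part of the plugin sources): the createText helper
// above strips the leading `val`, `var`, `def` or `type` keyword by cutting the text at the
// first space. The tiny standalone object below reproduces just that string manipulation.
object StripKeywordSketch {
  def stripLeadingKeyword(text: String): String =
    text.indexOf(" ", 1) match {
      case -1   => text
      case part => text.substring(part + 1)
    }

  def main(args: Array[String]): Unit = {
    println(stripLeadingKeyword("def foo(x: Int): Int = {...}")) // foo(x: Int): Int = {...}
    println(stripLeadingKeyword("type Alias = this.type"))       // Alias = this.type
  }
}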
|
jeantil/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/completion/ScalaOverrideContributor.scala
|
Scala
|
apache-2.0
| 8,037
|
package de.jasminelli.souffleuse.bench
/**
* Parameters to a run of a RpcBench instance
*
* @author Stefan Plantikow <Stefan.Plantikow@googlemail.com>
*
* Originally created by User: stepn Date: 13.02.2009 Time: 15:30:08
*/
final case class BenchParams(val load: RqLoad,
val workDur: Long,
val numStages: Int,
val warmUp: Int, val times: Int, val deferredSending: Boolean);
abstract sealed class RqLoad(val numRequests: Int) {
val requiredRequests = numRequests
val numObligations = numRequests
}
final case class LinRqLoad(requests: Int) extends RqLoad(requests);
final case class BulkRqLoad(requests: Int) extends RqLoad(requests);
final case class ParRqLoad(requests: Int, val numPartitions: Int) extends RqLoad(requests) {
assert(((numRequests/numPartitions) * numPartitions) == numRequests)
}
final case class NBParRqLoad(requests: Int, val numPartitions: Int) extends RqLoad(requests) {
assert(numPartitions > 0)
override val numObligations = numPartitions +
(if ((numRequests % numPartitions) == 0) 0 else 1)
}
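// --- Editor's note (hedged usage sketch, not part of the original benchmark sources):
// illustrates how NBParRqLoad counts obligations -- one per partition, plus one extra
// obligation when the request count does not divide evenly into the partitions.
object RqLoadSketch {
  def main(args: Array[String]): Unit = {
    println(NBParRqLoad(10, 4).numObligations) // 5: 4 partitions plus 1 for the remainder
    println(NBParRqLoad(12, 4).numObligations) // 4: divides evenly, no extra obligation
  }
}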
|
boggle/souffleuse
|
src/test/scala/de/jasminelli/souffleuse/bench/BenchParam.scala
|
Scala
|
mit
| 1,132
|
package com.twitter.finagle.stats
import com.twitter.common.metrics.Metrics
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import scala.collection.JavaConverters._
@RunWith(classOf[JUnitRunner])
class StatsFormatterTest extends FunSuite {
private val metrics = Metrics.createDetached()
private val sr = new ImmediateMetricsStatsReceiver(metrics)
private val histo1 = sr.stat("histo1")
(0 to 100).foreach(histo1.add(_))
private val values = SampledValues(
Map.empty,
Map.empty,
metrics.sampleHistograms().asScala)
test("CommonsMetrics is formatted the same as Metrics.sample") {
val formatter = StatsFormatter.CommonsMetrics
val formatted = formatter(values)
assert(formatted === metrics.sample().asScala)
assert(formatted("histo1.p50") === 50)
assert(formatted("histo1.p90") === 90)
assert(formatted("histo1.p9990") === 100)
assert(formatted("histo1.p9999") === 100)
assert(formatted("histo1.count") === 101)
assert(formatted("histo1.max") === 100)
assert(formatted("histo1.min") === 0)
assert(formatted("histo1.avg") === 50)
}
test("Ostrich") {
val formatter = StatsFormatter.Ostrich
val formatted = formatter(values)
assert(formatted("histo1.p50") === 50)
assert(formatted("histo1.p90") === 90)
assert(formatted("histo1.p999") === 100)
assert(formatted("histo1.p9999") === 100)
assert(formatted("histo1.count") === 101)
assert(formatted("histo1.maximum") === 100)
assert(formatted("histo1.minimum") === 0)
assert(formatted("histo1.average") === 50)
}
}
|
kumasento/finagle
|
finagle-stats/src/test/scala/com/twitter/finagle/stats/StatsFormatterTest.scala
|
Scala
|
apache-2.0
| 1,636
|
package shared.api
import shared.dto.{CreateUserRequest, User}
import shared.forms.Forms.SubmitResponse
trait ServerApi {
def logIn(login: String, pass: String): Either[String, User]
def createUser(user: CreateUserRequest): SubmitResponse[CreateUserRequest,User]
def listUsers(u: Unit): Either[String, List[User]]
}
|
Igorocky/lesn
|
shared/src/main/scala/shared/api/ServerApi.scala
|
Scala
|
mit
| 324
|
package dbtarzan.gui
import scalafx.stage.Stage
import scalafx.scene.control.{Label, Menu, MenuBar, MenuItem, SplitPane, TextField}
import scalafx.scene.layout.{BorderPane, FlowPane}
import scalafx.scene.Parent
import scalafx.Includes._
import akka.actor.ActorRef
import scalafx.event.ActionEvent
import scalafx.geometry.Insets
import dbtarzan.gui.foreignkeys.{AdditionalForeignKeysEditor, AdditionalForeignKeysEditorStarter}
import dbtarzan.messages._
import dbtarzan.gui.util.JFXUtil
import dbtarzan.db.{DatabaseId, TableId, TableNames}
import dbtarzan.localization.Localization
/* A panel containing all the tabs related to a database */
class Database (dbActor : ActorRef, guiActor : ActorRef, databaseId : DatabaseId, localization : Localization, tableNames: TableNames) extends TControlBuilder {
private val log = new Logger(guiActor)
private val tableList = new TableList(tableNames)
private val tableTabs = new TableTabs(dbActor, guiActor, databaseId, localization)
private var additionalForeignKeyEditor : Option[AdditionalForeignKeysEditor] = Option.empty
tableList.onTableSelected(tableName => dbActor ! QueryColumns(TableId(databaseId, tableName)))
private val filterText = new TextField() {
promptText = localization.filter
margin = Insets(0,0,3,0)
text.onChange { (value, oldValue, newValue) => {
val optValue = Option(newValue)
optValue.foreach({ dbActor ! QueryTablesByPattern(databaseId, _) })
}}
}
private val pane = new SplitPane {
private val tableListWithTitle = new BorderPane {
top = new FlowPane {
children = List(buildMenu(), new Label(localization.tables))
}
center = new BorderPane {
top = filterText
center = tableList.control
}
}
items.addAll(tableListWithTitle, tableTabs.control)
dividerPositions = 0.20
SplitPane.setResizableWithParent(tableListWithTitle, value = false)
}
private def buildMenu() = new MenuBar {
menus = List(
new Menu(JFXUtil.threeLines) {
items = List(
new MenuItem(localization.connectionReset) {
onAction = {
e: ActionEvent => dbActor ! QueryReset(databaseId)
}
},
new MenuItem(localization.openAdditionalForeignKeys) {
onAction = {
e: ActionEvent => {
additionalForeignKeyEditor = Some(AdditionalForeignKeysEditorStarter.openAdditionalForeignKeysEditor(
stage(),
dbActor,
guiActor,
databaseId,
tableList.tableNames,
localization
))
}
}
}
)
}
)
stylesheets += "orderByMenuBar.css"
}
def control : Parent = pane
private def stage() : Stage =
new Stage(pane.scene.window().asInstanceOf[javafx.stage.Stage])
def handleQueryIdMessage(msg: TWithQueryId) : Unit =
tableTabs.handleQueryIdMessage(msg)
def handleDatabaseIdMessage(msg: TWithDatabaseId) : Unit = msg match {
case tables : ResponseTablesByPattern => tableList.addTableNames(tables.names)
case tables : ResponseCloseTables => tableTabs.removeTables(tables.ids)
case columns : ResponseColumnsForForeignKeys => additionalForeignKeyEditor.foreach(_.handleColumns(columns.tableName, columns.columns))
case _: RequestRemovalAllTabs => tableTabs.requestRemovalAllTabs()
case additionalKeys: ResponseAdditionalForeignKeys => additionalForeignKeyEditor.foreach(_.handleForeignKeys(additionalKeys.keys))
case _ => log.error(localization.errorDatabaseMessage(msg))
}
def handleTableIdMessage(msg: TWithTableId) : Unit = msg match {
case columns : ResponseColumns => tableTabs.addColumns(columns)
case columns : ResponseColumnsFollow => tableTabs.addColumnsFollow(columns)
case _ => log.error(localization.errorTableMessage(msg))
}
def getId : DatabaseId = databaseId
def currentTableId : Option[QueryId] = tableTabs.currentTableId
}
|
aferrandi/dbtarzan
|
src/main/scala/dbtarzan/gui/Database.scala
|
Scala
|
apache-2.0
| 4,049
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util
import java.util.Collections
import java.util.Map.Entry
import java.util.concurrent.{CompletableFuture, ExecutionException}
import java.util.concurrent.TimeUnit.{MILLISECONDS, NANOSECONDS}
import kafka.network.RequestChannel
import kafka.raft.RaftManager
import kafka.server.QuotaFactory.QuotaManagers
import kafka.utils.Logging
import org.apache.kafka.clients.admin.AlterConfigOp
import org.apache.kafka.common.Uuid.ZERO_UUID
import org.apache.kafka.common.acl.AclOperation.{ALTER, ALTER_CONFIGS, CLUSTER_ACTION, CREATE, DELETE, DESCRIBE}
import org.apache.kafka.common.config.ConfigResource
import org.apache.kafka.common.errors.{ApiException, ClusterAuthorizationException, InvalidRequestException, TopicDeletionDisabledException}
import org.apache.kafka.common.internals.FatalExitError
import org.apache.kafka.common.message.AlterConfigsResponseData.{AlterConfigsResourceResponse => OldAlterConfigsResourceResponse}
import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopic
import org.apache.kafka.common.message.CreatePartitionsResponseData.CreatePartitionsTopicResult
import org.apache.kafka.common.message.CreateTopicsRequestData
import org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult
import org.apache.kafka.common.message.DeleteTopicsResponseData.{DeletableTopicResult, DeletableTopicResultCollection}
import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.AlterConfigsResourceResponse
import org.apache.kafka.common.message._
import org.apache.kafka.common.protocol.Errors._
import org.apache.kafka.common.protocol.{ApiKeys, ApiMessage, Errors}
import org.apache.kafka.common.requests._
import org.apache.kafka.common.resource.Resource.CLUSTER_NAME
import org.apache.kafka.common.resource.ResourceType.{CLUSTER, TOPIC}
import org.apache.kafka.common.utils.Time
import org.apache.kafka.common.{Node, Uuid}
import org.apache.kafka.controller.Controller
import org.apache.kafka.metadata.{BrokerHeartbeatReply, BrokerRegistrationReply, VersionRange}
import org.apache.kafka.server.authorizer.Authorizer
import org.apache.kafka.server.common.ApiMessageAndVersion
import scala.jdk.CollectionConverters._
/**
* Request handler for Controller APIs
*/
class ControllerApis(val requestChannel: RequestChannel,
val authorizer: Option[Authorizer],
val quotas: QuotaManagers,
val time: Time,
val supportedFeatures: Map[String, VersionRange],
val controller: Controller,
val raftManager: RaftManager[ApiMessageAndVersion],
val config: KafkaConfig,
val metaProperties: MetaProperties,
val controllerNodes: Seq[Node],
val apiVersionManager: ApiVersionManager) extends ApiRequestHandler with Logging {
val authHelper = new AuthHelper(authorizer)
val requestHelper = new RequestHandlerHelper(requestChannel, quotas, time)
private val aclApis = new AclApis(authHelper, authorizer, requestHelper, "controller", config)
def isClosed: Boolean = aclApis.isClosed
def close(): Unit = aclApis.close()
override def handle(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
try {
request.header.apiKey match {
case ApiKeys.FETCH => handleFetch(request)
case ApiKeys.FETCH_SNAPSHOT => handleFetchSnapshot(request)
case ApiKeys.CREATE_TOPICS => handleCreateTopics(request)
case ApiKeys.DELETE_TOPICS => handleDeleteTopics(request)
case ApiKeys.API_VERSIONS => handleApiVersionsRequest(request)
case ApiKeys.ALTER_CONFIGS => handleLegacyAlterConfigs(request)
case ApiKeys.VOTE => handleVote(request)
case ApiKeys.BEGIN_QUORUM_EPOCH => handleBeginQuorumEpoch(request)
case ApiKeys.END_QUORUM_EPOCH => handleEndQuorumEpoch(request)
case ApiKeys.DESCRIBE_QUORUM => handleDescribeQuorum(request)
case ApiKeys.ALTER_ISR => handleAlterIsrRequest(request)
case ApiKeys.BROKER_REGISTRATION => handleBrokerRegistration(request)
case ApiKeys.BROKER_HEARTBEAT => handleBrokerHeartBeatRequest(request)
case ApiKeys.UNREGISTER_BROKER => handleUnregisterBroker(request)
case ApiKeys.ALTER_CLIENT_QUOTAS => handleAlterClientQuotas(request)
case ApiKeys.INCREMENTAL_ALTER_CONFIGS => handleIncrementalAlterConfigs(request)
case ApiKeys.ALTER_PARTITION_REASSIGNMENTS => handleAlterPartitionReassignments(request)
case ApiKeys.LIST_PARTITION_REASSIGNMENTS => handleListPartitionReassignments(request)
case ApiKeys.ENVELOPE => handleEnvelopeRequest(request, requestLocal)
case ApiKeys.SASL_HANDSHAKE => handleSaslHandshakeRequest(request)
case ApiKeys.SASL_AUTHENTICATE => handleSaslAuthenticateRequest(request)
case ApiKeys.ALLOCATE_PRODUCER_IDS => handleAllocateProducerIdsRequest(request)
case ApiKeys.CREATE_PARTITIONS => handleCreatePartitions(request)
case ApiKeys.DESCRIBE_ACLS => aclApis.handleDescribeAcls(request)
case ApiKeys.CREATE_ACLS => aclApis.handleCreateAcls(request)
case ApiKeys.DELETE_ACLS => aclApis.handleDeleteAcls(request)
case _ => throw new ApiException(s"Unsupported ApiKey ${request.context.header.apiKey}")
}
} catch {
case e: FatalExitError => throw e
case e: ExecutionException => requestHelper.handleError(request, e.getCause)
case e: Throwable => requestHelper.handleError(request, e)
}
}
def handleEnvelopeRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
if (!authHelper.authorize(request.context, CLUSTER_ACTION, CLUSTER, CLUSTER_NAME)) {
requestHelper.sendErrorResponseMaybeThrottle(request, new ClusterAuthorizationException(
s"Principal ${request.context.principal} does not have required CLUSTER_ACTION for envelope"))
} else {
EnvelopeUtils.handleEnvelopeRequest(request, requestChannel.metrics, handle(_, requestLocal))
}
}
def handleSaslHandshakeRequest(request: RequestChannel.Request): Unit = {
val responseData = new SaslHandshakeResponseData().setErrorCode(ILLEGAL_SASL_STATE.code)
requestHelper.sendResponseMaybeThrottle(request, _ => new SaslHandshakeResponse(responseData))
}
def handleSaslAuthenticateRequest(request: RequestChannel.Request): Unit = {
val responseData = new SaslAuthenticateResponseData()
.setErrorCode(ILLEGAL_SASL_STATE.code)
.setErrorMessage("SaslAuthenticate request received after successful authentication")
requestHelper.sendResponseMaybeThrottle(request, _ => new SaslAuthenticateResponse(responseData))
}
def handleFetch(request: RequestChannel.Request): Unit = {
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
handleRaftRequest(request, response => new FetchResponse(response.asInstanceOf[FetchResponseData]))
}
def handleFetchSnapshot(request: RequestChannel.Request): Unit = {
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
handleRaftRequest(request, response => new FetchSnapshotResponse(response.asInstanceOf[FetchSnapshotResponseData]))
}
def handleDeleteTopics(request: RequestChannel.Request): Unit = {
val deleteTopicsRequest = request.body[DeleteTopicsRequest]
val future = deleteTopics(deleteTopicsRequest.data,
request.context.apiVersion,
authHelper.authorize(request.context, DELETE, CLUSTER, CLUSTER_NAME),
names => authHelper.filterByAuthorized(request.context, DESCRIBE, TOPIC, names)(n => n),
names => authHelper.filterByAuthorized(request.context, DELETE, TOPIC, names)(n => n))
future.whenComplete { (results, exception) =>
requestHelper.sendResponseMaybeThrottle(request, throttleTimeMs => {
if (exception != null) {
deleteTopicsRequest.getErrorResponse(throttleTimeMs, exception)
} else {
val responseData = new DeleteTopicsResponseData().
setResponses(new DeletableTopicResultCollection(results.iterator)).
setThrottleTimeMs(throttleTimeMs)
new DeleteTopicsResponse(responseData)
}
})
}
}
def deleteTopics(request: DeleteTopicsRequestData,
apiVersion: Int,
hasClusterAuth: Boolean,
getDescribableTopics: Iterable[String] => Set[String],
getDeletableTopics: Iterable[String] => Set[String])
: CompletableFuture[util.List[DeletableTopicResult]] = {
// Check if topic deletion is enabled at all.
if (!config.deleteTopicEnable) {
if (apiVersion < 3) {
throw new InvalidRequestException("Topic deletion is disabled.")
} else {
throw new TopicDeletionDisabledException()
}
}
val deadlineNs = time.nanoseconds() + NANOSECONDS.convert(request.timeoutMs, MILLISECONDS);
// The first step is to load up the names and IDs that have been provided by the
// request. This is a bit messy because we support multiple ways of referring to
// topics (both by name and by id) and because we need to check for duplicates or
// other invalid inputs.
val responses = new util.ArrayList[DeletableTopicResult]
def appendResponse(name: String, id: Uuid, error: ApiError): Unit = {
responses.add(new DeletableTopicResult().
setName(name).
setTopicId(id).
setErrorCode(error.error.code).
setErrorMessage(error.message))
}
val providedNames = new util.HashSet[String]
val duplicateProvidedNames = new util.HashSet[String]
val providedIds = new util.HashSet[Uuid]
val duplicateProvidedIds = new util.HashSet[Uuid]
def addProvidedName(name: String): Unit = {
if (duplicateProvidedNames.contains(name) || !providedNames.add(name)) {
duplicateProvidedNames.add(name)
providedNames.remove(name)
}
}
request.topicNames.forEach(addProvidedName)
request.topics.forEach {
topic => if (topic.name == null) {
if (topic.topicId.equals(ZERO_UUID)) {
appendResponse(null, ZERO_UUID, new ApiError(INVALID_REQUEST,
"Neither topic name nor id were specified."))
} else if (duplicateProvidedIds.contains(topic.topicId) || !providedIds.add(topic.topicId)) {
duplicateProvidedIds.add(topic.topicId)
providedIds.remove(topic.topicId)
}
} else {
if (topic.topicId.equals(ZERO_UUID)) {
addProvidedName(topic.name)
} else {
appendResponse(topic.name, topic.topicId, new ApiError(INVALID_REQUEST,
"You may not specify both topic name and topic id."))
}
}
}
// Create error responses for duplicates.
duplicateProvidedNames.forEach(name => appendResponse(name, ZERO_UUID,
new ApiError(INVALID_REQUEST, "Duplicate topic name.")))
duplicateProvidedIds.forEach(id => appendResponse(null, id,
new ApiError(INVALID_REQUEST, "Duplicate topic id.")))
// At this point we have all the valid names and IDs that have been provided.
// However, the Authorizer needs topic names as inputs, not topic IDs. So
// we need to resolve all IDs to names.
val toAuthenticate = new util.HashSet[String]
toAuthenticate.addAll(providedNames)
val idToName = new util.HashMap[Uuid, String]
controller.findTopicNames(deadlineNs, providedIds).thenCompose { topicNames =>
topicNames.forEach { (id, nameOrError) =>
if (nameOrError.isError) {
appendResponse(null, id, nameOrError.error())
} else {
toAuthenticate.add(nameOrError.result())
idToName.put(id, nameOrError.result())
}
}
// Get the list of deletable topics (those we can delete) and the list of describeable
// topics.
val topicsToAuthenticate = toAuthenticate.asScala
val (describeable, deletable) = if (hasClusterAuth) {
(topicsToAuthenticate.toSet, topicsToAuthenticate.toSet)
} else {
(getDescribableTopics(topicsToAuthenticate), getDeletableTopics(topicsToAuthenticate))
}
// For each topic that was provided by ID, check if authentication failed.
// If so, remove it from the idToName map and create an error response for it.
val iterator = idToName.entrySet().iterator()
while (iterator.hasNext) {
val entry = iterator.next()
val id = entry.getKey
val name = entry.getValue
if (!deletable.contains(name)) {
if (describeable.contains(name)) {
appendResponse(name, id, new ApiError(TOPIC_AUTHORIZATION_FAILED))
} else {
appendResponse(null, id, new ApiError(TOPIC_AUTHORIZATION_FAILED))
}
iterator.remove()
}
}
// For each topic that was provided by name, check if authentication failed.
// If so, create an error response for it. Otherwise, add it to the idToName map.
controller.findTopicIds(deadlineNs, providedNames).thenCompose { topicIds =>
topicIds.forEach { (name, idOrError) =>
if (!describeable.contains(name)) {
appendResponse(name, ZERO_UUID, new ApiError(TOPIC_AUTHORIZATION_FAILED))
} else if (idOrError.isError) {
appendResponse(name, ZERO_UUID, idOrError.error)
} else if (deletable.contains(name)) {
val id = idOrError.result()
if (duplicateProvidedIds.contains(id) || idToName.put(id, name) != null) {
// This is kind of a weird case: what if we supply topic ID X and also a name
// that maps to ID X? In that case, _if authorization succeeds_, we end up
// here. If authorization doesn't succeed, we refrain from commenting on the
// situation since it would reveal topic ID mappings.
duplicateProvidedIds.add(id)
idToName.remove(id)
appendResponse(name, id, new ApiError(INVALID_REQUEST,
"The provided topic name maps to an ID that was already supplied."))
}
} else {
appendResponse(name, ZERO_UUID, new ApiError(TOPIC_AUTHORIZATION_FAILED))
}
}
// Finally, the idToName map contains all the topics that we are authorized to delete.
// Perform the deletion and create responses for each one.
controller.deleteTopics(deadlineNs, idToName.keySet).thenApply { idToError =>
idToError.forEach { (id, error) =>
appendResponse(idToName.get(id), id, error)
}
// Shuffle the responses so that users can not use patterns in their positions to
// distinguish between absent topics and topics we are not permitted to see.
Collections.shuffle(responses)
responses
}
}
}
}
def handleCreateTopics(request: RequestChannel.Request): Unit = {
val createTopicsRequest = request.body[CreateTopicsRequest]
val future = createTopics(createTopicsRequest.data(),
authHelper.authorize(request.context, CREATE, CLUSTER, CLUSTER_NAME),
names => authHelper.filterByAuthorized(request.context, CREATE, TOPIC, names)(identity))
future.whenComplete { (result, exception) =>
requestHelper.sendResponseMaybeThrottle(request, throttleTimeMs => {
if (exception != null) {
createTopicsRequest.getErrorResponse(throttleTimeMs, exception)
} else {
result.setThrottleTimeMs(throttleTimeMs)
new CreateTopicsResponse(result)
}
})
}
}
def createTopics(request: CreateTopicsRequestData,
hasClusterAuth: Boolean,
getCreatableTopics: Iterable[String] => Set[String])
: CompletableFuture[CreateTopicsResponseData] = {
val topicNames = new util.HashSet[String]()
val duplicateTopicNames = new util.HashSet[String]()
request.topics().forEach { topicData =>
if (!duplicateTopicNames.contains(topicData.name())) {
if (!topicNames.add(topicData.name())) {
topicNames.remove(topicData.name())
duplicateTopicNames.add(topicData.name())
}
}
}
val authorizedTopicNames = if (hasClusterAuth) {
topicNames.asScala
} else {
getCreatableTopics.apply(topicNames.asScala)
}
val effectiveRequest = request.duplicate()
val iterator = effectiveRequest.topics().iterator()
while (iterator.hasNext) {
val creatableTopic = iterator.next()
if (duplicateTopicNames.contains(creatableTopic.name()) ||
!authorizedTopicNames.contains(creatableTopic.name())) {
iterator.remove()
}
}
controller.createTopics(effectiveRequest).thenApply { response =>
duplicateTopicNames.forEach { name =>
response.topics().add(new CreatableTopicResult().
setName(name).
setErrorCode(INVALID_REQUEST.code).
setErrorMessage("Duplicate topic name."))
}
topicNames.forEach { name =>
if (!authorizedTopicNames.contains(name)) {
response.topics().add(new CreatableTopicResult().
setName(name).
setErrorCode(TOPIC_AUTHORIZATION_FAILED.code))
}
}
response
}
}
def handleApiVersionsRequest(request: RequestChannel.Request): Unit = {
// Note that the broker returns its full list of supported ApiKeys and versions regardless of current
// authentication state (e.g., before SASL authentication on an SASL listener). Note also that no
// Kafka protocol requests may take place on an SSL listener before the SSL handshake is finished.
// If this is considered to leak information about the broker version a workaround is to use SSL
// with client authentication which is performed at an earlier stage of the connection where the
// ApiVersionRequest is not available.
def createResponseCallback(requestThrottleMs: Int): ApiVersionsResponse = {
val apiVersionRequest = request.body[ApiVersionsRequest]
if (apiVersionRequest.hasUnsupportedRequestVersion) {
apiVersionRequest.getErrorResponse(requestThrottleMs, UNSUPPORTED_VERSION.exception)
} else if (!apiVersionRequest.isValid) {
apiVersionRequest.getErrorResponse(requestThrottleMs, INVALID_REQUEST.exception)
} else {
apiVersionManager.apiVersionResponse(requestThrottleMs)
}
}
requestHelper.sendResponseMaybeThrottle(request, createResponseCallback)
}
def authorizeAlterResource(requestContext: RequestContext,
resource: ConfigResource): ApiError = {
resource.`type` match {
case ConfigResource.Type.BROKER =>
if (authHelper.authorize(requestContext, ALTER_CONFIGS, CLUSTER, CLUSTER_NAME)) {
new ApiError(NONE)
} else {
new ApiError(CLUSTER_AUTHORIZATION_FAILED)
}
case ConfigResource.Type.TOPIC =>
if (authHelper.authorize(requestContext, ALTER_CONFIGS, TOPIC, resource.name)) {
new ApiError(NONE)
} else {
new ApiError(TOPIC_AUTHORIZATION_FAILED)
}
case rt => new ApiError(INVALID_REQUEST, s"Unexpected resource type $rt.")
}
}
def handleLegacyAlterConfigs(request: RequestChannel.Request): Unit = {
val response = new AlterConfigsResponseData()
val alterConfigsRequest = request.body[AlterConfigsRequest]
val duplicateResources = new util.HashSet[ConfigResource]
val configChanges = new util.HashMap[ConfigResource, util.Map[String, String]]()
alterConfigsRequest.data.resources.forEach { resource =>
val configResource = new ConfigResource(
ConfigResource.Type.forId(resource.resourceType), resource.resourceName())
if (configResource.`type`().equals(ConfigResource.Type.UNKNOWN)) {
response.responses().add(new OldAlterConfigsResourceResponse().
setErrorCode(UNSUPPORTED_VERSION.code()).
setErrorMessage("Unknown resource type " + resource.resourceType() + ".").
setResourceName(resource.resourceName()).
setResourceType(resource.resourceType()))
} else if (!duplicateResources.contains(configResource)) {
val configs = new util.HashMap[String, String]()
resource.configs().forEach(config => configs.put(config.name(), config.value()))
if (configChanges.put(configResource, configs) != null) {
duplicateResources.add(configResource)
configChanges.remove(configResource)
response.responses().add(new OldAlterConfigsResourceResponse().
setErrorCode(INVALID_REQUEST.code()).
setErrorMessage("Duplicate resource.").
setResourceName(resource.resourceName()).
setResourceType(resource.resourceType()))
}
}
}
val iterator = configChanges.keySet().iterator()
while (iterator.hasNext) {
val resource = iterator.next()
val apiError = authorizeAlterResource(request.context, resource)
if (apiError.isFailure) {
response.responses().add(new OldAlterConfigsResourceResponse().
setErrorCode(apiError.error().code()).
setErrorMessage(apiError.message()).
setResourceName(resource.name()).
setResourceType(resource.`type`().id()))
iterator.remove()
}
}
controller.legacyAlterConfigs(configChanges, alterConfigsRequest.data.validateOnly)
.whenComplete { (controllerResults, exception) =>
if (exception != null) {
requestHelper.handleError(request, exception)
} else {
controllerResults.entrySet().forEach(entry => response.responses().add(
new OldAlterConfigsResourceResponse().
setErrorCode(entry.getValue.error().code()).
setErrorMessage(entry.getValue.message()).
setResourceName(entry.getKey.name()).
setResourceType(entry.getKey.`type`().id())))
requestHelper.sendResponseMaybeThrottle(request, throttleMs =>
new AlterConfigsResponse(response.setThrottleTimeMs(throttleMs)))
}
}
}
def handleVote(request: RequestChannel.Request): Unit = {
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
handleRaftRequest(request, response => new VoteResponse(response.asInstanceOf[VoteResponseData]))
}
def handleBeginQuorumEpoch(request: RequestChannel.Request): Unit = {
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
handleRaftRequest(request, response => new BeginQuorumEpochResponse(response.asInstanceOf[BeginQuorumEpochResponseData]))
}
def handleEndQuorumEpoch(request: RequestChannel.Request): Unit = {
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
handleRaftRequest(request, response => new EndQuorumEpochResponse(response.asInstanceOf[EndQuorumEpochResponseData]))
}
def handleDescribeQuorum(request: RequestChannel.Request): Unit = {
authHelper.authorizeClusterOperation(request, DESCRIBE)
handleRaftRequest(request, response => new DescribeQuorumResponse(response.asInstanceOf[DescribeQuorumResponseData]))
}
def handleAlterIsrRequest(request: RequestChannel.Request): Unit = {
val alterIsrRequest = request.body[AlterIsrRequest]
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
val future = controller.alterIsr(alterIsrRequest.data)
future.whenComplete { (result, exception) =>
val response = if (exception != null) {
alterIsrRequest.getErrorResponse(exception)
} else {
new AlterIsrResponse(result)
}
requestHelper.sendResponseExemptThrottle(request, response)
}
}
def handleBrokerHeartBeatRequest(request: RequestChannel.Request): Unit = {
val heartbeatRequest = request.body[BrokerHeartbeatRequest]
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
controller.processBrokerHeartbeat(heartbeatRequest.data).handle[Unit] { (reply, e) =>
def createResponseCallback(requestThrottleMs: Int,
reply: BrokerHeartbeatReply,
e: Throwable): BrokerHeartbeatResponse = {
if (e != null) {
new BrokerHeartbeatResponse(new BrokerHeartbeatResponseData().
setThrottleTimeMs(requestThrottleMs).
setErrorCode(Errors.forException(e).code))
} else {
new BrokerHeartbeatResponse(new BrokerHeartbeatResponseData().
setThrottleTimeMs(requestThrottleMs).
setErrorCode(NONE.code).
setIsCaughtUp(reply.isCaughtUp).
setIsFenced(reply.isFenced).
setShouldShutDown(reply.shouldShutDown))
}
}
requestHelper.sendResponseMaybeThrottle(request,
requestThrottleMs => createResponseCallback(requestThrottleMs, reply, e))
}
}
def handleUnregisterBroker(request: RequestChannel.Request): Unit = {
val decommissionRequest = request.body[UnregisterBrokerRequest]
authHelper.authorizeClusterOperation(request, ALTER)
controller.unregisterBroker(decommissionRequest.data().brokerId()).handle[Unit] { (_, e) =>
def createResponseCallback(requestThrottleMs: Int,
e: Throwable): UnregisterBrokerResponse = {
if (e != null) {
new UnregisterBrokerResponse(new UnregisterBrokerResponseData().
setThrottleTimeMs(requestThrottleMs).
setErrorCode(Errors.forException(e).code))
} else {
new UnregisterBrokerResponse(new UnregisterBrokerResponseData().
setThrottleTimeMs(requestThrottleMs))
}
}
requestHelper.sendResponseMaybeThrottle(request,
requestThrottleMs => createResponseCallback(requestThrottleMs, e))
}
}
def handleBrokerRegistration(request: RequestChannel.Request): Unit = {
val registrationRequest = request.body[BrokerRegistrationRequest]
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
controller.registerBroker(registrationRequest.data).handle[Unit] { (reply, e) =>
def createResponseCallback(requestThrottleMs: Int,
reply: BrokerRegistrationReply,
e: Throwable): BrokerRegistrationResponse = {
if (e != null) {
new BrokerRegistrationResponse(new BrokerRegistrationResponseData().
setThrottleTimeMs(requestThrottleMs).
setErrorCode(Errors.forException(e).code))
} else {
new BrokerRegistrationResponse(new BrokerRegistrationResponseData().
setThrottleTimeMs(requestThrottleMs).
setErrorCode(NONE.code).
setBrokerEpoch(reply.epoch))
}
}
requestHelper.sendResponseMaybeThrottle(request,
requestThrottleMs => createResponseCallback(requestThrottleMs, reply, e))
}
}
private def handleRaftRequest(request: RequestChannel.Request,
buildResponse: ApiMessage => AbstractResponse): Unit = {
val requestBody = request.body[AbstractRequest]
val future = raftManager.handleRequest(request.header, requestBody.data, time.milliseconds())
future.whenComplete { (responseData, exception) =>
val response = if (exception != null) {
requestBody.getErrorResponse(exception)
} else {
buildResponse(responseData)
}
requestHelper.sendResponseExemptThrottle(request, response)
}
}
def handleAlterClientQuotas(request: RequestChannel.Request): Unit = {
val quotaRequest = request.body[AlterClientQuotasRequest]
authHelper.authorizeClusterOperation(request, ALTER_CONFIGS)
controller.alterClientQuotas(quotaRequest.entries, quotaRequest.validateOnly)
.whenComplete { (results, exception) =>
if (exception != null) {
requestHelper.handleError(request, exception)
} else {
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
AlterClientQuotasResponse.fromQuotaEntities(results, requestThrottleMs))
}
}
}
def handleIncrementalAlterConfigs(request: RequestChannel.Request): Unit = {
val response = new IncrementalAlterConfigsResponseData()
val alterConfigsRequest = request.body[IncrementalAlterConfigsRequest]
val duplicateResources = new util.HashSet[ConfigResource]
val configChanges = new util.HashMap[ConfigResource,
util.Map[String, Entry[AlterConfigOp.OpType, String]]]()
alterConfigsRequest.data.resources.forEach { resource =>
val configResource = new ConfigResource(
ConfigResource.Type.forId(resource.resourceType), resource.resourceName())
if (configResource.`type`().equals(ConfigResource.Type.UNKNOWN)) {
response.responses().add(new AlterConfigsResourceResponse().
setErrorCode(UNSUPPORTED_VERSION.code()).
setErrorMessage("Unknown resource type " + resource.resourceType() + ".").
setResourceName(resource.resourceName()).
setResourceType(resource.resourceType()))
} else if (!duplicateResources.contains(configResource)) {
val altersByName = new util.HashMap[String, Entry[AlterConfigOp.OpType, String]]()
resource.configs.forEach { config =>
altersByName.put(config.name, new util.AbstractMap.SimpleEntry[AlterConfigOp.OpType, String](
AlterConfigOp.OpType.forId(config.configOperation), config.value))
}
if (configChanges.put(configResource, altersByName) != null) {
duplicateResources.add(configResource)
configChanges.remove(configResource)
response.responses().add(new AlterConfigsResourceResponse().
setErrorCode(INVALID_REQUEST.code()).
setErrorMessage("Duplicate resource.").
setResourceName(resource.resourceName()).
setResourceType(resource.resourceType()))
}
}
}
val iterator = configChanges.keySet().iterator()
while (iterator.hasNext) {
val resource = iterator.next()
val apiError = authorizeAlterResource(request.context, resource)
if (apiError.isFailure) {
response.responses().add(new AlterConfigsResourceResponse().
setErrorCode(apiError.error().code()).
setErrorMessage(apiError.message()).
setResourceName(resource.name()).
setResourceType(resource.`type`().id()))
iterator.remove()
}
}
controller.incrementalAlterConfigs(configChanges, alterConfigsRequest.data.validateOnly)
.whenComplete { (controllerResults, exception) =>
if (exception != null) {
requestHelper.handleError(request, exception)
} else {
controllerResults.entrySet().forEach(entry => response.responses().add(
new AlterConfigsResourceResponse().
setErrorCode(entry.getValue.error().code()).
setErrorMessage(entry.getValue.message()).
setResourceName(entry.getKey.name()).
setResourceType(entry.getKey.`type`().id())))
requestHelper.sendResponseMaybeThrottle(request, throttleMs =>
new IncrementalAlterConfigsResponse(response.setThrottleTimeMs(throttleMs)))
}
}
}
def handleCreatePartitions(request: RequestChannel.Request): Unit = {
val future = createPartitions(request.body[CreatePartitionsRequest].data,
authHelper.authorize(request.context, CREATE, CLUSTER, CLUSTER_NAME),
names => authHelper.filterByAuthorized(request.context, CREATE, TOPIC, names)(n => n))
future.whenComplete { (responses, exception) =>
if (exception != null) {
requestHelper.handleError(request, exception)
} else {
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => {
val responseData = new CreatePartitionsResponseData().
setResults(responses).
setThrottleTimeMs(requestThrottleMs)
new CreatePartitionsResponse(responseData)
})
}
}
}
def createPartitions(request: CreatePartitionsRequestData,
hasClusterAuth: Boolean,
getCreatableTopics: Iterable[String] => Set[String])
: CompletableFuture[util.List[CreatePartitionsTopicResult]] = {
val deadlineNs = time.nanoseconds() + NANOSECONDS.convert(request.timeoutMs, MILLISECONDS);
val responses = new util.ArrayList[CreatePartitionsTopicResult]()
val duplicateTopicNames = new util.HashSet[String]()
val topicNames = new util.HashSet[String]()
request.topics().forEach {
topic =>
if (!topicNames.add(topic.name())) {
duplicateTopicNames.add(topic.name())
}
}
duplicateTopicNames.forEach { topicName =>
responses.add(new CreatePartitionsTopicResult().
setName(topicName).
setErrorCode(INVALID_REQUEST.code).
setErrorMessage("Duplicate topic name."))
topicNames.remove(topicName)
}
val authorizedTopicNames = {
if (hasClusterAuth) {
topicNames.asScala
} else {
getCreatableTopics(topicNames.asScala)
}
}
val topics = new util.ArrayList[CreatePartitionsTopic]
topicNames.forEach { topicName =>
if (authorizedTopicNames.contains(topicName)) {
topics.add(request.topics().find(topicName))
} else {
responses.add(new CreatePartitionsTopicResult().
setName(topicName).
setErrorCode(TOPIC_AUTHORIZATION_FAILED.code))
}
}
controller.createPartitions(deadlineNs, topics).thenApply { results =>
results.forEach(response => responses.add(response))
responses
}
}
def handleAlterPartitionReassignments(request: RequestChannel.Request): Unit = {
val alterRequest = request.body[AlterPartitionReassignmentsRequest]
authHelper.authorizeClusterOperation(request, ALTER)
val response = controller.alterPartitionReassignments(alterRequest.data()).get()
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new AlterPartitionReassignmentsResponse(response.setThrottleTimeMs(requestThrottleMs)))
}
def handleListPartitionReassignments(request: RequestChannel.Request): Unit = {
val listRequest = request.body[ListPartitionReassignmentsRequest]
authHelper.authorizeClusterOperation(request, DESCRIBE)
val response = controller.listPartitionReassignments(listRequest.data()).get()
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new ListPartitionReassignmentsResponse(response.setThrottleTimeMs(requestThrottleMs)))
}
def handleAllocateProducerIdsRequest(request: RequestChannel.Request): Unit = {
val allocatedProducerIdsRequest = request.body[AllocateProducerIdsRequest]
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
controller.allocateProducerIds(allocatedProducerIdsRequest.data)
.whenComplete((results, exception) => {
if (exception != null) {
requestHelper.handleError(request, exception)
} else {
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => {
results.setThrottleTimeMs(requestThrottleMs)
new AllocateProducerIdsResponse(results)
})
}
})
}
}
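// --- Editor's note (hedged sketch, not part of ControllerApis): deleteTopics, createTopics
// and createPartitions above all use the same duplicate-handling idiom -- when a name is seen
// a second time, both occurrences are dropped from the valid set and the name is reported once
// as a duplicate. The standalone helper below isolates that idiom using the file's imports.
object DuplicateHandlingSketch {
  def partitionDuplicates(names: Iterable[String]): (Set[String], Set[String]) = {
    val valid = new util.HashSet[String]
    val duplicates = new util.HashSet[String]
    names.foreach { name =>
      if (duplicates.contains(name) || !valid.add(name)) {
        duplicates.add(name)
        valid.remove(name)
      }
    }
    (valid.asScala.toSet, duplicates.asScala.toSet)
  }

  def main(args: Array[String]): Unit =
    println(partitionDuplicates(Seq("a", "b", "a", "c", "a"))) // valid: b, c   duplicates: a
}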
|
guozhangwang/kafka
|
core/src/main/scala/kafka/server/ControllerApis.scala
|
Scala
|
apache-2.0
| 36,262
|
package teststate.util
import teststate.util.StdlibUtil._
trait StdlibUtil {
implicit def toStateTestEitherStringExt[A](e: Either[String, A]): StateTestEitherStringExt[A] =
new StateTestEitherStringExt(e)
implicit def TestStateOptionExt[A](a: Option[A]): TestStateOptionExt[A] =
new TestStateOptionExt(a)
type NamedOption[+A] = StdlibUtil.NamedOption[A]
implicit def toTestStateTraversableExt[A](as: Iterable[A]): TestStateTraversableExt[A] =
new TestStateTraversableExt(as)
type NamedVector[+A] = StdlibUtil.NamedVector[A]
implicit def TestStateMapExt[K, V](a: Map[K, V]): TestStateMapExt[K, V] =
new TestStateMapExt(a)
type NamedMap[K, +V] = StdlibUtil.NamedMap[K, V]
}
object StdlibUtil {
private implicit def toStateTestEitherStringExt[A](e: Either[String, A]): StateTestEitherStringExt[A] =
new StateTestEitherStringExt(e)
final class StateTestEitherStringExt[A](private val self: Either[String, A]) extends AnyVal {
def getOrThrow(): A =
self match {
case Right(a) => a
case Left(e) => sys.error(e)
}
}
// ===================================================================================================================
final class NamedOption[+A](val name: String, val underlying: Option[A]) {
private def errMsg = s"$name not available"
def filter(desc: String, f: A => Boolean): NamedOption[A] =
new NamedOption(s"$name($desc)", underlying filter f)
def get: A =
underlying match {
case Some(a) => a
case None => sys.error(errMsg + ". None.get")
}
def attempt: Either[String, A] =
underlying match {
case Some(a) => Right(a)
case None => Left(errMsg)
}
def map[B](f: A => B): NamedOption[B] =
new NamedOption[B](name, underlying map f)
def dist[F[_], B](implicit ev: NamedOption[A] <:< NamedOption[F[B]], F: Distributive[F]): F[NamedOption[B]] =
F.cosequence(this)
def distGet[F[_], B](implicit ev: NamedOption[A] <:< NamedOption[F[B]], F: Distributive[F]): F[B] =
F.cotraverse(ev(this))(_.get)
}
object NamedOption {
implicit def toOption[A](n: NamedOption[A]): Option[A] = n.underlying
implicit lazy val functor: Functor[NamedOption] =
new Functor[NamedOption] {
override def map[A, B](fa: NamedOption[A])(f: A => B) = fa map f
}
}
final class TestStateOptionExt[A](private val self: Option[A]) extends AnyVal {
def named(name: String): NamedOption[A] =
new NamedOption(name, self)
}
// ===================================================================================================================
final class NamedVector[+A](namePlural: String, val underlying: Vector[A]) {
def filter(desc: String, f: A => Boolean): NamedVector[A] =
new NamedVector(s"$namePlural($desc)", underlying filter f)
/** Expect exactly one element */
def attemptOne: Either[String, A] =
underlying.length match {
case 1 => Right(underlying.head)
case n => Left(s"$n $namePlural found. Expect exactly 1.")
}
def attemptAtIndex(i: Int): Either[String, A] =
if (underlying.indices.contains(i))
Right(underlying(i))
else
Left(s"$namePlural[$i] not found; ${underlying.length} available.")
def map[B](f: A => B): NamedVector[B] =
new NamedVector[B](namePlural, underlying map f)
/** Expect exactly one element */
def getOne: A =
attemptOne.getOrThrow()
def getAtIndex(i: Int): A =
attemptAtIndex(i).getOrThrow()
def dist[F[_], B](implicit ev: NamedVector[A] <:< NamedVector[F[B]], F: Distributive[F]): F[NamedVector[B]] =
F.cosequence(this)
def distGetOne[F[_], B](implicit ev: NamedVector[A] <:< NamedVector[F[B]], F: Distributive[F]): F[B] =
F.cotraverse(ev(this))(_.getOne)
}
object NamedVector {
implicit def toVector[A](n: NamedVector[A]): Vector[A] = n.underlying
implicit lazy val functor: Functor[NamedVector] =
new Functor[NamedVector] {
override def map[A, B](fa: NamedVector[A])(f: A => B) = fa map f
}
}
final class TestStateTraversableExt[A](private val self: Iterable[A]) extends AnyVal {
def named(namePlural: String): NamedVector[A] =
new NamedVector(namePlural, self.toVector)
}
// ===================================================================================================================
final class NamedMap[K, +V](namePlural: String, val underlying: Map[K, V]) {
def toVector: NamedVector[(K, V)] =
new NamedVector(namePlural, underlying.toVector)
def filter(desc: String, f: ((K, V)) => Boolean): NamedMap[K, V] =
new NamedMap(s"$namePlural($desc)", underlying filter f)
def get(k: K): NamedOption[V] =
new NamedOption(namePlural, underlying.get(k))
def apply(k: K): V =
underlying.get(k) match {
case Some(v) => v
case None => sys.error(s"$namePlural doesn't contain $k; it contains ${underlying.keys.mkString("[", ", ", "]")}")
}
}
implicit def NamedMapToMap[K, V](n: NamedMap[K, V]): Map[K, V] = n.underlying
final class TestStateMapExt[K, V](private val self: Map[K, V]) extends AnyVal {
def named(namePlural: String): NamedMap[K, V] =
new NamedMap(namePlural, self.toMap)
}
}
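// --- Editor's note (hedged usage sketch, not part of the library): shows how the Named*
// wrappers attach a human-readable name so that failed lookups report what was being looked
// up instead of a bare None.get or index error. Assumes the rest of the file compiles as in
// the repository (Functor/Distributive come from elsewhere in teststate.util).
object StdlibUtilSketch extends StdlibUtil {
  def main(args: Array[String]): Unit = {
    val users = Vector("alice", "bob").named("users")
    println(users.getAtIndex(1))                                            // bob
    println(users.filter("starting with a", _.startsWith("a")).attemptOne)  // Right(alice)
    println(Map("a" -> 1).named("config").get("b").attempt)                 // Left(config not available)
  }
}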
|
japgolly/test-state
|
util/shared/src/main/scala/teststate/util/StdlibUtil.scala
|
Scala
|
apache-2.0
| 5,348
|
package org.vaadin.addons.rinne
import java.util.Date
import com.vaadin.event.dd.DropHandler
import com.vaadin.ui.Calendar
import com.vaadin.ui.components.calendar.event.CalendarEventProvider
import org.vaadin.addons.rinne.mixins.AbstractComponentMixin
class VCalendar extends Calendar with AbstractComponentMixin {
def eventProvider: Option[CalendarEventProvider] = Option(getEventProvider)
def eventProvider_=(value: CalendarEventProvider): Unit = setEventProvider(value)
def dayNamesShort: Array[String] = getDayNamesShort
def dropHandler: Option[DropHandler] = Option(getDropHandler)
def dropHandler_=(value: DropHandler): Unit = setDropHandler(value)
def dropHandler_=(value: Option[DropHandler]): Unit = setDropHandler(value.orNull)
def calendarEndDate: Date = getEndDate
def calendarEndDate_=(value: Date): Unit = setEndDate(value)
def firstVisibleDayOfWeek: Int = getFirstVisibleDayOfWeek
def firstVisibleDayOfWeek_=(value: Int): Unit = setFirstVisibleDayOfWeek(value)
def firstVisibleHourOfDay: Int = getFirstVisibleHourOfDay
def firstVisibleHourOfDay_=(value: Int): Unit = setFirstVisibleHourOfDay(value)
def internalCalendar: java.util.Calendar = getInternalCalendar
def lastVisibleDayOfWeek: Int = getLastVisibleDayOfWeek
def lastVisibleHourOfDay: Int = getLastVisibleHourOfDay
def monthNamesShort: Array[String] = getMonthNamesShort
def calendarStartDate: Date = getStartDate
def calendarStartDate_=(value: Date): Unit = setStartDate(value)
def timeFormat: Calendar.TimeFormat = getTimeFormat
def timeFormat_=(value: Calendar.TimeFormat): Unit = setTimeFormat(value)
def timeZone: java.util.TimeZone = getTimeZone
def timeZone_=(value: java.util.TimeZone) = setTimeZone(value)
def weeklyCaptionFormat: String = getWeeklyCaptionFormat
def weeklyCaptionFormat_=(value: String): Unit = setWeeklyCaptionFormat(value)
}
|
LukaszByczynski/rinne
|
src/main/scala/org/vaadin/addons/rinne/VCalendar.scala
|
Scala
|
apache-2.0
| 1,905
|
import sbt._
import Process._
import com.twitter.sbt._
class CachetProject(info: ProjectInfo) extends StandardProject(info) {
val specs = "org.scala-tools.testing" % "specs" % "1.6.2.1" % "test"
val configgy = "net.lag" % "configgy" % "1.5"
val jetty = "org.mortbay.jetty" % "jetty" % "6.1.24"
val jetty_util = "org.mortbay.jetty" % "jetty-util" % "6.1.24"
val jetty_client = "org.mortbay.jetty" % "jetty-client" % "6.1.24"
val jetty_servlet_tester = "org.mortbay.jetty" % "jetty-servlet-tester" % "6.1.24"
val jetty_sslengine = "org.mortbay.jetty" % "jetty-sslengine" % "6.1.24"
val servlet_api = "javax.servlet" % "servlet-api" % "2.5"
val ehcache = "net.sf.ehcache" % "ehcache" % "1.5.0"
val asm = "asm" % "asm" % "1.5.3"
val cglib = "cglib" % "cglib" % "2.1_3"
val hamcrest = "org.hamcrest" % "hamcrest-all" % "1.1"
val jmock = "org.jmock" % "jmock" % "2.4.0"
val objenesis = "org.objenesis" % "objenesis" % "1.1"
val ostrich = "com.twitter" % "ostrich" % "1.1.16"
}
|
nkallen/cachet
|
project/build/Cachet.scala
|
Scala
|
mit
| 1,019
|
package model
import org.bson.types.ObjectId
/**
* The Tag class.
*
* @author Nguyen Duc Dung
* @since 2/7/14 5:28 PM
*
*/
case class Tag(
_id: ObjectId = new ObjectId(),
name: String,
count: Long = 0,
read: Long = 0
)
|
SunriseSoftVN/hayhayblog
|
core/app/model/Tag.scala
|
Scala
|
gpl-2.0
| 308
|
/* The random case is simple - we generate a double and use this to choose between
* the two random samplers. The exhaustive case is trickier if we want to try
* to produce a stream that does a weighted interleave of the two exhaustive streams.
*/
def weighted[A](g1: (Gen[A],Double), g2: (Gen[A],Double)): Gen[A] = {
/* The probability we should pull from `g1`. */
val g1Threshold = g1._2.abs / (g1._2.abs + g2._2.abs)
/* Some random booleans to use for selecting between g1 and g2 in the exhaustive case.
* Making up a seed locally is fine here, since we just want a deterministic schedule
* with the right distribution. */
def bools: Stream[Boolean] =
randomStream(uniform.map(_ < g1Threshold))(RNG.simple(302837L))
Gen(State(RNG.double).flatMap(d => if (d < g1Threshold) g1._1.sample else g2._1.sample),
interleave(bools, g1._1.exhaustive, g2._1.exhaustive))
}
/* Produce an infinite random stream from a `Gen` and a starting `RNG`. */
def randomStream[A](g: Gen[A])(rng: RNG): Stream[A] =
Stream.unfold(rng)(rng => Some(g.sample.run(rng)))
/* Interleave the two streams, using `b` to control which stream to pull from at each step.
* A value of `true` attempts to pull from `s1`; `false` attempts to pull from `s2`.
* When either stream is exhausted, insert all remaining elements from the other stream.
*/
def interleave[A](b: Stream[Boolean], s1: Stream[A], s2: Stream[A]): Stream[A] = new Stream[A] {
def uncons = b.uncons flatMap { case (bh,bt) =>
if (bh) s1.uncons map { case (s1h,s1t) => (s1h, interleave(bt,s1t,s2)) } orElse s2.uncons
else s2.uncons map { case (s2h,s2t) => (s2h, interleave(bt,s1,s2t)) } orElse s1.uncons
}
}
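// --- Editor's note (hedged, self-contained sketch, not from the answer key): the same
// interleave-by-schedule idea expressed over plain LazyLists instead of the book's Stream,
// so it can be run directly. The boolean schedule decides which stream supplies the next
// element; once one stream is exhausted, the rest of the other stream is appended.
object WeightedInterleaveSketch {
  def interleave[A](b: LazyList[Boolean], s1: LazyList[A], s2: LazyList[A]): LazyList[A] =
    b match {
      case bh #:: bt if bh =>
        s1 match {
          case h #:: t => h #:: interleave(bt, t, s2)
          case _       => s2
        }
      case _ #:: bt =>
        s2 match {
          case h #:: t => h #:: interleave(bt, s1, t)
          case _       => s1
        }
      case _ => LazyList.empty
    }

  def main(args: Array[String]): Unit = {
    val schedule = LazyList.from(0).map(i => i % 3 != 2) // roughly 2/3 of picks from s1
    println(interleave(schedule, LazyList.from(1), LazyList.from(100)).take(9).toList)
    // List(1, 2, 100, 3, 4, 101, 5, 6, 102)
  }
}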
|
ShokuninSan/fpinscala
|
answerkey/testing/11.answer.scala
|
Scala
|
mit
| 1,696
|
package org.jetbrains.plugins.scala.testingSupport.scalatest.scala2_10.scalatest2_1_7
import org.jetbrains.plugins.scala.SlowTests
import org.jetbrains.plugins.scala.testingSupport.scalatest.staticStringTest._
import org.junit.experimental.categories.Category
/**
* @author Roman.Shein
* @since 24.06.2015.
*/
@Category(Array(classOf[SlowTests]))
class Scalatest2_10_2_1_7_StaticStringTest extends Scalatest2_10_2_1_7_Base with FeatureSpecStaticStringTest with
FlatSpecStaticStringTest with FreeSpecStaticStringTest with FunSpecStaticStringTest with FunSuiteStaticStringTest with
PropSpecStaticStringTest with WordSpecStaticStringTest with MethodsStaticStringTest
|
triplequote/intellij-scala
|
scala/scala-impl/test/org/jetbrains/plugins/scala/testingSupport/scalatest/scala2_10/scalatest2_1_7/Scalatest2_10_2_1_7_StaticStringTest.scala
|
Scala
|
apache-2.0
| 673
|
package org.jetbrains.jps.incremental.scala
package remote
import java.io.{File, PrintStream}
import java.util.{Timer, TimerTask}
import com.intellij.util.Base64Converter
import com.martiansoftware.nailgun.NGContext
import org.jetbrains.jps.incremental.messages.BuildMessage.Kind
import org.jetbrains.jps.incremental.scala.local.{LocalServer, WorksheetInProcessRunnerFactory}
/**
* @author Pavel Fatin
* @author Dmitry Naydanov
*/
object Main {
private val Server = new LocalServer()
private val worksheetFactory = new WorksheetInProcessRunnerFactory
private var shutdownTimer: Timer = null
def nailMain(context: NGContext) {
cancelShutdown()
make(context.getArgs.toSeq, context.out, false)
resetShutdownTimer(context)
}
def main(args: Array[String]) {
make(args, System.out, true)
}
private def make(arguments: Seq[String], out: PrintStream, standalone: Boolean) {
var hasErrors = false
val client = {
val eventHandler = (event: Event) => {
val encode = Base64Converter.encode(event.toBytes)
out.write((if (standalone && !encode.endsWith("=")) encode + "=" else encode).getBytes)
}
new EventGeneratingClient(eventHandler, out.checkError) {
override def error(text: String, source: Option[File], line: Option[Long], column: Option[Long]) {
hasErrors = true
super.error(text, source, line, column)
}
override def message(kind: Kind, text: String, source: Option[File], line: Option[Long], column: Option[Long]) {
if (kind == Kind.ERROR) hasErrors = true
super.message(kind, text, source, line, column)
}
}
}
val oldOut = System.out
// Suppress any stdout data, interpret such data as error
System.setOut(System.err)
try {
val args = {
val strings = arguments.map {
arg =>
val s = new String(Base64Converter.decode(arg.getBytes), "UTF-8")
if (s == "#STUB#") "" else s
}
Arguments.from(strings)
}
Server.compile(args.sbtData, args.compilerData, args.compilationData, client)
if (!hasErrors) worksheetFactory.getRunner(out, standalone).loadAndRun(args, client)
} catch {
case e: Throwable =>
client.trace(e)
} finally {
System.setOut(oldOut)
}
}
private def cancelShutdown() = {
if (shutdownTimer != null) shutdownTimer.cancel()
}
private def resetShutdownTimer(context: NGContext) {
val delay = Option(System.getProperty("shutdown.delay")).map(_.toInt)
delay.foreach { t =>
val delayMs = t * 60 * 1000
val shutdownTask = new TimerTask {
override def run(): Unit = context.getNGServer.shutdown(true)
}
shutdownTimer = new Timer()
shutdownTimer.schedule(shutdownTask, delayMs)
}
}
}
|
JetBrains/intellij-scala-historical
|
jps-plugin/src/org/jetbrains/jps/incremental/scala/remote/Main.scala
|
Scala
|
apache-2.0
| 2,871
|
/**
* Copyright (C) 2007 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.analysis.controls
import org.orbeon.oxf.xforms.analysis.ElementAnalysis._
import org.orbeon.oxf.xforms.XFormsConstants
import org.orbeon.oxf.xforms.XFormsConstants._
trait SelectAppearanceTrait extends AppearanceTrait {
val isMultiple = localName == "select"
// Normalize appearances
override val appearances = {
// NOTE: Ignore no longer supported xxf:autocomplete (which would require selection="open" anyway)
// Ideally we would like to do this but we can't, see:
// https://issues.scala-lang.org/browse/SI-1938?focusedCommentId=55131#comment-55131
// val initialAppearances = super.appearances
val initialAppearances = attQNameSet(element, XFormsConstants.APPEARANCE_QNAME, namespaceMapping) - XXFORMS_AUTOCOMPLETE_APPEARANCE_QNAME
val size = initialAppearances.size
initialAppearances match {
case _ if isMultiple && initialAppearances(XFORMS_MINIMAL_APPEARANCE_QNAME) ⇒
// Select with minimal appearance is handled as a compact appearance
initialAppearances - XFORMS_MINIMAL_APPEARANCE_QNAME + XFORMS_COMPACT_APPEARANCE_QNAME
case _ if size > 0 ⇒
initialAppearances
case _ if isMultiple ⇒
Set(XFORMS_COMPACT_APPEARANCE_QNAME) // default for xf:select
case _ ⇒
Set(XFORMS_MINIMAL_APPEARANCE_QNAME) // default for xf:select1
}
}
val isFull = appearances(XFORMS_FULL_APPEARANCE_QNAME)
val isCompact = appearances(XFORMS_COMPACT_APPEARANCE_QNAME)
val isTree = appearances(XXFORMS_TREE_APPEARANCE_QNAME)
val isMenu = appearances(XXFORMS_MENU_APPEARANCE_QNAME)
}
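// --- Editor's note (hedged sketch with plain strings instead of QNames, not part of the
// sources): isolates the appearance normalization above -- a multiple select with the
// minimal appearance is rewritten to compact, a non-empty set is kept as-is, and an empty
// set falls back to a default that depends on whether the control is a select or a select1.
object AppearanceNormalizationSketch {
  def normalize(initial: Set[String], isMultiple: Boolean): Set[String] =
    initial match {
      case _ if isMultiple && initial("minimal") => initial - "minimal" + "compact"
      case _ if initial.nonEmpty                 => initial
      case _ if isMultiple                       => Set("compact") // default for xf:select
      case _                                     => Set("minimal") // default for xf:select1
    }

  def main(args: Array[String]): Unit = {
    println(normalize(Set("minimal"), isMultiple = true))  // Set(compact)
    println(normalize(Set.empty, isMultiple = false))      // Set(minimal)
  }
}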
|
martinluther/orbeon-forms
|
src/main/scala/org/orbeon/oxf/xforms/analysis/controls/SelectAppearanceTrait.scala
|
Scala
|
lgpl-2.1
| 2,394
|
package uk.co.pollett.flink.newsreader.rss
import org.apache.flink.streaming.util.serialization.SerializationSchema
class Serializer extends SerializationSchema[(String, Entry, Int)] {
override def serialize(element: (String, Entry, Int)): Array[Byte] = {
(element._1 + " - " + element._3 + "\\n").getBytes
}
}
|
pollett/flink-newsreader
|
src/main/scala/uk/co/pollett/flink/newsreader/rss/Serializer.scala
|
Scala
|
mit
| 320
|
package sims.collision.narrowphase
package gjk
import sims.collision._
import sims.math._
import scala.collection.mutable.ListBuffer
class GJK[A <: Collidable: ClassManifest] extends narrowphase.NarrowPhaseDetector[A] {
def penetration(pair: (A, A)): Option[Penetration] = {
val ms = new MinkowskiSum(pair)
val s = ms.support(Vector2D.i)
val simplex = new ListBuffer[Vector2D]
simplex prepend s
var direction = -s
while (true) {
val a = ms.support(direction)
if ((a dot direction) < 0) return None
simplex prepend a
val newDirection = checkSimplex(simplex, direction)
if (newDirection == null) return Some(EPA.penetration(simplex, ms))
else direction = newDirection
}
throw new IllegalArgumentException("Something went wrong, should not reach here.")
}
/** Checks whether the given simplex contains the origin. If it does, `null` is returned.
* Otherwise a new search direction is returned and the simplex is updated. */
private def checkSimplex(simplex: ListBuffer[Vector2D], direction: Vector2D): Vector2D = {
if (simplex.length == 2) { //simplex == 2
val a = simplex(0)
val b = simplex(1)
val ab = b - a
val ao = -a
if (ao directionOf ab) {
ab cross ao cross ab
} else {
simplex.remove(1)
ao
}
} // end simplex == 2
else if (simplex.length == 3) { //simplex == 3
val a = simplex(0)
val b = simplex(1)
val c = simplex(2)
val ab = b - a
val ac = c - a
val ao = -a
val winding = ab cross ac
if (ao directionOf (ab cross winding)) {
if (ao directionOf ab) {
simplex.remove(2)
ab cross ao cross ab
} else if (ao directionOf ac) {
simplex.remove(1)
ac cross ao cross ac
} else {
simplex.remove(2)
simplex.remove(1)
ao
}
} else {
if (ao directionOf (winding cross ac)) {
if (ao directionOf ac) {
simplex.remove(1)
ac cross ao cross ac
} else {
simplex.remove(2)
simplex.remove(1)
ao
}
} else {
null
}
}
} //end simplex == 3
else throw new IllegalArgumentException("Invalid simplex size.")
}
def collision(pair: (A, A)): Option[Collision[A]] = {
val p = penetration(pair)
if (p.isEmpty) return None
val manif = CS.getCollisionPoints(pair, p.get.normal)
Some(new Collision[A] {
val item1 = pair._1
val item2 = pair._2
val normal = manif.normal
val overlap = p.get.overlap
val points = manif.points
})
}
}
object GJK {
}
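// A minimal sketch of the 2D "triple product" direction update used in checkSimplex above
// (`ab cross ao cross ab`): it produces a vector perpendicular to ab that points towards the
// origin. The tiny V2 type below is a stand-in for sims.math.Vector2D, for illustration only.
object TripleProductSketch {
  final case class V2(x: Double, y: Double) {
    def -(o: V2): V2 = V2(x - o.x, y - o.y)
    def *(s: Double): V2 = V2(x * s, y * s)
    def dot(o: V2): Double = x * o.x + y * o.y
  }
  // Vector triple product identity: (ab x ao) x ab = ao * (ab . ab) - ab * (ab . ao)
  def searchDirection(a: V2, b: V2): V2 = {
    val ab = b - a
    val ao = V2(-a.x, -a.y)
    (ao * (ab dot ab)) - (ab * (ab dot ao))
  }
}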
|
jodersky/sims2
|
src/main/scala/sims/collision/narrowphase/gjk/GJK.scala
|
Scala
|
bsd-3-clause
| 2,536
|
class ReturnNoOutput {
def foo(i: Int): Int = {
/*start*/
if (true) return i
println(i)
/*end*/
println()
42
}
}
/*
class ReturnNoOutput {
def foo(i: Int): Int = {
testMethodName(i) match {
case Some(toReturn) => return toReturn
case None =>
}
println()
42
}
def testMethodName(i: Int): Option[Int] = {
if (true) return Some(i)
println(i)
None
}
}
*/
|
ilinum/intellij-scala
|
testdata/extractMethod/output/ReturnNoOutput.scala
|
Scala
|
apache-2.0
| 421
|
package org.drooms.gui.swing
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import java.awt.Color
@RunWith(classOf[JUnitRunner])
class PlayersListSuite extends FunSuite {
trait SamplePlayers {
var playersList = new PlayersList()
playersList = playersList.addPlayer("Luke")
playersList = playersList.addPlayer("Obi-Wan")
playersList = playersList.addPlayer("Vader")
val origSize = playersList.players.size
}
test("newly created PlayersList with no players is empty") {
val playersList = new PlayersList()
assert(playersList.players.size === 0)
}
test("correct player is returned for given name") {
new SamplePlayers {
val luke = playersList.getPlayer("Luke")
assert(luke.name === "Luke")
}
}
test("player have correctly assigned color") {
new SamplePlayers {
playersList = playersList.addPlayer("Lea", Color.PINK)
val lea = playersList.getPlayer("Lea")
assert(lea.name === "Lea")
assert(lea.color === Color.PINK)
assert(lea.score === 0)
}
}
test("new player is correctly added") {
new SamplePlayers {
playersList = playersList.addPlayer("Lea")
val lea = playersList.getPlayer("Lea")
assert(playersList.players.size === origSize + 1)
assert(lea.name === "Lea")
assert(lea.score === 0)
}
}
test("multiple new players are added") {
new SamplePlayers {
playersList = playersList.addPlayers(List("Lea", "Han", "Yoda"))
assert(playersList.players.size === origSize + 3)
assert(playersList.getPlayer("Lea") !== null)
assert(playersList.getPlayer("Han") !== null)
assert(playersList.getPlayer("Yoda") !== null)
}
}
test("players scores are correctly updated") {
new SamplePlayers {
playersList = playersList.updateScores(Map("Luke" -> 10, "Obi-Wan" -> 15, "Vader" -> 20))
assert(playersList.players.size === 3)
assert(playersList.getPlayer("Luke").score === 10)
assert(playersList.getPlayer("Obi-Wan").score === 15)
assert(playersList.getPlayer("Vader").score === 20)
}
}
test("points are correctly added to the player") {
new SamplePlayers {
assert(playersList.getPlayer("Luke").score === 0)
playersList = playersList.addPoints("Luke", 15)
assert(playersList.getPlayer("Luke").score === 15)
assert(playersList.addPoints("Luke", 11).getPlayer("Luke").score === 26)
}
}
}
|
triceo/drooms
|
drooms-swing-gui/src/test/scala/org/drooms/gui/swing/PlayersListSuite.scala
|
Scala
|
apache-2.0
| 2,505
|
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.ml.crf
import com.johnsnowlabs.ml.crf.VectorMath._
import com.johnsnowlabs.tags.FastTest
import org.scalatest.flatspec.AnyFlatSpec
class ForwardBackwardSpec extends AnyFlatSpec {
val dataset = TestDatasets.small
val instance = dataset.instances.head._2
val metadata = dataset.metadata
val labels = metadata.label2Id.size
val features = metadata.attrFeatures.size + metadata.transitions.size
val weights = Vector(features, 0.1f)
val fb = new FbCalculator(2, metadata)
val bruteForce = new BruteForceCalculator(metadata, fb)
fb.calculate(instance, weights, 1f)
"EdgeCalculator" should "fill matrix correctly for first word" taggedAs FastTest in {
val firstWordFeatures = dataset.instances(0)._2.items(0).values
val logEdge = Matrix(3, 3)
EdgeCalculator.fillLogEdges(firstWordFeatures, weights, 1f, metadata, logEdge)
assert(logEdge(0).toSeq == Seq(0f, 0.5f, 0.3f))
assert(logEdge(1).toSeq == Seq(0f, 0.4f, 0.4f))
assert(logEdge(2).toSeq == Seq(0f, 0.4f, 0.3f))
}
"EdgeCalculate" should "fill matrix correctly for second word" taggedAs FastTest in {
val secondWordFeatures = dataset.instances(0)._2.items(1).values
val logEdge = Matrix(3, 3)
EdgeCalculator.fillLogEdges(secondWordFeatures, weights, 1f, metadata, logEdge)
FloatAssert.seqEquals(logEdge(0), Seq(0f, 0.6f, 0.6f))
FloatAssert.seqEquals(logEdge(1), Seq(0f, 0.5f, 0.7f))
FloatAssert.seqEquals(logEdge(2), Seq(0f, 0.5f, 0.6f))
}
"EdgeCalculator" should "fill matrix correctly according to scale param" taggedAs FastTest in {
val secondWordFeatures = dataset.instances(0)._2.items(1).values
val logEdge = Matrix(3, 3)
val weights = Vector(features, 1f)
EdgeCalculator.fillLogEdges(secondWordFeatures, weights, 0.1f, metadata, logEdge)
FloatAssert.seqEquals(logEdge(0), Seq(0f, 0.6f, 0.6f))
FloatAssert.seqEquals(logEdge(1), Seq(0f, 0.5f, 0.7f))
FloatAssert.seqEquals(logEdge(2), Seq(0f, 0.5f, 0.6f))
}
"FbCalculator" should "calculate c correct" taggedAs FastTest in {
// Calculate Z(x) as expected
val zTest = fb.c.reduce(_*_)
// Calculate Z(x) by brute force
val z = fb.phi
.reduce((a, b) => mult(a, b))(0)
.sum
assert(zTest == z)
// Calculate Z(x) from alpha
val alphaPaths = fb.alpha(instance.items.length - 1).sum
assert(alphaPaths == 1f)
// Calculate Z(x) from beta
val betaPaths = fb.beta(0).zip(fb.phi(0)(0)).map{case(a,b) => a*b}.sum
assert(betaPaths == 1f)
}
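  // Restating the scaled forward-backward identities exercised in the tests above and below,
  // assuming the scaling convention used by FbCalculator (c(i) is the per-position normalizer):
  //   Z(x) = prod_i c(i)
  //   sum_y alpha_i(y) * beta_i(y) * c(i) = 1, for every position i
  //   P(y_i = y | x) = alpha_i(y) * beta_i(y) * c(i)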
"FbCalculator" should "calculated alpha and beta should satisfy invariants" taggedAs FastTest in {
for (i <- 0 until instance.items.size) {
val fbZ = Range(0, labels).map(label => fb.alpha(i)(label) * fb.beta(i)(label)*fb.c(i)).sum
assert(fbZ == 1f)
}
}
"FbCalculator" should "calculate phi and logPhi correctly" taggedAs FastTest in {
for (i <- 0 until instance.items.size) {
for (from <- 0 until labels) {
for (to <- 0 until labels) {
assert(fb.phi(i)(from)(to) == Math.exp(fb.logPhi(i)(from)(to)).toFloat)
}
}
}
}
"FbCalculator" should "correctly estimates paths goes through label at time" taggedAs FastTest in {
for (i <- 0 until instance.items.length) {
for (label <- 0 until labels) {
val fBProbe = fb.alpha(i)(label) * fb.beta(i)(label) * fb.c(i)
val probe = bruteForce.getProbe(instance, i, label)
FloatAssert.equals(fBProbe, probe)
}
}
}
}
|
JohnSnowLabs/spark-nlp
|
src/test/scala/com/johnsnowlabs/ml/crf/ForwardBackwardSpec.scala
|
Scala
|
apache-2.0
| 4,115
|
import org.scalacheck._
object BasicTest extends Properties("A basic runnable test")
{
specify("startsWith", (a: String, b: String) => (a+b).startsWith(a))
}
abstract class AbstractNotATest extends Properties("Not a runnable test")
{
specify("Fail", (a: Int, b: Int) => false)
}
class ClassNotATest extends Properties("Not a runnable test")
{
specify("Fail", (a: Int, b: Int) => false)
}
trait TraitNotATest
{ self: Properties =>
specify("Fail", (a: Int, b: Int) => false)
}
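// A hedged sketch of the same property written in the forAll style, assuming a ScalaCheck
// version whose Properties class supports `property(name) = ...` (illustrative only):
object BasicTestForAll extends Properties("A basic runnable test (forAll style)")
{
  import Prop.forAll
  property("startsWith") = forAll { (a: String, b: String) => (a + b).startsWith(a) }
}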
|
matheshar/simple-build-tool
|
src/sbt-test/tests/scalacheck-a/changes/BasicTest.scala
|
Scala
|
bsd-3-clause
| 482
|
package com.eevolution.context.dictionary.infrastructure.service
import java.util.UUID
import akka.NotUsed
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.BrowseAccess
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.api.{Service, ServiceCall}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/EmerisScala
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 15/11/17.
*/
/**
* Browse Access Service
*/
trait BrowseAccessService extends Service with api.service.BrowseAccessService {
override def getAll() : ServiceCall[NotUsed, List[BrowseAccess]]
override def getById(id: Int): ServiceCall[NotUsed, BrowseAccess]
override def getByUUID(uuid :UUID): ServiceCall[NotUsed, BrowseAccess]
override def getAllByPage(pageNo: Option[Int], pageSize: Option[Int]): ServiceCall[NotUsed, PaginatedSequence[BrowseAccess]]
def descriptor = {
import Service._
named("browseAccess").withCalls(
pathCall("/api/v1_0_0/browseAccess/all", getAll _) ,
pathCall("/api/v1_0_0/browseAccess/:id", getById _),
pathCall("/api/v1_0_0/browseAccess/:uuid", getByUUID _) ,
pathCall("/api/v1_0_0/browseAccess?pageNo&pageSize", getAllByPage _)
)
}
}
|
adempiere/ADReactiveSystem
|
dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/service/BrowseAccessService.scala
|
Scala
|
gpl-3.0
| 2,073
|
package hubcat
import dispatch._
sealed trait Credentials {
def sign(req: Req): Req
}
case class OAuth2(access: String) extends Credentials {
def sign(req: Req) =
req <:< Map("Authorization" -> "token %s".format(access))
}
case class BasicAuth(user: String, pass: String) extends Credentials {
def sign(req: Req) =
req.as_!(user, pass)
}
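// A small usage sketch, assuming dispatch's `url` request builder; the endpoint and the
// credential values below are illustrative only.
object CredentialsUsageSketch {
  val req = url("https://api.github.com/user")
  val withToken = OAuth2("my-oauth-token").sign(req)    // adds an "Authorization: token ..." header
  val withBasic = BasicAuth("user", "secret").sign(req) // preemptive basic auth via as_!
}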
|
softprops/hubcat
|
src/main/scala/credentials.scala
|
Scala
|
mit
| 356
|
package ssh
import org.apache.sshd.server.{CommandFactory, Environment, ExitCallback, Command}
import org.slf4j.LoggerFactory
import java.io.{InputStream, OutputStream}
import util.ControlUtil._
import org.eclipse.jgit.api.Git
import util.Directory._
import org.eclipse.jgit.transport.{ReceivePack, UploadPack}
import org.apache.sshd.server.command.UnknownCommand
import servlet.{Database, CommitLogHook}
import service.{AccountService, RepositoryService, SystemSettingsService}
import org.eclipse.jgit.errors.RepositoryNotFoundException
import javax.servlet.ServletContext
import model.profile.simple.Session
object GitCommand {
  val CommandRegex = """\Agit-(upload|receive)-pack '/([a-zA-Z0-9\-_.]+)/([a-zA-Z0-9\-_.]+).git'\Z""".r
}
abstract class GitCommand(val context: ServletContext, val owner: String, val repoName: String) extends Command {
self: RepositoryService with AccountService =>
private val logger = LoggerFactory.getLogger(classOf[GitCommand])
protected var err: OutputStream = null
protected var in: InputStream = null
protected var out: OutputStream = null
protected var callback: ExitCallback = null
protected def runTask(user: String)(implicit session: Session): Unit
private def newTask(user: String): Runnable = new Runnable {
override def run(): Unit = {
Database(context) withTransaction { implicit session =>
try {
runTask(user)
callback.onExit(0)
} catch {
case e: RepositoryNotFoundException =>
logger.info(e.getMessage)
callback.onExit(1, "Repository Not Found")
case e: Throwable =>
logger.error(e.getMessage, e)
callback.onExit(1)
}
}
}
}
override def start(env: Environment): Unit = {
val user = env.getEnv.get("USER")
val thread = new Thread(newTask(user))
thread.start()
}
override def destroy(): Unit = {}
override def setExitCallback(callback: ExitCallback): Unit = {
this.callback = callback
}
override def setErrorStream(err: OutputStream): Unit = {
this.err = err
}
override def setOutputStream(out: OutputStream): Unit = {
this.out = out
}
override def setInputStream(in: InputStream): Unit = {
this.in = in
}
protected def isWritableUser(username: String, repositoryInfo: RepositoryService.RepositoryInfo)
(implicit session: Session): Boolean =
getAccountByUserName(username) match {
case Some(account) => hasWritePermission(repositoryInfo.owner, repositoryInfo.name, Some(account))
case None => false
}
}
class GitUploadPack(context: ServletContext, owner: String, repoName: String, baseUrl: String) extends GitCommand(context, owner, repoName)
with RepositoryService with AccountService {
override protected def runTask(user: String)(implicit session: Session): Unit = {
    getRepository(owner, repoName.replaceFirst("\\.wiki\\Z", ""), baseUrl).foreach { repositoryInfo =>
if(!repositoryInfo.repository.isPrivate || isWritableUser(user, repositoryInfo)){
using(Git.open(getRepositoryDir(owner, repoName))) { git =>
val repository = git.getRepository
val upload = new UploadPack(repository)
upload.upload(in, out, err)
}
}
}
}
}
class GitReceivePack(context: ServletContext, owner: String, repoName: String, baseUrl: String) extends GitCommand(context, owner, repoName)
with SystemSettingsService with RepositoryService with AccountService {
override protected def runTask(user: String)(implicit session: Session): Unit = {
    getRepository(owner, repoName.replaceFirst("\\.wiki\\Z", ""), baseUrl).foreach { repositoryInfo =>
if(isWritableUser(user, repositoryInfo)){
using(Git.open(getRepositoryDir(owner, repoName))) { git =>
val repository = git.getRepository
val receive = new ReceivePack(repository)
if(!repoName.endsWith(".wiki")){
val hook = new CommitLogHook(owner, repoName, user, baseUrl)
receive.setPreReceiveHook(hook)
receive.setPostReceiveHook(hook)
}
receive.receive(in, out, err)
}
}
}
}
}
class GitCommandFactory(context: ServletContext, baseUrl: String) extends CommandFactory {
private val logger = LoggerFactory.getLogger(classOf[GitCommandFactory])
override def createCommand(command: String): Command = {
logger.debug(s"command: $command")
command match {
case GitCommand.CommandRegex("upload", owner, repoName) => new GitUploadPack(context, owner, repoName, baseUrl)
case GitCommand.CommandRegex("receive", owner, repoName) => new GitReceivePack(context, owner, repoName, baseUrl)
case _ => new UnknownCommand(command)
}
}
}
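// A minimal sketch of how GitCommand.CommandRegex decomposes an incoming SSH command string;
// the owner and repository names below are hypothetical and used only for illustration.
object GitCommandParsingSketch {
  def describe(command: String): String = command match {
    case GitCommand.CommandRegex(action, owner, repoName) => s"$action-pack for $owner/$repoName"
    case _ => "unknown command"
  }
  // e.g. describe("git-upload-pack '/alice/example.git'") == "upload-pack for alice/example"
}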
|
campolake/gitbucketV2.1
|
src/main/scala/ssh/GitCommand.scala
|
Scala
|
apache-2.0
| 4,799
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.json
import java.io._
import java.nio.charset.{Charset, StandardCharsets, UnsupportedCharsetException}
import java.nio.file.Files
import java.sql.{Date, Timestamp}
import java.time.{LocalDate, ZoneId}
import java.util.Locale
import com.fasterxml.jackson.core.JsonFactory
import org.apache.hadoop.fs.{Path, PathFilter}
import org.apache.hadoop.io.SequenceFile.CompressionType
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.spark.{SparkConf, SparkException, TestUtils}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{functions => F, _}
import org.apache.spark.sql.catalyst.json._
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.ExternalRDD
import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.sql.types.StructType.fromDDL
import org.apache.spark.sql.types.TestUDT.{MyDenseVector, MyDenseVectorUDT}
import org.apache.spark.util.Utils
class TestFileFilter extends PathFilter {
override def accept(path: Path): Boolean = path.getParent.getName != "p=2"
}
abstract class JsonSuite extends QueryTest with SharedSparkSession with TestJsonData {
import testImplicits._
test("Type promotion") {
def checkTypePromotion(expected: Any, actual: Any): Unit = {
assert(expected.getClass == actual.getClass,
s"Failed to promote ${actual.getClass} to ${expected.getClass}.")
assert(expected == actual,
s"Promoted value ${actual}(${actual.getClass}) does not equal the expected value " +
s"${expected}(${expected.getClass}).")
}
val factory = new JsonFactory()
def enforceCorrectType(
value: Any,
dataType: DataType,
options: Map[String, String] = Map.empty): Any = {
val writer = new StringWriter()
Utils.tryWithResource(factory.createGenerator(writer)) { generator =>
generator.writeObject(value)
generator.flush()
}
val dummyOption = new JSONOptions(options, SQLConf.get.sessionLocalTimeZone)
val dummySchema = StructType(Seq.empty)
val parser = new JacksonParser(dummySchema, dummyOption, allowArrayAsStructs = true)
Utils.tryWithResource(factory.createParser(writer.toString)) { jsonParser =>
jsonParser.nextToken()
val converter = parser.makeConverter(dataType)
converter.apply(jsonParser)
}
}
val intNumber: Int = 2147483647
checkTypePromotion(intNumber, enforceCorrectType(intNumber, IntegerType))
checkTypePromotion(intNumber.toLong, enforceCorrectType(intNumber, LongType))
checkTypePromotion(intNumber.toDouble, enforceCorrectType(intNumber, DoubleType))
checkTypePromotion(
Decimal(intNumber), enforceCorrectType(intNumber, DecimalType.SYSTEM_DEFAULT))
val longNumber: Long = 9223372036854775807L
checkTypePromotion(longNumber, enforceCorrectType(longNumber, LongType))
checkTypePromotion(longNumber.toDouble, enforceCorrectType(longNumber, DoubleType))
checkTypePromotion(
Decimal(longNumber), enforceCorrectType(longNumber, DecimalType.SYSTEM_DEFAULT))
val doubleNumber: Double = 1.7976931348623157d
checkTypePromotion(doubleNumber.toDouble, enforceCorrectType(doubleNumber, DoubleType))
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(intNumber * 1000L)),
enforceCorrectType(intNumber, TimestampType))
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(intNumber.toLong * 1000L)),
enforceCorrectType(intNumber.toLong, TimestampType))
val strTime = "2014-09-30 12:34:56"
checkTypePromotion(
expected = DateTimeUtils.fromJavaTimestamp(Timestamp.valueOf(strTime)),
enforceCorrectType(strTime, TimestampType,
Map("timestampFormat" -> "yyyy-MM-dd HH:mm:ss")))
val strDate = "2014-10-15"
checkTypePromotion(
DateTimeUtils.fromJavaDate(Date.valueOf(strDate)), enforceCorrectType(strDate, DateType))
val ISO8601Time1 = "1970-01-01T01:00:01.0Z"
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(3601000)),
enforceCorrectType(
ISO8601Time1,
TimestampType,
Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ss.SX")))
val ISO8601Time2 = "1970-01-01T02:00:01-01:00"
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(10801000)),
enforceCorrectType(
ISO8601Time2,
TimestampType,
Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ssXXX")))
val ISO8601Date = "1970-01-01"
checkTypePromotion(DateTimeUtils.microsToDays(32400000000L, ZoneId.systemDefault),
enforceCorrectType(ISO8601Date, DateType))
}
test("Get compatible type") {
def checkDataType(t1: DataType, t2: DataType, expected: DataType): Unit = {
var actual = JsonInferSchema.compatibleType(t1, t2)
assert(actual == expected,
s"Expected $expected as the most general data type for $t1 and $t2, found $actual")
actual = JsonInferSchema.compatibleType(t2, t1)
assert(actual == expected,
s"Expected $expected as the most general data type for $t1 and $t2, found $actual")
}
// NullType
checkDataType(NullType, BooleanType, BooleanType)
checkDataType(NullType, IntegerType, IntegerType)
checkDataType(NullType, LongType, LongType)
checkDataType(NullType, DoubleType, DoubleType)
checkDataType(NullType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT)
checkDataType(NullType, StringType, StringType)
checkDataType(NullType, ArrayType(IntegerType), ArrayType(IntegerType))
checkDataType(NullType, StructType(Nil), StructType(Nil))
checkDataType(NullType, NullType, NullType)
// BooleanType
checkDataType(BooleanType, BooleanType, BooleanType)
checkDataType(BooleanType, IntegerType, StringType)
checkDataType(BooleanType, LongType, StringType)
checkDataType(BooleanType, DoubleType, StringType)
checkDataType(BooleanType, DecimalType.SYSTEM_DEFAULT, StringType)
checkDataType(BooleanType, StringType, StringType)
checkDataType(BooleanType, ArrayType(IntegerType), StringType)
checkDataType(BooleanType, StructType(Nil), StringType)
// IntegerType
checkDataType(IntegerType, IntegerType, IntegerType)
checkDataType(IntegerType, LongType, LongType)
checkDataType(IntegerType, DoubleType, DoubleType)
checkDataType(IntegerType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT)
checkDataType(IntegerType, StringType, StringType)
checkDataType(IntegerType, ArrayType(IntegerType), StringType)
checkDataType(IntegerType, StructType(Nil), StringType)
// LongType
checkDataType(LongType, LongType, LongType)
checkDataType(LongType, DoubleType, DoubleType)
checkDataType(LongType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT)
checkDataType(LongType, StringType, StringType)
checkDataType(LongType, ArrayType(IntegerType), StringType)
checkDataType(LongType, StructType(Nil), StringType)
// DoubleType
checkDataType(DoubleType, DoubleType, DoubleType)
checkDataType(DoubleType, DecimalType.SYSTEM_DEFAULT, DoubleType)
checkDataType(DoubleType, StringType, StringType)
checkDataType(DoubleType, ArrayType(IntegerType), StringType)
checkDataType(DoubleType, StructType(Nil), StringType)
// DecimalType
checkDataType(DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT,
DecimalType.SYSTEM_DEFAULT)
checkDataType(DecimalType.SYSTEM_DEFAULT, StringType, StringType)
checkDataType(DecimalType.SYSTEM_DEFAULT, ArrayType(IntegerType), StringType)
checkDataType(DecimalType.SYSTEM_DEFAULT, StructType(Nil), StringType)
// StringType
checkDataType(StringType, StringType, StringType)
checkDataType(StringType, ArrayType(IntegerType), StringType)
checkDataType(StringType, StructType(Nil), StringType)
// ArrayType
checkDataType(ArrayType(IntegerType), ArrayType(IntegerType), ArrayType(IntegerType))
checkDataType(ArrayType(IntegerType), ArrayType(LongType), ArrayType(LongType))
checkDataType(ArrayType(IntegerType), ArrayType(StringType), ArrayType(StringType))
checkDataType(ArrayType(IntegerType), StructType(Nil), StringType)
checkDataType(
ArrayType(IntegerType, true), ArrayType(IntegerType), ArrayType(IntegerType, true))
checkDataType(
ArrayType(IntegerType, true), ArrayType(IntegerType, false), ArrayType(IntegerType, true))
checkDataType(
ArrayType(IntegerType, true), ArrayType(IntegerType, true), ArrayType(IntegerType, true))
checkDataType(
ArrayType(IntegerType, false), ArrayType(IntegerType), ArrayType(IntegerType, true))
checkDataType(
ArrayType(IntegerType, false), ArrayType(IntegerType, false), ArrayType(IntegerType, false))
checkDataType(
ArrayType(IntegerType, false), ArrayType(IntegerType, true), ArrayType(IntegerType, true))
// StructType
checkDataType(StructType(Nil), StructType(Nil), StructType(Nil))
checkDataType(
StructType(StructField("f1", IntegerType, true) :: Nil),
StructType(StructField("f1", IntegerType, true) :: Nil),
StructType(StructField("f1", IntegerType, true) :: Nil))
checkDataType(
StructType(StructField("f1", IntegerType, true) :: Nil),
StructType(Nil),
StructType(StructField("f1", IntegerType, true) :: Nil))
checkDataType(
StructType(
StructField("f1", IntegerType, true) ::
StructField("f2", IntegerType, true) :: Nil),
StructType(StructField("f1", LongType, true) :: Nil),
StructType(
StructField("f1", LongType, true) ::
StructField("f2", IntegerType, true) :: Nil))
checkDataType(
StructType(
StructField("f1", IntegerType, true) :: Nil),
StructType(
StructField("f2", IntegerType, true) :: Nil),
StructType(
StructField("f1", IntegerType, true) ::
StructField("f2", IntegerType, true) :: Nil))
checkDataType(
StructType(
StructField("f1", IntegerType, true) :: Nil),
DecimalType.SYSTEM_DEFAULT,
StringType)
}
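  // For example, per the assertions above, the schema-merge rules give:
  //   compatibleType(IntegerType, DoubleType) -> DoubleType   (numeric widening)
  //   compatibleType(BooleanType, LongType)   -> StringType   (incompatible types fall back to string)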
test("Complex field and type inferring with null in sampling") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(jsonNullStruct)
val expectedSchema = StructType(
StructField("headers", StructType(
StructField("Charset", StringType, true) ::
StructField("Host", StringType, true) :: Nil)
, true) ::
StructField("ip", StringType, true) ::
StructField("nullstr", StringType, true):: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select nullstr, headers.Host from jsonTable"),
Seq(Row("", "1.abc.com"), Row("", null), Row("", null), Row(null, null))
)
}
}
test("Primitive field and type inferring") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(primitiveFieldAndType)
val expectedSchema = StructType(
StructField("bigInteger", DecimalType(20, 0), true) ::
StructField("boolean", BooleanType, true) ::
StructField("double", DoubleType, true) ::
StructField("integer", LongType, true) ::
StructField("long", LongType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157,
10,
21474836470L,
null,
"this is a simple string.")
)
}
}
test("Complex field and type inferring") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(complexFieldAndType1)
val expectedSchema = StructType(
StructField("arrayOfArray1", ArrayType(ArrayType(StringType, true), true), true) ::
StructField("arrayOfArray2", ArrayType(ArrayType(DoubleType, true), true), true) ::
StructField("arrayOfBigInteger", ArrayType(DecimalType(21, 0), true), true) ::
StructField("arrayOfBoolean", ArrayType(BooleanType, true), true) ::
StructField("arrayOfDouble", ArrayType(DoubleType, true), true) ::
StructField("arrayOfInteger", ArrayType(LongType, true), true) ::
StructField("arrayOfLong", ArrayType(LongType, true), true) ::
StructField("arrayOfNull", ArrayType(StringType, true), true) ::
StructField("arrayOfString", ArrayType(StringType, true), true) ::
StructField("arrayOfStruct", ArrayType(
StructType(
StructField("field1", BooleanType, true) ::
StructField("field2", StringType, true) ::
StructField("field3", StringType, true) :: Nil), true), true) ::
StructField("struct", StructType(
StructField("field1", BooleanType, true) ::
StructField("field2", DecimalType(20, 0), true) :: Nil), true) ::
StructField("structWithArrayFields", StructType(
StructField("field1", ArrayType(LongType, true), true) ::
StructField("field2", ArrayType(StringType, true), true) :: Nil), true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
// Access elements of a primitive array.
checkAnswer(
sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from jsonTable"),
Row("str1", "str2", null)
)
// Access an array of null values.
checkAnswer(
sql("select arrayOfNull from jsonTable"),
Row(Seq(null, null, null, null))
)
// Access elements of a BigInteger array (we use DecimalType internally).
checkAnswer(
sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] from " +
"jsonTable"),
Row(new java.math.BigDecimal("922337203685477580700"),
new java.math.BigDecimal("-922337203685477580800"), null)
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray1[0], arrayOfArray1[1] from jsonTable"),
Row(Seq("1", "2", "3"), Seq("str1", "str2"))
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray2[0], arrayOfArray2[1] from jsonTable"),
Row(Seq(1.0, 2.0, 3.0), Seq(1.1, 2.1, 3.1))
)
      // Access elements of an array inside a field of type ArrayType(ArrayType).
checkAnswer(
sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from jsonTable"),
Row("str2", 2.1)
)
// Access elements of an array of structs.
checkAnswer(
sql("select arrayOfStruct[0], arrayOfStruct[1], arrayOfStruct[2], arrayOfStruct[3] " +
"from jsonTable"),
Row(
Row(true, "str1", null),
Row(false, null, null),
Row(null, null, null),
null)
)
// Access a struct and fields inside of it.
checkAnswer(
sql("select struct, struct.field1, struct.field2 from jsonTable"),
Row(
Row(true, new java.math.BigDecimal("92233720368547758070")),
true,
new java.math.BigDecimal("92233720368547758070")) :: Nil
)
// Access an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1, structWithArrayFields.field2 from jsonTable"),
Row(Seq(4, 5, 6), Seq("str1", "str2"))
)
// Access elements of an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] from " +
"jsonTable"),
Row(5, null)
)
}
}
test("GetField operation on complex data type") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(complexFieldAndType1)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select arrayOfStruct[0].field1, arrayOfStruct[0].field2 from jsonTable"),
Row(true, "str1")
)
// Getting all values of a specific field from an array of structs.
checkAnswer(
sql("select arrayOfStruct.field1, arrayOfStruct.field2 from jsonTable"),
Row(Seq(true, false, null), Seq("str1", null, null))
)
}
}
test("Type conflict in primitive field values") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(primitiveFieldValueTypeConflict)
val expectedSchema = StructType(
StructField("num_bool", StringType, true) ::
StructField("num_num_1", LongType, true) ::
StructField("num_num_2", DoubleType, true) ::
StructField("num_num_3", DoubleType, true) ::
StructField("num_str", StringType, true) ::
StructField("str_bool", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row("true", 11L, null, 1.1, "13.1", "str1") ::
Row("12", null, 21474836470.9, null, null, "true") ::
Row("false", 21474836470L, 92233720368547758070d, 100, "str1", "false") ::
Row(null, 21474836570L, 1.1, 21474836470L, "92233720368547758070", null) :: Nil
)
// Number and Boolean conflict: resolve the type as number in this query.
checkAnswer(
sql("select num_bool - 10 from jsonTable where num_bool > 11"),
Row(2)
)
// Widening to LongType
checkAnswer(
sql("select num_num_1 - 100 from jsonTable where num_num_1 > 11"),
Row(21474836370L) :: Row(21474836470L) :: Nil
)
checkAnswer(
sql("select num_num_1 - 100 from jsonTable where num_num_1 > 10"),
Row(-89) :: Row(21474836370L) :: Row(21474836470L) :: Nil
)
// Widening to DecimalType
checkAnswer(
sql("select num_num_2 + 1.3 from jsonTable where num_num_2 > 1.1"),
Row(21474836472.2) ::
Row(92233720368547758071.3) :: Nil
)
// Widening to Double
checkAnswer(
sql("select num_num_3 + 1.2 from jsonTable where num_num_3 > 1.1"),
Row(101.2) :: Row(21474836471.2) :: Nil
)
// Number and String conflict: resolve the type as number in this query.
checkAnswer(
sql("select num_str + 1.2 from jsonTable where num_str > 14d"),
Row(92233720368547758071.2)
)
// Number and String conflict: resolve the type as number in this query.
checkAnswer(
sql("select num_str + 1.2 from jsonTable where num_str >= 92233720368547758060"),
Row(new java.math.BigDecimal("92233720368547758071.2").doubleValue)
)
// String and Boolean conflict: resolve the type as string.
checkAnswer(
sql("select * from jsonTable where str_bool = 'str1'"),
Row("true", 11L, null, 1.1, "13.1", "str1")
)
}
}
test("Type conflict in complex field values") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(complexFieldValueTypeConflict)
val expectedSchema = StructType(
StructField("array", ArrayType(LongType, true), true) ::
StructField("num_struct", StringType, true) ::
StructField("str_array", StringType, true) ::
StructField("struct", StructType(
StructField("field", StringType, true) :: Nil), true) ::
StructField("struct_array", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(Seq(), "11", "[1,2,3]", Row(null), "[]") ::
Row(null, """{"field":false}""", null, null, "{}") ::
Row(Seq(4, 5, 6), null, "str", Row(null), "[7,8,9]") ::
Row(Seq(7), "{}", """["str1","str2",33]""", Row("str"), """{"field":true}""") :: Nil
)
}
}
test("Type conflict in array elements") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(arrayElementTypeConflict)
val expectedSchema = StructType(
StructField("array1", ArrayType(StringType, true), true) ::
StructField("array2", ArrayType(StructType(
StructField("field", LongType, true) :: Nil), true), true) ::
StructField("array3", ArrayType(StringType, true), true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(Seq("1", "1.1", "true", null, "[]", "{}", "[2,3,4]",
"""{"field":"str"}"""), Seq(Row(214748364700L), Row(1)), null) ::
Row(null, null, Seq("""{"field":"str"}""", """{"field":1}""")) ::
Row(null, null, Seq("1", "2", "3")) :: Nil
)
// Treat an element as a number.
checkAnswer(
sql("select array1[0] + 1 from jsonTable where array1 is not null"),
Row(2)
)
}
}
test("Handling missing fields") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(missingFields)
val expectedSchema = StructType(
StructField("a", BooleanType, true) ::
StructField("b", LongType, true) ::
StructField("c", ArrayType(LongType, true), true) ::
StructField("d", StructType(
StructField("field", BooleanType, true) :: Nil), true) ::
StructField("e", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
}
}
test("Loading a JSON dataset from a text file") {
withTempView("jsonTable") {
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val jsonDF = spark.read.json(path)
val expectedSchema = StructType(
StructField("bigInteger", DecimalType(20, 0), true) ::
StructField("boolean", BooleanType, true) ::
StructField("double", DoubleType, true) ::
StructField("integer", LongType, true) ::
StructField("long", LongType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157,
10,
21474836470L,
null,
"this is a simple string.")
)
}
}
test("Loading a JSON dataset primitivesAsString returns schema with primitive types as strings") {
withTempView("jsonTable") {
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val jsonDF = spark.read.option("primitivesAsString", "true").json(path)
val expectedSchema = StructType(
StructField("bigInteger", StringType, true) ::
StructField("boolean", StringType, true) ::
StructField("double", StringType, true) ::
StructField("integer", StringType, true) ::
StructField("long", StringType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row("92233720368547758070",
"true",
"1.7976931348623157",
"10",
"21474836470",
null,
"this is a simple string.")
)
}
}
test("Loading a JSON dataset primitivesAsString returns complex fields as strings") {
withTempView("jsonTable") {
val jsonDF = spark.read.option("primitivesAsString", "true").json(complexFieldAndType1)
val expectedSchema = StructType(
StructField("arrayOfArray1", ArrayType(ArrayType(StringType, true), true), true) ::
StructField("arrayOfArray2", ArrayType(ArrayType(StringType, true), true), true) ::
StructField("arrayOfBigInteger", ArrayType(StringType, true), true) ::
StructField("arrayOfBoolean", ArrayType(StringType, true), true) ::
StructField("arrayOfDouble", ArrayType(StringType, true), true) ::
StructField("arrayOfInteger", ArrayType(StringType, true), true) ::
StructField("arrayOfLong", ArrayType(StringType, true), true) ::
StructField("arrayOfNull", ArrayType(StringType, true), true) ::
StructField("arrayOfString", ArrayType(StringType, true), true) ::
StructField("arrayOfStruct", ArrayType(
StructType(
StructField("field1", StringType, true) ::
StructField("field2", StringType, true) ::
StructField("field3", StringType, true) :: Nil), true), true) ::
StructField("struct", StructType(
StructField("field1", StringType, true) ::
StructField("field2", StringType, true) :: Nil), true) ::
StructField("structWithArrayFields", StructType(
StructField("field1", ArrayType(StringType, true), true) ::
StructField("field2", ArrayType(StringType, true), true) :: Nil), true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
// Access elements of a primitive array.
checkAnswer(
sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from jsonTable"),
Row("str1", "str2", null)
)
// Access an array of null values.
checkAnswer(
sql("select arrayOfNull from jsonTable"),
Row(Seq(null, null, null, null))
)
// Access elements of a BigInteger array (we use DecimalType internally).
checkAnswer(
sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] from " +
"jsonTable"),
Row("922337203685477580700", "-922337203685477580800", null)
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray1[0], arrayOfArray1[1] from jsonTable"),
Row(Seq("1", "2", "3"), Seq("str1", "str2"))
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray2[0], arrayOfArray2[1] from jsonTable"),
Row(Seq("1", "2", "3"), Seq("1.1", "2.1", "3.1"))
)
      // Access elements of an array inside a field of type ArrayType(ArrayType).
checkAnswer(
sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from jsonTable"),
Row("str2", "2.1")
)
// Access elements of an array of structs.
checkAnswer(
sql("select arrayOfStruct[0], arrayOfStruct[1], arrayOfStruct[2], arrayOfStruct[3] " +
"from jsonTable"),
Row(
Row("true", "str1", null),
Row("false", null, null),
Row(null, null, null),
null)
)
// Access a struct and fields inside of it.
checkAnswer(
sql("select struct, struct.field1, struct.field2 from jsonTable"),
Row(
Row("true", "92233720368547758070"),
"true",
"92233720368547758070") :: Nil
)
// Access an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1, structWithArrayFields.field2 from jsonTable"),
Row(Seq("4", "5", "6"), Seq("str1", "str2"))
)
// Access elements of an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] from " +
"jsonTable"),
Row("5", null)
)
}
}
test("Loading a JSON dataset prefersDecimal returns schema with float types as BigDecimal") {
withTempView("jsonTable") {
val jsonDF = spark.read.option("prefersDecimal", "true").json(primitiveFieldAndType)
val expectedSchema = StructType(
StructField("bigInteger", DecimalType(20, 0), true) ::
StructField("boolean", BooleanType, true) ::
StructField("double", DecimalType(17, 16), true) ::
StructField("integer", LongType, true) ::
StructField("long", LongType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(BigDecimal("92233720368547758070"),
true,
BigDecimal("1.7976931348623157"),
10,
21474836470L,
null,
"this is a simple string.")
)
}
}
test("Find compatible types even if inferred DecimalType is not capable of other IntegralType") {
val mixedIntegerAndDoubleRecords = Seq(
"""{"a": 3, "b": 1.1}""",
s"""{"a": 3.1, "b": 0.${"0" * 38}1}""").toDS()
val jsonDF = spark.read
.option("prefersDecimal", "true")
.json(mixedIntegerAndDoubleRecords)
// The values in `a` field will be decimals as they fit in decimal. For `b` field,
// they will be doubles as `1.0E-39D` does not fit.
val expectedSchema = StructType(
StructField("a", DecimalType(21, 1), true) ::
StructField("b", DoubleType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
checkAnswer(
jsonDF,
Row(BigDecimal("3"), 1.1D) ::
Row(BigDecimal("3.1"), 1.0E-39D) :: Nil
)
}
test("Infer big integers correctly even when it does not fit in decimal") {
val jsonDF = spark.read
.json(bigIntegerRecords)
    // The value in `a` field will be a double as it does not fit in decimal. For `b` field,
    // it will be a decimal, since `92233720368547758070` fits within a 20-digit decimal.
val expectedSchema = StructType(
StructField("a", DoubleType, true) ::
StructField("b", DecimalType(20, 0), true) :: Nil)
assert(expectedSchema === jsonDF.schema)
checkAnswer(jsonDF, Row(1.0E38D, BigDecimal("92233720368547758070")))
}
test("Infer floating-point values correctly even when it does not fit in decimal") {
val jsonDF = spark.read
.option("prefersDecimal", "true")
.json(floatingValueRecords)
    // The value in `a` field will be a double as it does not fit in decimal. For `b` field,
    // it will be a decimal, since `0.01` fits by using a precision equal to the scale.
val expectedSchema = StructType(
StructField("a", DoubleType, true) ::
StructField("b", DecimalType(2, 2), true):: Nil)
assert(expectedSchema === jsonDF.schema)
checkAnswer(jsonDF, Row(1.0E-39D, BigDecimal("0.01")))
val mergedJsonDF = spark.read
.option("prefersDecimal", "true")
.json(floatingValueRecords.union(bigIntegerRecords))
val expectedMergedSchema = StructType(
StructField("a", DoubleType, true) ::
StructField("b", DecimalType(22, 2), true):: Nil)
assert(expectedMergedSchema === mergedJsonDF.schema)
checkAnswer(
mergedJsonDF,
Row(1.0E-39D, BigDecimal("0.01")) ::
Row(1.0E38D, BigDecimal("92233720368547758070")) :: Nil
)
}
test("Loading a JSON dataset from a text file with SQL") {
val dir = Utils.createTempDir()
dir.delete()
val path = dir.toURI.toString
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
sql(
s"""
|CREATE TEMPORARY VIEW jsonTableSQL
|USING org.apache.spark.sql.json
|OPTIONS (
| path '$path'
|)
""".stripMargin)
checkAnswer(
sql("select * from jsonTableSQL"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157,
10,
21474836470L,
null,
"this is a simple string.")
)
}
test("Applying schemas") {
withTempView("jsonTable1", "jsonTable2") {
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val schema = StructType(
StructField("bigInteger", DecimalType.SYSTEM_DEFAULT, true) ::
StructField("boolean", BooleanType, true) ::
StructField("double", DoubleType, true) ::
StructField("integer", IntegerType, true) ::
StructField("long", LongType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
val jsonDF1 = spark.read.schema(schema).json(path)
assert(schema === jsonDF1.schema)
jsonDF1.createOrReplaceTempView("jsonTable1")
checkAnswer(
sql("select * from jsonTable1"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157,
10,
21474836470L,
null,
"this is a simple string.")
)
val jsonDF2 = spark.read.schema(schema).json(primitiveFieldAndType)
assert(schema === jsonDF2.schema)
jsonDF2.createOrReplaceTempView("jsonTable2")
checkAnswer(
sql("select * from jsonTable2"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157,
10,
21474836470L,
null,
"this is a simple string.")
)
}
}
test("Applying schemas with MapType") {
withTempView("jsonWithSimpleMap", "jsonWithComplexMap") {
val schemaWithSimpleMap = StructType(
StructField("map", MapType(StringType, IntegerType, true), false) :: Nil)
val jsonWithSimpleMap = spark.read.schema(schemaWithSimpleMap).json(mapType1)
jsonWithSimpleMap.createOrReplaceTempView("jsonWithSimpleMap")
checkAnswer(
sql("select `map` from jsonWithSimpleMap"),
Row(Map("a" -> 1)) ::
Row(Map("b" -> 2)) ::
Row(Map("c" -> 3)) ::
Row(Map("c" -> 1, "d" -> 4)) ::
Row(Map("e" -> null)) :: Nil
)
withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
checkAnswer(
sql("select `map`['c'] from jsonWithSimpleMap"),
Row(null) ::
Row(null) ::
Row(3) ::
Row(1) ::
Row(null) :: Nil
)
}
val innerStruct = StructType(
StructField("field1", ArrayType(IntegerType, true), true) ::
StructField("field2", IntegerType, true) :: Nil)
val schemaWithComplexMap = StructType(
StructField("map", MapType(StringType, innerStruct, true), false) :: Nil)
val jsonWithComplexMap = spark.read.schema(schemaWithComplexMap).json(mapType2)
jsonWithComplexMap.createOrReplaceTempView("jsonWithComplexMap")
checkAnswer(
sql("select `map` from jsonWithComplexMap"),
Row(Map("a" -> Row(Seq(1, 2, 3, null), null))) ::
Row(Map("b" -> Row(null, 2))) ::
Row(Map("c" -> Row(Seq(), 4))) ::
Row(Map("c" -> Row(null, 3), "d" -> Row(Seq(null), null))) ::
Row(Map("e" -> null)) ::
Row(Map("f" -> Row(null, null))) :: Nil
)
withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
checkAnswer(
sql("select `map`['a'].field1, `map`['c'].field2 from jsonWithComplexMap"),
Row(Seq(1, 2, 3, null), null) ::
Row(null, null) ::
Row(null, 4) ::
Row(null, 3) ::
Row(null, null) ::
Row(null, null) :: Nil
)
}
}
}
test("SPARK-2096 Correctly parse dot notations") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(complexFieldAndType2)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select arrayOfStruct[0].field1, arrayOfStruct[0].field2 from jsonTable"),
Row(true, "str1")
)
checkAnswer(
sql(
"""
|select complexArrayOfStruct[0].field1[1].inner2[0],
|complexArrayOfStruct[1].field2[0][1]
|from jsonTable
""".stripMargin),
Row("str2", 6)
)
}
}
test("SPARK-3390 Complex arrays") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(complexFieldAndType2)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql(
"""
|select arrayOfArray1[0][0][0], arrayOfArray1[1][0][1], arrayOfArray1[1][1][0]
|from jsonTable
""".stripMargin),
Row(5, 7, 8)
)
checkAnswer(
sql(
"""
|select arrayOfArray2[0][0][0].inner1, arrayOfArray2[1][0],
|arrayOfArray2[1][1][1].inner2[0], arrayOfArray2[2][0][0].inner3[0][0].inner4
|from jsonTable
""".stripMargin),
Row("str1", Nil, "str4", 2)
)
}
}
test("SPARK-3308 Read top level JSON arrays") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(jsonArray)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql(
"""
|select a, b, c
|from jsonTable
""".stripMargin),
Row("str_a_1", null, null) ::
Row("str_a_2", null, null) ::
Row(null, "str_b_3", null) ::
Row("str_a_4", "str_b_4", "str_c_4") :: Nil
)
}
}
test("Corrupt records: FAILFAST mode") {
// `FAILFAST` mode should throw an exception for corrupt records.
val exceptionOne = intercept[SparkException] {
spark.read
.option("mode", "FAILFAST")
.json(corruptRecords)
}.getMessage
assert(exceptionOne.contains(
"Malformed records are detected in schema inference. Parse Mode: FAILFAST."))
val exceptionTwo = intercept[SparkException] {
spark.read
.option("mode", "FAILFAST")
.schema("a string")
.json(corruptRecords)
.collect()
}.getMessage
assert(exceptionTwo.contains(
"Malformed records are detected in record parsing. Parse Mode: FAILFAST."))
}
test("Corrupt records: DROPMALFORMED mode") {
val schemaOne = StructType(
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
val schemaTwo = StructType(
StructField("a", StringType, true) :: Nil)
// `DROPMALFORMED` mode should skip corrupt records
val jsonDFOne = spark.read
.option("mode", "DROPMALFORMED")
.json(corruptRecords)
checkAnswer(
jsonDFOne,
Row("str_a_4", "str_b_4", "str_c_4") :: Nil
)
assert(jsonDFOne.schema === schemaOne)
val jsonDFTwo = spark.read
.option("mode", "DROPMALFORMED")
.schema(schemaTwo)
.json(corruptRecords)
checkAnswer(
jsonDFTwo,
Row("str_a_4") :: Nil)
assert(jsonDFTwo.schema === schemaTwo)
}
test("SPARK-19641: Additional corrupt records: DROPMALFORMED mode") {
val schema = new StructType().add("dummy", StringType)
// `DROPMALFORMED` mode should skip corrupt records
val jsonDF = spark.read
.option("mode", "DROPMALFORMED")
.json(additionalCorruptRecords)
checkAnswer(
jsonDF,
Row("test"))
assert(jsonDF.schema === schema)
}
test("Corrupt records: PERMISSIVE mode, without designated column for malformed records") {
val schema = StructType(
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
val jsonDF = spark.read.schema(schema).json(corruptRecords)
checkAnswer(
jsonDF.select($"a", $"b", $"c"),
Seq(
// Corrupted records are replaced with null
Row(null, null, null),
Row(null, null, null),
Row(null, null, null),
Row("str_a_4", "str_b_4", "str_c_4"),
Row(null, null, null))
)
}
test("Corrupt records: PERMISSIVE mode, with designated column for malformed records") {
// Test if we can query corrupt records.
withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
val jsonDF = spark.read.json(corruptRecords)
val schema = StructType(
StructField("_unparsed", StringType, true) ::
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
assert(schema === jsonDF.schema)
      // In HiveContext, backticks should be used to access columns starting with an underscore.
checkAnswer(
jsonDF.select($"a", $"b", $"c", $"_unparsed"),
Row(null, null, null, "{") ::
Row(null, null, null, """{"a":1, b:2}""") ::
Row(null, null, null, """{"a":{, b:3}""") ::
Row("str_a_4", "str_b_4", "str_c_4", null) ::
Row(null, null, null, "]") :: Nil
)
checkAnswer(
jsonDF.filter($"_unparsed".isNull).select($"a", $"b", $"c"),
Row("str_a_4", "str_b_4", "str_c_4")
)
checkAnswer(
jsonDF.filter($"_unparsed".isNotNull).select($"_unparsed"),
Row("{") ::
Row("""{"a":1, b:2}""") ::
Row("""{"a":{, b:3}""") ::
Row("]") :: Nil
)
}
}
test("SPARK-13953 Rename the corrupt record field via option") {
val jsonDF = spark.read
.option("columnNameOfCorruptRecord", "_malformed")
.json(corruptRecords)
val schema = StructType(
StructField("_malformed", StringType, true) ::
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
assert(schema === jsonDF.schema)
checkAnswer(
jsonDF.selectExpr("a", "b", "c", "_malformed"),
Row(null, null, null, "{") ::
Row(null, null, null, """{"a":1, b:2}""") ::
Row(null, null, null, """{"a":{, b:3}""") ::
Row("str_a_4", "str_b_4", "str_c_4", null) ::
Row(null, null, null, "]") :: Nil
)
}
test("SPARK-4068: nulls in arrays") {
withTempView("jsonTable") {
val jsonDF = spark.read.json(nullsInArrays)
jsonDF.createOrReplaceTempView("jsonTable")
val schema = StructType(
StructField("field1",
ArrayType(ArrayType(ArrayType(ArrayType(StringType, true), true), true), true), true) ::
StructField("field2",
ArrayType(ArrayType(
StructType(StructField("Test", LongType, true) :: Nil), true), true), true) ::
StructField("field3",
ArrayType(ArrayType(
StructType(StructField("Test", StringType, true) :: Nil), true), true), true) ::
StructField("field4",
ArrayType(ArrayType(ArrayType(LongType, true), true), true), true) :: Nil)
assert(schema === jsonDF.schema)
checkAnswer(
sql(
"""
|SELECT field1, field2, field3, field4
|FROM jsonTable
""".stripMargin),
Row(Seq(Seq(null), Seq(Seq(Seq("Test")))), null, null, null) ::
Row(null, Seq(null, Seq(Row(1))), null, null) ::
Row(null, null, Seq(Seq(null), Seq(Row("2"))), null) ::
Row(null, null, null, Seq(Seq(null, Seq(1, 2, 3)))) :: Nil
)
}
}
test("SPARK-4228 DataFrame to JSON") {
withTempView("applySchema1", "applySchema2", "primitiveTable", "complexTable") {
val schema1 = StructType(
StructField("f1", IntegerType, false) ::
StructField("f2", StringType, false) ::
StructField("f3", BooleanType, false) ::
StructField("f4", ArrayType(StringType), nullable = true) ::
StructField("f5", IntegerType, true) :: Nil)
val rowRDD1 = unparsedStrings.map { r =>
val values = r.split(",").map(_.trim)
val v5 = try values(3).toInt catch {
case _: NumberFormatException => null
}
Row(values(0).toInt, values(1), values(2).toBoolean, r.split(",").toList, v5)
}
val df1 = spark.createDataFrame(rowRDD1, schema1)
df1.createOrReplaceTempView("applySchema1")
val df2 = df1.toDF
val result = df2.toJSON.collect()
// scalastyle:off
      assert(result(0) === "{\"f1\":1,\"f2\":\"A1\",\"f3\":true,\"f4\":[\"1\",\" A1\",\" true\",\" null\"]}")
      assert(result(3) === "{\"f1\":4,\"f2\":\"D4\",\"f3\":true,\"f4\":[\"4\",\" D4\",\" true\",\" 2147483644\"],\"f5\":2147483644}")
// scalastyle:on
val schema2 = StructType(
StructField("f1", StructType(
StructField("f11", IntegerType, false) ::
StructField("f12", BooleanType, false) :: Nil), false) ::
StructField("f2", MapType(StringType, IntegerType, true), false) :: Nil)
val rowRDD2 = unparsedStrings.map { r =>
val values = r.split(",").map(_.trim)
val v4 = try values(3).toInt catch {
case _: NumberFormatException => null
}
Row(Row(values(0).toInt, values(2).toBoolean), Map(values(1) -> v4))
}
val df3 = spark.createDataFrame(rowRDD2, schema2)
df3.createOrReplaceTempView("applySchema2")
val df4 = df3.toDF
val result2 = df4.toJSON.collect()
      assert(result2(1) === "{\"f1\":{\"f11\":2,\"f12\":false},\"f2\":{\"B2\":null}}")
      assert(result2(3) === "{\"f1\":{\"f11\":4,\"f12\":true},\"f2\":{\"D4\":2147483644}}")
val jsonDF = spark.read.json(primitiveFieldAndType)
val primTable = spark.read.json(jsonDF.toJSON)
primTable.createOrReplaceTempView("primitiveTable")
checkAnswer(
sql("select * from primitiveTable"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157,
10,
21474836470L,
"this is a simple string.")
)
val complexJsonDF = spark.read.json(complexFieldAndType1)
val compTable = spark.read.json(complexJsonDF.toJSON)
compTable.createOrReplaceTempView("complexTable")
// Access elements of a primitive array.
checkAnswer(
sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from complexTable"),
Row("str1", "str2", null)
)
// Access an array of null values.
checkAnswer(
sql("select arrayOfNull from complexTable"),
Row(Seq(null, null, null, null))
)
// Access elements of a BigInteger array (we use DecimalType internally).
checkAnswer(
sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] " +
" from complexTable"),
Row(new java.math.BigDecimal("922337203685477580700"),
new java.math.BigDecimal("-922337203685477580800"), null)
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray1[0], arrayOfArray1[1] from complexTable"),
Row(Seq("1", "2", "3"), Seq("str1", "str2"))
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray2[0], arrayOfArray2[1] from complexTable"),
Row(Seq(1.0, 2.0, 3.0), Seq(1.1, 2.1, 3.1))
)
      // Access elements of an array inside a field with the type of ArrayType(ArrayType).
checkAnswer(
sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from complexTable"),
Row("str2", 2.1)
)
// Access a struct and fields inside of it.
checkAnswer(
sql("select struct, struct.field1, struct.field2 from complexTable"),
Row(
Row(true, new java.math.BigDecimal("92233720368547758070")),
true,
new java.math.BigDecimal("92233720368547758070")) :: Nil
)
// Access an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1, structWithArrayFields.field2 from complexTable"),
Row(Seq(4, 5, 6), Seq("str1", "str2"))
)
// Access elements of an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] " +
"from complexTable"),
Row(5, null)
)
}
}
test("Dataset toJSON doesn't construct rdd") {
val containsRDD = spark.emptyDataFrame.toJSON.queryExecution.logical.find {
case ExternalRDD(_, _) => true
case _ => false
}
assert(containsRDD.isEmpty, "Expected logical plan of toJSON to not contain an RDD")
}
test("JSONRelation equality test") {
withTempPath(dir => {
val path = dir.getCanonicalFile.toURI.toString
sparkContext.parallelize(1 to 100)
.map(i => s"""{"a": 1, "b": "str$i"}""").saveAsTextFile(path)
val d1 = DataSource(
spark,
userSpecifiedSchema = None,
partitionColumns = Array.empty[String],
bucketSpec = None,
className = classOf[JsonFileFormat].getCanonicalName,
options = Map("path" -> path)).resolveRelation()
val d2 = DataSource(
spark,
userSpecifiedSchema = None,
partitionColumns = Array.empty[String],
bucketSpec = None,
className = classOf[JsonFileFormat].getCanonicalName,
options = Map("path" -> path)).resolveRelation()
assert(d1 === d2)
})
}
test("SPARK-6245 JsonInferSchema.infer on empty RDD") {
// This is really a test that it doesn't throw an exception
val options = new JSONOptions(Map.empty[String, String], "UTC")
val emptySchema = new JsonInferSchema(options).infer(
empty.rdd,
CreateJacksonParser.string)
assert(StructType(Seq()) === emptySchema)
}
test("SPARK-7565 MapType in JsonRDD") {
withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
withTempDir { dir =>
val schemaWithSimpleMap = StructType(
StructField("map", MapType(StringType, IntegerType, true), false) :: Nil)
val df = spark.read.schema(schemaWithSimpleMap).json(mapType1)
val path = dir.getAbsolutePath
df.write.mode("overwrite").parquet(path)
// order of MapType is not defined
assert(spark.read.parquet(path).count() == 5)
val df2 = spark.read.json(corruptRecords)
df2.write.mode("overwrite").parquet(path)
checkAnswer(spark.read.parquet(path), df2.collect())
}
}
}
test("SPARK-8093 Erase empty structs") {
val options = new JSONOptions(Map.empty[String, String], "UTC")
val emptySchema = new JsonInferSchema(options).infer(
emptyRecords.rdd,
CreateJacksonParser.string)
assert(StructType(Seq()) === emptySchema)
}
test("JSON with Partition") {
def makePartition(rdd: RDD[String], parent: File, partName: String, partValue: Any): File = {
val p = new File(parent, s"$partName=${partValue.toString}")
rdd.saveAsTextFile(p.getCanonicalPath)
p
}
withTempPath(root => {
withTempView("test_myjson_with_part") {
val d1 = new File(root, "d1=1")
      // root/d1=1/col1=abc
val p1_col1 = makePartition(
sparkContext.parallelize(2 to 5).map(i => s"""{"a": 1, "b": "str$i"}"""),
d1,
"col1",
"abc")
      // root/d1=1/col1=abd
val p2 = makePartition(
sparkContext.parallelize(6 to 10).map(i => s"""{"a": 1, "b": "str$i"}"""),
d1,
"col1",
"abd")
spark.read.json(root.getAbsolutePath).createOrReplaceTempView("test_myjson_with_part")
checkAnswer(sql(
"SELECT count(a) FROM test_myjson_with_part where d1 = 1 and col1='abc'"), Row(4))
checkAnswer(sql(
"SELECT count(a) FROM test_myjson_with_part where d1 = 1 and col1='abd'"), Row(5))
checkAnswer(sql(
"SELECT count(a) FROM test_myjson_with_part where d1 = 1"), Row(9))
}
})
}
test("backward compatibility") {
    // This test makes sure our JSON support can read JSON data generated by previous versions
    // of Spark through the toJSON method and the JSON data source.
// The data is generated by the following program.
// Here are a few notes:
// - Spark 1.5.0 cannot save timestamp data. So, we manually added timestamp field (col13)
// in the JSON object.
// - For Spark before 1.5.1, we do not generate UDTs. So, we manually added the UDT value to
// JSON objects generated by those Spark versions (col17).
// - If the type is NullType, we do not write data out.
// Create the schema.
val struct =
StructType(
StructField("f1", FloatType, true) ::
StructField("f2", ArrayType(BooleanType), true) :: Nil)
val dataTypes =
Seq(
StringType, BinaryType, NullType, BooleanType,
ByteType, ShortType, IntegerType, LongType,
FloatType, DoubleType, DecimalType(25, 5), DecimalType(6, 5),
DateType, TimestampType,
ArrayType(IntegerType), MapType(StringType, LongType), struct,
new MyDenseVectorUDT())
val fields = dataTypes.zipWithIndex.map { case (dataType, index) =>
StructField(s"col$index", dataType, nullable = true)
}
val schema = StructType(fields)
val constantValues =
Seq(
"a string in binary".getBytes(StandardCharsets.UTF_8),
null,
true,
1.toByte,
2.toShort,
3,
Long.MaxValue,
0.25.toFloat,
0.75,
new java.math.BigDecimal(s"1234.23456"),
new java.math.BigDecimal(s"1.23456"),
java.sql.Date.valueOf("2015-01-01"),
java.sql.Timestamp.valueOf("2015-01-01 23:50:59.123"),
Seq(2, 3, 4),
Map("a string" -> 2000L),
Row(4.75.toFloat, Seq(false, true)),
new MyDenseVector(Array(0.25, 2.25, 4.25)))
val data =
Row.fromSeq(Seq("Spark " + spark.sparkContext.version) ++ constantValues) :: Nil
// Data generated by previous versions.
// scalastyle:off
val existingJSONData =
"""{"col0":"Spark 1.2.2","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.3.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.3.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.4.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.4.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.5.0","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.5.0","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"16436","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" :: Nil
// scalastyle:on
// Generate data for the current version.
val df = spark.createDataFrame(spark.sparkContext.parallelize(data, 1), schema)
withTempPath { path =>
df.write.format("json").mode("overwrite").save(path.getCanonicalPath)
      // df.toJSON will convert internal rows to external rows first and then generate
      // JSON objects. In contrast, df.write.format("json") will write internal rows directly.
val allJSON =
existingJSONData ++
df.toJSON.collect() ++
sparkContext.textFile(path.getCanonicalPath).collect()
Utils.deleteRecursively(path)
sparkContext.parallelize(allJSON, 1).saveAsTextFile(path.getCanonicalPath)
// Read data back with the schema specified.
val col0Values =
Seq(
"Spark 1.2.2",
"Spark 1.3.1",
"Spark 1.3.1",
"Spark 1.4.1",
"Spark 1.4.1",
"Spark 1.5.0",
"Spark 1.5.0",
"Spark " + spark.sparkContext.version,
"Spark " + spark.sparkContext.version)
val expectedResult = col0Values.map { v =>
Row.fromSeq(Seq(v) ++ constantValues)
}
checkAnswer(
spark.read.format("json").schema(schema).load(path.getCanonicalPath),
expectedResult
)
}
}
test("SPARK-11544 test pathfilter") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark.range(2)
df.write.json(path + "/p=1")
df.write.json(path + "/p=2")
assert(spark.read.json(path).count() === 4)
val extraOptions = Map(
"mapred.input.pathFilter.class" -> classOf[TestFileFilter].getName,
"mapreduce.input.pathFilter.class" -> classOf[TestFileFilter].getName
)
assert(spark.read.options(extraOptions).json(path).count() === 2)
}
}
test("SPARK-12057 additional corrupt records do not throw exceptions") {
// Test if we can query corrupt records.
withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
withTempView("jsonTable") {
val schema = StructType(
StructField("_unparsed", StringType, true) ::
StructField("dummy", StringType, true) :: Nil)
{
// We need to make sure we can infer the schema.
val jsonDF = spark.read.json(additionalCorruptRecords)
assert(jsonDF.schema === schema)
}
{
val jsonDF = spark.read.schema(schema).json(additionalCorruptRecords)
jsonDF.createOrReplaceTempView("jsonTable")
        // In HiveContext, backticks should be used to access columns starting with an underscore.
checkAnswer(
sql(
"""
|SELECT dummy, _unparsed
|FROM jsonTable
""".stripMargin),
Row("test", null) ::
Row(null, """[1,2,3]""") ::
Row(null, """":"test", "a":1}""") ::
Row(null, """42""") ::
Row(null, """ ","ian":"test"}""") :: Nil
)
}
}
}
}
test("Parse JSON rows having an array type and a struct type in the same field.") {
withTempDir { dir =>
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
arrayAndStructRecords.map(record => record.replaceAll("\\n", " ")).write.text(path)
val schema =
StructType(
StructField("a", StructType(
StructField("b", StringType) :: Nil
)) :: Nil)
val jsonDF = spark.read.schema(schema).json(path)
assert(jsonDF.count() == 2)
}
}
test("SPARK-12872 Support to specify the option for compression codec") {
withTempDir { dir =>
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val jsonDF = spark.read.json(path)
val jsonDir = new File(dir, "json").getCanonicalPath
jsonDF.coalesce(1).write
.format("json")
.option("compression", "gZiP")
.save(jsonDir)
val compressedFiles = new File(jsonDir).listFiles()
assert(compressedFiles.exists(_.getName.endsWith(".json.gz")))
val jsonCopy = spark.read
.format("json")
.load(jsonDir)
assert(jsonCopy.count == jsonDF.count)
val jsonCopySome = jsonCopy.selectExpr("string", "long", "boolean")
val jsonDFSome = jsonDF.selectExpr("string", "long", "boolean")
checkAnswer(jsonCopySome, jsonDFSome)
}
}
test("SPARK-13543 Write the output as uncompressed via option()") {
val extraOptions = Map[String, String](
"mapreduce.output.fileoutputformat.compress" -> "true",
"mapreduce.output.fileoutputformat.compress.type" -> CompressionType.BLOCK.toString,
"mapreduce.output.fileoutputformat.compress.codec" -> classOf[GzipCodec].getName,
"mapreduce.map.output.compress" -> "true",
"mapreduce.map.output.compress.codec" -> classOf[GzipCodec].getName
)
withTempDir { dir =>
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val jsonDF = spark.read.json(path)
val jsonDir = new File(dir, "json").getCanonicalPath
jsonDF.coalesce(1).write
.format("json")
.option("compression", "none")
.options(extraOptions)
.save(jsonDir)
val compressedFiles = new File(jsonDir).listFiles()
assert(compressedFiles.exists(!_.getName.endsWith(".json.gz")))
val jsonCopy = spark.read
.format("json")
.options(extraOptions)
.load(jsonDir)
assert(jsonCopy.count == jsonDF.count)
val jsonCopySome = jsonCopy.selectExpr("string", "long", "boolean")
val jsonDFSome = jsonDF.selectExpr("string", "long", "boolean")
checkAnswer(jsonCopySome, jsonDFSome)
}
}
test("Casting long as timestamp") {
withTempView("jsonTable") {
val schema = (new StructType).add("ts", TimestampType)
val jsonDF = spark.read.schema(schema).json(timestampAsLong)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select ts from jsonTable"),
Row(java.sql.Timestamp.valueOf("2016-01-02 03:04:05"))
)
}
}
test("wide nested json table") {
val nested = (1 to 100).map { i =>
s"""
|"c$i": $i
""".stripMargin
}.mkString(", ")
val json = s"""
|{"a": [{$nested}], "b": [{$nested}]}
""".stripMargin
val df = spark.read.json(Seq(json).toDS())
assert(df.schema.size === 2)
df.collect()
}
test("Write dates correctly with dateFormat option") {
val customSchema = new StructType(Array(StructField("date", DateType, true)))
withTempDir { dir =>
// With dateFormat option.
val datesWithFormatPath = s"${dir.getCanonicalPath}/datesWithFormat.json"
val datesWithFormat = spark.read
.schema(customSchema)
.option("dateFormat", "dd/MM/yyyy HH:mm")
.json(datesRecords)
datesWithFormat.write
.format("json")
.option("dateFormat", "yyyy/MM/dd")
.save(datesWithFormatPath)
// This will load back the dates as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val stringDatesWithFormat = spark.read
.schema(stringSchema)
.json(datesWithFormatPath)
val expectedStringDatesWithFormat = Seq(
Row("2015/08/26"),
Row("2014/10/27"),
Row("2016/01/28"))
checkAnswer(stringDatesWithFormat, expectedStringDatesWithFormat)
}
}
test("Write timestamps correctly with timestampFormat option") {
val customSchema = new StructType(Array(StructField("date", TimestampType, true)))
withTempDir { dir =>
      // With timestampFormat option.
val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.json"
val timestampsWithFormat = spark.read
.schema(customSchema)
.option("timestampFormat", "dd/MM/yyyy HH:mm")
.json(datesRecords)
timestampsWithFormat.write
.format("json")
.option("timestampFormat", "yyyy/MM/dd HH:mm")
.save(timestampsWithFormatPath)
// This will load back the timestamps as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val stringTimestampsWithFormat = spark.read
.schema(stringSchema)
.json(timestampsWithFormatPath)
val expectedStringDatesWithFormat = Seq(
Row("2015/08/26 18:00"),
Row("2014/10/27 18:30"),
Row("2016/01/28 20:00"))
checkAnswer(stringTimestampsWithFormat, expectedStringDatesWithFormat)
}
}
test("Write timestamps correctly with timestampFormat option and timeZone option") {
val customSchema = new StructType(Array(StructField("date", TimestampType, true)))
withTempDir { dir =>
      // With timestampFormat option and timeZone option.
val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.json"
val timestampsWithFormat = spark.read
.schema(customSchema)
.option("timestampFormat", "dd/MM/yyyy HH:mm")
.json(datesRecords)
timestampsWithFormat.write
.format("json")
.option("timestampFormat", "yyyy/MM/dd HH:mm")
.option(DateTimeUtils.TIMEZONE_OPTION, "UTC")
.save(timestampsWithFormatPath)
// This will load back the timestamps as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val stringTimestampsWithFormat = spark.read
.schema(stringSchema)
.json(timestampsWithFormatPath)
val expectedStringDatesWithFormat = Seq(
Row("2015/08/27 01:00"),
Row("2014/10/28 01:30"),
Row("2016/01/29 04:00"))
checkAnswer(stringTimestampsWithFormat, expectedStringDatesWithFormat)
val readBack = spark.read
.schema(customSchema)
.option("timestampFormat", "yyyy/MM/dd HH:mm")
.option(DateTimeUtils.TIMEZONE_OPTION, "UTC")
.json(timestampsWithFormatPath)
checkAnswer(readBack, timestampsWithFormat)
}
}
test("SPARK-18433: Improve DataSource option keys to be more case-insensitive") {
val records = Seq("""{"a": 3, "b": 1.1}""", """{"a": 3.1, "b": 0.000001}""").toDS()
val schema = StructType(
StructField("a", DecimalType(21, 1), true) ::
StructField("b", DecimalType(7, 6), true) :: Nil)
val df1 = spark.read.option("prefersDecimal", "true").json(records)
assert(df1.schema == schema)
val df2 = spark.read.option("PREfersdecimaL", "true").json(records)
assert(df2.schema == schema)
}
test("SPARK-18352: Parse normal multi-line JSON files (compressed)") {
withTempPath { dir =>
val path = dir.getCanonicalPath
primitiveFieldAndType
.toDF("value")
.write
.option("compression", "GzIp")
.text(path)
assert(new File(path).listFiles().exists(_.getName.endsWith(".gz")))
val jsonDF = spark.read.option("multiLine", true).json(path)
val jsonDir = new File(dir, "json").getCanonicalPath
jsonDF.coalesce(1).write
.option("compression", "gZiP")
.json(jsonDir)
assert(new File(jsonDir).listFiles().exists(_.getName.endsWith(".json.gz")))
val originalData = spark.read.json(primitiveFieldAndType)
checkAnswer(jsonDF, originalData)
checkAnswer(spark.read.schema(originalData.schema).json(jsonDir), originalData)
}
}
test("SPARK-18352: Parse normal multi-line JSON files (uncompressed)") {
withTempPath { dir =>
val path = dir.getCanonicalPath
primitiveFieldAndType
.toDF("value")
.write
.text(path)
val jsonDF = spark.read.option("multiLine", true).json(path)
val jsonDir = new File(dir, "json").getCanonicalPath
jsonDF.coalesce(1).write.json(jsonDir)
val compressedFiles = new File(jsonDir).listFiles()
assert(compressedFiles.exists(_.getName.endsWith(".json")))
val originalData = spark.read.json(primitiveFieldAndType)
checkAnswer(jsonDF, originalData)
checkAnswer(spark.read.schema(originalData.schema).json(jsonDir), originalData)
}
}
test("SPARK-18352: Expect one JSON document per file") {
// the json parser terminates as soon as it sees a matching END_OBJECT or END_ARRAY token.
// this might not be the optimal behavior but this test verifies that only the first value
// is parsed and the rest are discarded.
// alternatively the parser could continue parsing following objects, which may further reduce
// allocations by skipping the line reader entirely
withTempPath { dir =>
val path = dir.getCanonicalPath
spark
.createDataFrame(Seq(Tuple1("{}{invalid}")))
.coalesce(1)
.write
.text(path)
val jsonDF = spark.read.option("multiLine", true).json(path)
// no corrupt record column should be created
assert(jsonDF.schema === StructType(Seq()))
// only the first object should be read
assert(jsonDF.count() === 1)
}
}
test("SPARK-18352: Handle multi-line corrupt documents (PERMISSIVE)") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val corruptRecordCount = additionalCorruptRecords.count().toInt
assert(corruptRecordCount === 5)
additionalCorruptRecords
.toDF("value")
// this is the minimum partition count that avoids hash collisions
.repartition(corruptRecordCount * 4, F.hash($"value"))
.write
.text(path)
val jsonDF = spark.read.option("multiLine", true).option("mode", "PERMISSIVE").json(path)
assert(jsonDF.count() === corruptRecordCount)
assert(jsonDF.schema === new StructType()
.add("_corrupt_record", StringType)
.add("dummy", StringType))
val counts = jsonDF
.join(
additionalCorruptRecords.toDF("value"),
F.regexp_replace($"_corrupt_record", "(^\\\\s+|\\\\s+$)", "") === F.trim($"value"),
"outer")
.agg(
F.count($"dummy").as("valid"),
F.count($"_corrupt_record").as("corrupt"),
F.count("*").as("count"))
checkAnswer(counts, Row(1, 4, 6))
}
}
test("SPARK-19641: Handle multi-line corrupt documents (DROPMALFORMED)") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val corruptRecordCount = additionalCorruptRecords.count().toInt
assert(corruptRecordCount === 5)
additionalCorruptRecords
.toDF("value")
// this is the minimum partition count that avoids hash collisions
.repartition(corruptRecordCount * 4, F.hash($"value"))
.write
.text(path)
val jsonDF = spark.read.option("multiLine", true).option("mode", "DROPMALFORMED").json(path)
checkAnswer(jsonDF, Seq(Row("test")))
}
}
test("SPARK-18352: Handle multi-line corrupt documents (FAILFAST)") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val corruptRecordCount = additionalCorruptRecords.count().toInt
assert(corruptRecordCount === 5)
additionalCorruptRecords
.toDF("value")
// this is the minimum partition count that avoids hash collisions
.repartition(corruptRecordCount * 4, F.hash($"value"))
.write
.text(path)
val schema = new StructType().add("dummy", StringType)
// `FAILFAST` mode should throw an exception for corrupt records.
val exceptionOne = intercept[SparkException] {
spark.read
.option("multiLine", true)
.option("mode", "FAILFAST")
.json(path)
}
assert(exceptionOne.getMessage.contains("Malformed records are detected in schema " +
"inference. Parse Mode: FAILFAST."))
val exceptionTwo = intercept[SparkException] {
spark.read
.option("multiLine", true)
.option("mode", "FAILFAST")
.schema(schema)
.json(path)
.collect()
}
assert(exceptionTwo.getMessage.contains("Malformed records are detected in record " +
"parsing. Parse Mode: FAILFAST."))
}
}
test("Throw an exception if a `columnNameOfCorruptRecord` field violates requirements") {
val columnNameOfCorruptRecord = "_unparsed"
val schema = StructType(
StructField(columnNameOfCorruptRecord, IntegerType, true) ::
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
val errMsg = intercept[AnalysisException] {
spark.read
.option("mode", "Permissive")
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.schema(schema)
.json(corruptRecords)
}.getMessage
assert(errMsg.startsWith("The field for corrupt records must be string type and nullable"))
    // We use `PERMISSIVE` mode by default if an invalid mode string is given.
withTempPath { dir =>
val path = dir.getCanonicalPath
corruptRecords.toDF("value").write.text(path)
val errMsg = intercept[AnalysisException] {
spark.read
.option("mode", "permm")
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.schema(schema)
.json(path)
.collect
}.getMessage
assert(errMsg.startsWith("The field for corrupt records must be string type and nullable"))
}
}
test("SPARK-18772: Parse special floats correctly") {
val jsons = Seq(
"""{"a": "NaN"}""",
"""{"a": "Infinity"}""",
"""{"a": "-Infinity"}""")
// positive cases
val checks: Seq[Double => Boolean] = Seq(
_.isNaN,
_.isPosInfinity,
_.isNegInfinity)
Seq(FloatType, DoubleType).foreach { dt =>
jsons.zip(checks).foreach { case (json, check) =>
val ds = spark.read
.schema(StructType(Seq(StructField("a", dt))))
.json(Seq(json).toDS())
.select($"a".cast(DoubleType)).as[Double]
assert(check(ds.first()))
}
}
// negative cases
Seq(FloatType, DoubleType).foreach { dt =>
val lowerCasedJsons = jsons.map(_.toLowerCase(Locale.ROOT))
// The special floats are case-sensitive so these cases below throw exceptions.
lowerCasedJsons.foreach { lowerCasedJson =>
val e = intercept[SparkException] {
spark.read
.option("mode", "FAILFAST")
.schema(StructType(Seq(StructField("a", dt))))
.json(Seq(lowerCasedJson).toDS())
.collect()
}
assert(e.getMessage.contains("Cannot parse"))
}
}
}
test("SPARK-21610: Corrupt records are not handled properly when creating a dataframe " +
"from a file") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val data =
"""{"field": 1}
|{"field": 2}
|{"field": "3"}""".stripMargin
Seq(data).toDF().repartition(1).write.text(path)
val schema = new StructType().add("field", ByteType).add("_corrupt_record", StringType)
// negative cases
val msg = intercept[AnalysisException] {
spark.read.schema(schema).json(path).select("_corrupt_record").collect()
}.getMessage
assert(msg.contains("only include the internal corrupt record column"))
// workaround
val df = spark.read.schema(schema).json(path).cache()
assert(df.filter($"_corrupt_record".isNotNull).count() == 1)
assert(df.filter($"_corrupt_record".isNull).count() == 2)
checkAnswer(
df.select("_corrupt_record"),
Row(null) :: Row(null) :: Row("{\\"field\\": \\"3\\"}") :: Nil
)
}
}
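  // Generates read, write and roundtrip tests for the given record separator.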
def testLineSeparator(lineSep: String): Unit = {
test(s"SPARK-21289: Support line separator - lineSep: '$lineSep'") {
// Read
val data =
s"""
| {"f":
|"a", "f0": 1}$lineSep{"f":
|
|"c", "f0": 2}$lineSep{"f": "d", "f0": 3}
""".stripMargin
val dataWithTrailingLineSep = s"$data$lineSep"
Seq(data, dataWithTrailingLineSep).foreach { lines =>
withTempPath { path =>
Files.write(path.toPath, lines.getBytes(StandardCharsets.UTF_8))
val df = spark.read.option("lineSep", lineSep).json(path.getAbsolutePath)
val expectedSchema =
StructType(StructField("f", StringType) :: StructField("f0", LongType) :: Nil)
checkAnswer(df, Seq(("a", 1), ("c", 2), ("d", 3)).toDF())
assert(df.schema === expectedSchema)
}
}
// Write
withTempPath { path =>
Seq("a", "b", "c").toDF("value").coalesce(1)
.write.option("lineSep", lineSep).json(path.getAbsolutePath)
val partFile = TestUtils.recursiveList(path).filter(f => f.getName.startsWith("part-")).head
val readBack = new String(Files.readAllBytes(partFile.toPath), StandardCharsets.UTF_8)
assert(
readBack === s"""{"value":"a"}$lineSep{"value":"b"}$lineSep{"value":"c"}$lineSep""")
}
// Roundtrip
withTempPath { path =>
val df = Seq("a", "b", "c").toDF()
df.write.option("lineSep", lineSep).json(path.getAbsolutePath)
val readBack = spark.read.option("lineSep", lineSep).json(path.getAbsolutePath)
checkAnswer(df, readBack)
}
}
}
// scalastyle:off nonascii
Seq("|", "^", "::", "!!!@3", 0x1E.toChar.toString, "아").foreach { lineSep =>
testLineSeparator(lineSep)
}
// scalastyle:on nonascii
test("""SPARK-21289: Support line separator - default value \\r, \\r\\n and \\n""") {
val data =
"{\\"f\\": \\"a\\", \\"f0\\": 1}\\r{\\"f\\": \\"c\\", \\"f0\\": 2}\\r\\n{\\"f\\": \\"d\\", \\"f0\\": 3}\\n"
withTempPath { path =>
Files.write(path.toPath, data.getBytes(StandardCharsets.UTF_8))
val df = spark.read.json(path.getAbsolutePath)
val expectedSchema =
StructType(StructField("f", StringType) :: StructField("f0", LongType) :: Nil)
checkAnswer(df, Seq(("a", 1), ("c", 2), ("d", 3)).toDF())
assert(df.schema === expectedSchema)
}
}
test("SPARK-23849: schema inferring touches less data if samplingRatio < 1.0") {
// Set default values for the DataSource parameters to make sure
// that whole test file is mapped to only one partition. This will guarantee
// reliable sampling of the input file.
withSQLConf(
SQLConf.FILES_MAX_PARTITION_BYTES.key -> (128 * 1024 * 1024).toString,
SQLConf.FILES_OPEN_COST_IN_BYTES.key -> (4 * 1024 * 1024).toString
)(withTempPath { path =>
val ds = sampledTestData.coalesce(1)
ds.write.text(path.getAbsolutePath)
val readback = spark.read.option("samplingRatio", 0.1).json(path.getCanonicalPath)
assert(readback.schema == new StructType().add("f1", LongType))
})
}
test("SPARK-23849: usage of samplingRatio while parsing a dataset of strings") {
val ds = sampledTestData.coalesce(1)
val readback = spark.read.option("samplingRatio", 0.1).json(ds)
assert(readback.schema == new StructType().add("f1", LongType))
}
test("SPARK-23849: samplingRatio is out of the range (0, 1.0]") {
val ds = spark.range(0, 100, 1, 1).map(_.toString)
val errorMsg0 = intercept[IllegalArgumentException] {
spark.read.option("samplingRatio", -1).json(ds)
}.getMessage
assert(errorMsg0.contains("samplingRatio (-1.0) should be greater than 0"))
val errorMsg1 = intercept[IllegalArgumentException] {
spark.read.option("samplingRatio", 0).json(ds)
}.getMessage
assert(errorMsg1.contains("samplingRatio (0.0) should be greater than 0"))
val sampled = spark.read.option("samplingRatio", 1.0).json(ds)
assert(sampled.count() == ds.count())
}
test("SPARK-23723: json in UTF-16 with BOM") {
val fileName = "test-data/utf16WithBOM.json"
val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
val jsonDF = spark.read.schema(schema)
.option("multiline", "true")
.option("encoding", "UTF-16")
.json(testFile(fileName))
checkAnswer(jsonDF, Seq(Row("Chris", "Baird"), Row("Doug", "Rood")))
}
test("SPARK-23723: multi-line json in UTF-32BE with BOM") {
val fileName = "test-data/utf32BEWithBOM.json"
val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
val jsonDF = spark.read.schema(schema)
.option("multiline", "true")
.json(testFile(fileName))
checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
}
test("SPARK-23723: Use user's encoding in reading of multi-line json in UTF-16LE") {
val fileName = "test-data/utf16LE.json"
val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
val jsonDF = spark.read.schema(schema)
.option("multiline", "true")
.options(Map("encoding" -> "UTF-16LE"))
.json(testFile(fileName))
checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
}
test("SPARK-23723: Unsupported encoding name") {
val invalidCharset = "UTF-128"
val exception = intercept[UnsupportedCharsetException] {
spark.read
.options(Map("encoding" -> invalidCharset, "lineSep" -> "\\n"))
.json(testFile("test-data/utf16LE.json"))
.count()
}
assert(exception.getMessage.contains(invalidCharset))
}
test("SPARK-23723: checking that the encoding option is case agnostic") {
val fileName = "test-data/utf16LE.json"
val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
val jsonDF = spark.read.schema(schema)
.option("multiline", "true")
.options(Map("encoding" -> "uTf-16lE"))
.json(testFile(fileName))
checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
}
test("SPARK-23723: specified encoding is not matched to actual encoding") {
val fileName = "test-data/utf16LE.json"
val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
val exception = intercept[SparkException] {
spark.read.schema(schema)
.option("mode", "FAILFAST")
.option("multiline", "true")
.options(Map("encoding" -> "UTF-16BE"))
.json(testFile(fileName))
.count()
}
assert(exception.getMessage.contains("Malformed records are detected in record parsing"))
}
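  // Reads every JSON part file under `pathToJsonFiles` back with `expectedEncoding`
  // and checks that the concatenated, trimmed content equals `expectedContent`.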
def checkEncoding(expectedEncoding: String, pathToJsonFiles: String,
expectedContent: String): Unit = {
val jsonFiles = new File(pathToJsonFiles)
.listFiles()
.filter(_.isFile)
.filter(_.getName.endsWith("json"))
val actualContent = jsonFiles.map { file =>
new String(Files.readAllBytes(file.toPath), expectedEncoding)
}.mkString.trim
assert(actualContent == expectedContent)
}
test("SPARK-23723: save json in UTF-32BE") {
val encoding = "UTF-32BE"
withTempPath { path =>
val df = spark.createDataset(Seq(("Dog", 42)))
df.write
.options(Map("encoding" -> encoding))
.json(path.getCanonicalPath)
checkEncoding(
expectedEncoding = encoding,
pathToJsonFiles = path.getCanonicalPath,
expectedContent = """{"_1":"Dog","_2":42}""")
}
}
test("SPARK-23723: save json in default encoding - UTF-8") {
withTempPath { path =>
val df = spark.createDataset(Seq(("Dog", 42)))
df.write.json(path.getCanonicalPath)
checkEncoding(
expectedEncoding = "UTF-8",
pathToJsonFiles = path.getCanonicalPath,
expectedContent = """{"_1":"Dog","_2":42}""")
}
}
test("SPARK-23723: wrong output encoding") {
val encoding = "UTF-128"
val exception = intercept[SparkException] {
withTempPath { path =>
val df = spark.createDataset(Seq((0)))
df.write
.options(Map("encoding" -> encoding))
.json(path.getCanonicalPath)
}
}
val baos = new ByteArrayOutputStream()
val ps = new PrintStream(baos, true, StandardCharsets.UTF_8.name())
exception.printStackTrace(ps)
ps.flush()
assert(baos.toString.contains(
"java.nio.charset.UnsupportedCharsetException: UTF-128"))
}
test("SPARK-23723: read back json in UTF-16LE") {
val options = Map("encoding" -> "UTF-16LE", "lineSep" -> "\\n")
withTempPath { path =>
val ds = spark.createDataset(Seq(("a", 1), ("b", 2), ("c", 3))).repartition(2)
ds.write.options(options).json(path.getCanonicalPath)
val readBack = spark
.read
.options(options)
.json(path.getCanonicalPath)
checkAnswer(readBack.toDF(), ds.toDF())
}
}
test("SPARK-23723: write json in UTF-16/32 with multiline off") {
Seq("UTF-16", "UTF-32").foreach { encoding =>
withTempPath { path =>
val ds = spark.createDataset(Seq(("a", 1))).repartition(1)
ds.write
.option("encoding", encoding)
.option("multiline", false)
.json(path.getCanonicalPath)
val jsonFiles = path.listFiles().filter(_.getName.endsWith("json"))
jsonFiles.foreach { jsonFile =>
val readback = Files.readAllBytes(jsonFile.toPath)
val expected = ("""{"_1":"a","_2":1}""" + "\\n").getBytes(Charset.forName(encoding))
assert(readback === expected)
}
}
}
}
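  // Generates one test per (lineSep, encoding, inferSchema) combination listed below:
  // the records are written in the given encoding joined by lineSep, then read back
  // with matching options, with or without an explicit schema.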
def checkReadJson(lineSep: String, encoding: String, inferSchema: Boolean, id: Int): Unit = {
test(s"SPARK-23724: checks reading json in ${encoding} #${id}") {
val schema = new StructType().add("f1", StringType).add("f2", IntegerType)
withTempPath { path =>
val records = List(("a", 1), ("b", 2))
val data = records
.map(rec => s"""{"f1":"${rec._1}", "f2":${rec._2}}""".getBytes(encoding))
.reduce((a1, a2) => a1 ++ lineSep.getBytes(encoding) ++ a2)
val os = new FileOutputStream(path)
os.write(data)
os.close()
val reader = if (inferSchema) {
spark.read
} else {
spark.read.schema(schema)
}
val readBack = reader
.option("encoding", encoding)
.option("lineSep", lineSep)
.json(path.getCanonicalPath)
checkAnswer(readBack, records.map(rec => Row(rec._1, rec._2)))
}
}
}
// scalastyle:off nonascii
List(
(0, "|", "UTF-8", false),
(1, "^", "UTF-16BE", true),
(2, "::", "ISO-8859-1", true),
(3, "!!!@3", "UTF-32LE", false),
(4, 0x1E.toChar.toString, "UTF-8", true),
(5, "아", "UTF-32BE", false),
(6, "куку", "CP1251", true),
(7, "sep", "utf-8", false),
(8, "\\r\\n", "UTF-16LE", false),
(9, "\\r\\n", "utf-16be", true),
(10, "\\u000d\\u000a", "UTF-32BE", false),
(11, "\\u000a\\u000d", "UTF-8", true),
(12, "===", "US-ASCII", false),
(13, "$^+", "utf-32le", true)
).foreach {
case (testNum, sep, encoding, inferSchema) => checkReadJson(sep, encoding, inferSchema, testNum)
}
// scalastyle:on nonascii
test("SPARK-23724: lineSep should be set if encoding if different from UTF-8") {
val encoding = "UTF-16LE"
val exception = intercept[IllegalArgumentException] {
spark.read
.options(Map("encoding" -> encoding))
.json(testFile("test-data/utf16LE.json"))
.count()
}
assert(exception.getMessage.contains(
s"""The lineSep option must be specified for the $encoding encoding"""))
}
private val badJson = "\\u0000\\u0000\\u0000A\\u0001AAA"
test("SPARK-23094: permissively read JSON file with leading nulls when multiLine is enabled") {
withTempPath { tempDir =>
val path = tempDir.getAbsolutePath
Seq(badJson + """{"a":1}""").toDS().write.text(path)
val expected = s"""${badJson}{"a":1}\\n"""
val schema = new StructType().add("a", IntegerType).add("_corrupt_record", StringType)
val df = spark.read.format("json")
.option("mode", "PERMISSIVE")
.option("multiLine", true)
.option("encoding", "UTF-8")
.schema(schema).load(path)
checkAnswer(df, Row(null, expected))
}
}
test("SPARK-23094: permissively read JSON file with leading nulls when multiLine is disabled") {
withTempPath { tempDir =>
val path = tempDir.getAbsolutePath
Seq(badJson, """{"a":1}""").toDS().write.text(path)
val schema = new StructType().add("a", IntegerType).add("_corrupt_record", StringType)
val df = spark.read.format("json")
.option("mode", "PERMISSIVE")
.option("multiLine", false)
.option("encoding", "UTF-8")
.schema(schema).load(path)
checkAnswer(df, Seq(Row(1, null), Row(null, badJson)))
}
}
test("SPARK-23094: permissively parse a dataset contains JSON with leading nulls") {
checkAnswer(
spark.read.option("mode", "PERMISSIVE").option("encoding", "UTF-8").json(Seq(badJson).toDS()),
Row(badJson))
}
test("SPARK-23772 ignore column of all null values or empty array during schema inference") {
withTempPath { tempDir =>
val path = tempDir.getAbsolutePath
// primitive types
Seq(
"""{"a":null, "b":1, "c":3.0}""",
"""{"a":null, "b":null, "c":"string"}""",
"""{"a":null, "b":null, "c":null}""")
.toDS().write.text(path)
var df = spark.read.format("json")
.option("dropFieldIfAllNull", true)
.load(path)
var expectedSchema = new StructType()
.add("b", LongType).add("c", StringType)
assert(df.schema === expectedSchema)
checkAnswer(df, Row(1, "3.0") :: Row(null, "string") :: Row(null, null) :: Nil)
// arrays
Seq(
"""{"a":[2, 1], "b":[null, null], "c":null, "d":[[], [null]], "e":[[], null, [[]]]}""",
"""{"a":[null], "b":[null], "c":[], "d":[null, []], "e":null}""",
"""{"a":null, "b":null, "c":[], "d":null, "e":[null, [], null]}""")
.toDS().write.mode("overwrite").text(path)
df = spark.read.format("json")
.option("dropFieldIfAllNull", true)
.load(path)
expectedSchema = new StructType()
.add("a", ArrayType(LongType))
assert(df.schema === expectedSchema)
checkAnswer(df, Row(Array(2, 1)) :: Row(Array(null)) :: Row(null) :: Nil)
// structs
Seq(
"""{"a":{"a1": 1, "a2":"string"}, "b":{}}""",
"""{"a":{"a1": 2, "a2":null}, "b":{"b1":[null]}}""",
"""{"a":null, "b":null}""")
.toDS().write.mode("overwrite").text(path)
df = spark.read.format("json")
.option("dropFieldIfAllNull", true)
.load(path)
expectedSchema = new StructType()
.add("a", StructType(StructField("a1", LongType) :: StructField("a2", StringType)
:: Nil))
assert(df.schema === expectedSchema)
checkAnswer(df, Row(Row(1, "string")) :: Row(Row(2, null)) :: Row(null) :: Nil)
}
}
test("SPARK-24190: restrictions for JSONOptions in read") {
for (encoding <- Set("UTF-16", "UTF-32")) {
val exception = intercept[IllegalArgumentException] {
spark.read
.option("encoding", encoding)
.option("multiLine", false)
.json(testFile("test-data/utf16LE.json"))
.count()
}
assert(exception.getMessage.contains("encoding must not be included in the blacklist"))
}
}
test("count() for malformed input") {
def countForMalformedJSON(expected: Long, input: Seq[String]): Unit = {
val schema = new StructType().add("a", StringType)
val strings = spark.createDataset(input)
val df = spark.read.schema(schema).json(strings)
assert(df.count() == expected)
}
def checkCount(expected: Long): Unit = {
val validRec = """{"a":"b"}"""
val inputs = Seq(
Seq("{-}", validRec),
Seq(validRec, "?"),
Seq("}", validRec),
Seq(validRec, """{"a": [1, 2, 3]}"""),
Seq("""{"a": {"a": "b"}}""", validRec)
)
inputs.foreach { input =>
countForMalformedJSON(expected, input)
}
}
checkCount(2)
countForMalformedJSON(0, Seq(""))
}
test("SPARK-26745: count() for non-multiline input with empty lines") {
withTempPath { tempPath =>
val path = tempPath.getCanonicalPath
Seq("""{ "a" : 1 }""", "", """ { "a" : 2 }""", " \\t ")
.toDS()
.repartition(1)
.write
.text(path)
assert(spark.read.json(path).count() === 2)
}
}
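  // Helpers for the empty-string tests below: `failedOnEmptyString` expects FAILFAST
  // parsing of {"a":""} to fail for the given type, while `emptyString` asserts the
  // value an empty string is read as.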
private def failedOnEmptyString(dataType: DataType): Unit = {
val df = spark.read.schema(s"a ${dataType.catalogString}")
.option("mode", "FAILFAST").json(Seq("""{"a":""}""").toDS)
val errMessage = intercept[SparkException] {
df.collect()
}.getMessage
assert(errMessage.contains(
s"Failed to parse an empty string for data type ${dataType.catalogString}"))
}
private def emptyString(dataType: DataType, expected: Any): Unit = {
val df = spark.read.schema(s"a ${dataType.catalogString}")
.option("mode", "FAILFAST").json(Seq("""{"a":""}""").toDS)
checkAnswer(df, Row(expected) :: Nil)
}
test("SPARK-25040: empty strings should be disallowed") {
failedOnEmptyString(BooleanType)
failedOnEmptyString(ByteType)
failedOnEmptyString(ShortType)
failedOnEmptyString(IntegerType)
failedOnEmptyString(LongType)
failedOnEmptyString(FloatType)
failedOnEmptyString(DoubleType)
failedOnEmptyString(DecimalType.SYSTEM_DEFAULT)
failedOnEmptyString(TimestampType)
failedOnEmptyString(DateType)
failedOnEmptyString(ArrayType(IntegerType))
failedOnEmptyString(MapType(StringType, IntegerType, true))
failedOnEmptyString(StructType(StructField("f1", IntegerType, true) :: Nil))
emptyString(StringType, "")
emptyString(BinaryType, "".getBytes(StandardCharsets.UTF_8))
}
test("SPARK-25040: allowing empty strings when legacy config is enabled") {
def emptyStringAsNull(dataType: DataType): Unit = {
val df = spark.read.schema(s"a ${dataType.catalogString}")
.option("mode", "FAILFAST").json(Seq("""{"a":""}""").toDS)
checkAnswer(df, Row(null) :: Nil)
}
// Legacy mode prior to Spark 3.0.0
withSQLConf(SQLConf.LEGACY_ALLOW_EMPTY_STRING_IN_JSON.key -> "true") {
emptyStringAsNull(BooleanType)
emptyStringAsNull(ByteType)
emptyStringAsNull(ShortType)
emptyStringAsNull(IntegerType)
emptyStringAsNull(LongType)
failedOnEmptyString(FloatType)
failedOnEmptyString(DoubleType)
failedOnEmptyString(TimestampType)
failedOnEmptyString(DateType)
emptyStringAsNull(DecimalType.SYSTEM_DEFAULT)
emptyStringAsNull(ArrayType(IntegerType))
emptyStringAsNull(MapType(StringType, IntegerType, true))
emptyStringAsNull(StructType(StructField("f1", IntegerType, true) :: Nil))
emptyString(StringType, "")
emptyString(BinaryType, "".getBytes(StandardCharsets.UTF_8))
}
}
test("return partial result for bad records") {
val schema = "a double, b array<int>, c string, _corrupt_record string"
val badRecords = Seq(
"""{"a":"-","b":[0, 1, 2],"c":"abc"}""",
"""{"a":0.1,"b":{},"c":"def"}""").toDS()
val df = spark.read.schema(schema).json(badRecords)
checkAnswer(
df,
Row(null, Array(0, 1, 2), "abc", """{"a":"-","b":[0, 1, 2],"c":"abc"}""") ::
Row(0.1, null, "def", """{"a":0.1,"b":{},"c":"def"}""") :: Nil)
}
test("inferring timestamp type") {
def schemaOf(jsons: String*): StructType = spark.read.json(jsons.toDS).schema
assert(schemaOf(
"""{"a":"2018-12-17T10:11:12.123-01:00"}""",
"""{"a":"2018-12-16T22:23:24.123-02:00"}""") === fromDDL("a timestamp"))
assert(schemaOf("""{"a":"2018-12-17T10:11:12.123-01:00"}""", """{"a":1}""")
=== fromDDL("a string"))
assert(schemaOf("""{"a":"2018-12-17T10:11:12.123-01:00"}""", """{"a":"123"}""")
=== fromDDL("a string"))
assert(schemaOf("""{"a":"2018-12-17T10:11:12.123-01:00"}""", """{"a":null}""")
=== fromDDL("a timestamp"))
assert(schemaOf("""{"a":null}""", """{"a":"2018-12-17T10:11:12.123-01:00"}""")
=== fromDDL("a timestamp"))
}
test("roundtrip for timestamp type inferring") {
val customSchema = new StructType().add("date", TimestampType)
withTempDir { dir =>
val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.json"
val timestampsWithFormat = spark.read
.option("timestampFormat", "dd/MM/yyyy HH:mm")
.json(datesRecords)
assert(timestampsWithFormat.schema === customSchema)
timestampsWithFormat.write
.format("json")
.option("timestampFormat", "yyyy-MM-dd HH:mm:ss")
.option(DateTimeUtils.TIMEZONE_OPTION, "UTC")
.save(timestampsWithFormatPath)
val readBack = spark.read
.option("timestampFormat", "yyyy-MM-dd HH:mm:ss")
.option(DateTimeUtils.TIMEZONE_OPTION, "UTC")
.json(timestampsWithFormatPath)
assert(readBack.schema === customSchema)
checkAnswer(readBack, timestampsWithFormat)
}
}
test("SPARK-30960, SPARK-31641: parse date/timestamp string with legacy format") {
val julianDay = -141704 // 1582-01-01 in Julian calendar
val ds = Seq(
s"{'t': '2020-1-12 3:23:34.12', 'd': '2020-1-12 T', 'd2': '12345', 'd3': '$julianDay'}"
).toDS()
val json = spark.read.schema("t timestamp, d date, d2 date, d3 date").json(ds)
checkAnswer(json, Row(
Timestamp.valueOf("2020-1-12 3:23:34.12"),
Date.valueOf("2020-1-12"),
Date.valueOf(LocalDate.ofEpochDay(12345)),
Date.valueOf("1582-01-01")))
}
test("exception mode for parsing date/timestamp string") {
val ds = Seq("{'t': '2020-01-27T20:06:11.847-0800'}").toDS()
val json = spark.read
.schema("t timestamp")
.option("timestampFormat", "yyyy-MM-dd'T'HH:mm:ss.SSSz")
.json(ds)
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> "exception") {
val msg = intercept[SparkException] {
json.collect()
}.getCause.getMessage
assert(msg.contains("Fail to parse"))
}
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> "legacy") {
checkAnswer(json, Row(Timestamp.valueOf("2020-01-27 20:06:11.847")))
}
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> "corrected") {
checkAnswer(json, Row(null))
}
}
}
class JsonV1Suite extends JsonSuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_LIST, "json")
}
class JsonV2Suite extends JsonSuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_LIST, "")
}
class JsonLegacyTimeParserSuite extends JsonSuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.LEGACY_TIME_PARSER_POLICY, "legacy")
}
|
ConeyLiu/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala
|
Scala
|
apache-2.0
| 100,585
|
package scalariform.astselect
import org.scalatest._
import org.scalatest.matchers._
import scalariform.utils.Range
import scalariform.ScalaVersions
// format: OFF
class AstSelectorTest extends FlatSpec with ShouldMatchers {
// Legend:
//
// "|" denotes a zero-width selection before the position
// "$" denotes a single-character-width selection at the position
// "$$$$$$" denotes a multiple-character-width selection at the given positions
" wibble " ~
" | " ~
" $$$$$$ "
" wibble " ~
" | " ~
" $$$$$$ "
" wibble " ~
" | " ~
" $$$$$$ "
" wibble " ~
" | " ~
" $$$$$$ "
" /* foo */ " ~
" $ " ~
" $$$$$$$$$ "
" /* foo */ /* bar */ " ~
" $ " ~
" $$$$$$$$$ "
" class A(n: Int) " ~
" $$$$$$ " ~
" $$$$$$$$$$$$$$$ "
" foo(42) " ~
" $$ " ~
" $$$$$$$ "
" object A { } " ~
" | " ~
" $$$$$$$$$$$$ "
" private def foo = 42 " ~
" $$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$$$ "
" if (a) b else c " ~
" $$$ " ~
" $$$$$$$$$$$$$$$ "
" aa(bb + cc, dd * ee) " ~
" $$$$$ " ~
" $$$$$$$ "
" class A[B] " ~
" $ " ~
" $$$$$$$$$$ "
" new Wibble " ~
" $$$$$$ " ~
" $$$$$$$$$$ "
" new Wibble() " ~
" $$$ " ~
" $$$$$$$$$$$$ "
" a + b + c " ~
" $ " ~
" $$$$$ " ~
" $$$$$$$$$ "
" a + b + c " ~
" $$$ " ~
" $$$$$$$$$ "
" x + y * z " ~
" $$$ " ~
" $$$$$ " ~
" $$$$$$$$$ "
" a :: b :: c :: Nil " ~
" $$ " ~
" $$$$$$$$ " ~
" $$$$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$ "
" a :: b :: Nil ++ Nil " ~
" $$$ " ~
" $$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$$$ "
" a + b :: b + c :: Nil ++ Nil " ~
" $ " ~
" $$$$$ " ~
" $$$$$$$$$$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$$$$$$$$$$$ "
" i += 10 + 2 " ~
" $ " ~
" $$$$$$$$$$$ "
" i += 10 + 2 " ~
" $$ " ~
" $$$$$$ " ~
" $$$$$$$$$$$ "
" 'a'.bar[X](foo).bizzle(a, b).baz.buzz[T].bozz(12)(15).foo _ " ~
" $$$ " ~
" $$$$$$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ "
" a.foo(b).bar(c) " ~
" $ " ~
" $$$$$$$$ " ~
" $$$$$$$$$$$$$$$ "
" def x = 42 " ~
" $$ " ~
" $$$$$$$$$$ "
" x: Int " ~
" $ " ~
" $$$$$$ "
" x = a + b " ~
" $ " ~
" $$$$$$$$$ "
" a match { case b => } " ~
" $ " ~
" $$$$$$$$$$$$$$$$$$$$$ "
" a match { case b => c } " ~
" $$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$$$$$$ "
" (a, b) " ~
" $$$ " ~
" $$$$$$ "
" for { a <- b; c <- d } yield e " ~
" $$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ "
" { case b ⇒ c } " ~
" $ " ~
" $$$$$$$$$$ "
" for (a ← b) { c } " ~
" $ " ~
" $$$$$ "
" for (a <- b if c) {} " ~
" $ " ~
" $$$$$$$$$$$ "
" def a { b } " ~
" $ " ~
" $$$$$$$$$$$ "
" { case b :: c :: d => } " ~
" $ " ~
" $$$$$$ " ~
" $$$$$$$$$$$ "
" /** a */ class B " ~
" $ " ~
" $$$$$$$$$$$$$$$$ "
" /** a */ class B " ~
" $$$$ " ~
" $$$$$$$$ " ~
" $$$$$$$$$$$$$$$$ "
" /** a */ class B; class C " ~
" $$$$$$$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$$$$$$$$ "
" val a = { b } " ~
" $ " ~
" $$$$$ " // A bit inconsistent with def's, but maybe OK
" try a catch b " ~
" $ " ~
" $$$$$$$$$$$$$ "
" if (a) b " ~
" $ " ~
" $$$$$$$$ "
" def a = b(c) " ~
" $$$ " ~
" $$$$ "
" def a = { b } " ~
" $ " ~
" $$$$$ "
" for (a <- b) c(d) " ~
" $$$ " ~
" $$$$ "
" def a[B <% C] " ~
" $$ " ~
" $$$$$$ "
" class A[B <% C, D <% E] " ~
" $$ " ~
" $$$$$$ " ~
" $$$$$$$$$$$$$$$$$$$$$$$ "
" sealed class A " ~
" $$$ " ~
" $$$$$$$$$$$$$$ "
" protected[a] val b " ~
" $ " ~
" $$$$$$$$$$$$ "
" evaluating { stack.pop() } should produce [NoSuchElementException] " ~
" $$$$$$$ " ~
" $$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ "
" evaluating { stack.pop() } should produce [NoSuchElementException] " ~
" $$$$$$$$$$$$$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ "
" } wibble " ~
" $ " ~
" $$$$$$ "
" foo.bar.baz.buz() " ~
" $ " ~
" $$$ " ~
" $$$$$$$ " ~
" $$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$ "
" foo.super[bar].baz " ~
" $$$ " ~
" $$$$$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$ "
" package foo.bar.baz " ~
" $$$ " ~
" $$$$$$$ " ~
" $$$$$$$$$$$ " ~
" $$$$$$$$$$$$$$$$$$$ "
" foo[bar][baz] " ~
" $$$ " ~
" $$$$$$$$ " ~
" $$$$$$$$$$$$$ "
" (!foo) " ~
" $$$ " ~
" $$$$ " ~
" $$$$$$ "
/* TODO: Need AST representation for [] calls
" foo[bar][baz][boz] " ~
" $$$$$$$$ " ~
" $$$$$$$$$$$$$ "
*/
{
implicit val scalaVersion = "2.10.0"
""" s"my name is ?{person.name}." """.replace('?', '$') ~
""" $$$$$$ " """ ~
""" $$$$$$$$$$$ " """ ~
""" $$$$$$$$$$$$$$ " """ ~
""" $$$$$$$$$$$$$$$$$$$$$$$$$$$$$ """
""" xyz"" """ ~
""" $ """ ~
""" $$$ """ ~
""" $$$$$ """
""" s"my name is $bob" """ ~
""" $ """ ~
""" $$$$$$$$$$$$$ """
""" s"my name is $bob." """ ~
""" $ """ ~
""" $$$$ """
}
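  // Decodes a selection diagram: a '|' yields a zero-width Range at that offset,
  // otherwise the first run of '$' characters gives the selection's offset and length.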
private def findSelectionRange(s: String): Range = {
val barLocation = s indexOf '|'
if (barLocation >= 0)
Range(barLocation, 0)
else {
val firstDollarLocation = s indexOf '$'
require(firstDollarLocation >= 0, "No selection marker: " + s)
val dollars = s.drop(firstDollarLocation).takeWhile(_ == '$')
Range(firstDollarLocation, dollars.length)
}
}
implicit def stringToTestString(source: String)(implicit scalaVersion: String = ScalaVersions.DEFAULT_VERSION): TestString = new TestString(source, scalaVersion)
class TestString(source: String, scalaVersion: String) {
def ~(initialSelectionDiagram: String) = IntermediateTest(source, initialSelectionDiagram, scalaVersion)
}
case class IntermediateTest(source: String, initialSelectionDiagram: String, scalaVersion: String) {
def ~(finalSelectionDiagram: String): IntermediateTest = {
val initialSelection = findSelectionRange(initialSelectionDiagram)
val actualFinalSelection = AstSelector.expandSelection(source, initialSelection, scalaVersion) getOrElse initialSelection
val expectedFinalSelection = findSelectionRange(finalSelectionDiagram)
("source\\n>>>" + source + "<<<\\n") should "expand\\n>>>" + (initialSelectionDiagram + "<<<\\n to \\n>>>" + finalSelectionDiagram + "<<<\\n") in {
actualFinalSelection should equal (expectedFinalSelection)
}
IntermediateTest(source, initialSelectionDiagram = finalSelectionDiagram, scalaVersion)
}
}
}
|
gawkermedia/scalariform
|
scalariform/src/test/scala/scalariform/astselect/AstSelectorTest.scala
|
Scala
|
mit
| 8,340
|
package io.skysail.domain.resources
import io.skysail.domain.RequestEvent
import io.skysail.domain.app.ApplicationApi
/**
 * Base class for asynchronous resources whose only abstract operation is
 * [[getAsync]].
 */
abstract class AsyncStaticResource extends AsyncResource[ApplicationApi,Any] {
def getAsync(requestEvent: RequestEvent): Unit
}
|
evandor/skysail-server
|
skysail.domain/src/io/skysail/domain/resources/AsyncStaticResource.scala
|
Scala
|
apache-2.0
| 264
|
package com.jiffey.slick.additions.dialects
import com.jiffey.slick.additions.DatabaseDialect
import com.jiffey.slick.additions.components.PostgreSQL
object PostgreSQLDatabaseDialect extends DatabaseDialect[PostgreSQL]
|
dre1080/slick-additions
|
src/main/scala/com/jiffey/slick/additions/dialects/PostgreSQLDatabaseDialect.scala
|
Scala
|
apache-2.0
| 221
|
package com.obecto.gattakka.genetics.operators
import com.obecto.gattakka.genetics.{Chromosome, Genome}
import com.obecto.gattakka.{IndividualDescriptor,IndividualState, PipelineOperator}
import scala.collection.mutable.ListBuffer
import scala.util.Random
trait MutationBaseOperator extends PipelineOperator {
def rnd: Random = Random
def mutationChance: Double
def killParent: Boolean = true
def apply(genome: Genome): Genome
def apply(snapshot: List[IndividualDescriptor]): List[IndividualDescriptor] = {
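    // Skip individuals already doomed to die; with probability `mutationChance`
    // derive a mutated genome from each remaining individual. When the mutation
    // changes the genome, the mutant is appended to the snapshot and, unless the
    // parent is elite or killParent is false, the parent is marked DoomedToDie.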
val withoutDoomed = snapshot filter (_.state != IndividualState.DoomedToDie)
val mutatedIndividuals: ListBuffer[IndividualDescriptor] = ListBuffer.empty
for (descriptor <- withoutDoomed) {
if (rnd.nextFloat() < mutationChance) {
val genome = descriptor.genome
val newGenome = apply(genome)
if (newGenome != genome) {
if (killParent && descriptor.state != IndividualState.Elite) {
descriptor.state = IndividualState.DoomedToDie
}
mutatedIndividuals += IndividualDescriptor(newGenome)
}
}
}
snapshot ++ mutatedIndividuals.toList
}
}
trait ChromosomeMutationBaseOperator extends PipelineOperator with MutationBaseOperator {
def apply(chromosome: Chromosome): Chromosome
def apply(genome: Genome): Genome = {
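    // Mutate each chromosome in turn; allocate a new Genome only if at least
    // one chromosome actually changed.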
var genomeMutationOccured = false
val newChromosomes = genome.chromosomes map { chromosome =>
val newChromosome = apply(chromosome)
if (newChromosome != chromosome) {
genomeMutationOccured = true
newChromosome
} else {
chromosome
}
}
if (genomeMutationOccured) {
new Genome(newChromosomes)
} else {
genome
}
}
}
|
obecto/gattakka
|
src/main/scala/com/obecto/gattakka/genetics/operators/MutationBaseOperator.scala
|
Scala
|
mit
| 1,752
|
/*******************************************************************************
Copyright (c) 2013, KAIST, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.models.DOMObject
import kr.ac.kaist.jsaf.analysis.typing.Semantics
import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T}
import kr.ac.kaist.jsaf.analysis.typing.models._
import kr.ac.kaist.jsaf.analysis.cfg.{CFG, CFGExpr}
import kr.ac.kaist.jsaf.analysis.typing.{ControlPoint, Helper}
import kr.ac.kaist.jsaf.analysis.typing.domain.Heap
import kr.ac.kaist.jsaf.analysis.typing.domain.Context
import kr.ac.kaist.jsaf.analysis.typing.models.AbsBuiltinFunc
import kr.ac.kaist.jsaf.analysis.typing.models.AbsConstValue
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
// Modeled based on Mozilla DOM Reference
// https://developer.mozilla.org/en-US/docs/Web/API/XMLHttpRequest
object XMLHttpRequest extends DOM {
private val name = "XMLHttpRequest"
/* predefined locations */
val loc_cons = newSystemRecentLoc(name + "Cons")
val loc_proto = newSystemRecentLoc(name + "Proto")
/* constructor */
private val prop_cons: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Function")))),
("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
("@hasinstance", AbsConstValue(PropValue(Value(NullTop)))),
("@construct", AbsInternalFunc("XMLHttpRequest.constructor")),
("length", AbsConstValue(PropValue(ObjectValue(Value(AbsNumber.alpha(0)), F, F, F)))),
("prototype", AbsConstValue(PropValue(ObjectValue(Value(loc_proto), F, F, F))))
)
  /* prototype */
private val prop_proto: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
("@proto", AbsConstValue(PropValue(ObjectValue(ObjProtoLoc, F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
// Field
("UNSENT", AbsConstValue(PropValue(ObjectValue(AbsNumber.alpha(0), F, T, T)))),
("OPENED", AbsConstValue(PropValue(ObjectValue(AbsNumber.alpha(1), F, T, T)))),
("HEADERS_REICEIVED", AbsConstValue(PropValue(ObjectValue(AbsNumber.alpha(2), F, T, T)))),
("LOADING", AbsConstValue(PropValue(ObjectValue(AbsNumber.alpha(3), F, T, T)))),
("DONE", AbsConstValue(PropValue(ObjectValue(AbsNumber.alpha(4), F, T, T)))),
// API
("abort", AbsBuiltinFunc("XMLHttpRequest.abort", 0)),
("getAllResponseHeaders", AbsBuiltinFunc("XMLHttpRequest.getAllResponseHeaders", 0)),
("getResponseHeader", AbsBuiltinFunc("XMLHttpRequest.getResponseHeader", 1)),
("open", AbsBuiltinFunc("XMLHttpRequest.open", 5)),
("overrideMimeType", AbsBuiltinFunc("XMLHttpRequest.overrideMimeType", 1)),
("send", AbsBuiltinFunc("XMLHttpRequest.send", 1)),
("setRequestHeader", AbsBuiltinFunc("XMLHttpRequest.setRequestHeader", 2))
)
/* global */
private val prop_global: List[(String, AbsProperty)] = List(
(name, AbsConstValue(PropValue(ObjectValue(loc_cons, T, F, T))))
)
/* no constructor */
/* initial property list */
def getInitList(): List[(Loc, List[(String, AbsProperty)])] = List(
(loc_cons, prop_cons), (loc_proto, prop_proto), (GlobalLoc, prop_global)
)
def getSemanticMap(): Map[String, SemanticFun] = {
Map(
// constructor
("XMLHttpRequest.constructor" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
val lset_this = h(SinglePureLocalLoc)("@this")._1._2._2
// create a new XMLHttpRequest Object
val h_1 = lset_this.foldLeft(h)((_h, l) => {
val newobj = default_getInsList.foldLeft(Helper.NewObject(loc_proto))((obj, prop) =>
obj.update(prop._1, prop._2)
)
_h.update(l, newobj)
})
((Helper.ReturnStore(h_1, Value(lset_this)), ctx), (he, ctxe))
}
)),
("XMLHttpRequest.abort" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
((Helper.ReturnStore(h, Value(UndefTop)), ctx), (he, ctxe))
}
)),
("XMLHttpRequest.getAllResponseHeaders" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
((Helper.ReturnStore(h, Value(StrTop) + Value(NullTop)), ctx), (he, ctxe))
}
)),
("XMLHttpRequest.getResponseHeader" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
/* arguments */
val header = Helper.toString(Helper.toPrimitive(getArgValue(h, ctx, args, "0")))
if(header </ StrBot)
((Helper.ReturnStore(h, Value(UndefTop)), ctx), (he, ctxe))
else
((HeapBot, ContextBot), (he, ctxe))
}
)),
("XMLHttpRequest.open" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
/* arguments */
val method = Helper.toString(Helper.toPrimitive(getArgValue(h, ctx, args, "0")))
val url = Helper.toString(Helper.toPrimitive(getArgValue(h, ctx, args, "1")))
          if(method </ StrBot && url </ StrBot) {
val lset_this = h(SinglePureLocalLoc)("@this")._1._2._2
val h_1 = lset_this.foldLeft(h)((_h, l) => {
val newobj = _h(l).update(
"readyState", PropValue(ObjectValue(AbsNumber.alpha(1), T, T, T))).update(
"responseText", PropValue(ObjectValue(Value(StrTop) + Value(NullTop), F, T, T))).update(
"status", PropValue(ObjectValue(UInt, F, T, T))).update(
"statusText", PropValue(ObjectValue(StrTop, F, T, T)))
_h.update(l, newobj)
})
((Helper.ReturnStore(h_1, Value(UndefTop)), ctx), (he, ctxe))
}
else
((HeapBot, ContextBot), (he, ctxe))
}
)),
("XMLHttpRequest.overrideMimeType" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
/* arguments */
val mimeType = Helper.toString(Helper.toPrimitive(getArgValue(h, ctx, args, "0")))
          if(mimeType </ StrBot)
            ((Helper.ReturnStore(h, Value(UndefTop)), ctx), (he, ctxe))
          else
            ((HeapBot, ContextBot), (he, ctxe))
}
)),
("XMLHttpRequest.send" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
/* arguments */
val method = Helper.toString(Helper.toPrimitive(getArgValue(h, ctx, args, "0")))
val url = Helper.toString(Helper.toPrimitive(getArgValue(h, ctx, args, "1")))
          if(method </ StrBot && url </ StrBot) {
val lset_this = h(SinglePureLocalLoc)("@this")._1._2._2
val h_1 = lset_this.foldLeft(h)((_h, l) => {
val newobj = _h(l).update(
"readyState", PropValue(ObjectValue(UInt, T, T, T))).update(
"responseText", PropValue(ObjectValue(Value(StrTop) + Value(NullTop), F, T, T))).update(
"status", PropValue(ObjectValue(UInt, F, T, T))).update(
"statusText", PropValue(ObjectValue(StrTop, F, T, T)))
_h.update(l, newobj)
})
((Helper.ReturnStore(h_1, Value(UndefTop)), ctx), (he, ctxe))
}
else
((HeapBot, ContextBot), (he, ctxe))
}
)),
("XMLHttpRequest.setRequestHeader" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
/* arguments */
val header = Helper.toString(Helper.toPrimitive(getArgValue(h, ctx, args, "0")))
val value = Helper.toString(Helper.toPrimitive(getArgValue(h, ctx, args, "1")))
if(header </ StrBot && value </ StrBot) {
((Helper.ReturnStore(h, Value(UndefTop)), ctx), (he, ctxe))
}
else
((HeapBot, ContextBot), (he, ctxe))
}
))
)
}
def getPreSemanticMap(): Map[String, SemanticFun] = {
Map(
)
}
def getDefMap(): Map[String, AccessFun] = {
Map(
)
}
def getUseMap(): Map[String, AccessFun] = {
Map(
)
}
/* instance */
def getInsList(readyState: PropValue, responseText: PropValue, responseXML: PropValue, status: PropValue,
statusText: PropValue, timeout: PropValue, withCredentials: PropValue): List[(String, PropValue)] = List(
("@class", PropValue(AbsString.alpha("Object"))),
("@proto", PropValue(ObjectValue(loc_proto, F, F, F))),
("@extensible", PropValue(BoolTrue)),
("readyState", readyState),
("responseText", responseText),
("responseXML", responseXML),
("status", status),
("statusText", statusText),
("timeout", timeout),
("withCredentials", withCredentials)
)
override def default_getInsList(): List[(String, PropValue)] = {
val readyState = PropValue(ObjectValue(AbsNumber.alpha(0), T, T, T))
val responseText = PropValue(ObjectValue(AbsString.alpha(""), F, T, T))
val responseXML = PropValue(ObjectValue(NullTop, F, T, T))
val status = PropValue(ObjectValue(AbsNumber.alpha(0), T, T, T))
val statusText = PropValue(ObjectValue(AbsString.alpha(""), F, T, T))
val timeout = PropValue(ObjectValue(UInt, T, T, T))
val withCredentials = PropValue(ObjectValue(BoolFalse, T, T, T))
getInsList(readyState, responseText, responseXML, status, statusText, timeout, withCredentials)
}
}
|
daejunpark/jsaf
|
src/kr/ac/kaist/jsaf/analysis/typing/models/DOMObject/XMLHttpRequest.scala
|
Scala
|
bsd-3-clause
| 10,514
|
package bootstrap.liftweb
import net.liftweb._
import util._
import Helpers._
import common._
import http._
import sitemap._
import Loc._
import net.liftmodules.JQueryModule
import net.liftweb.http.js.jquery._
import code.rest.Downloads
/**
* A class that's instantiated early and run. It allows the application
* to modify lift's environment
*/
class Boot {
def boot {
// where to search snippet
LiftRules.addToPackages("code")
// Build SiteMap
val entries = List(
Menu.i("Home") / "index", // the simple way to declare a menu
Menu.i("Presenter Login") / "presenter"
)
// set the sitemap. Note if you don't want access control for
// each page, just comment this line out.
LiftRules.setSiteMap(SiteMap(entries:_*))
//Show the spinny image when an Ajax call starts
LiftRules.ajaxStart =
Full(() => LiftRules.jsArtifacts.show("ajax-loader").cmd)
// Make the spinny image go away when it ends
LiftRules.ajaxEnd =
Full(() => LiftRules.jsArtifacts.hide("ajax-loader").cmd)
// Force the request to be UTF-8
LiftRules.early.append(_.setCharacterEncoding("UTF-8"))
// Use HTML5 for rendering
LiftRules.htmlProperties.default.set((r: Req) =>
new Html5Properties(r.userAgent))
//Init the jQuery module, see http://liftweb.net/jquery for more information.
LiftRules.jsArtifacts = JQueryArtifacts
JQueryModule.InitParam.JQuery=JQueryModule.JQuery172
JQueryModule.init()
LiftRules.earlyResponse.append { (req: Req) =>
if(Props.mode != Props.RunModes.Development &&
req.path.partPath.headOption == Some("presenter") &&
req.header("X-Forwarded-Proto") != Full("https")) {
val uriAndQuery = req.uri + (req.request.queryString.map(s => "?"+s) openOr "")
val uri = "https://%s%s".format(req.request.serverName, uriAndQuery)
Full(PermRedirectResponse(uri, req, req.cookies: _*))
}
else Empty
}
LiftRules.statelessDispatch.append(Downloads)
LiftRules.securityRules = () => {
SecurityRules(content = Some(ContentSecurityPolicy(
styleSources = List(
ContentSourceRestriction.UnsafeInline,
ContentSourceRestriction.All
),
fontSources = List(
ContentSourceRestriction.All
),
scriptSources = List(
ContentSourceRestriction.UnsafeEval,
ContentSourceRestriction.UnsafeInline,
ContentSourceRestriction.Self
)
)))
}
}
}
|
joescii/type-prog-impress
|
src/main/scala/bootstrap/liftweb/Boot.scala
|
Scala
|
apache-2.0
| 2,529
|
/**
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.remote
import scala.annotation.tailrec
import scala.util.control.NonFatal
import akka.actor.{ VirtualPathContainer, Terminated, Deploy, Props, Nobody, LocalActorRef, InternalActorRef, Address, ActorSystemImpl, ActorRef, ActorPathExtractor, ActorPath, Actor, AddressTerminated }
import akka.event.LoggingAdapter
import akka.dispatch.sysmsg.{ DeathWatchNotification, SystemMessage, Watch }
import akka.actor.ActorRefWithCell
import akka.actor.ActorRefScope
import akka.util.Switch
import akka.actor.RootActorPath
import akka.actor.ActorSelectionMessage
import akka.actor.SelectParent
import akka.actor.SelectChildName
import akka.actor.SelectChildPattern
import akka.actor.Identify
import akka.actor.ActorIdentity
import akka.actor.EmptyLocalActorRef
import akka.event.AddressTerminatedTopic
import java.util.concurrent.ConcurrentHashMap
import akka.dispatch.sysmsg.Unwatch
/**
* INTERNAL API
*/
private[akka] sealed trait DaemonMsg
/**
* INTERNAL API
*/
@SerialVersionUID(1L)
private[akka] case class DaemonMsgCreate(props: Props, deploy: Deploy, path: String, supervisor: ActorRef) extends DaemonMsg
/**
* INTERNAL API
*
* Internal system "daemon" actor for remote internal communication.
*
* It acts as the brain of the remote that responds to system remote events (messages) and undertakes action.
*/
private[akka] class RemoteSystemDaemon(
system: ActorSystemImpl,
_path: ActorPath,
_parent: InternalActorRef,
terminator: ActorRef,
_log: LoggingAdapter,
val untrustedMode: Boolean)
extends VirtualPathContainer(system.provider, _path, _parent, _log) {
import akka.actor.SystemGuardian._
private val terminating = new Switch(false)
AddressTerminatedTopic(system).subscribe(this)
private val parent2children = new ConcurrentHashMap[ActorRef, Set[ActorRef]]
@tailrec private def addChildParentNeedsWatch(parent: ActorRef, child: ActorRef): Boolean =
parent2children.get(parent) match {
case null ⇒
if (parent2children.putIfAbsent(parent, Set(child)) == null) true
else addChildParentNeedsWatch(parent, child)
case children ⇒
if (parent2children.replace(parent, children, children + child)) false
else addChildParentNeedsWatch(parent, child)
}
@tailrec private def removeChildParentNeedsUnwatch(parent: ActorRef, child: ActorRef): Boolean = {
parent2children.get(parent) match {
case null ⇒ false // no-op
case children ⇒
val next = children - child
if (next.isEmpty) {
if (!parent2children.remove(parent, children)) removeChildParentNeedsUnwatch(parent, child)
else true
} else {
if (!parent2children.replace(parent, children, next)) removeChildParentNeedsUnwatch(parent, child)
else false
}
}
}
/**
* Find the longest matching path which we know about and return that ref
* (or ask that ref to continue searching if elements are left).
*/
override def getChild(names: Iterator[String]): InternalActorRef = {
@tailrec
def rec(s: String, n: Int): (InternalActorRef, Int) = {
import akka.actor.ActorCell._
val (childName, uid) = splitNameAndUid(s)
getChild(childName) match {
case null ⇒
val last = s.lastIndexOf('/')
if (last == -1) (Nobody, n)
else rec(s.substring(0, last), n + 1)
case ref if uid != undefinedUid && uid != ref.path.uid ⇒ (Nobody, n)
case ref ⇒ (ref, n)
}
}
val full = Vector() ++ names
rec(full.mkString("/"), 0) match {
case (Nobody, _) ⇒ Nobody
case (ref, 0) ⇒ ref
case (ref, n) ⇒ ref.getChild(full.takeRight(n).iterator)
}
}
override def sendSystemMessage(message: SystemMessage): Unit = message match {
case DeathWatchNotification(child: ActorRefWithCell with ActorRefScope, _, _) if child.isLocal ⇒
terminating.locked {
removeChild(child.path.elements.drop(1).mkString("/"), child)
val parent = child.getParent
if (removeChildParentNeedsUnwatch(parent, child)) parent.sendSystemMessage(Unwatch(parent, this))
terminationHookDoneWhenNoChildren()
}
case DeathWatchNotification(parent: ActorRef with ActorRefScope, _, _) if !parent.isLocal ⇒
terminating.locked {
parent2children.remove(parent) match {
case null ⇒
case children ⇒
for (c ← children) {
system.stop(c)
removeChild(c.path.elements.drop(1).mkString("/"), c)
}
terminationHookDoneWhenNoChildren()
}
}
case _ ⇒ super.sendSystemMessage(message)
}
override def !(msg: Any)(implicit sender: ActorRef = Actor.noSender): Unit = try msg match {
case message: DaemonMsg ⇒
log.debug("Received command [{}] to RemoteSystemDaemon on [{}]", message, path.address)
message match {
case DaemonMsgCreate(_, _, path, _) if untrustedMode ⇒ log.debug("does not accept deployments (untrusted) for [{}]", path)
case DaemonMsgCreate(props, deploy, path, supervisor) ⇒
path match {
case ActorPathExtractor(address, elems) if elems.nonEmpty && elems.head == "remote" ⇒
// TODO RK currently the extracted “address” is just ignored, is that okay?
// TODO RK canonicalize path so as not to duplicate it always #1446
val subpath = elems.drop(1)
val p = this.path / subpath
val childName = {
val s = subpath.mkString("/")
val i = s.indexOf('#')
if (i < 0) s
else s.substring(0, i)
}
val isTerminating = !terminating.whileOff {
val parent = supervisor.asInstanceOf[InternalActorRef]
val actor = system.provider.actorOf(system, props, parent,
p, systemService = false, Some(deploy), lookupDeploy = true, async = false)
addChild(childName, actor)
actor.sendSystemMessage(Watch(actor, this))
actor.start()
if (addChildParentNeedsWatch(parent, actor)) parent.sendSystemMessage(Watch(parent, this))
}
if (isTerminating) log.error("Skipping [{}] to RemoteSystemDaemon on [{}] while terminating", message, p.address)
case _ ⇒
log.debug("remote path does not match path from message [{}]", message)
}
}
case sel: ActorSelectionMessage ⇒
val (concatenatedChildNames, m) = {
val iter = sel.elements.iterator
// find child elements, and the message to send, which is a remaining ActorSelectionMessage
          // in case of SelectChildPattern, otherwise the actual message of the selection
@tailrec def rec(acc: List[String]): (List[String], Any) =
if (iter.isEmpty)
(acc.reverse, sel.msg)
else {
iter.next() match {
case SelectChildName(name) ⇒ rec(name :: acc)
case SelectParent if acc.isEmpty ⇒ rec(acc)
case SelectParent ⇒ rec(acc.tail)
case pat: SelectChildPattern ⇒ (acc.reverse, sel.copy(elements = pat +: iter.toVector))
}
}
rec(Nil)
}
getChild(concatenatedChildNames.iterator) match {
case Nobody ⇒
val emptyRef = new EmptyLocalActorRef(system.provider, path / sel.elements.map(_.toString),
system.eventStream)
emptyRef.tell(sel, sender)
case child ⇒
child.tell(m, sender)
}
case Identify(messageId) ⇒ sender ! ActorIdentity(messageId, Some(this))
case TerminationHook ⇒
terminating.switchOn {
terminationHookDoneWhenNoChildren()
foreachChild { system.stop }
}
case AddressTerminated(address) ⇒
foreachChild {
case a: InternalActorRef if a.getParent.path.address == address ⇒ system.stop(a)
case _ ⇒ // skip, this child doesn't belong to the terminated address
}
case unknown ⇒ log.warning("Unknown message [{}] received by [{}]", unknown, this)
} catch {
case NonFatal(e) ⇒ log.error(e, "exception while processing remote command [{}] from [{}]", msg, sender)
}
def terminationHookDoneWhenNoChildren(): Unit = terminating.whileOn {
if (!hasChildren) terminator.tell(TerminationHookDone, this)
}
}
|
Fincore/org.spark-project.akka
|
remote/src/main/scala/akka/remote/RemoteDaemon.scala
|
Scala
|
mit
| 8,587
|
/* Code Pulse: a real-time code coverage tool, for more information, see <http://code-pulse.com/>
*
* Copyright (C) 2014-2017 Code Dx, Inc. <https://codedx.com/>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.codedx.codepulse.hq.trace
import scala.collection.mutable.ArrayBuffer
sealed trait TracePlayer
sealed trait StopMethod
object StopMethod {
/** A graceful stop has been requested - e.g., finish doing whatever you're doing/keep all data */
case object GracefulStop extends StopMethod
/** An immediate halt has been requested - e.g., immediately cease processing/throw away data */
case object ImmediateHalt extends StopMethod
}
/** Trait for trace players who implement stop control methods */
trait StopControl extends TracePlayer {
def stop(how: StopMethod)
}
/** Trait for trace players who require cleanup at trace end */
trait Cleanup extends TracePlayer {
def cleanup()
}
/** TracePlayerManager manages the lifetime of the moving parts contained in a trace. It provides
* tracking of components and forwarding lifetime events to supporting players.
*
* @author robertf
*/
class TracePlayerManager {
private val players = ArrayBuffer[TracePlayer]()
def +=(p: TracePlayer) = players += p
def ++=(p: TraversableOnce[TracePlayer]) = players ++= p
def -=(p: TracePlayer) = players -= p
def --=(p: TraversableOnce[TracePlayer]) = players --= p
def stop(how: StopMethod) {
for (player <- players) player match {
case p: StopControl => p.stop(how)
case _ =>
}
}
def cleanup() {
for (player <- players) player match {
      case p: Cleanup => p.cleanup()
case _ =>
}
}
}
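// A minimal, hypothetical usage sketch (not taken from the Code Pulse sources)
// showing how TracePlayerManager forwards lifetime events only to players that
// opt into StopControl and/or Cleanup. LoggingPlayer is an assumed example
// player defined purely for illustration.
object TracePlayerManagerSketch {
  class LoggingPlayer extends StopControl with Cleanup {
    def stop(how: StopMethod): Unit = println(s"stop requested: $how")
    def cleanup(): Unit = println("cleaning up")
  }

  def main(args: Array[String]): Unit = {
    val manager = new TracePlayerManager
    manager += new LoggingPlayer
    manager.stop(StopMethod.GracefulStop) // forwarded only to StopControl players
    manager.cleanup()                     // forwarded only to Cleanup players
  }
}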
|
secdec/codepulse
|
hq/src/main/scala/com/secdec/bytefrog/hq/trace/TracePlayerManager.scala
|
Scala
|
apache-2.0
| 2,151
|
trait Foo[In] { type Out }
object Test {
implicit def fooInt: Foo[Int] { type Out = String } = ???
implicit def str: String = ???
def test1[A](f1: Foo[A])(implicit f2: f1.Out) = ???
def test2[A](implicit f1: Foo[A], f2: f1.Out) = ???
test1(fooInt) // OK
test2 // OK
}
object Test2 {
implicit def fooInt: Foo[Int] { type Out = String } = ???
implicit def fooString: Foo[String] { type Out = Boolean } = ???
implicit def fooBoolean: Foo[Boolean] { type Out = Double } = ???
def test3[A](f1: Foo[A], f2: Foo[f1.Out])(implicit f3: Foo[f2.Out]): f3.Out = ???
def test4[A](implicit f1: Foo[A], f2: Foo[f1.Out], f3: Foo[f2.Out]): f3.Out = ???
val t3 = test3(fooInt, fooString)
t3: Double
val t4 = test4[Int]
t4: Double
}
object Test3 {
def fooInt: Foo[Int] { type Out = String } = ???
implicit def istr: String = ???
implicit def iint: Int = ???
def test5[A](implicit f1: Foo[A] = fooInt, f2: f1.Out) = f2
val t5 = test5
// used to succeed with just one local implicit `istr`
// but failed if a competing implicit `iint` was added.
t5: String
}
object Test4 {
implicit def fooInt: Foo[Int] { type Out = String } = ???
def str: String = ???
def test6[A](implicit f1: Foo[A], f2: f1.Out = str) = f2
val t6 = test6
t6: String
}
|
dotty-staging/dotty
|
tests/pos/i5427.scala
|
Scala
|
apache-2.0
| 1,298
|
import sbt._
import Keys._
object Dependencies {
object Version {
val logback = "1.1.8"
val akka = "2.4.14"
val gremlin = "2.6.0"
val orientDb = "2.2.13"
val scalaTest = "3.0.1"
}
val logbackClassic = "ch.qos.logback" % "logback-classic" % Version.logback
val scalaTest = "org.scalatest" %% "scalatest" % Version.scalaTest
val scalactic = "org.scalactic" %% "scalactic" % "3.0.1"
val akkaActor = "com.typesafe.akka" %% "akka-actor" % Version.akka
val akkaRemote = "com.typesafe.akka" %% "akka-remote" % Version.akka
val akkaLog = "com.typesafe.akka" %% "akka-slf4j" % Version.akka
val akkaTestkit = "com.typesafe.akka" %% "akka-testkit" % Version.akka
val orientDbServer = "com.orientechnologies" % "orientdb-server" % Version.orientDb
val orientDbClient = "com.orientechnologies" % "orientdb-client" % Version.orientDb
val orientDbCore = "com.orientechnologies" % "orientdb-core" % Version.orientDb
val orientDbTools = "com.orientechnologies" % "orientdb-tools" % Version.orientDb
val orientDbGraphdb = "com.orientechnologies" % "orientdb-graphdb" % Version.orientDb withJavadoc
val blueprintsCore = "com.tinkerpop.blueprints" % "blueprints-core" % Version.gremlin
val orientDbDistributed = "com.orientechnologies" % "orientdb-distributed" % Version.orientDb withJavadoc
val orientDbEnterprise = "com.orientechnologies" % "orientdb-enterprise" % "2.2.0-beta" withJavadoc //is this still needed?
val gremlinGroovy = "com.tinkerpop.gremlin" % "gremlin-groovy" % Version.gremlin
val gremlinJava = "com.tinkerpop.gremlin" % "gremlin-java" % Version.gremlin
//experimental mpollmeier gremlin-scala + orientdb-gremlin (the latter now supported by the Orient team)
//val gremlinScala = "com.michaelpollmeier" %% "gremlin-scala" % "3.2.3.3"
//val orientDbGremlin = "com.michaelpollmeier" % "orientdb-gremlin" % "3.2.3.0"
val dependencies = Seq(gremlinGroovy, gremlinJava, blueprintsCore, orientDbServer, orientDbClient, orientDbCore, orientDbTools, orientDbGraphdb, orientDbDistributed) ++ Seq(scalactic, akkaActor, akkaLog, akkaRemote)
val testDependencies = Seq(akkaTestkit % Test, scalaTest % Test, logbackClassic % Test)
}
|
jurajzachar/orientdb-embedded
|
project/Dependencies.scala
|
Scala
|
mit
| 2,194
|
// Author: Olivier Chafik (http://ochafik.com)
package scalaxy.privacy
import scala.tools.nsc.Global
import scala.tools.nsc.plugins.Plugin
import scala.tools.nsc.plugins.PluginComponent
object PrivacyPlugin {
def getInternalPhases(global: Global): List[PluginComponent] =
List(
new PrivacyComponent(global),
new ExplicitTypeAnnotationsComponent(global, runAfter = PrivacyComponent.phaseName))
}
class PrivacyPlugin(override val global: Global) extends Plugin {
override val name = "scalaxy-privacy"
override val description = "Compiler plugin that makes all vals and defs private by default (unless @scalaxy.public is used)."
override val components = PrivacyPlugin.getInternalPhases(global)
}
|
nativelibs4java/Scalaxy
|
Privacy/Plugin/src/main/scala/scalaxy/privacy/PrivacyPlugin.scala
|
Scala
|
bsd-3-clause
| 724
|
package models.manager.commands
import models.Item
import models.manager.commands.traits.CanStartPlayer
/**
* Created by rbrowning on 4/26/17.
*
  * Sent from the endpoints to the manager, this tells the manager to play the given items.
  *
  * If a playlist already exists, this stops the current player, clears the playlist, and
  * starts playing the new items as a new playlist.
*/
case class PlayAllItems(items: Seq[Item]) extends CanStartPlayer
|
rebrowning/entertainment-cluster-member
|
src/main/scala/models/manager/commands/PlayAllItems.scala
|
Scala
|
apache-2.0
| 460
|
package com.faacets
package ext
import scala.math._
import alg._
import spire.std.int._
import spire.math.{lcm, Rational}
/** Extensions of correlation inequalities with binary outputs, from
* Wu et al. arXiv:1302.6698 Compact Bell inequalities for multipartite experiments.
*/
object Wu2013 {
import alg.immutable.QVector
object Example1 {
val s = Scenario(Vector(Vector(2,2,2),Vector(2,2,2)))
val b1 = Bra(s, NCRepr, QVector(-2,0,0,0, 0,0,1,-1, 0,0,1,1, 0,0,0,0)) // <= 0
val b2 = Bra(s, NCRepr, QVector(-2,0,0,0, 0,0,0,0, 0,-1,0,1, 0,-1,0,-1)) // <= 0
val b3 = Bra(s, NCRepr, QVector(-2,0,0,0, 0,0,0,0, 0,1,1,0, 0,1,-1,0)) // <= 0
def i = extend(List(b1,b2,b3))
}
object Example2 {
val s = Scenario(Vector(Vector(2,2,2,2)))
val b1 = Bra(s, NCRepr, QVector(-3, 0, 2, 1, 0)) // <= 0
val b2 = Bra(s, NCRepr, QVector(-3, -1, 1, 0, -1)) // <= 0
val b3 = Bra(s, NCRepr, QVector(-3, -1, 1, 0, 1)) // <= 0
val b4 = Bra(s, NCRepr, QVector(-3, 0, 1, 2, 0)) // <= 0
def i = extend(List(b1,b2,b3,b4))
}
def extend(Bgen: List[Bra]) = {
// TODO: use rational arithmetic fully, and be closer to original paper
val s = Bgen.head.scenario
assert(Bgen.forall(_.scenario == s))
assert(s.parties.forall(party => party.inputs.forall(_ == 2)))
val Bnsc = Bgen.map(_.as(NCRepr))
val l = (1 /: Bnsc.map(b => abs(b.coeffs(0).toInt)))(lcm[Int])
val B = Bnsc.map(b => Bra(s, NCRepr, {
val ct = b.coeffs(0).toInt
b.coeffs.mapElements((rat: Rational) => -rat.toInt*(l/ct)*signum(ct))
}))
val n = B.size
val d = B.head.coeffs.length
val coeff = alg.mutable.QVector.zeros(d*(n+1))
for (l <- 1 to n) {
val b = B(l-1).coeffs
val c = alg.mutable.QVector.zeros(n+1)
if (l == 1) {
c(1) = -(n-3)
for (k <- 2 to n)
c(k) = 1
} else {
c(1) = 1
c(l) = -1
}
for (i <- 1 until d; j <- 1 to n)
coeff(i + d*j) += b(i)*c(j)
}
coeff(0) = -2*l
Bra(Scenario(s.parties ++ Vector(Party(Vector.fill[Int](n)(2)))), NCRepr, coeff.toImmutable) // <= 0
}
}
|
denisrosset/faacets-families
|
src/main/scala/ext/Wu2013.scala
|
Scala
|
bsd-3-clause
| 2,139
|
// Copyright 2014 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package au.com.cba.omnia.ebenezer.example
import scalaz.Scalaz._
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hive.conf.HiveConf
import com.twitter.scalding.{Execution, ExecutionApp}
import com.twitter.scalding.typed.IterablePipe
import au.com.cba.omnia.beeswax.Hive
import au.com.cba.omnia.ebenezer.ParquetLogging
import au.com.cba.omnia.ebenezer.scrooge.PartitionParquetScroogeSink
object HiveExampleStep1 extends ExecutionApp with ParquetLogging {
val data = List(
Customer("CUSTOMER-A", "Fred", "Bedrock", 40),
Customer("CUSTOMER-2", "Wilma", "Bedrock", 40),
Customer("CUSTOMER-3", "Barney", "Bedrock", 39),
Customer("CUSTOMER-4", "BamBam", "Bedrock", 2)
)
def job = for {
args <- Execution.getConfig.map(_.getArgs)
_ <- execute(args("db"), args("table"), args.optional("location"))
} yield ()
def execute(db: String, table: String, location: Option[String] = None): Execution[Unit] = {
val conf = new HiveConf
Execution.from({
for {
_ <- Hive.createParquetTable[Customer](db, table, List("pid" -> "string"), location.map(new Path(_)))
path <- Hive.getPath(db, table)
} yield path
}.run(conf).toOption.get).flatMap { p =>
IterablePipe(data).map(c => c.id -> c)
.writeExecution(PartitionParquetScroogeSink[String, Customer]("pid=%s", p.toString))
}.flatMap(_ => Execution.from(Hive.repair(db, table).run(conf))).map(_ => ())
}
}
|
CommBank/ebenezer
|
example/src/main/scala/au/com/cba/omnia/ebenezer/example/HiveExampleStep1.scala
|
Scala
|
apache-2.0
| 2,081
|
package com.example.http4s
package jetty
import cats.effect._
import com.codahale.metrics.MetricRegistry
import fs2._
import fs2.StreamApp.ExitCode
import org.http4s.dsl.Http4sDsl
import org.http4s.server.HttpMiddleware
import org.http4s.server.jetty.JettyBuilder
import org.http4s.server.metrics._
object JettyExample extends JettyExampleApp[IO]
class JettyExampleApp[F[_]: Effect] extends StreamApp[F] with Http4sDsl[F] {
val metricsRegistry: MetricRegistry = new MetricRegistry
val metrics: HttpMiddleware[F] = Metrics[F](metricsRegistry)
def stream(args: List[String], requestShutdown: F[Unit]): Stream[F, ExitCode] =
Scheduler(corePoolSize = 2).flatMap { implicit scheduler =>
JettyBuilder[F]
.bindHttp(8080)
.mountService(metrics(new ExampleService[F].service), "/http4s")
.mountService(metricsService(metricsRegistry), "/metrics")
.mountFilter(NoneShallPass, "/http4s/science/black-knight/*")
.serve
}
}
|
reactormonk/http4s
|
examples/jetty/src/main/scala/com/example/http4s/jetty/JettyExample.scala
|
Scala
|
apache-2.0
| 975
|
package mesosphere.marathon.api.v2
import javax.inject.Inject
import javax.ws.rs._
import javax.ws.rs.core.Response.Status._
import javax.ws.rs.core.{ MediaType, Response }
import mesosphere.marathon.api.v2.json.Formats._
import mesosphere.marathon.api.{ MarathonMediaType, RestResource }
import mesosphere.marathon.state.GroupManager
import mesosphere.marathon.upgrade.DeploymentManager.DeploymentStepInfo
import mesosphere.marathon.upgrade.{ DeploymentAction, DeploymentPlan }
import mesosphere.marathon.{ MarathonConf, MarathonSchedulerService }
import mesosphere.util.Logging
import play.api.libs.json.{ Json, JsObject, JsValue }
@Path("v2/deployments")
@Consumes(Array(MediaType.APPLICATION_JSON))
@Produces(Array(MarathonMediaType.PREFERRED_APPLICATION_JSON))
class DeploymentsResource @Inject() (
service: MarathonSchedulerService,
groupManager: GroupManager,
val config: MarathonConf)
extends RestResource
with Logging {
@GET
def running(): Response = ok(jsonString(result(service.listRunningDeployments()).map { currentStep =>
toInfo(currentStep.plan, currentStep)
}))
@DELETE
@Path("{id}")
def cancel(
@PathParam("id") id: String,
@DefaultValue("false")@QueryParam("force") force: Boolean): Response =
result(service.listRunningDeployments())
.map(_.plan)
.find(_.id == id)
.fold(notFound(s"DeploymentPlan $id does not exist")) {
case plan: DeploymentPlan if force =>
// do not create a new deployment to return to the previous state
log.info(s"Canceling deployment [$id]")
service.cancelDeployment(id)
status(ACCEPTED) // 202: Accepted
case plan: DeploymentPlan =>
// create a new deployment to return to the previous state
deploymentResult(result(groupManager.update(
plan.original.id,
plan.revert,
force = true
)))
}
private def toInfo(
deployment: DeploymentPlan,
currentStepInfo: DeploymentStepInfo): JsObject = {
val steps = deployment.steps.map(step => step.actions.map(actionToMap)).map(Json.toJson(_))
Json.obj(
"id" -> deployment.id,
"version" -> deployment.version,
"affectedApps" -> deployment.affectedApplicationIds.map(_.toString),
"steps" -> steps,
"currentActions" -> currentStepInfo.step.actions.map(actionToMap),
"currentStep" -> currentStepInfo.nr,
"totalSteps" -> deployment.steps.size
)
}
def actionToMap(action: DeploymentAction): Map[String, String] =
Map(
"action" -> action.getClass.getSimpleName,
"app" -> action.app.id.toString
)
}
|
EasonYi/marathon
|
src/main/scala/mesosphere/marathon/api/v2/DeploymentsResource.scala
|
Scala
|
apache-2.0
| 2,653
|
package controllers
import controllers.EmployeeController._
import model.Product
import play.api.libs.json.Json
import play.api.mvc.{Action, Controller}
import play.api.{Logger, Play}
import scala.reflect.io.File
/**
* Created by oguzhan on 8/20/14.
*/
object ProductController extends Controller {
def list() = Action { request =>
// Ok(Json.toJson(Product.all))
val host = request.headers.get("host").get
val productAsMap = Map("products" -> Product.all.map { product =>
val host = Play.current.configuration.getString("server.host").getOrElse("localhost")
val fullUrl = s"http://${host}:9000/api/products/${product.id}/photo"
product.copy(photo = fullUrl)
})
val productAsJson = Json.toJson(productAsMap)
Ok(productAsJson)
}
def getProduct(id: Int) = Action { request =>
val host = Play.current.configuration.getString("server.host").getOrElse("localhost")
val product = Product.get(id).map { product =>
val fullUrl = s"http://${host}:9000/api/products/${product.id}/photo"
product.copy(photo = fullUrl)
}
val productJson = Json.toJson(product)
Ok(productJson)
}
def getProductPhoto(id: Int) = Action {
Product.get(id).map { product =>
val imageBytes = File(product.photo).toByteArray()
Ok(imageBytes).withHeaders(
"content-type" -> "image/jpeg",
"cache-control" -> s"public, max-age=${7 * 24 * 60 * 60}"
)
}.getOrElse(NotFound)
}
def popular() = popularLimit("10")
/**
* Take most popular n Products
* @param limit how many product do we want
* @return
*/
def popularLimit(limit: String) = Action {
val products = Product.takeMostPopular(limit.toInt)
val productsJson = Json.toJson(products)
Logger.debug(Json.prettyPrint(productsJson))
Ok(productsJson)
}
}
|
moguzhanataman/fikrimuhal-staj
|
server/fikrimuhal-json-api/app/controllers/ProductController.scala
|
Scala
|
mit
| 1,842
|
package ru.tinkoff.aerospikeexamples.example
import ru.tinkoff.aerospikemacro.printer.Printer
import ru.tinkoff.aerospikescala.domain.{ByteSegment, SingleBin}
import shapeless._
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.Await
import scala.concurrent.duration.Duration.Inf
import scala.language.experimental.macros
/**
* @author MarinaSigaeva
* @since 04.04.17
*/
object SampleCollectionsApp extends App {
import scala.concurrent.ExecutionContext.Implicits.global
val spike = AClient.spikeImpl
val myObj = SampleScheme(spike)
myObj.putListSt("listStBinKey", SingleBin("listStringBin", List("a", "b")))
myObj.putListInt("listIntKey", SingleBin("listIntBin", List(1, 2, 3, 4)))
myObj.putListLong("listLongKey", SingleBin("listLongBin", List(1000L, 2000L, 3000L, 4000L)))
myObj.putListFloat("listFloatKey", SingleBin("listFloatBin", List(1.12F, 2.13F, 3.5F, 4.5F)))
myObj.putListDouble("listDoubleKey", SingleBin("listDoubleBin", List(12.11, 12.13, 23.5, 46.5)))
myObj.putListBoolean("listBoolKey", SingleBin("listBoolBin", List(true, false, false, true)))
myObj.putArrayString("arrayStKey", SingleBin("arrayStBin", Array("abcd", "efgh", "ijkl")))
myObj.putArrayInt("arrayIntKey", SingleBin("arrayInt", Array(3, 6, 8)))
myObj.putArrayLong("arrayLongKey", SingleBin("arrayLong", Array(1L, 56L, 98L)))
myObj.putArrayFloat("arrayFloatKey", SingleBin("arrayFloat", Array(1.12F, 2.13F, 3.5F)))
myObj.putArrayDouble("arrayDoubleKey", SingleBin("arrayDouble", Array(12.13, 23.5, 46.5)))
myObj.putArrayByte("arrayByteKey", SingleBin("arrayByteBin", Array(Byte.MinValue, Byte.MaxValue, Byte.MinValue)))
myObj.putArrayBoolean("arrayBoolKey", SingleBin("arrayBoolBin", Array(true, false, true)))
myObj.putSeqArrayBuffer("seqArrBuff", SingleBin("ww", Seq(ArrayBuffer(1.2, 3.1, 5.6))))
// myObj.putByteSegment("byteSegmKey", SingleBin("byteSegment", ByteSegment(Array(Byte.MinValue, Byte.MaxValue), 12, 33)))
val listStrs = Await.result(myObj.getListSt("listStBinKey"), Inf)
Printer.printNameValue(listStrs)
val listInt = Await.result(myObj.getListInt("listIntKey"), Inf)
Printer.printNameValue(listInt)
val listLong = Await.result(myObj.getListLong("listLongKey"), Inf)
Printer.printNameValue(listLong)
val listFloat = Await.result(myObj.getListFloat("listFloatKey"), Inf)
Printer.printNameValue(listFloat)
val listDouble = Await.result(myObj.getListDouble("listDoubleKey"), Inf)
Printer.printNameValue(listDouble)
val listBoolean = Await.result(myObj.getListBoolean("listBoolKey"), Inf)
Printer.printNameValue(listBoolean)
val arrayString = Await.result(myObj.getArrayString("arrayStKey"), Inf)
Printer.printNameValue(arrayString)
val arrayInt = Await.result(myObj.getArrayInt("arrayIntKey"), Inf)
Printer.printNameValue(arrayInt)
val arrayLong = Await.result(myObj.getArrayLong("arrayLongKey"), Inf)
Printer.printNameValue(arrayLong)
val arrayFloat = Await.result(myObj.getArrayFloat("arrayFloatKey"), Inf)
Printer.printNameValue(arrayFloat)
val arrayDouble = Await.result(myObj.getArrayDouble("arrayDoubleKey"), Inf)
Printer.printNameValue(arrayDouble)
val arrayBoolean = Await.result(myObj.getArrayBoolean("arrayBoolKey"), Inf)
Printer.printNameValue(arrayBoolean)
val arrayByteBin = Await.result(myObj.getArrayByte("arrayByteKey"), Inf)
Printer.printNameValue(arrayByteBin)
/* val seqArrBuff = Await.result(myObj.getSeqArrayBuffer("seqArrBuff"), Inf)
Printer.printNameValue(seqArrBuff)*/
}
|
TinkoffCreditSystems/aerospike-scala
|
aerospike-scala-example/src/main/scala/ru/tinkoff/aerospikeexamples/example/SampleCollectionsApp.scala
|
Scala
|
apache-2.0
| 3,525
|
package spinoco.fs2.http.websocket
import java.nio.channels.AsynchronousChannelGroup
import java.util.concurrent.Executors
import cats.Applicative
import javax.net.ssl.SSLContext
import cats.effect.{Concurrent, ConcurrentEffect, ContextShift, Timer}
import fs2.Chunk.ByteVectorChunk
import fs2._
import fs2.concurrent.Queue
import scodec.Attempt.{Failure, Successful}
import scodec.bits.ByteVector
import scodec.{Codec, Decoder, Encoder}
import spinoco.fs2.http.HttpResponse
import spinoco.protocol.http.codec.{HttpRequestHeaderCodec, HttpResponseHeaderCodec}
import spinoco.protocol.http.header._
import spinoco.protocol.http._
import spinoco.protocol.http.header.value.ProductDescription
import spinoco.protocol.mime.{ContentType, MIMECharset, MediaType}
import spinoco.protocol.websocket.{OpCode, WebSocketFrame}
import spinoco.protocol.websocket.codec.WebSocketFrameCodec
import spinoco.fs2.http.util.chunk2ByteVector
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.util.Random
object WebSocket {
/**
* Creates a websocket to be used on server side.
*
* Implementation is according to RFC-6455 (https://tools.ietf.org/html/rfc6455).
*
* @param pipe A websocket pipe. `I` is received from the client and `O` is sent to client.
* Decoder (for I) and Encoder (for O) must be supplied.
* @param pingInterval An interval for the Ping / Pong protocol.
    * @param handshakeTimeout A timeout to await a successful handshake. If the handshake is not completed
    *                         within the supplied period, the connection is terminated.
    * @param maxFrameSize     Maximum size of a single websocket frame. If the binary size of a single frame is larger than
    *                         the supplied value, the websocket will fail.
* @tparam F
* @return
*/
def server[F[_] : Concurrent : Timer, I : Decoder, O : Encoder](
pipe: Pipe[F, Frame[I], Frame[O]]
, pingInterval: Duration = 30.seconds
, handshakeTimeout: FiniteDuration = 10.seconds
, maxFrameSize: Int = 1024*1024
)(header: HttpRequestHeader, input:Stream[F,Byte]): Stream[F,HttpResponse[F]] = {
Stream.emit(
impl.verifyHeaderRequest[F](header).right.map { key =>
val respHeader = impl.computeHandshakeResponse(header, key)
HttpResponse(respHeader, input through impl.webSocketOf(pipe, pingInterval, maxFrameSize, client2Server = false))
}.merge
)
}
/**
* Establishes websocket connection to the server.
*
* Implementation is according to RFC-6455 (https://tools.ietf.org/html/rfc6455).
*
    * If this is established successfully, then this consults `pipe` to receive/send any frames
    * from/to the server. Once the connection finishes, this emits None once.
    *
    * If the connection was not established correctly (e.g. an authorization failure) this will not
    * consult the supplied pipe and will instead immediately emit the response received from the server.
*
* @param request WebSocket request
* @param pipe Pipe that is consulted when websocket is established correctly
* @param maxHeaderSize Max size of Http Response header received
* @param receiveBufferSize Size of receive buffer to use
    * @param maxFrameSize      Maximum size of a single websocket frame. If the binary size of a single frame is larger than
    *                          the supplied value, the websocket will fail.
* @param requestCodec Codec to encode HttpRequests Header
* @param responseCodec Codec to decode HttpResponse Header
*
*/
def client[F[_] : ConcurrentEffect : ContextShift : Timer, I : Decoder, O : Encoder](
request: WebSocketRequest
, pipe: Pipe[F, Frame[I], Frame[O]]
, maxHeaderSize: Int = 4096
, receiveBufferSize: Int = 256 * 1024
, maxFrameSize: Int = 1024*1024
, requestCodec: Codec[HttpRequestHeader] = HttpRequestHeaderCodec.defaultCodec
, responseCodec: Codec[HttpResponseHeader] = HttpResponseHeaderCodec.defaultCodec
, sslES: => ExecutionContext = ExecutionContext.fromExecutorService(Executors.newCachedThreadPool(spinoco.fs2.http.util.mkThreadFactory("fs2-http-ssl", daemon = true)))
, sslContext: => SSLContext = { val ctx = SSLContext.getInstance("TLS"); ctx.init(null,null,null); ctx }
)(implicit AG: AsynchronousChannelGroup): Stream[F, Option[HttpResponseHeader]] = {
import spinoco.fs2.http.internal._
import Stream._
eval(addressForRequest[F](if (request.secure) HttpScheme.WSS else HttpScheme.WS, request.hostPort)).flatMap { address =>
Stream.resource(io.tcp.client[F](address, receiveBufferSize = receiveBufferSize))
.evalMap { socket => if (request.secure) clientLiftToSecure(sslES, sslContext)(socket, request.hostPort) else Applicative[F].pure(socket) }
.flatMap { socket =>
val (header, fingerprint) = impl.createRequestHeaders(request.header)
requestCodec.encode(header) match {
case Failure(err) => Stream.raiseError(new Throwable(s"Failed to encode websocket request: $err"))
case Successful(headerBits) =>
            eval(socket.write(ByteVectorChunk(headerBits.bytes ++ `\r\n\r\n`))).flatMap { _ =>
socket.reads(receiveBufferSize) through httpHeaderAndBody(maxHeaderSize) flatMap { case (respHeaderBytes, body) =>
responseCodec.decodeValue(respHeaderBytes.bits) match {
case Failure(err) => raiseError(new Throwable(s"Failed to decode websocket response: $err"))
case Successful(responseHeader) =>
impl.validateResponse[F](header, responseHeader, fingerprint).flatMap {
case Some(resp) => emit(Some(resp))
case None => (body through impl.webSocketOf(pipe, Duration.Undefined, maxFrameSize, client2Server = true) through socket.writes(None)).drain ++ emit(None)
}
}
}
}
}
}}
}
object impl {
private sealed trait PingPong
private object PingPong {
object Ping extends PingPong
object Pong extends PingPong
}
/**
* Verifies validity of WebSocket header request (on server) and extracts WebSocket key
*/
def verifyHeaderRequest[F[_]](header: HttpRequestHeader): Either[HttpResponse[F], ByteVector] = {
def badRequest(s:String) = HttpResponse[F](
header = HttpResponseHeader(
status = HttpStatusCode.BadRequest
, reason = HttpStatusCode.BadRequest.label
, headers = List(
`Content-Type`(ContentType.TextContent(MediaType.`text/plain`, Some(MIMECharset.`UTF-8`)))
)
)
, body = Stream.chunk(ByteVectorChunk(ByteVector.view(s.getBytes)))
)
def version: Either[HttpResponse[F], Int] = header.headers.collectFirst {
case `Sec-WebSocket-Version`(13) => Right(13)
case `Sec-WebSocket-Version`(other) => Left(badRequest(s"Unsupported websocket version: $other"))
}.getOrElse(Left(badRequest("Missing Sec-WebSocket-Version header")))
def host: Either[HttpResponse[F], Unit] = header.headers.collectFirst {
case Host(_) => Right(())
}.getOrElse(Left(badRequest("Missing header `Host: hostname`")))
def upgrade: Either[HttpResponse[F], Unit] = header.headers.collectFirst {
case Upgrade(pds) if pds.exists { pd => pd.name.equalsIgnoreCase("websocket") && pd.comment.isEmpty } => Right(())
}.getOrElse(Left(badRequest("Missing header `Upgrade: websocket`")))
def connection: Either[HttpResponse[F], Unit] = header.headers.collectFirst {
case Connection(s) if s.exists(_.equalsIgnoreCase("Upgrade")) => Right(())
}.getOrElse(Left(badRequest("Missing header `Connection: upgrade`")))
def webSocketKey: Either[HttpResponse[F], ByteVector] = header.headers.collectFirst {
case `Sec-WebSocket-Key`(key) => Right(key)
}.getOrElse(Left(badRequest("Missing Sec-WebSocket-Key header")))
for {
_ <- version.right
_ <- host.right
_ <- upgrade.right
_ <- connection.right
key <- webSocketKey.right
} yield key
}
/** creates the handshake response to complete websocket handshake on server side **/
def computeHandshakeResponse(header: HttpRequestHeader, key: ByteVector): HttpResponseHeader = {
val fingerprint = computeFingerPrint(key)
val headers = header.headers.collect {
case h: `Sec-WebSocket-Protocol` => h
}
HttpResponseHeader(
status = HttpStatusCode.SwitchingProtocols
, reason = HttpStatusCode.SwitchingProtocols.label
, headers = List(
Upgrade(List(ProductDescription("websocket", None)))
, Connection(List("Upgrade"))
, `Sec-WebSocket-Accept`(fingerprint)
) ++ headers
)
}
/**
* Creates websocket of supplied pipe
*
      * @param pingInterval     If finite, defines the interval at which keep-alive pings are sent to the client.
      *                         If the client doesn't respond with a pong within 3x this interval, the websocket will be terminated
      *                         by the server.
* @param client2Server When true, this represent client -> server direction, when false this represents reverse direction
*/
def webSocketOf[F[_] : Concurrent : Timer, I : Decoder, O : Encoder](
pipe: Pipe[F, Frame[I], Frame[O]]
, pingInterval: Duration
, maxFrameSize: Int
, client2Server: Boolean
):Pipe[F, Byte, Byte] = { source: Stream[F, Byte] => Stream.suspend {
Stream.eval(Queue.unbounded[F, PingPong]).flatMap { pingPongQ =>
val metronome: Stream[F, Unit] = pingInterval match {
case fin: FiniteDuration => Stream.awakeEvery[F](fin).map { _ => () }
case inf => Stream.empty
}
val control = controlStream[F](pingPongQ.dequeue, metronome, maxUnanswered = 3, flag = client2Server)
source
.through(decodeWebSocketFrame[F](maxFrameSize, client2Server))
.through(webSocketFrame2Frame[F, I](pingPongQ))
.through(pipe)
.through(frame2WebSocketFrame[F, O](if (client2Server) Some(Random.nextInt()) else None))
.mergeHaltBoth(control)
.through(encodeWebSocketFrame[F](client2Server))
}
}}
/**
      * Cuts the bytes of one complete frame from the buffer, by partially decoding
      * the frame header.
      * Returns None if the buffer does not yet contain a full frame.
*
* @param in Current buffer that may contain full frame
*/
def cutFrame(in:ByteVector): Option[ByteVector] = {
val bits = in.bits
if (bits.size < 16) None // smallest frame is 16 bits
else {
val maskSize = if (bits(8)) 4 else 0
val sz = bits.drop(9).take(7).toInt(signed = false)
val maybeEnough =
if (sz < 126) {
// no extended payload size, sz bytes expected
Some(sz.toLong + 2)
} else if (sz == 126) {
// next 16 bits is payload size
if (bits.size < 32) None
else Some(bits.drop(16).take(16).toInt(signed = false).toLong + 4)
} else {
// next 64 bits is payload size
if (bits.size < 80) None
else Some(bits.drop(16).take(64).toLong(signed = false) + 10)
}
maybeEnough.flatMap { sz =>
val fullSize = sz + maskSize
if (in.size < fullSize) None
else Some(in.take(fullSize))
}
}
}
/**
* Decodes websocket frame.
*
* This will fail when the frame failed to be decoded or when frame is larger than
* supplied `maxFrameSize` parameter.
*
* @param maxFrameSize Maximum size of the frame, including its header.
*/
def decodeWebSocketFrame[F[_] : RaiseThrowable](maxFrameSize: Int , flag: Boolean): Pipe[F, Byte, WebSocketFrame] = {
      // Returns the list of complete raw frames and the remaining tail of
      // the buffer. The tail may be non-empty if the last frame
      // isn't complete yet.
def cutFrames(data: ByteVector, acc: Vector[ByteVector] = Vector.empty): (Vector[ByteVector], ByteVector) = {
cutFrame(data) match {
case Some(frameData) => cutFrames(data.drop(frameData.size), acc :+ frameData)
case None => (acc, data)
}
}
def go(buff: ByteVector): Stream[F, Byte] => Pull[F, WebSocketFrame, Unit] = { h0 =>
if (buff.size > maxFrameSize) Pull.raiseError(new Throwable(s"Size of websocket frame exceeded max size: $maxFrameSize, current: ${buff.size}, $buff"))
else {
h0.pull.uncons flatMap {
case None => Pull.done // todo: is ok to silently ignore buffer remainder ?
case Some((chunk, tl)) =>
val data = buff ++ chunk2ByteVector(chunk)
cutFrames(data) match {
case (rawFrames, _) if rawFrames.isEmpty => go(data)(tl)
case (rawFrames, dataTail) =>
val pulls = rawFrames.map { data =>
WebSocketFrameCodec.codec.decodeValue(data.bits) match {
case Failure(err) => Pull.raiseError(new Throwable(s"Failed to decode websocket frame: $err, $data"))
case Successful(wsFrame) => Pull.output1(wsFrame)
}
}
// pulls nonempty
pulls.reduce(_ >> _) >> go(dataTail)(tl)
}
}
}
}
src => go(ByteVector.empty)(src).stream
}
/**
      * Collects incoming frames to produce and deserialize Frame[A].
      *
      * Also interprets WebSocket operations.
      * - if Ping is received, the supplied Queue is enqueued with PingPong.Ping
      * - if Pong is received, the supplied Queue is enqueued with PingPong.Pong
* - if Close is received, the WebSocket is terminated
* - if Continuation is received, the buffer of the frame is enqueued and later used to deserialize to `A`.
*
* @param pongQ Queue to notify about ping/pong frames.
*/
def webSocketFrame2Frame[F[_] : RaiseThrowable, A](pongQ: Queue[F, PingPong])(implicit R: Decoder[A]): Pipe[F, WebSocketFrame, Frame[A]] = {
def decode(from: Vector[WebSocketFrame]):Pull[F, Frame[A], A] = {
val bs = from.map(_.payload).reduce(_ ++ _)
R.decodeValue(bs.bits) match {
case Failure(err) => Pull.raiseError(new Throwable(s"Failed to decode value: $err, content: $bs"))
case Successful(a) => Pull.pure(a)
}
}
def go(buff:Vector[WebSocketFrame]): Stream[F, WebSocketFrame] => Pull[F, Frame[A], Unit] = {
_.pull.uncons1 flatMap {
case None => Pull.done // todo: is ok to ignore remainder in buffer ?
case Some((frame, tl)) =>
frame.opcode match {
case OpCode.Continuation => go(buff :+ frame)(tl)
case OpCode.Text => decode(buff :+ frame).flatMap { decoded => Pull.output1(Frame.Text(decoded)) >> go(Vector.empty)(tl) }
case OpCode.Binary => decode(buff :+ frame).flatMap { decoded => Pull.output1(Frame.Binary(decoded)) >> go(Vector.empty)(tl) }
case OpCode.Ping => Pull.eval(pongQ.enqueue1(PingPong.Ping)) >> go(buff)(tl)
case OpCode.Pong => Pull.eval(pongQ.enqueue1(PingPong.Pong)) >> go(buff)(tl)
case OpCode.Close => Pull.done
}
}
}
src => go(Vector.empty)(src).stream
}
/**
      * Encodes a received Frame to a WebSocketFrame.
      * @param maskKey A function that generates a random masking key. Masking is applied in the client -> server direction only.
*/
def frame2WebSocketFrame[F[_] : RaiseThrowable, A](maskKey: => Option[Int])(implicit W: Encoder[A]): Pipe[F, Frame[A], WebSocketFrame] = {
_.flatMap { frame =>
W.encode(frame.a) match {
case Failure(err) => Stream.raiseError(new Throwable(s"Failed to encode frame: $err (frame: $frame)"))
case Successful(payload) =>
val opCode = if (frame.isText) OpCode.Text else OpCode.Binary
Stream.emit(WebSocketFrame(fin = true, (false, false, false), opCode, payload.bytes, maskKey))
}
}
}
private val pingFrame = WebSocketFrame(fin = true, (false, false, false), OpCode.Ping, ByteVector.empty, None)
private val pongFrame = WebSocketFrame(fin = true, (false, false, false), OpCode.Pong, ByteVector.empty, None)
private val closeFrame = WebSocketFrame(fin = true, (false, false, false), OpCode.Close, ByteVector.empty, None)
/**
* Encodes incoming frames to wire format.
* @tparam F
* @return
*/
def encodeWebSocketFrame[F[_] : RaiseThrowable](flag: Boolean): Pipe[F, WebSocketFrame, Byte] = {
_.append(Stream.emit(closeFrame)).flatMap { wsf =>
WebSocketFrameCodec.codec.encode(wsf) match {
case Failure(err) => Stream.raiseError(new Throwable(s"Failed to encode websocket frame: $err (frame: $wsf)"))
case Successful(data) => Stream.chunk(ByteVectorChunk(data.bytes))
}
}
}
/**
* Creates control stream. When control stream terminates WebSocket will terminate too.
*
      * This takes the ping-pong stream; for each Ping received it responds with a Pong.
      * For each Pong received it zeroes the count of unanswered pings.
*
* @param pingPongs Stream of ping pongs received
* @param metronome A metronome that emits time to send Ping
* @param maxUnanswered Max unanswered pings to await before the stream terminates.
* @tparam F
* @return
*/
def controlStream[F[_] : Concurrent](
pingPongs: Stream[F, PingPong]
, metronome: Stream[F, Unit]
, maxUnanswered: Int
, flag: Boolean
): Stream[F, WebSocketFrame] = {
(pingPongs either metronome)
.mapAccumulate(0) { case (pingsSent, in) => in match {
case Left(PingPong.Pong) => (0, Stream.empty)
case Left(PingPong.Ping) => (pingsSent, Stream.emit(pongFrame))
case Right(_) => (pingsSent + 1, Stream.emit(pingFrame))
}}
.flatMap { case (unconfirmed, out) =>
        if (unconfirmed < maxUnanswered) out
else Stream.raiseError(new Throwable(s"Maximum number of unconfirmed pings exceeded: $unconfirmed"))
}
}
val magic = ByteVector.view("258EAFA5-E914-47DA-95CA-C5AB0DC85B11".getBytes)
def computeFingerPrint(key: ByteVector): ByteVector =
(ByteVector.view(key.toBase64.getBytes) ++ magic).digest("SHA-1")
/**
      * Augments the header to be correct for a WebSocket request (adding the Sec-WebSocket-Key header),
      * returning the augmented header together with the SHA-1 fingerprint expected from the server.
* @param header
* @param random Random generator of 16 byte websocket keys
* @return
*/
def createRequestHeaders(header:HttpRequestHeader, random: => ByteVector = randomBytes(16)): (HttpRequestHeader, ByteVector) = {
val key = random
val headers =
header.headers.filterNot ( h =>
h.isInstanceOf[`Sec-WebSocket-Key`]
|| h.isInstanceOf[`Sec-WebSocket-Version`]
|| h.isInstanceOf[Upgrade]
) ++
List(
`Sec-WebSocket-Key`(key)
, `Sec-WebSocket-Version`(13)
, Connection(List("upgrade"))
, Upgrade(List(ProductDescription("websocket", None)))
)
header.copy(
method = HttpMethod.GET
, headers = headers
) -> computeFingerPrint(key)
}
/** random generator, ascii compatible **/
def randomBytes(size: Int):ByteVector = {
ByteVector.view(Random.alphanumeric.take(size).mkString.getBytes)
}
/**
      * Validates the received response. If a status code other than 101 is received, this evaluates to Some(response).
      * If the fingerprint doesn't match, or the websocket headers don't match the request, this fails.
* @param request Sent request header
* @param response received header
* @param expectFingerPrint expected fingerprint in header
* @return
*/
def validateResponse[F[_] : RaiseThrowable](
request: HttpRequestHeader
, response: HttpResponseHeader
, expectFingerPrint: ByteVector
): Stream[F, Option[HttpResponseHeader]] = {
import Stream._
def validateFingerPrint: Stream[F,Unit] =
response.headers.collectFirst {
case `Sec-WebSocket-Accept`(receivedFp) =>
if (receivedFp != expectFingerPrint) raiseError(new Throwable(s"Websocket fingerprints won't match, expected $expectFingerPrint, but got $receivedFp"))
else emit(())
}.getOrElse(raiseError(new Throwable(s"Websocket response is missing the `Sec-WebSocket-Accept` header : $response")))
def validateUpgrade: Stream[F,Unit] =
response.headers.collectFirst {
case Upgrade(pds) if pds.exists { pd => pd.name.equalsIgnoreCase("websocket") && pd.comment.isEmpty } => emit(())
}.getOrElse(raiseError(new Throwable(s"WebSocket response must contain header 'Upgrade: websocket' : $response")))
def validateConnection: Stream[F,Unit] =
response.headers.collectFirst {
case Connection(ids) if ids.exists(_.equalsIgnoreCase("upgrade")) => emit(())
}.getOrElse(raiseError(new Throwable(s"WebSocket response must contain header 'Connection: Upgrade' : $response")))
def validateProtocols: Stream[F,Unit] = {
val received =
response.headers.collectFirst {
case `Sec-WebSocket-Protocol`(protocols) => protocols
}.getOrElse(Nil)
val expected =
request.headers.collectFirst {
case `Sec-WebSocket-Protocol`(protocols) => protocols
}.getOrElse(Nil)
if (expected.diff(received).nonEmpty) raiseError(new Throwable(s"Websocket protocols do not match. Expected $expected, received: $received"))
else emit(())
}
def validateExtensions: Stream[F,Unit] = {
val received =
response.headers.collectFirst {
case `Sec-WebSocket-Extensions`(extensions) => extensions
}.getOrElse(Nil)
val expected =
request.headers.collectFirst {
case `Sec-WebSocket-Extensions`(extensions) => extensions
}.getOrElse(Nil)
if (expected.diff(received).nonEmpty) raiseError(new Throwable(s"Websocket extensions do not match. Expected $expected, received: $received"))
else emit(())
}
if (response.status != HttpStatusCode.SwitchingProtocols) emit(Some(response))
else {
for {
_ <- validateUpgrade
_ <- validateConnection
_ <- validateFingerPrint
_ <- validateProtocols
_ <- validateExtensions
} yield None: Option[HttpResponseHeader]
}
}
}
}
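// A small, hedged worked example of the header parsing documented on impl.cutFrame
// above: for an unmasked frame with a 7-bit payload length, the complete frame is
// the 2 header bytes plus the payload. The byte values below are chosen here as a
// minimal text frame carrying "abc"; they are not taken from the original repository.
object CutFrameSketch {
  import scodec.bits.ByteVector

  def main(args: Array[String]): Unit = {
    // 0x81 = FIN set, opcode 0x1 (text); 0x03 = mask bit clear, payload length 3
    val frame = ByteVector(0x81, 0x03) ++ ByteVector.view("abc".getBytes("UTF-8"))
    println(WebSocket.impl.cutFrame(frame))              // Some(...) with all 5 bytes of the frame
    println(WebSocket.impl.cutFrame(frame.dropRight(1))) // None: payload is not complete yet
  }
}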
|
Spinoco/fs2-http
|
src/main/scala/spinoco/fs2/http/websocket/WebSocket.scala
|
Scala
|
mit
| 23,022
|
package com.twitter.finagle.filter
import com.twitter.finagle.Service
import com.twitter.util.{Future, Promise, Return}
import org.junit.runner.RunWith
import org.mockito.Matchers.anyObject
import org.mockito.Mockito.{when, verify}
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
@RunWith(classOf[JUnitRunner])
class MaskCancelFilterTest extends FunSuite with MockitoSugar {
trait MaskHelper {
val service = mock[Service[Int, Int]]
when(service.close(anyObject)).thenReturn(Future.Done)
val filter = new MaskCancelFilter[Int, Int]
val filtered = filter andThen service
val p = new Promise[Int] {
@volatile var interrupted: Option[Throwable] = None
setInterruptHandler { case exc => interrupted = Some(exc) }
}
when(service(1)).thenReturn(p)
val f = filtered(1)
verify(service).apply(1)
}
test("MaskCancelFilter should mask interrupts") {
new MaskHelper {
assert(p.interrupted === None)
f.raise(new Exception)
assert(p.interrupted === None)
}
}
test("MaskCancelFilter should propagate results") {
new MaskHelper {
assert(f.poll === None)
p.setValue(123)
assert(p.poll === Some(Return(123)))
}
}
}
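The behaviour exercised by this test can be approximated by a filter that simply returns a masked future. This is an illustrative sketch only, not Finagle's actual MaskCancelFilter implementation:

import com.twitter.finagle.{Service, SimpleFilter}
import com.twitter.util.Future

// Sketch: forwarding the call but exposing a masked future means raise() on the
// returned future never reaches the underlying service's promise.
class MaskCancelSketch[Req, Rep] extends SimpleFilter[Req, Rep] {
  def apply(request: Req, service: Service[Req, Rep]): Future[Rep] =
    service(request).masked
}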
|
JustinTulloss/finagle
|
finagle-core/src/test/scala/com/twitter/finagle/filter/MaskCancelFilterTest.scala
|
Scala
|
apache-2.0
| 1,274
|
/*start*/"a b c".split("\\\\s+").map(_.toUpperCase)/*end*/
//Array[String]
|
ilinum/intellij-scala
|
testdata/typeInference/expected/placeholder/Uppercase.scala
|
Scala
|
apache-2.0
| 72
|
package net.categoricaldata.category
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import scala.math._
import net.categoricaldata.examples.Examples
import net.categoricaldata.ontology._
import net.categoricaldata.util.CustomMatchers
import net.tqft.toolkit.arithmetic.Factorial
@RunWith(classOf[JUnitRunner])
class LeftPushforwardTest extends FlatSpec with ShouldMatchers with CustomMatchers {
// NOTE to use the DSL, you need this line:
import net.categoricaldata.dsl.Sentences._
val DavidsFunkyFunction = Dataset(source = Examples.Chain(1),
onObjects = Map(
"V0" -> List("David", "Scott", "UC Berkeley", "MIT"),
"V1" -> List("1978", "Scott's birthyear", "1868", "1861")),
onMorphisms = Map(
"V0" --- "E01" --> "V1" -> Map(
"David" -> "1978",
"Scott" -> "Scott's birthyear",
"UC Berkeley" -> "1868",
"MIT" -> "1861")))
val DavidsFunkySet2 = Dataset(source = Examples.Chain(0),
onObjects = Map(
"V0" -> List("1978", "Scott's birthyear", "1868", "1861")),
onMorphisms = Map())
"__!" should "work with terminal functor on 'function'" in {
val shriek = Ontologies.morphismToTerminalObject(Examples.Chain(1)).__!(DavidsFunkyFunction)
shriek should beIsomorphicTo(DavidsFunkySet2)
}
"__!" should "preserve the initial dataset" in {
val FCM = Examples.FiniteCyclicMonoid(10, 7)
val F1 = Ontologies.terminalObject.findAllTranslationsTo(FCM).head
for (F <- List(F1)) {
F.__!(F.source.Datasets.initialObject) should beIsomorphicTo((F.target.Datasets.initialObject))
}
}
"__!" should "work reverse graph as expected" in {
val DavidsFunkyGraph = Dataset(source = Examples.Graph,
onObjects = Map(
"an edge" -> List("f", "g", "h", "i", "j"),
"a vertex" -> List("A", "B", "C", "D")),
onMorphisms = Map(
("an edge" --- "has as source" --> "a vertex") -> Map(
"f" -> "A",
"g" -> "A",
"h" -> "B",
"i" -> "A",
"j" -> "C"),
("an edge" --- "has as target" --> "a vertex") -> Map(
"f" -> "B",
"g" -> "B",
"h" -> "C",
"i" -> "C",
"j" -> "C")))
val DavidsFunkyGraphReversed = Dataset(source = Examples.Graph,
onObjects = Map(
"an edge" -> List("f", "g", "h", "i", "j"),
"a vertex" -> List("A", "B", "C", "D")),
onMorphisms = Map(
("an edge" --- "has as source" --> "a vertex") -> Map(
"f" -> "B",
"g" -> "B",
"h" -> "C",
"i" -> "C",
"j" -> "C"),
("an edge" --- "has as target" --> "a vertex") -> Map(
"f" -> "A",
"g" -> "A",
"h" -> "B",
"i" -> "A",
"j" -> "C")))
val LHS = Examples.ReverseGraph.__!(DavidsFunkyGraph)
val RHS = DavidsFunkyGraphReversed
LHS should beIsomorphicTo(RHS)
}
val DavidsFunkyFiniteCyclicMonoid = Dataset(
source = Examples.FiniteCyclicMonoid(2, 1),
onObjects = Map("an element" -> List("David", "Scott", "UC Berkeley", "MIT", "succDavid", "succScott", "succUC Berkeley", "succMIT")),
onMorphisms = Map("an element" --- "has as successor" --> "an element" -> Map(
"David" -> "succDavid",
"Scott" -> "succScott",
"UC Berkeley" -> "succUC Berkeley",
"MIT" -> "succMIT",
"succDavid" -> "succDavid",
"succScott" -> "succScott",
"succUC Berkeley" -> "succUC Berkeley",
"succMIT" -> "succMIT")))
val DavidsFunkySet1 = Dataset(source = Examples.Chain(0),
onObjects = Map(
"V0" -> List("David", "Scott", "UC Berkeley", "MIT")),
onMorphisms = Map())
"__!" should "properly convert a set to a step-and-hold FCM2_1" in {
val F = Ontologies.terminalObject.findAllTranslationsTo(Examples.FiniteCyclicMonoid(2, 1)).head
F.__!(DavidsFunkySet1) should beIsomorphicTo(DavidsFunkyFiniteCyclicMonoid)
}
val OneTwoThreePointed = Dataset(
source = Examples.PointedSets,
onObjects = Map(
"an element" -> List("a1", "b1", "b2", "c1", "c2", "c3"),
"a pointed set" -> List("a", "b", "c")),
onMorphisms = Map(
("an element" --- "is in" --> "a pointed set") -> Map(
"a1" -> "a",
"b1" -> "b",
"b2" -> "b",
"c1" -> "c",
"c2" -> "c",
"c3" -> "c"),
("a pointed set" --- "has as chosen" --> "an element") -> Map(
"a" -> "a1",
"b" -> "b1",
"c" -> "c1")))
val ThreeElementsIso = Dataset(
source = Examples.Isomorphism,
onObjects = Map(
"0" -> List("a", "b", "c"),
"1" -> List("a", "b", "c")),
onMorphisms = Map(
("0" --- "E01" --> "1") -> Map(
"a" -> "a",
"b" -> "b",
"c" -> "c"),
("1" --- "E10" --> "0") -> Map(
"a" -> "a",
"b" -> "b",
"c" -> "c")))
"__!" should "work with PointedSetsToIsomorphism" in {
val X = OneTwoThreePointed
val LHS = ThreeElementsIso
val RHS = Examples.PointedSetsToIsomorphism.__!(X)
LHS should beIsomorphicTo(RHS)
}
val FCM4_3 = Dataset(source = Examples.FiniteCyclicMonoid(4, 3),
onObjects = Map(
"an element" -> List("a", "b")),
onMorphisms = Map(
"an element" --- "has as successor" --> "an element" -> Map(
"a" -> "b",
"b" -> "b")))
val FCM4_3Times2LToFCM5_3 = Dataset(source = Examples.FiniteCyclicMonoid(5, 3),
onObjects = Map(
"an element" -> List("a1", "b1", "a2", "b2")),
onMorphisms = Map(
"an element" --- "has as successor" --> "an element" -> Map(
"a1" -> "a2",
"b1" -> "b2",
"a2" -> "b1",
"b2" -> "b1")))
"__!" should "provide a 'half-speed' FCM-thing" in {
val X = FCM4_3
val LHS = Examples.TranslationFiniteCyclicMonoids(4, 3, 5, 3, 2).__!(X)
val RHS = FCM4_3Times2LToFCM5_3
LHS should beIsomorphicTo(RHS)
}
}
|
JasonGross/categoricaldata
|
src/test/scala/net/categoricaldata/category/LeftPushforwardTest.scala
|
Scala
|
mit
| 5,986
|
package skuber.batch
import skuber.ResourceSpecification.{Names, Scope}
import skuber.{ObjectReference, LabelSelector, NonCoreResourceSpecification, ObjectMeta, ObjectResource, Pod, ResourceDefinition, Timestamp}
/**
* @author David O'Riordan
*/
case class CronJob(val kind: String = "CronJob",
override val apiVersion: String = batchAPIVersion,
val metadata: ObjectMeta = ObjectMeta(),
spec: Option[CronJob.Spec] = None,
status: Option[CronJob.Status] = None) extends ObjectResource
object CronJob {
implicit val cronjobDef = new ResourceDefinition[CronJob] {
def spec = NonCoreResourceSpecification(
group=Some("batch"),
version="v2alpha1",
scope = Scope.Namespaced,
names=Names(
plural = "cronjobs",
singular = "cronjob",
kind = "CronJob",
shortNames = Nil)
)
}
def apply(name: String) = new CronJob(metadata=ObjectMeta(name=name))
def apply(name: String, schedule: String, jobTemplateSpec: JobTemplate.Spec) =
new CronJob(metadata=ObjectMeta(name=name),spec=Some(Spec(schedule=schedule, jobTemplate = jobTemplateSpec)))
case class Spec(
schedule: String,
jobTemplate: JobTemplate.Spec,
startingDeadlineSeconds: Option[Long] = None,
concurrencyPolicy: Option[String] = None, // can be "Allow" (implied if None), "Forbid" or "Replace"
suspend: Option[Boolean] = None,
successfulJobsHistoryLimit: Option[Int] = None,
failedJobsHistoryLimit: Option[Int] = None)
case class Status(
lastScheduleTime: Option[Timestamp],
active: List[ObjectReference]
)
}
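A small usage sketch of the implicit resource definition above (illustrative only; it inspects the definition rather than talking to a cluster, and assumes the resource specification exposes its Names):

// Resolves the implicit cronjobDef defined in the companion object above.
val cronJobNames = implicitly[ResourceDefinition[CronJob]].spec.names
// cronJobNames.plural == "cronjobs" and cronJobNames.kind == "CronJob"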
|
minatjanster/skuber
|
client/src/main/scala/skuber/batch/CronJob.scala
|
Scala
|
apache-2.0
| 1,588
|
/**
* Copyright (C) 2011 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.portlet.liferay
import java.{util ⇒ ju}
import javax.portlet.filter.PortletRequestWrapper
import javax.portlet.{PortletRequest, PortletSession}
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.mockito.{Matchers, Mockito}
import org.orbeon.oxf.http.Headers
import org.orbeon.oxf.portlet.liferay.LiferayAPI.RoleFacade
import org.orbeon.oxf.test.ResourceManagerSupport
import org.scalatest.FunSpecLike
import org.scalatest.mockito.MockitoSugar
import scala.collection.JavaConverters._
import scala.collection.immutable.TreeMap
import scala.collection.mutable
class FormRunnerRequestFilterTest extends ResourceManagerSupport with FunSpecLike with MockitoSugar {
describe("The portlet filter's `amendRequest()` function") {
// Initial properties
val initialProperties = Map("p1" → List("v1a", "v1b"))
// Session
val sessionAttributes = mutable.Map[String, AnyRef]()
val mockSession = mock[PortletSession]
Mockito when mockSession.getAttribute(Matchers.anyString) thenAnswer new Answer[AnyRef] {
def answer(invocation: InvocationOnMock) =
sessionAttributes.get(invocation.getArguments()(0).asInstanceOf[String]).orNull
}
Mockito when mockSession.setAttribute(Matchers.anyString, Matchers.anyObject) thenAnswer new Answer[Unit] {
def answer(invocation: InvocationOnMock) =
sessionAttributes += invocation.getArguments()(0).asInstanceOf[String] → invocation.getArguments()(1)
}
// Request with initial properties
val mockRequest = new PortletRequestWrapper(mock[PortletRequest]) {
override def getProperty(name: String) = initialProperties.get(name) map (_.head) orNull
override def getProperties(name: String) =
(initialProperties.get(name) map (_.iterator) getOrElse Iterator.empty).asJavaEnumeration
override def getPropertyNames = initialProperties.keysIterator.asJavaEnumeration
override def getPortletSession = mockSession
override def getPortletSession(create: Boolean) = mockSession
}
class MyGroup {
def getGroupId = 42L
def getName = "universe"
def getDescriptiveName = getName
}
case class MyRole(getName: String) {
def getType = LiferayAPI.LiferayRegularRoleType.value
override def toString() = s"MyRole($getName)"
}
class MyUser {
def getUserId = 123L
def getScreenName = "jsmith"
def getFullName = "John Paul Smith"
def getFirstName = "John"
def getMiddleName = "Paul"
def getLastName = "Smith"
def getEmailAddress = "test@orbeon.com"
def getGroup = new MyGroup
def getRoles = ju.Arrays.asList(MyRole("manager"): RoleFacade, MyRole("employee"): RoleFacade)
}
class MyCompany {
def getAuthType = LiferayAPI.LiferayEmailAddressAuthType.name
}
import org.orbeon.oxf.portlet.liferay.FormRunnerAuthFilter._
val amendedRequest =
wrapWithOrbeonAuthHeaders(wrapWithLiferayUserHeaders(mockRequest, new LiferayUser {
override def userHeaders = LiferaySupport.userHeaders(new MyUser, new MyCompany, tests = true)
}))
val expectedProperties =
initialProperties ++ Map(
"orbeon-liferay-user-id" → List("123"),
"orbeon-liferay-user-screen-name" → List("jsmith"),
"orbeon-liferay-user-full-name" → List("John Paul Smith"),
"orbeon-liferay-user-first-name" → List("John"),
"orbeon-liferay-user-middle-name" → List("Paul"),
"orbeon-liferay-user-last-name" → List("Smith"),
"orbeon-liferay-user-email" → List("test@orbeon.com"),
"orbeon-liferay-user-group-id" → List("42"),
"orbeon-liferay-user-group-name" → List("universe"),
"orbeon-liferay-user-roles" → List("manager", "employee"),
Headers.OrbeonUsernameLower → List("test@orbeon.com"),
Headers.OrbeonGroupLower → List("universe"),
Headers.OrbeonRolesLower → List("manager", "employee"),
Headers.OrbeonCredentialsLower → List("""{"username":"test%40orbeon.com","groups":["universe"],"roles":[{"name":"manager"},{"name":"employee"}],"organizations":[]}""")
)
// NOTE: Don't use Array for comparison, because Array's == doesn't work as expected in Scala
val actualProperties =
amendedRequest.getPropertyNames.asScala map (n ⇒ n → amendedRequest.getProperties(n).asScala.toList) toMap
// Compare using TreeMap to get a reliable order
def toTreeMap[K, V](map: Map[K, V])(implicit ord: Ordering[K]) = TreeMap[K, V]() ++ map
it ("must set authentication headers based on incoming headers") {
assert(toTreeMap(expectedProperties) === toTreeMap(actualProperties))
}
}
}
|
brunobuzzi/orbeon-forms
|
form-runner/jvm/src/test/scala/org/orbeon/oxf/portlet/liferay/FormRunnerRequestFilterTest.scala
|
Scala
|
lgpl-2.1
| 5,533
|
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.app
import io.gatling.app.cli.StatusCode
import io.gatling.charts.report.{ ReportsGenerationInputs, ReportsGenerator }
import io.gatling.charts.stats.LogFileReader
import io.gatling.commons.stats.assertion.{ AssertionResult, AssertionValidator }
import io.gatling.commons.util.TimeHelper._
import io.gatling.core.config.GatlingConfiguration
trait RunResultProcessor {
def processRunResult(runResult: RunResult): StatusCode
}
class LogFileProcessor(configuration: GatlingConfiguration) extends RunResultProcessor {
implicit val config = configuration
override def processRunResult(runResult: RunResult): StatusCode = {
val start = nowMillis
initLogFileReader(runResult) match {
case Some(reader) =>
val assertionResults = AssertionValidator.validateAssertions(reader)
if (reportsGenerationEnabled) {
val reportsGenerationInputs = ReportsGenerationInputs(runResult.runId, reader, assertionResults)
generateReports(reportsGenerationInputs, start)
}
runStatus(assertionResults)
case _ =>
StatusCode.Success
}
}
private def initLogFileReader(runResult: RunResult): Option[LogFileReader] =
if (reportsGenerationEnabled || runResult.hasAssertions)
Some(new LogFileReader(runResult.runId))
else
None
private def reportsGenerationEnabled =
configuration.core.directory.reportsOnly.isDefined || (configuration.data.fileDataWriterEnabled && !configuration.charting.noReports)
private def generateReports(reportsGenerationInputs: ReportsGenerationInputs, start: Long): Unit = {
println("Generating reports...")
val indexFile = new ReportsGenerator().generateFor(reportsGenerationInputs)
println(s"Reports generated in ${(nowMillis - start) / 1000}s.")
println(s"Please open the following file: ${indexFile.toFile}")
}
private def runStatus(assertionResults: List[AssertionResult]): StatusCode = {
val consolidatedAssertionResult = assertionResults.foldLeft(true) { (isValid, assertionResult) =>
println(s"${assertionResult.message} : ${assertionResult.result}")
isValid && assertionResult.result
}
if (consolidatedAssertionResult) StatusCode.Success
else StatusCode.AssertionsFailed
}
}
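For contrast, a trivial alternative implementation of the RunResultProcessor trait defined above, which skips log-file processing entirely, could look like the following sketch (illustrative only, assuming the same package and imports as this file):

// Sketch: a processor that ignores the run's log file and always reports success.
class NoopRunResultProcessor extends RunResultProcessor {
  override def processRunResult(runResult: RunResult): StatusCode =
    StatusCode.Success
}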
|
ryez/gatling
|
gatling-app/src/main/scala/io/gatling/app/RunResultProcessor.scala
|
Scala
|
apache-2.0
| 2,899
|
package iot.pood.management.security
import com.typesafe.config.ConfigFactory
import iot.pood.base.actors.BaseTest
import iot.pood.base.exception.Exceptions.IncorrectConfigurationException
import org.scalatest._
import scala.collection.mutable
/**
* Created by rafik on 12.10.2017.
*/
class SecurityConfigTest extends BaseTest {
"Security config" should "return correct configuration" in {
val config = ConfigFactory.parseString(
"""
|security {
| expiration = 2 seconds
| secret_key = "thisjusasodifsodifj"
| header = "HS256"
|}
""".stripMargin)
config.getConfig("security")
val securityConfig = SecurityConfig.securityConfig(config)
securityConfig shouldBe a[SecurityConfig]
    securityConfig.expiration.toSeconds should ===(2)
securityConfig.secretKey should ===("thisjusasodifsodifj")
}
it should "throw exception IncorrectConfigurationException" in {
val config = ConfigFactory.parseString(
"""
|security {
| expiration = 2 xxxx
| secret_key = "thisjusasodifsodifj"
| header = "HS256"
|}
""".stripMargin)
a[IncorrectConfigurationException] shouldBe thrownBy {
SecurityConfig.securityConfig(config)
}
}
}
|
rafajpet/iot-pood
|
iot-pood-management/src/test/scala/iot/pood/management/security/SecurityConfigTest.scala
|
Scala
|
mit
| 1,300
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.r
import org.apache.hadoop.fs.Path
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods._
import org.apache.spark.SparkException
import org.apache.spark.ml.util.MLReader
/**
 * This is the Scala stub of SparkR read.ml. It dispatches the call to the corresponding
 * model wrapper's loading function according to the class name extracted from the rMetadata at the given path.
*/
private[r] object RWrappers extends MLReader[Object] {
override def load(path: String): Object = {
implicit val format = DefaultFormats
val rMetadataPath = new Path(path, "rMetadata").toString
val rMetadataStr = sc.textFile(rMetadataPath, 1).first()
val rMetadata = parse(rMetadataStr)
val className = (rMetadata \\ "class").extract[String]
className match {
case "org.apache.spark.ml.r.NaiveBayesWrapper" => NaiveBayesWrapper.load(path)
case "org.apache.spark.ml.r.AFTSurvivalRegressionWrapper" =>
AFTSurvivalRegressionWrapper.load(path)
case "org.apache.spark.ml.r.GeneralizedLinearRegressionWrapper" =>
GeneralizedLinearRegressionWrapper.load(path)
case "org.apache.spark.ml.r.KMeansWrapper" =>
KMeansWrapper.load(path)
case "org.apache.spark.ml.r.MultilayerPerceptronClassifierWrapper" =>
MultilayerPerceptronClassifierWrapper.load(path)
case "org.apache.spark.ml.r.LDAWrapper" =>
LDAWrapper.load(path)
case "org.apache.spark.ml.r.IsotonicRegressionWrapper" =>
IsotonicRegressionWrapper.load(path)
case "org.apache.spark.ml.r.GaussianMixtureWrapper" =>
GaussianMixtureWrapper.load(path)
case "org.apache.spark.ml.r.ALSWrapper" =>
ALSWrapper.load(path)
case "org.apache.spark.ml.r.LogisticRegressionWrapper" =>
LogisticRegressionWrapper.load(path)
case "org.apache.spark.ml.r.RandomForestRegressorWrapper" =>
RandomForestRegressorWrapper.load(path)
case "org.apache.spark.ml.r.RandomForestClassifierWrapper" =>
RandomForestClassifierWrapper.load(path)
case "org.apache.spark.ml.r.DecisionTreeRegressorWrapper" =>
DecisionTreeRegressorWrapper.load(path)
case "org.apache.spark.ml.r.DecisionTreeClassifierWrapper" =>
DecisionTreeClassifierWrapper.load(path)
case "org.apache.spark.ml.r.GBTRegressorWrapper" =>
GBTRegressorWrapper.load(path)
case "org.apache.spark.ml.r.GBTClassifierWrapper" =>
GBTClassifierWrapper.load(path)
case "org.apache.spark.ml.r.BisectingKMeansWrapper" =>
BisectingKMeansWrapper.load(path)
case "org.apache.spark.ml.r.LinearSVCWrapper" =>
LinearSVCWrapper.load(path)
case "org.apache.spark.ml.r.FPGrowthWrapper" =>
FPGrowthWrapper.load(path)
case "org.apache.spark.ml.r.FMClassifierWrapper" =>
FMClassifierWrapper.load(path)
case "org.apache.spark.ml.r.LinearRegressionWrapper" =>
LinearRegressionWrapper.load(path)
case "org.apache.spark.ml.r.FMRegressorWrapper" =>
FMRegressorWrapper.load(path)
case _ =>
throw new SparkException(s"SparkR read.ml does not support load $className")
}
}
}
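The dispatch above assumes that each wrapper's writer has stored its class name in an rMetadata file next to the saved model. A hypothetical sketch of that save-side convention (the helper name saveRMetadata is illustrative, not Spark's API) is:

import org.apache.hadoop.fs.Path
import org.apache.spark.SparkContext
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._

// Hypothetical helper: writes {"class": "<wrapper class name>"} to <path>/rMetadata,
// which is exactly what RWrappers.load parses back to pick a wrapper.
def saveRMetadata(sc: SparkContext, path: String, className: String): Unit = {
  val rMetadataPath = new Path(path, "rMetadata").toString
  val rMetadataJson = compact(render("class" -> className))
  sc.parallelize(Seq(rMetadataJson), 1).saveAsTextFile(rMetadataPath)
}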
|
shaneknapp/spark
|
mllib/src/main/scala/org/apache/spark/ml/r/RWrappers.scala
|
Scala
|
apache-2.0
| 3,975
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import java.sql.Timestamp
import org.apache.spark.sql.catalyst.analysis.TypeCoercion._
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.{Rule, RuleExecutor}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.CalendarInterval
class TypeCoercionSuite extends PlanTest {
// scalastyle:off line.size.limit
// The following table shows all implicit data type conversions that are not visible to the user.
// +----------------------+----------+-----------+-------------+----------+------------+-----------+------------+------------+-------------+------------+----------+---------------+------------+----------+-------------+----------+----------------------+---------------------+-------------+--------------+
// | Source Type\\CAST TO | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | BinaryType | BooleanType | StringType | DateType | TimestampType | ArrayType | MapType | StructType | NullType | CalendarIntervalType | DecimalType | NumericType | IntegralType |
// +----------------------+----------+-----------+-------------+----------+------------+-----------+------------+------------+-------------+------------+----------+---------------+------------+----------+-------------+----------+----------------------+---------------------+-------------+--------------+
// | ByteType | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | X | X | StringType | X | X | X | X | X | X | X | DecimalType(3, 0) | ByteType | ByteType |
// | ShortType | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | X | X | StringType | X | X | X | X | X | X | X | DecimalType(5, 0) | ShortType | ShortType |
// | IntegerType | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | X | X | StringType | X | X | X | X | X | X | X | DecimalType(10, 0) | IntegerType | IntegerType |
// | LongType | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | X | X | StringType | X | X | X | X | X | X | X | DecimalType(20, 0) | LongType | LongType |
// | DoubleType | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | X | X | StringType | X | X | X | X | X | X | X | DecimalType(30, 15) | DoubleType | IntegerType |
// | FloatType | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | X | X | StringType | X | X | X | X | X | X | X | DecimalType(14, 7) | FloatType | IntegerType |
// | Dec(10, 2) | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | X | X | StringType | X | X | X | X | X | X | X | DecimalType(10, 2) | Dec(10, 2) | IntegerType |
// | BinaryType | X | X | X | X | X | X | X | BinaryType | X | StringType | X | X | X | X | X | X | X | X | X | X |
// | BooleanType | X | X | X | X | X | X | X | X | BooleanType | StringType | X | X | X | X | X | X | X | X | X | X |
// | StringType | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | BinaryType | X | StringType | DateType | TimestampType | X | X | X | X | X | DecimalType(38, 18) | DoubleType | X |
// | DateType | X | X | X | X | X | X | X | X | X | StringType | DateType | TimestampType | X | X | X | X | X | X | X | X |
// | TimestampType | X | X | X | X | X | X | X | X | X | StringType | DateType | TimestampType | X | X | X | X | X | X | X | X |
// | ArrayType | X | X | X | X | X | X | X | X | X | X | X | X | ArrayType* | X | X | X | X | X | X | X |
// | MapType | X | X | X | X | X | X | X | X | X | X | X | X | X | MapType* | X | X | X | X | X | X |
// | StructType | X | X | X | X | X | X | X | X | X | X | X | X | X | X | StructType* | X | X | X | X | X |
// | NullType | ByteType | ShortType | IntegerType | LongType | DoubleType | FloatType | Dec(10, 2) | BinaryType | BooleanType | StringType | DateType | TimestampType | ArrayType | MapType | StructType | NullType | CalendarIntervalType | DecimalType(38, 18) | DoubleType | IntegerType |
// | CalendarIntervalType | X | X | X | X | X | X | X | X | X | X | X | X | X | X | X | X | CalendarIntervalType | X | X | X |
// +----------------------+----------+-----------+-------------+----------+------------+-----------+------------+------------+-------------+------------+----------+---------------+------------+----------+-------------+----------+----------------------+---------------------+-------------+--------------+
// Note: MapType*, StructType* are castable only when the internal child types also match; otherwise, not castable.
// Note: ArrayType* is castable when the element type is castable according to the table.
// scalastyle:on line.size.limit
private def shouldCast(from: DataType, to: AbstractDataType, expected: DataType): Unit = {
// Check default value
val castDefault = TypeCoercion.ImplicitTypeCasts.implicitCast(default(from), to)
assert(DataType.equalsIgnoreCompatibleNullability(
castDefault.map(_.dataType).getOrElse(null), expected),
s"Failed to cast $from to $to")
// Check null value
val castNull = TypeCoercion.ImplicitTypeCasts.implicitCast(createNull(from), to)
assert(DataType.equalsIgnoreCaseAndNullability(
castNull.map(_.dataType).getOrElse(null), expected),
s"Failed to cast $from to $to")
}
private def shouldNotCast(from: DataType, to: AbstractDataType): Unit = {
// Check default value
val castDefault = TypeCoercion.ImplicitTypeCasts.implicitCast(default(from), to)
assert(castDefault.isEmpty, s"Should not be able to cast $from to $to, but got $castDefault")
// Check null value
val castNull = TypeCoercion.ImplicitTypeCasts.implicitCast(createNull(from), to)
assert(castNull.isEmpty, s"Should not be able to cast $from to $to, but got $castNull")
}
private def default(dataType: DataType): Expression = dataType match {
case ArrayType(internalType: DataType, _) =>
CreateArray(Seq(Literal.default(internalType)))
case MapType(keyDataType: DataType, valueDataType: DataType, _) =>
CreateMap(Seq(Literal.default(keyDataType), Literal.default(valueDataType)))
case _ => Literal.default(dataType)
}
private def createNull(dataType: DataType): Expression = dataType match {
case ArrayType(internalType: DataType, _) =>
CreateArray(Seq(Literal.create(null, internalType)))
case MapType(keyDataType: DataType, valueDataType: DataType, _) =>
CreateMap(Seq(Literal.create(null, keyDataType), Literal.create(null, valueDataType)))
case _ => Literal.create(null, dataType)
}
val integralTypes: Seq[DataType] =
Seq(ByteType, ShortType, IntegerType, LongType)
val fractionalTypes: Seq[DataType] =
Seq(DoubleType, FloatType, DecimalType.SYSTEM_DEFAULT, DecimalType(10, 2))
val numericTypes: Seq[DataType] = integralTypes ++ fractionalTypes
val atomicTypes: Seq[DataType] =
numericTypes ++ Seq(BinaryType, BooleanType, StringType, DateType, TimestampType)
val complexTypes: Seq[DataType] =
Seq(ArrayType(IntegerType),
ArrayType(StringType),
MapType(StringType, StringType),
new StructType().add("a1", StringType),
new StructType().add("a1", StringType).add("a2", IntegerType))
val allTypes: Seq[DataType] =
atomicTypes ++ complexTypes ++ Seq(NullType, CalendarIntervalType)
// Check whether the type `checkedType` can be cast to all the types in `castableTypes`,
// but cannot be cast to the other types in `allTypes`.
private def checkTypeCasting(checkedType: DataType, castableTypes: Seq[DataType]): Unit = {
val nonCastableTypes = allTypes.filterNot(castableTypes.contains)
castableTypes.foreach { tpe =>
shouldCast(checkedType, tpe, tpe)
}
nonCastableTypes.foreach { tpe =>
shouldNotCast(checkedType, tpe)
}
}
private def checkWidenType(
widenFunc: (DataType, DataType) => Option[DataType],
t1: DataType,
t2: DataType,
expected: Option[DataType]): Unit = {
var found = widenFunc(t1, t2)
assert(found == expected,
s"Expected $expected as wider common type for $t1 and $t2, found $found")
// Test both directions to make sure the widening is symmetric.
found = widenFunc(t2, t1)
assert(found == expected,
s"Expected $expected as wider common type for $t2 and $t1, found $found")
}
test("implicit type cast - ByteType") {
val checkedType = ByteType
checkTypeCasting(checkedType, castableTypes = numericTypes ++ Seq(StringType))
shouldCast(checkedType, DecimalType, DecimalType.ByteDecimal)
shouldCast(checkedType, NumericType, checkedType)
shouldCast(checkedType, IntegralType, checkedType)
}
test("implicit type cast - ShortType") {
val checkedType = ShortType
checkTypeCasting(checkedType, castableTypes = numericTypes ++ Seq(StringType))
shouldCast(checkedType, DecimalType, DecimalType.ShortDecimal)
shouldCast(checkedType, NumericType, checkedType)
shouldCast(checkedType, IntegralType, checkedType)
}
test("implicit type cast - IntegerType") {
val checkedType = IntegerType
checkTypeCasting(checkedType, castableTypes = numericTypes ++ Seq(StringType))
shouldCast(IntegerType, DecimalType, DecimalType.IntDecimal)
shouldCast(checkedType, NumericType, checkedType)
shouldCast(checkedType, IntegralType, checkedType)
}
test("implicit type cast - LongType") {
val checkedType = LongType
checkTypeCasting(checkedType, castableTypes = numericTypes ++ Seq(StringType))
shouldCast(checkedType, DecimalType, DecimalType.LongDecimal)
shouldCast(checkedType, NumericType, checkedType)
shouldCast(checkedType, IntegralType, checkedType)
}
test("implicit type cast - FloatType") {
val checkedType = FloatType
checkTypeCasting(checkedType, castableTypes = numericTypes ++ Seq(StringType))
shouldCast(checkedType, DecimalType, DecimalType.FloatDecimal)
shouldCast(checkedType, NumericType, checkedType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - DoubleType") {
val checkedType = DoubleType
checkTypeCasting(checkedType, castableTypes = numericTypes ++ Seq(StringType))
shouldCast(checkedType, DecimalType, DecimalType.DoubleDecimal)
shouldCast(checkedType, NumericType, checkedType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - DecimalType(10, 2)") {
val checkedType = DecimalType(10, 2)
checkTypeCasting(checkedType, castableTypes = numericTypes ++ Seq(StringType))
shouldCast(checkedType, DecimalType, checkedType)
shouldCast(checkedType, NumericType, checkedType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - BinaryType") {
val checkedType = BinaryType
checkTypeCasting(checkedType, castableTypes = Seq(checkedType, StringType))
shouldNotCast(checkedType, DecimalType)
shouldNotCast(checkedType, NumericType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - BooleanType") {
val checkedType = BooleanType
checkTypeCasting(checkedType, castableTypes = Seq(checkedType, StringType))
shouldNotCast(checkedType, DecimalType)
shouldNotCast(checkedType, NumericType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - StringType") {
val checkedType = StringType
val nonCastableTypes =
complexTypes ++ Seq(BooleanType, NullType, CalendarIntervalType)
checkTypeCasting(checkedType, castableTypes = allTypes.filterNot(nonCastableTypes.contains))
shouldCast(checkedType, DecimalType, DecimalType.SYSTEM_DEFAULT)
shouldCast(checkedType, NumericType, NumericType.defaultConcreteType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - DateType") {
val checkedType = DateType
checkTypeCasting(checkedType, castableTypes = Seq(checkedType, StringType, TimestampType))
shouldNotCast(checkedType, DecimalType)
shouldNotCast(checkedType, NumericType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - TimestampType") {
val checkedType = TimestampType
checkTypeCasting(checkedType, castableTypes = Seq(checkedType, StringType, DateType))
shouldNotCast(checkedType, DecimalType)
shouldNotCast(checkedType, NumericType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - ArrayType(StringType)") {
val checkedType = ArrayType(StringType)
val nonCastableTypes =
complexTypes ++ Seq(BooleanType, NullType, CalendarIntervalType)
checkTypeCasting(checkedType,
castableTypes = allTypes.filterNot(nonCastableTypes.contains).map(ArrayType(_)))
nonCastableTypes.map(ArrayType(_)).foreach(shouldNotCast(checkedType, _))
shouldNotCast(ArrayType(DoubleType, containsNull = false),
ArrayType(LongType, containsNull = false))
shouldNotCast(checkedType, DecimalType)
shouldNotCast(checkedType, NumericType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - MapType(StringType, StringType)") {
val checkedType = MapType(StringType, StringType)
checkTypeCasting(checkedType, castableTypes = Seq(checkedType))
shouldNotCast(checkedType, DecimalType)
shouldNotCast(checkedType, NumericType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - StructType().add(\\"a1\\", StringType)") {
val checkedType = new StructType().add("a1", StringType)
checkTypeCasting(checkedType, castableTypes = Seq(checkedType))
shouldNotCast(checkedType, DecimalType)
shouldNotCast(checkedType, NumericType)
shouldNotCast(checkedType, IntegralType)
}
test("implicit type cast - NullType") {
val checkedType = NullType
checkTypeCasting(checkedType, castableTypes = allTypes)
shouldCast(checkedType, DecimalType, DecimalType.SYSTEM_DEFAULT)
shouldCast(checkedType, NumericType, NumericType.defaultConcreteType)
shouldCast(checkedType, IntegralType, IntegralType.defaultConcreteType)
}
test("implicit type cast - CalendarIntervalType") {
val checkedType = CalendarIntervalType
checkTypeCasting(checkedType, castableTypes = Seq(checkedType))
shouldNotCast(checkedType, DecimalType)
shouldNotCast(checkedType, NumericType)
shouldNotCast(checkedType, IntegralType)
}
test("eligible implicit type cast - TypeCollection") {
shouldCast(NullType, TypeCollection(StringType, BinaryType), StringType)
shouldCast(StringType, TypeCollection(StringType, BinaryType), StringType)
shouldCast(BinaryType, TypeCollection(StringType, BinaryType), BinaryType)
shouldCast(StringType, TypeCollection(BinaryType, StringType), StringType)
shouldCast(IntegerType, TypeCollection(IntegerType, BinaryType), IntegerType)
shouldCast(IntegerType, TypeCollection(BinaryType, IntegerType), IntegerType)
shouldCast(BinaryType, TypeCollection(BinaryType, IntegerType), BinaryType)
shouldCast(BinaryType, TypeCollection(IntegerType, BinaryType), BinaryType)
shouldCast(IntegerType, TypeCollection(StringType, BinaryType), StringType)
shouldCast(IntegerType, TypeCollection(BinaryType, StringType), StringType)
shouldCast(DecimalType.SYSTEM_DEFAULT,
TypeCollection(IntegerType, DecimalType), DecimalType.SYSTEM_DEFAULT)
shouldCast(DecimalType(10, 2), TypeCollection(IntegerType, DecimalType), DecimalType(10, 2))
shouldCast(DecimalType(10, 2), TypeCollection(DecimalType, IntegerType), DecimalType(10, 2))
shouldCast(IntegerType, TypeCollection(DecimalType(10, 2), StringType), DecimalType(10, 2))
shouldCast(StringType, TypeCollection(NumericType, BinaryType), DoubleType)
shouldCast(
ArrayType(StringType, false),
TypeCollection(ArrayType(StringType), StringType),
ArrayType(StringType, false))
shouldCast(
ArrayType(StringType, true),
TypeCollection(ArrayType(StringType), StringType),
ArrayType(StringType, true))
}
test("ineligible implicit type cast - TypeCollection") {
shouldNotCast(IntegerType, TypeCollection(DateType, TimestampType))
}
test("tightest common bound for types") {
def widenTest(t1: DataType, t2: DataType, expected: Option[DataType]): Unit =
checkWidenType(TypeCoercion.findTightestCommonType, t1, t2, expected)
// Null
widenTest(NullType, NullType, Some(NullType))
// Boolean
widenTest(NullType, BooleanType, Some(BooleanType))
widenTest(BooleanType, BooleanType, Some(BooleanType))
widenTest(IntegerType, BooleanType, None)
widenTest(LongType, BooleanType, None)
// Integral
widenTest(NullType, ByteType, Some(ByteType))
widenTest(NullType, IntegerType, Some(IntegerType))
widenTest(NullType, LongType, Some(LongType))
widenTest(ShortType, IntegerType, Some(IntegerType))
widenTest(ShortType, LongType, Some(LongType))
widenTest(IntegerType, LongType, Some(LongType))
widenTest(LongType, LongType, Some(LongType))
// Floating point
widenTest(NullType, FloatType, Some(FloatType))
widenTest(NullType, DoubleType, Some(DoubleType))
widenTest(FloatType, DoubleType, Some(DoubleType))
widenTest(FloatType, FloatType, Some(FloatType))
widenTest(DoubleType, DoubleType, Some(DoubleType))
// Integral mixed with floating point.
widenTest(IntegerType, FloatType, Some(FloatType))
widenTest(IntegerType, DoubleType, Some(DoubleType))
widenTest(IntegerType, DoubleType, Some(DoubleType))
widenTest(LongType, FloatType, Some(FloatType))
widenTest(LongType, DoubleType, Some(DoubleType))
// No up-casting for fixed-precision decimal (this is handled by arithmetic rules)
widenTest(DecimalType(2, 1), DecimalType(3, 2), None)
widenTest(DecimalType(2, 1), DoubleType, None)
widenTest(DecimalType(2, 1), IntegerType, None)
widenTest(DoubleType, DecimalType(2, 1), None)
// StringType
widenTest(NullType, StringType, Some(StringType))
widenTest(StringType, StringType, Some(StringType))
widenTest(IntegerType, StringType, None)
widenTest(LongType, StringType, None)
// TimestampType
widenTest(NullType, TimestampType, Some(TimestampType))
widenTest(TimestampType, TimestampType, Some(TimestampType))
widenTest(DateType, TimestampType, Some(TimestampType))
widenTest(IntegerType, TimestampType, None)
widenTest(StringType, TimestampType, None)
// ComplexType
widenTest(NullType,
MapType(IntegerType, StringType, false),
Some(MapType(IntegerType, StringType, false)))
widenTest(NullType, StructType(Seq()), Some(StructType(Seq())))
widenTest(StringType, MapType(IntegerType, StringType, true), None)
widenTest(ArrayType(IntegerType), StructType(Seq()), None)
}
test("wider common type for decimal and array") {
def widenTestWithStringPromotion(
t1: DataType,
t2: DataType,
expected: Option[DataType]): Unit = {
checkWidenType(TypeCoercion.findWiderTypeForTwo, t1, t2, expected)
}
def widenTestWithoutStringPromotion(
t1: DataType,
t2: DataType,
expected: Option[DataType]): Unit = {
checkWidenType(TypeCoercion.findWiderTypeWithoutStringPromotionForTwo, t1, t2, expected)
}
// Decimal
widenTestWithStringPromotion(
DecimalType(2, 1), DecimalType(3, 2), Some(DecimalType(3, 2)))
widenTestWithStringPromotion(
DecimalType(2, 1), DoubleType, Some(DoubleType))
widenTestWithStringPromotion(
DecimalType(2, 1), IntegerType, Some(DecimalType(11, 1)))
widenTestWithStringPromotion(
DecimalType(2, 1), LongType, Some(DecimalType(21, 1)))
// ArrayType
widenTestWithStringPromotion(
ArrayType(ShortType, containsNull = true),
ArrayType(DoubleType, containsNull = false),
Some(ArrayType(DoubleType, containsNull = true)))
widenTestWithStringPromotion(
ArrayType(TimestampType, containsNull = false),
ArrayType(StringType, containsNull = true),
Some(ArrayType(StringType, containsNull = true)))
widenTestWithStringPromotion(
ArrayType(ArrayType(IntegerType), containsNull = false),
ArrayType(ArrayType(LongType), containsNull = false),
Some(ArrayType(ArrayType(LongType), containsNull = false)))
// Without string promotion
widenTestWithoutStringPromotion(IntegerType, StringType, None)
widenTestWithoutStringPromotion(StringType, TimestampType, None)
widenTestWithoutStringPromotion(ArrayType(LongType), ArrayType(StringType), None)
widenTestWithoutStringPromotion(ArrayType(StringType), ArrayType(TimestampType), None)
// String promotion
widenTestWithStringPromotion(IntegerType, StringType, Some(StringType))
widenTestWithStringPromotion(StringType, TimestampType, Some(StringType))
widenTestWithStringPromotion(
ArrayType(LongType), ArrayType(StringType), Some(ArrayType(StringType)))
widenTestWithStringPromotion(
ArrayType(StringType), ArrayType(TimestampType), Some(ArrayType(StringType)))
}
  private def ruleTest(rule: Rule[LogicalPlan], initial: Expression, transformed: Expression): Unit = {
ruleTest(Seq(rule), initial, transformed)
}
private def ruleTest(
rules: Seq[Rule[LogicalPlan]],
initial: Expression,
transformed: Expression): Unit = {
val testRelation = LocalRelation(AttributeReference("a", IntegerType)())
val analyzer = new RuleExecutor[LogicalPlan] {
override val batches = Seq(Batch("Resolution", FixedPoint(3), rules: _*))
}
comparePlans(
analyzer.execute(Project(Seq(Alias(initial, "a")()), testRelation)),
Project(Seq(Alias(transformed, "a")()), testRelation))
}
test("cast NullType for expressions that implement ExpectsInputTypes") {
import TypeCoercionSuite._
ruleTest(TypeCoercion.ImplicitTypeCasts,
AnyTypeUnaryExpression(Literal.create(null, NullType)),
AnyTypeUnaryExpression(Literal.create(null, NullType)))
ruleTest(TypeCoercion.ImplicitTypeCasts,
NumericTypeUnaryExpression(Literal.create(null, NullType)),
NumericTypeUnaryExpression(Literal.create(null, DoubleType)))
}
test("cast NullType for binary operators") {
import TypeCoercionSuite._
ruleTest(TypeCoercion.ImplicitTypeCasts,
AnyTypeBinaryOperator(Literal.create(null, NullType), Literal.create(null, NullType)),
AnyTypeBinaryOperator(Literal.create(null, NullType), Literal.create(null, NullType)))
ruleTest(TypeCoercion.ImplicitTypeCasts,
NumericTypeBinaryOperator(Literal.create(null, NullType), Literal.create(null, NullType)),
NumericTypeBinaryOperator(Literal.create(null, DoubleType), Literal.create(null, DoubleType)))
}
test("coalesce casts") {
ruleTest(TypeCoercion.FunctionArgumentConversion,
Coalesce(Literal(1.0)
:: Literal(1)
:: Literal.create(1.0, FloatType)
:: Nil),
Coalesce(Cast(Literal(1.0), DoubleType)
:: Cast(Literal(1), DoubleType)
:: Cast(Literal.create(1.0, FloatType), DoubleType)
:: Nil))
ruleTest(TypeCoercion.FunctionArgumentConversion,
Coalesce(Literal(1L)
:: Literal(1)
:: Literal(new java.math.BigDecimal("1000000000000000000000"))
:: Nil),
Coalesce(Cast(Literal(1L), DecimalType(22, 0))
:: Cast(Literal(1), DecimalType(22, 0))
:: Cast(Literal(new java.math.BigDecimal("1000000000000000000000")), DecimalType(22, 0))
:: Nil))
}
test("CreateArray casts") {
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateArray(Literal(1.0)
:: Literal(1)
:: Literal.create(1.0, FloatType)
:: Nil),
CreateArray(Cast(Literal(1.0), DoubleType)
:: Cast(Literal(1), DoubleType)
:: Cast(Literal.create(1.0, FloatType), DoubleType)
:: Nil))
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateArray(Literal(1.0)
:: Literal(1)
:: Literal("a")
:: Nil),
CreateArray(Cast(Literal(1.0), StringType)
:: Cast(Literal(1), StringType)
:: Cast(Literal("a"), StringType)
:: Nil))
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateArray(Literal.create(null, DecimalType(5, 3))
:: Literal(1)
:: Nil),
CreateArray(Literal.create(null, DecimalType(5, 3)).cast(DecimalType(13, 3))
:: Literal(1).cast(DecimalType(13, 3))
:: Nil))
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateArray(Literal.create(null, DecimalType(5, 3))
:: Literal.create(null, DecimalType(22, 10))
:: Literal.create(null, DecimalType(38, 38))
:: Nil),
CreateArray(Literal.create(null, DecimalType(5, 3)).cast(DecimalType(38, 38))
:: Literal.create(null, DecimalType(22, 10)).cast(DecimalType(38, 38))
:: Literal.create(null, DecimalType(38, 38)).cast(DecimalType(38, 38))
:: Nil))
}
test("CreateMap casts") {
// type coercion for map keys
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateMap(Literal(1)
:: Literal("a")
:: Literal.create(2.0, FloatType)
:: Literal("b")
:: Nil),
CreateMap(Cast(Literal(1), FloatType)
:: Literal("a")
:: Cast(Literal.create(2.0, FloatType), FloatType)
:: Literal("b")
:: Nil))
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateMap(Literal.create(null, DecimalType(5, 3))
:: Literal("a")
:: Literal.create(2.0, FloatType)
:: Literal("b")
:: Nil),
CreateMap(Literal.create(null, DecimalType(5, 3)).cast(DoubleType)
:: Literal("a")
:: Literal.create(2.0, FloatType).cast(DoubleType)
:: Literal("b")
:: Nil))
// type coercion for map values
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateMap(Literal(1)
:: Literal("a")
:: Literal(2)
:: Literal(3.0)
:: Nil),
CreateMap(Literal(1)
:: Cast(Literal("a"), StringType)
:: Literal(2)
:: Cast(Literal(3.0), StringType)
:: Nil))
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateMap(Literal(1)
:: Literal.create(null, DecimalType(38, 0))
:: Literal(2)
:: Literal.create(null, DecimalType(38, 38))
:: Nil),
CreateMap(Literal(1)
:: Literal.create(null, DecimalType(38, 0)).cast(DecimalType(38, 38))
:: Literal(2)
:: Literal.create(null, DecimalType(38, 38)).cast(DecimalType(38, 38))
:: Nil))
// type coercion for both map keys and values
ruleTest(TypeCoercion.FunctionArgumentConversion,
CreateMap(Literal(1)
:: Literal("a")
:: Literal(2.0)
:: Literal(3.0)
:: Nil),
CreateMap(Cast(Literal(1), DoubleType)
:: Cast(Literal("a"), StringType)
:: Cast(Literal(2.0), DoubleType)
:: Cast(Literal(3.0), StringType)
:: Nil))
}
test("greatest/least cast") {
for (operator <- Seq[(Seq[Expression] => Expression)](Greatest, Least)) {
ruleTest(TypeCoercion.FunctionArgumentConversion,
operator(Literal(1.0)
:: Literal(1)
:: Literal.create(1.0, FloatType)
:: Nil),
operator(Cast(Literal(1.0), DoubleType)
:: Cast(Literal(1), DoubleType)
:: Cast(Literal.create(1.0, FloatType), DoubleType)
:: Nil))
ruleTest(TypeCoercion.FunctionArgumentConversion,
operator(Literal(1L)
:: Literal(1)
:: Literal(new java.math.BigDecimal("1000000000000000000000"))
:: Nil),
operator(Cast(Literal(1L), DecimalType(22, 0))
:: Cast(Literal(1), DecimalType(22, 0))
:: Cast(Literal(new java.math.BigDecimal("1000000000000000000000")), DecimalType(22, 0))
:: Nil))
ruleTest(TypeCoercion.FunctionArgumentConversion,
operator(Literal(1.0)
:: Literal.create(null, DecimalType(10, 5))
:: Literal(1)
:: Nil),
operator(Literal(1.0).cast(DoubleType)
:: Literal.create(null, DecimalType(10, 5)).cast(DoubleType)
:: Literal(1).cast(DoubleType)
:: Nil))
ruleTest(TypeCoercion.FunctionArgumentConversion,
operator(Literal.create(null, DecimalType(15, 0))
:: Literal.create(null, DecimalType(10, 5))
:: Literal(1)
:: Nil),
operator(Literal.create(null, DecimalType(15, 0)).cast(DecimalType(20, 5))
:: Literal.create(null, DecimalType(10, 5)).cast(DecimalType(20, 5))
:: Literal(1).cast(DecimalType(20, 5))
:: Nil))
ruleTest(TypeCoercion.FunctionArgumentConversion,
operator(Literal.create(2L, LongType)
:: Literal(1)
:: Literal.create(null, DecimalType(10, 5))
:: Nil),
operator(Literal.create(2L, LongType).cast(DecimalType(25, 5))
:: Literal(1).cast(DecimalType(25, 5))
:: Literal.create(null, DecimalType(10, 5)).cast(DecimalType(25, 5))
:: Nil))
}
}
test("nanvl casts") {
ruleTest(TypeCoercion.FunctionArgumentConversion,
NaNvl(Literal.create(1.0, FloatType), Literal.create(1.0, DoubleType)),
NaNvl(Cast(Literal.create(1.0, FloatType), DoubleType), Literal.create(1.0, DoubleType)))
ruleTest(TypeCoercion.FunctionArgumentConversion,
NaNvl(Literal.create(1.0, DoubleType), Literal.create(1.0, FloatType)),
NaNvl(Literal.create(1.0, DoubleType), Cast(Literal.create(1.0, FloatType), DoubleType)))
ruleTest(TypeCoercion.FunctionArgumentConversion,
NaNvl(Literal.create(1.0, DoubleType), Literal.create(1.0, DoubleType)),
NaNvl(Literal.create(1.0, DoubleType), Literal.create(1.0, DoubleType)))
}
test("type coercion for If") {
val rule = TypeCoercion.IfCoercion
ruleTest(rule,
If(Literal(true), Literal(1), Literal(1L)),
If(Literal(true), Cast(Literal(1), LongType), Literal(1L)))
ruleTest(rule,
If(Literal.create(null, NullType), Literal(1), Literal(1)),
If(Literal.create(null, BooleanType), Literal(1), Literal(1)))
ruleTest(rule,
If(AssertTrue(Literal.create(true, BooleanType)), Literal(1), Literal(2)),
If(Cast(AssertTrue(Literal.create(true, BooleanType)), BooleanType), Literal(1), Literal(2)))
ruleTest(rule,
If(AssertTrue(Literal.create(false, BooleanType)), Literal(1), Literal(2)),
If(Cast(AssertTrue(Literal.create(false, BooleanType)), BooleanType), Literal(1), Literal(2)))
}
test("type coercion for CaseKeyWhen") {
ruleTest(TypeCoercion.ImplicitTypeCasts,
CaseKeyWhen(Literal(1.toShort), Seq(Literal(1), Literal("a"))),
CaseKeyWhen(Cast(Literal(1.toShort), IntegerType), Seq(Literal(1), Literal("a")))
)
ruleTest(TypeCoercion.CaseWhenCoercion,
CaseKeyWhen(Literal(true), Seq(Literal(1), Literal("a"))),
CaseKeyWhen(Literal(true), Seq(Literal(1), Literal("a")))
)
ruleTest(TypeCoercion.CaseWhenCoercion,
CaseWhen(Seq((Literal(true), Literal(1.2))), Literal.create(1, DecimalType(7, 2))),
CaseWhen(Seq((Literal(true), Literal(1.2))),
Cast(Literal.create(1, DecimalType(7, 2)), DoubleType))
)
ruleTest(TypeCoercion.CaseWhenCoercion,
CaseWhen(Seq((Literal(true), Literal(100L))), Literal.create(1, DecimalType(7, 2))),
CaseWhen(Seq((Literal(true), Cast(Literal(100L), DecimalType(22, 2)))),
Cast(Literal.create(1, DecimalType(7, 2)), DecimalType(22, 2)))
)
}
test("BooleanEquality type cast") {
val be = TypeCoercion.BooleanEquality
// Use something more than a literal to avoid triggering the simplification rules.
val one = Add(Literal(Decimal(1)), Literal(Decimal(0)))
ruleTest(be,
EqualTo(Literal(true), one),
EqualTo(Cast(Literal(true), one.dataType), one)
)
ruleTest(be,
EqualTo(one, Literal(true)),
EqualTo(one, Cast(Literal(true), one.dataType))
)
ruleTest(be,
EqualNullSafe(Literal(true), one),
EqualNullSafe(Cast(Literal(true), one.dataType), one)
)
ruleTest(be,
EqualNullSafe(one, Literal(true)),
EqualNullSafe(one, Cast(Literal(true), one.dataType))
)
}
test("BooleanEquality simplification") {
val be = TypeCoercion.BooleanEquality
ruleTest(be,
EqualTo(Literal(true), Literal(1)),
Literal(true)
)
ruleTest(be,
EqualTo(Literal(true), Literal(0)),
Not(Literal(true))
)
ruleTest(be,
EqualNullSafe(Literal(true), Literal(1)),
And(IsNotNull(Literal(true)), Literal(true))
)
ruleTest(be,
EqualNullSafe(Literal(true), Literal(0)),
And(IsNotNull(Literal(true)), Not(Literal(true)))
)
ruleTest(be,
EqualTo(Literal(true), Literal(1L)),
Literal(true)
)
ruleTest(be,
EqualTo(Literal(new java.math.BigDecimal(1)), Literal(true)),
Literal(true)
)
ruleTest(be,
EqualTo(Literal(BigDecimal(0)), Literal(true)),
Not(Literal(true))
)
ruleTest(be,
EqualTo(Literal(Decimal(1)), Literal(true)),
Literal(true)
)
ruleTest(be,
EqualTo(Literal.create(Decimal(1), DecimalType(8, 0)), Literal(true)),
Literal(true)
)
}
private def checkOutput(logical: LogicalPlan, expectTypes: Seq[DataType]): Unit = {
logical.output.zip(expectTypes).foreach { case (attr, dt) =>
assert(attr.dataType === dt)
}
}
test("WidenSetOperationTypes for except and intersect") {
val firstTable = LocalRelation(
AttributeReference("i", IntegerType)(),
AttributeReference("u", DecimalType.SYSTEM_DEFAULT)(),
AttributeReference("b", ByteType)(),
AttributeReference("d", DoubleType)())
val secondTable = LocalRelation(
AttributeReference("s", StringType)(),
AttributeReference("d", DecimalType(2, 1))(),
AttributeReference("f", FloatType)(),
AttributeReference("l", LongType)())
val wt = TypeCoercion.WidenSetOperationTypes
val expectedTypes = Seq(StringType, DecimalType.SYSTEM_DEFAULT, FloatType, DoubleType)
val r1 = wt(Except(firstTable, secondTable)).asInstanceOf[Except]
val r2 = wt(Intersect(firstTable, secondTable)).asInstanceOf[Intersect]
checkOutput(r1.left, expectedTypes)
checkOutput(r1.right, expectedTypes)
checkOutput(r2.left, expectedTypes)
checkOutput(r2.right, expectedTypes)
// Check if a Project is added
assert(r1.left.isInstanceOf[Project])
assert(r1.right.isInstanceOf[Project])
assert(r2.left.isInstanceOf[Project])
assert(r2.right.isInstanceOf[Project])
}
test("WidenSetOperationTypes for union") {
val firstTable = LocalRelation(
AttributeReference("i", IntegerType)(),
AttributeReference("u", DecimalType.SYSTEM_DEFAULT)(),
AttributeReference("b", ByteType)(),
AttributeReference("d", DoubleType)())
val secondTable = LocalRelation(
AttributeReference("s", StringType)(),
AttributeReference("d", DecimalType(2, 1))(),
AttributeReference("f", FloatType)(),
AttributeReference("l", LongType)())
val thirdTable = LocalRelation(
AttributeReference("m", StringType)(),
AttributeReference("n", DecimalType.SYSTEM_DEFAULT)(),
AttributeReference("p", FloatType)(),
AttributeReference("q", DoubleType)())
val forthTable = LocalRelation(
AttributeReference("m", StringType)(),
AttributeReference("n", DecimalType.SYSTEM_DEFAULT)(),
AttributeReference("p", ByteType)(),
AttributeReference("q", DoubleType)())
val wt = TypeCoercion.WidenSetOperationTypes
val expectedTypes = Seq(StringType, DecimalType.SYSTEM_DEFAULT, FloatType, DoubleType)
val unionRelation = wt(
Union(firstTable :: secondTable :: thirdTable :: forthTable :: Nil)).asInstanceOf[Union]
assert(unionRelation.children.length == 4)
checkOutput(unionRelation.children.head, expectedTypes)
checkOutput(unionRelation.children(1), expectedTypes)
checkOutput(unionRelation.children(2), expectedTypes)
checkOutput(unionRelation.children(3), expectedTypes)
assert(unionRelation.children.head.isInstanceOf[Project])
assert(unionRelation.children(1).isInstanceOf[Project])
assert(unionRelation.children(2).isInstanceOf[Project])
assert(unionRelation.children(3).isInstanceOf[Project])
}
test("Transform Decimal precision/scale for union except and intersect") {
def checkOutput(logical: LogicalPlan, expectTypes: Seq[DataType]): Unit = {
logical.output.zip(expectTypes).foreach { case (attr, dt) =>
assert(attr.dataType === dt)
}
}
val dp = TypeCoercion.WidenSetOperationTypes
val left1 = LocalRelation(
AttributeReference("l", DecimalType(10, 8))())
val right1 = LocalRelation(
AttributeReference("r", DecimalType(5, 5))())
val expectedType1 = Seq(DecimalType(10, 8))
val r1 = dp(Union(left1, right1)).asInstanceOf[Union]
val r2 = dp(Except(left1, right1)).asInstanceOf[Except]
val r3 = dp(Intersect(left1, right1)).asInstanceOf[Intersect]
checkOutput(r1.children.head, expectedType1)
checkOutput(r1.children.last, expectedType1)
checkOutput(r2.left, expectedType1)
checkOutput(r2.right, expectedType1)
checkOutput(r3.left, expectedType1)
checkOutput(r3.right, expectedType1)
val plan1 = LocalRelation(AttributeReference("l", DecimalType(10, 5))())
val rightTypes = Seq(ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType)
val expectedTypes = Seq(DecimalType(10, 5), DecimalType(10, 5), DecimalType(15, 5),
DecimalType(25, 5), DoubleType, DoubleType)
rightTypes.zip(expectedTypes).foreach { case (rType, expectedType) =>
val plan2 = LocalRelation(
AttributeReference("r", rType)())
val r1 = dp(Union(plan1, plan2)).asInstanceOf[Union]
val r2 = dp(Except(plan1, plan2)).asInstanceOf[Except]
val r3 = dp(Intersect(plan1, plan2)).asInstanceOf[Intersect]
checkOutput(r1.children.last, Seq(expectedType))
checkOutput(r2.right, Seq(expectedType))
checkOutput(r3.right, Seq(expectedType))
val r4 = dp(Union(plan2, plan1)).asInstanceOf[Union]
val r5 = dp(Except(plan2, plan1)).asInstanceOf[Except]
val r6 = dp(Intersect(plan2, plan1)).asInstanceOf[Intersect]
checkOutput(r4.children.last, Seq(expectedType))
checkOutput(r5.left, Seq(expectedType))
checkOutput(r6.left, Seq(expectedType))
}
}
test("rule for date/timestamp operations") {
val dateTimeOperations = TypeCoercion.DateTimeOperations
val date = Literal(new java.sql.Date(0L))
val timestamp = Literal(new Timestamp(0L))
val interval = Literal(new CalendarInterval(0, 0))
val str = Literal("2015-01-01")
ruleTest(dateTimeOperations, Add(date, interval), Cast(TimeAdd(date, interval), DateType))
ruleTest(dateTimeOperations, Add(interval, date), Cast(TimeAdd(date, interval), DateType))
ruleTest(dateTimeOperations, Add(timestamp, interval),
Cast(TimeAdd(timestamp, interval), TimestampType))
ruleTest(dateTimeOperations, Add(interval, timestamp),
Cast(TimeAdd(timestamp, interval), TimestampType))
ruleTest(dateTimeOperations, Add(str, interval), Cast(TimeAdd(str, interval), StringType))
ruleTest(dateTimeOperations, Add(interval, str), Cast(TimeAdd(str, interval), StringType))
ruleTest(dateTimeOperations, Subtract(date, interval), Cast(TimeSub(date, interval), DateType))
ruleTest(dateTimeOperations, Subtract(timestamp, interval),
Cast(TimeSub(timestamp, interval), TimestampType))
ruleTest(dateTimeOperations, Subtract(str, interval), Cast(TimeSub(str, interval), StringType))
    // interval operations should not be affected
ruleTest(dateTimeOperations, Add(interval, interval), Add(interval, interval))
ruleTest(dateTimeOperations, Subtract(interval, interval), Subtract(interval, interval))
}
/**
   * There are rules that must not fire before child expressions get resolved.
   * We use this test to make sure those rules do not fire early.
*/
test("make sure rules do not fire early") {
// InConversion
val inConversion = TypeCoercion.InConversion
ruleTest(inConversion,
In(UnresolvedAttribute("a"), Seq(Literal(1))),
In(UnresolvedAttribute("a"), Seq(Literal(1)))
)
ruleTest(inConversion,
In(Literal("test"), Seq(UnresolvedAttribute("a"), Literal(1))),
In(Literal("test"), Seq(UnresolvedAttribute("a"), Literal(1)))
)
ruleTest(inConversion,
In(Literal("a"), Seq(Literal(1), Literal("b"))),
In(Cast(Literal("a"), StringType),
Seq(Cast(Literal(1), StringType), Cast(Literal("b"), StringType)))
)
}
test("SPARK-15776 Divide expression's dataType should be casted to Double or Decimal " +
"in aggregation function like sum") {
val rules = Seq(FunctionArgumentConversion, Division)
// Casts Integer to Double
ruleTest(rules, sum(Divide(4, 3)), sum(Divide(Cast(4, DoubleType), Cast(3, DoubleType))))
// Left expression is Double, right expression is Int. Another rule ImplicitTypeCasts will
// cast the right expression to Double.
ruleTest(rules, sum(Divide(4.0, 3)), sum(Divide(4.0, 3)))
// Left expression is Int, right expression is Double
ruleTest(rules, sum(Divide(4, 3.0)), sum(Divide(Cast(4, DoubleType), Cast(3.0, DoubleType))))
// Casts Float to Double
ruleTest(
rules,
sum(Divide(4.0f, 3)),
sum(Divide(Cast(4.0f, DoubleType), Cast(3, DoubleType))))
// Left expression is Decimal, right expression is Int. Another rule DecimalPrecision will cast
// the right expression to Decimal.
ruleTest(rules, sum(Divide(Decimal(4.0), 3)), sum(Divide(Decimal(4.0), 3)))
}
test("SPARK-17117 null type coercion in divide") {
val rules = Seq(FunctionArgumentConversion, Division, ImplicitTypeCasts)
val nullLit = Literal.create(null, NullType)
ruleTest(rules, Divide(1L, nullLit), Divide(Cast(1L, DoubleType), Cast(nullLit, DoubleType)))
ruleTest(rules, Divide(nullLit, 1L), Divide(Cast(nullLit, DoubleType), Cast(1L, DoubleType)))
}
test("binary comparison with string promotion") {
ruleTest(PromoteStrings,
GreaterThan(Literal("123"), Literal(1)),
GreaterThan(Cast(Literal("123"), IntegerType), Literal(1)))
ruleTest(PromoteStrings,
LessThan(Literal(true), Literal("123")),
LessThan(Literal(true), Cast(Literal("123"), BooleanType)))
ruleTest(PromoteStrings,
EqualTo(Literal(Array(1, 2)), Literal("123")),
EqualTo(Literal(Array(1, 2)), Literal("123")))
}
}
object TypeCoercionSuite {
case class AnyTypeUnaryExpression(child: Expression)
extends UnaryExpression with ExpectsInputTypes with Unevaluable {
override def inputTypes: Seq[AbstractDataType] = Seq(AnyDataType)
override def dataType: DataType = NullType
}
case class NumericTypeUnaryExpression(child: Expression)
extends UnaryExpression with ExpectsInputTypes with Unevaluable {
override def inputTypes: Seq[AbstractDataType] = Seq(NumericType)
override def dataType: DataType = NullType
}
case class AnyTypeBinaryOperator(left: Expression, right: Expression)
extends BinaryOperator with Unevaluable {
override def dataType: DataType = NullType
override def inputType: AbstractDataType = AnyDataType
override def symbol: String = "anytype"
}
case class NumericTypeBinaryOperator(left: Expression, right: Expression)
extends BinaryOperator with Unevaluable {
override def dataType: DataType = NullType
override def inputType: AbstractDataType = NumericType
override def symbol: String = "numerictype"
}
}
|
jianran/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercionSuite.scala
|
Scala
|
apache-2.0
| 47,236
|
/*******************************************************************************
* (C) Copyright 2015 ADP, LLC.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package unicorn.search
import unicorn._, json._
import unicorn.core.Document
import unicorn.store.Dataset
import smile.nlp.relevance.BM25
/**
* @author Haifeng Li
*/
class TextSearch(storage: Dataset, numTexts: Long) extends TextIndex {
val pagerank = new Document("unicorn.text.corpus.text.page_rank", "text_index").from(storage)
val defaultPageRank = math.log(0.85 / numTexts)
val textLength = new Document(TextBodyLengthKey, TextIndexFamily).from(storage)
val titleLength = new Document(TextTitleLengthKey, TextIndexFamily).from(storage)
val anchorLength = new Document(TextAnchorLengthKey, TextIndexFamily).from(storage)
/**
* Relevance ranking algorithm.
*/
val ranker = new BM25
/**
* Search terms in corpus. The results are sorted by relevance.
*/
def search(terms: String*): Array[((Document, String), Double)] = {
val rank = scala.collection.mutable.Map[(Document, String), Double]().withDefaultValue(0.0)
terms.foreach { term => search(term, rank) }
rank.toArray.sortBy(_._2).reverse
}
def search(term: String, rank: scala.collection.mutable.Map[(Document, String), Double]) {
val lower = term.toLowerCase
val word = stemmer match {
case Some(stemmer) => stemmer.stem(lower)
case None => lower
}
val key = word + TermIndexSuffix
    val invertedText = new Document(key, TextIndexFamily).from(storage).loadAttributes
if (invertedText.attributes.size == 0) return
val invertedTitle = new Document(word + TermTitleIndexSuffix, TextIndexFamily).from(storage).loadAttributes
val invertedAnchor = new Document(word + TermAnchorIndexSuffix, TextIndexFamily).from(storage).loadAttributes
val docs = (invertedText.map { case (docField, value) => docField }).toSeq
textLength.select(docs: _*)
titleLength.select(docs: _*)
anchorLength.select(docs: _*)
var avgTextLength = 0.0
var avgTitleLength = 0.0
var avgAnchorLength = 0.0
var numMatchedTexts = 0
var numMatchedTitles = 0
var numMatchedAnchors = 0
invertedText.foreach { case (docField, value) =>
val n1: Int = textLength(docField)
if (n1 > 0) {
numMatchedTexts += 1
avgTextLength += n1
}
val n2: Int = titleLength(docField)
if (n2 > 0) {
numMatchedTitles += 1
avgTitleLength += n2
}
val n3: Int = anchorLength(docField)
if (n3 > 0) {
numMatchedAnchors += 1
avgAnchorLength += n3
}
}
if (numMatchedTexts > 0) avgTextLength /= numMatchedTexts
if (numMatchedTitles > 0) avgTitleLength /= numMatchedTitles
if (numMatchedAnchors > 0) avgAnchorLength /= numMatchedAnchors
pagerank.select(invertedText.map { case (docField, _) => docField }.toArray : _*)
invertedText.foreach { case (docField, value) =>
val id = docField.split(DocFieldSeparator, 2)
if (id.length == 2) {
val doc = Document(id(0)).from(storage)
val field = id(1).replace(DocFieldSeparator, Document.FieldSeparator)
val termFreq: Int = value
val titleTermFreq: Int = invertedTitle(docField)
val anchorTermFreq: Int = invertedAnchor(docField)
val bm25 = ranker.score(termFreq, textLength(docField), avgTextLength,
titleTermFreq, titleLength(docField), avgTitleLength,
anchorTermFreq, anchorLength(docField), avgAnchorLength,
numTexts, invertedText.size)
val pr = pagerank(docField) match {
case JsDouble(value) => math.log(value)
case _ => defaultPageRank
}
rank((doc, field)) += (bm25 + pr)
}
}
}
}
object TextSearch {
def apply(storage: Dataset, numTexts: Long): TextSearch = {
new TextSearch(storage, numTexts)
}
}
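A quick usage sketch for the class above. The Dataset handle and the corpus size are assumptions for illustration; how a unicorn.store.Dataset is opened depends on the chosen storage backend and is not shown here. The sketch assumes the imports of the file above.
// Hypothetical usage sketch (not part of the unicorn codebase): rank documents for a two-term query.
object TextSearchExample {
  def topHits(storage: Dataset, corpusSize: Long): Unit = {
    val searcher = TextSearch(storage, corpusSize)
    // Each hit is ((document, field), score); the score combines BM25 relevance with log page rank.
    val hits = searcher.search("unicorn", "search")
    hits.take(10).foreach { case ((doc, field), score) =>
      println(s"$doc / $field -> $score")
    }
  }
}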
|
adplabs/unicorn
|
search/src/main/scala/unicorn/search/TextSearch.scala
|
Scala
|
apache-2.0
| 4,669
|
package com.productfoundry.akka.cqrs
import akka.actor.{Actor, ActorLogging, ActorSystem, Props}
import akka.event.Logging
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.{Await, ExecutionContext}
/**
* Collects all commits published on the system event stream.
*
* Can be used to check which commits are persisted by aggregates under test.
*
* @param system test actor system.
*/
case class LocalCommitCollector(actorName: String = "CommitCollector")(implicit system: ActorSystem) {
/**
* Actor to handle messages from the system event stream and collect commits.
*/
class CollectorActor extends Actor with ActorLogging {
override def receive: Receive = receiveCommits(Vector.empty)
def receiveCommits(commits: Vector[Commit]): Receive = {
case commit: Commit =>
context.become(receiveCommits(commits :+ commit))
case DumpCommits =>
dumpCommits(commits, Logging.ErrorLevel)
case GetCommitsRequest =>
sender ! GetCommitsResponse(commits)
case message =>
log.error("Unexpected: {}", message)
}
/**
* Subscribe to the system event stream.
*/
override def preStart(): Unit = {
system.eventStream.subscribe(self, classOf[Commit])
super.preStart()
}
/**
* Unsubscribe from the event stream.
*/
override def postStop(): Unit = {
system.eventStream.unsubscribe(self)
super.postStop()
}
/**
* Log all collected commits.
*
* @param level the logging level.
*/
def dumpCommits(commits: Vector[Commit], level: Logging.LogLevel): Unit = {
if (log.isEnabled(level)) {
val commitsWithIndex = commits.zipWithIndex
        val commitLines = commitsWithIndex.map { case (commit, i) => s" ${i + 1}. $commit\n" }
        log.log(level, s"${commitLines.size} Commits collected\n\n${commitLines.mkString}\n")
}
}
}
case object DumpCommits
case object GetCommitsRequest
case class GetCommitsResponse(commits: Vector[Commit])
/**
* Reference to the commit collector.
*/
val ref = system.actorOf(Props(new CollectorActor), actorName)
/**
* Tells the commit collector to dump all commits.
*/
def dumpCommits(): Unit = {
ref ! DumpCommits
}
def eventRecords(implicit ec: ExecutionContext, timeout: Timeout): Vector[AggregateEventRecord] = {
val res = ref ? GetCommitsRequest collect {
case GetCommitsResponse(commits) => commits.flatMap(_.records)
}
Await.result(res, timeout.duration)
}
/**
* @return a view of all the committed events extracted from the commits.
*/
def events(implicit ec: ExecutionContext, timeout: Timeout): Vector[AggregateEvent] = {
eventRecords.map(_.event)
}
}
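A short sketch of how the collector might be used from a test. The Commit value here is only a placeholder, since real commits are normally persisted by the aggregates under test; the sketch assumes the imports of the file above plus scala.concurrent.duration._.
// Hypothetical test sketch: collect commits published on the event stream and inspect their events.
object LocalCommitCollectorExample {
  def collectedEvents(someCommit: Commit)(implicit system: ActorSystem): Vector[AggregateEvent] = {
    import system.dispatcher
    implicit val timeout: Timeout = Timeout(3.seconds)
    val collector = LocalCommitCollector()
    system.eventStream.publish(someCommit) // the collector actor subscribes to Commit messages in preStart
    collector.dumpCommits()                // logs every collected commit at error level
    collector.events                       // flattens the commit records into the committed events
  }
}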
|
Product-Foundry/akka-cqrs
|
test/src/main/scala/com/productfoundry/akka/cqrs/LocalCommitCollector.scala
|
Scala
|
apache-2.0
| 2,804
|
package scala.virtualization.lms
package epfl
package test7
import common._
import test1._
import util.OverloadHack
import scala.reflect.SourceContext
import java.io.{PrintWriter,StringWriter,FileOutputStream}
import scala.reflect.SourceContext
trait ScalaGenFatArrayLoopsFusionOpt extends ScalaGenArrayLoopsFat with ScalaGenIfThenElseFat with LoopFusionOpt {
val IR: ArrayLoopsFatExp with IfThenElseFatExp
import IR._
override def unapplySimpleIndex(e: Def[Any]) = e match {
case ArrayIndex(a, i) => Some((a,i))
case _ => super.unapplySimpleIndex(e)
}
override def unapplySimpleDomain(e: Def[Int]): Option[Exp[Any]] = e match {
case ArrayLength(a) => Some(a)
case _ => super.unapplySimpleDomain(e)
}
override def unapplySimpleCollect(e: Def[Any]) = e match {
case ArrayElem(Block(a)) => Some(a) //TODO: block??
case _ => super.unapplySimpleCollect(e)
}
override def unapplySimpleCollectIf(e: Def[Any]) = e match {
case ArrayIfElem(c,Block(a)) => Some((a,List(c))) //TODO: block?
case _ => super.unapplySimpleCollectIf(e)
}
override def applyAddCondition(e: Def[Any], c: List[Exp[Boolean]]) = e match { //TODO: should c be list or not?
case ArrayElem(a) if c.length == 1 => ArrayIfElem(c(0),a)
case ReduceElem(a) if c.length == 1 => ReduceIfElem(c(0),a)
case _ => super.applyAddCondition(e,c)
}
}
// trait NestLambdaProg extends Arith with Functions with Print
// --> from TestCodeMotion.scala
trait FusionProg extends Arith with ArrayLoops with Print {
implicit def bla(x: Rep[Int]): Rep[Double] = x.asInstanceOf[Rep[Double]]
def test(x: Rep[Unit]) = {
val constant = array(100) { i => 1 }
val linear = array(100) { i => 2*i }
val affine = array(100) { i => constant.at(i) + linear.at(i) }
def square(x: Rep[Double]) = x*x
def mean(x: Rep[Array[Double]]) = sum(x.length) { i => x.at(i) } / x.length
def variance(x: Rep[Array[Double]]) = sum(x.length) { i => square(x.at(i)) } / x.length - square(mean(x))
val data = affine
val m = mean(data)
val v = variance(data)
print(m)
print(v)
}
}
trait FusionProg2 extends Arith with ArrayLoops with Print with OrderingOps {
implicit def bla(x: Rep[Int]): Rep[Double] = x.asInstanceOf[Rep[Double]]
def test(x: Rep[Unit]) = {
def filter[T:Manifest](x: Rep[Array[T]])(p: Rep[T] => Rep[Boolean]) =
arrayIf(x.length) { i => (p(x.at(i)), x.at(i)) }
val range = array(100) { i => i }
val odds = filter(range) { z => z > 50 }
val res = sum(odds.length) { i => odds.at(i) }
print(res)
}
}
/*
some thoughts on cse/gvn :
- currently cse works fine for first-order, point-free things:
val x = a + b
val y = a + b
will always be represented internally as
val x = a + b
val y = x
- if bound variables are involved, cse no longer works:
val a = array { i => 0 }
val b = array { i => 0 }
will create two separate objects:
val a = array { i0 => 0 }
val b = array { i1 => 0 }
the same holds for lambdas.
- this is due to the choice of representing bound vars using fresh symbols.
alternatively we could use DeBruijn indices.
however, some care would have to be taken in managing the indices:
val a = array { i =>
val b = array { j => f(j) }
sum(b)
}
code motion will move b out of a ... but we know that only after looking at b's body
- for now this is not really a problem because loop fusion will take
care of duplicate loops (effectively lifting scalar cse to array cse)
- another solution (as done by delite) is to wrap array { i => 0 }
as ArrayZero(len) extends DeliteOP(array(len) { i => 0}).
here, cse will be done on the case class representation
*/
class TestFusion extends FileDiffSuite {
val prefix = home + "test-out/epfl/test7-"
def testFusion1 = {
withOutFile(prefix+"fusion1") {
new FusionProg with ArithExp with ArrayLoopsExp with PrintExp { self =>
val codegen = new ScalaGenArrayLoops with ScalaGenArith with ScalaGenPrint { val IR: self.type = self }
codegen.emitSource(test, "Test", new PrintWriter(System.out))
}
}
assertFileEqualsCheck(prefix+"fusion1")
}
def testFusion2 = {
withOutFile(prefix+"fusion2") {
// LoopsExp2 with ArithExp with PrintExp with BaseFatExp
new FusionProg with ArithExp with ArrayLoopsFatExp with IfThenElseFatExp with PrintExp { self =>
override val verbosity = 1
val codegen = new ScalaGenFatArrayLoopsFusionOpt with ScalaGenArith with ScalaGenPrint { val IR: self.type = self }
codegen.emitSource(test, "Test", new PrintWriter(System.out))
}
}
assertFileEqualsCheck(prefix+"fusion2")
}
def testFusion3 = {
withOutFile(prefix+"fusion3") {
new FusionProg2 with ArithExp with ArrayLoopsFatExp with IfThenElseFatExp with PrintExp with IfThenElseExp with OrderingOpsExp { self =>
override val verbosity = 1
val codegen = new ScalaGenFatArrayLoopsFusionOpt with ScalaGenArith with ScalaGenPrint
with ScalaGenIfThenElse with ScalaGenOrderingOps { val IR: self.type = self;
override def shouldApplyFusion(currentScope: List[Stm])(result: List[Exp[Any]]): Boolean = false }
codegen.emitSource(test, "Test", new PrintWriter(System.out))
}
}
assertFileEqualsCheck(prefix+"fusion3")
}
def testFusion4 = {
withOutFile(prefix+"fusion4") {
new FusionProg2 with ArithExp with ArrayLoopsFatExp with IfThenElseFatExp with PrintExp with IfThenElseExp with OrderingOpsExp { self =>
override val verbosity = 1
val codegen = new ScalaGenFatArrayLoopsFusionOpt with ScalaGenArith with ScalaGenPrint
with ScalaGenIfThenElse with ScalaGenOrderingOps { val IR: self.type = self;
override def shouldApplyFusion(currentScope: List[Stm])(result: List[Exp[Any]]): Boolean = true }
codegen.emitSource(test, "Test", new PrintWriter(System.out))
}
}
assertFileEqualsCheck(prefix+"fusion4")
}
}
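To make the cse/gvn notes above concrete, here is a rough sketch in the same DSL style as FusionProg: repeated point-free arithmetic is shared, while the two zero-filled arrays stay distinct IR nodes because each loop introduces a fresh bound variable, and only the fusion pass merges them. It is illustrative only and not one of the tests above.
trait CseSketchProg extends Arith with ArrayLoops with Print {
  implicit def intToDouble(x: Rep[Int]): Rep[Double] = x.asInstanceOf[Rep[Double]]
  def test(x: Rep[Unit]) = {
    val base = array(100) { i => 2*i }
    // point-free cse: s2 is represented internally as an alias of s1
    val s1 = base.at(0) + base.at(1)
    val s2 = base.at(0) + base.at(1)
    // bound variables: b and c are built with fresh symbols, so they stay separate nodes
    // until loop fusion (effectively array-level cse) combines them
    val b = array(100) { i => 0 }
    val c = array(100) { i => 0 }
    print(s1); print(s2); print(b.at(0)); print(c.at(0))
  }
}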
|
afernandez90/virtualization-lms-core
|
test-src/epfl/test7-analysis/TestFusion.scala
|
Scala
|
bsd-3-clause
| 6,254
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.producer
import java.util.Properties
// A base producer used whenever we need to have options for both old and new producers;
// this class will be removed once we have fully rolled out 0.9
trait BaseProducer {
def send(topic: String, key: Array[Byte], value: Array[Byte])
def close()
}
class NewShinyProducer(producerProps: Properties) extends BaseProducer {
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.clients.producer.internals.ErrorLoggingCallback
// decide whether to send synchronously based on producer properties
val sync = producerProps.getProperty("producer.type", "async").equals("sync")
val producer = new KafkaProducer[Array[Byte],Array[Byte]](producerProps)
override def send(topic: String, key: Array[Byte], value: Array[Byte]) {
val record = new ProducerRecord[Array[Byte],Array[Byte]](topic, key, value)
if(sync) {
this.producer.send(record).get()
} else {
this.producer.send(record,
new ErrorLoggingCallback(topic, key, value, false))
}
}
override def close() {
this.producer.close()
}
}
class OldProducer(producerProps: Properties) extends BaseProducer {
import kafka.producer.{KeyedMessage, ProducerConfig}
// default to byte array partitioner
if (producerProps.getProperty("partitioner.class") == null)
producerProps.setProperty("partitioner.class", classOf[kafka.producer.ByteArrayPartitioner].getName)
val producer = new kafka.producer.Producer[Array[Byte], Array[Byte]](new ProducerConfig(producerProps))
override def send(topic: String, key: Array[Byte], value: Array[Byte]) {
this.producer.send(new KeyedMessage[Array[Byte], Array[Byte]](topic, key, value))
}
override def close() {
this.producer.close()
}
}
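A small usage sketch for the wrappers above. The broker address and topic are placeholders, and the serializer settings are the usual requirements of the new Java producer; treat them as assumptions about the deployment rather than values taken from this file.
// Hypothetical usage sketch: configure the new producer for synchronous sends.
object BaseProducerExample {
  def main(args: Array[String]): Unit = {
    val props = new java.util.Properties()
    props.put("bootstrap.servers", "localhost:9092") // placeholder broker address
    props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer")
    props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer")
    props.put("producer.type", "sync")               // read by NewShinyProducer above: send() blocks on the ack
    val producer: BaseProducer = new NewShinyProducer(props)
    producer.send("test-topic", "key".getBytes("UTF-8"), "value".getBytes("UTF-8"))
    producer.close()
  }
}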
|
cran/rkafkajars
|
java/kafka/producer/BaseProducer.scala
|
Scala
|
apache-2.0
| 2,618
|
package io.koff.generator
import akka.actor.ActorSystem
import akka.util.Timeout
import io.koff.services.{SimpleService, SimpleServiceImpl}
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps
object GeneratorMain {
implicit val timeout = Timeout(5 seconds)
def main(args: Array[String]) {
val system = ActorSystem("akka-system")
val impl = new SimpleServiceImpl
val service = ActorGenerator.gen[SimpleService](system, impl)
val result = Await.result(service.hello("scala"), 10 seconds)
println(s"result: $result")
}
}
|
coffius/macro-actors
|
src/main/scala/io/koff/generator/GeneratorMain.scala
|
Scala
|
mit
| 594
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.engine.output.benchmark
import java.util.logging.LogManager
import com.bwsw.sj.engine.output.OutputTaskRunner
/**
* @author Kseniya Tomskikh
*/
object SjOutputModuleRunner extends App {
LogManager.getLogManager.reset()
OutputTaskRunner.main(Array())
}
class SjOutputModuleRunner
|
bwsw/sj-platform
|
core/sj-output-streaming-engine/src/test/scala/com/bwsw/sj/engine/output/benchmark/SjOutputModuleRunner.scala
|
Scala
|
apache-2.0
| 1,119
|
/**
* Copyright (c) 2013-2015 Patrick Nicolas - Scala for Machine Learning - All rights reserved
*
* The source code in this file is provided by the author for the sole purpose of illustrating the
* concepts and algorithms presented in "Scala for Machine Learning". It should not be used to build commercial applications. ISBN: 978-1-783355-874-2 Packt Publishing.
* Unless required by applicable law or agreed to in writing, software is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* Version 0.98
*/
package org.scalaml.app.chap8
import org.scalaml.supervised.svm.{SVMConfig, SVM}
import org.scalaml.supervised.svm.formulation.CSVCFormulation
import org.scalaml.supervised.svm.kernel.RbfKernel
import org.scalaml.core.XTSeries
import org.scalaml.core.Types.ScalaMl
import org.scalaml.util.{FormatUtils, DisplayUtils}
import org.scalaml.app.Eval
/**
 * <p>Singleton to evaluate the impact of the margin value
 * on the accuracy of the classification of a binary support vector
* classifier using synthetic features. The synthetic values are generated
* using a combination of random generators. </p>
* @author Patrick Nicolas
* @note Scala for Machine Learning Chapter 8 Kernel models and support vector machines.
*/
object SVCMarginEval extends Eval {
import scala.util.{Random, Try, Success, Failure}
import org.apache.log4j.Logger
import XTSeries._, ScalaMl._
/**
* Name of the evaluation
*/
val name: String = "SVCMarginEval"
private val GAMMA = 0.8
val N = 100
private var status: Int = 0
/** <p>Execution of the scalatest for evaluating margin in <b>SVC</b> class.
* This method is invoked by the actor-based test framework function, ScalaMlTest.evaluate<br>
* Main evaluation routine that consists of two steps:<br>
* Generation of synthetic features<br>
* Computation of the margin for a specific C penalty value</p>
* @param args array of arguments used in the test
* @return -1 in case error a positive or null value if the test succeeds.
*/
def run(args: Array[String]): Int = {
DisplayUtils.show(s"$header Evaluation of impact of C penalty on margin", logger)
val values = generate
Try {
Range(0, 50).foreach(i => evalMargin(values._1, values._2, i*0.1))
status
} match {
case Success(status) => status
case Failure(e) => failureHandler(e)
}
}
private def generate: (DblMatrix, DblVector) = {
val z = Array.tabulate(N)(i =>{
val ri = i*(1.0 + 0.2*Random.nextDouble)
Array[Double](i, ri)
}) ++
Array.tabulate(N)(i => Array[Double](i, i*Random.nextDouble))
(z, Array.fill(N)(1) ++ Array.fill(N)(-1))
}
private def evalMargin(features: DblMatrix, lbl: DblVector, c: Double): Int = {
val config = SVMConfig(new CSVCFormulation(c), new RbfKernel(GAMMA))
val svc = SVM[Double](config, XTSeries[DblVector](features), lbl)
DisplayUtils.show(s"\\n$name Margin for SVC with\\nC\\tMargin", logger)
svc.margin.map(_margin => {
val margin_str = FormatUtils.format(_margin, "", FormatUtils.ShortFormat)
DisplayUtils.show(s"\\n${c.floor}\\t${margin_str}", logger)
})
.getOrElse(DisplayUtils.error(s"$name CSVC Formulation training failed", logger))
}
}
// --------------------------- EOF --------------------------------------------------
|
batermj/algorithm-challenger
|
books/cs/machine-learning/scala-for-machine-learning/1rst-edition/original-src-from-the-book/src/test/scala/org/scalaml/app/chap8/SVCMarginEval.scala
|
Scala
|
apache-2.0
| 3,357
|
/*
* Copyright 2013 Julian Peeters
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package artisanal.pickle.maker
package tags
import scala.reflect.internal.pickling._
case class AnnotatedTpe(currentPosition: Position, valueMember: ValueMember, annotInfo: AnnotInfo) {
def write(myPickleBuffer: PickleBuffer) = {
val argsA = List(valueMember.typeRefPosition, currentPosition.current + 1)
val argsB = List(valueMember.typeRefPosition, annotInfo.position)
annotInfo.position match {
case 0 => {
//tag
myPickleBuffer.writeByte(42)
//len
myPickleBuffer.writeNat(2 + argsA.filter(arg => arg > 127).length)
//data {
//reference to the next entry, EXTMODCLASSref
myPickleBuffer.writeNat(valueMember.typeRefPosition)
//reference to
myPickleBuffer.writeNat(currentPosition.current + 1)
}
case i: Int => {
//tag
myPickleBuffer.writeByte(42)
//len
myPickleBuffer.writeNat(2 + argsB.filter(arg => arg > 127).length)
//data {
//reference to the next entry, EXTMODCLASSref
myPickleBuffer.writeNat(valueMember.typeRefPosition)
//reference to
myPickleBuffer.writeNat(annotInfo.position)
//}
}
}
currentPosition.current += 1
}
}
|
julianpeeters/artisanal-pickle-maker
|
src/main/scala/tags/ANNOTATEDtpe.scala
|
Scala
|
apache-2.0
| 1,868
|
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.index
import org.geotools.data.Query
import org.geotools.factory.CommonFactoryFinder
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.CURRENT_SCHEMA_VERSION
import org.locationtech.geomesa.accumulo.filter.TestFilters._
import org.locationtech.geomesa.accumulo.index.Strategy.StrategyType
import org.locationtech.geomesa.accumulo.util.SftBuilder
import org.locationtech.geomesa.accumulo.util.SftBuilder.Opts
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.stats.Cardinality
import org.opengis.filter.Filter
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
import scala.reflect.ClassTag
//Expand the test - https://geomesa.atlassian.net/browse/GEOMESA-308
@RunWith(classOf[JUnitRunner])
class QueryStrategyDeciderTest extends Specification {
val sftIndex = new SftBuilder()
.intType("id")
.point("geom", default = true)
.date("dtg", default = true)
.stringType("attr1")
.stringType("attr2", index = true)
.stringType("high", Opts(index = true, cardinality = Cardinality.HIGH))
.stringType("low", Opts(index = true, cardinality = Cardinality.LOW))
.date("dtgNonIdx")
.build("feature")
val sftNonIndex = new SftBuilder()
.intType("id")
.point("geom", default = true)
.date("dtg", default = true)
.stringType("attr1")
.stringType("attr2")
.build("featureNonIndex")
def getStrategy(filterString: String, version: Int = CURRENT_SCHEMA_VERSION): Strategy = {
val sft = if (version > 0) sftIndex else sftNonIndex
sft.setSchemaVersion(version)
val filter = ECQL.toFilter(filterString)
val hints = new UserDataStrategyHints()
val query = new Query(sft.getTypeName)
query.setFilter(filter)
val strats = QueryStrategyDecider.chooseStrategies(sft, query, hints, None)
strats must haveLength(1)
strats.head
}
def getStrategyT[T <: Strategy](filterString: String, ct: ClassTag[T]) =
getStrategy(filterString) must beAnInstanceOf[T](ct)
def getRecordStrategy(filterString: String) =
getStrategyT(filterString, ClassTag(classOf[RecordIdxStrategy]))
def getStStrategy(filterString: String) =
getStrategyT(filterString, ClassTag(classOf[STIdxStrategy]))
def getAttributeIdxStrategy(filterString: String) =
getStrategyT(filterString, ClassTag(classOf[AttributeIdxStrategy]))
def getZ3Strategy(filterString: String) =
getStrategyT(filterString, ClassTag(classOf[Z3IdxStrategy]))
"Good spatial predicates" should {
"get the stidx strategy" in {
forall(goodSpatialPredicates){ getStStrategy }
}
}
"Attribute filters" should {
"get the attribute equals strategy" in {
getAttributeIdxStrategy("attr2 = 'val56'")
}
"get the record strategy for non indexed attributes" in {
getRecordStrategy("attr1 = 'val56'")
}
"get the attribute likes strategy" in {
val fs = "attr2 ILIKE '2nd1%'"
getStrategy(fs) must beAnInstanceOf[AttributeIdxStrategy]
}
"get the record strategy if attribute non-indexed" in {
getRecordStrategy("attr1 ILIKE '2nd1%'")
}
"get the attribute strategy for lte" in {
val fs = "attr2 <= 11"
getStrategy(fs) must beAnInstanceOf[AttributeIdxStrategy]
}
"get the attribute strategy for lt" in {
val fs = "attr2 < 11"
getStrategy(fs) must beAnInstanceOf[AttributeIdxStrategy]
}
"get the attribute strategy for gte" in {
val fs = "attr2 >= 11"
getStrategy(fs) must beAnInstanceOf[AttributeIdxStrategy]
}
"get the attribute strategy for gt" in {
val fs = "attr2 > 11"
getStrategy(fs) must beAnInstanceOf[AttributeIdxStrategy]
}
"get the attribute strategy for gt prop on right" in {
val fs = "11 > attr2"
getStrategy(fs) must beAnInstanceOf[AttributeIdxStrategy]
}
"get the attribute strategy for during" in {
val fs = "attr2 DURING 2012-01-01T11:00:00.000Z/2014-01-01T12:15:00.000Z"
getStrategy(fs) must beAnInstanceOf[AttributeIdxStrategy]
}
"get the attribute strategy for after" in {
val fs = "attr2 AFTER 2013-01-01T12:30:00.000Z"
getStrategy(fs) must beAnInstanceOf[AttributeIdxStrategy]
}
"get the attribute strategy for before" in {
val fs = "attr2 BEFORE 2014-01-01T12:30:00.000Z"
getStrategy(fs) must beAnInstanceOf[AttributeIdxStrategy]
}
"get the attribute strategy for between" in {
val fs = "attr2 BETWEEN 10 and 20"
getStrategy(fs) must beAnInstanceOf[AttributeIdxStrategy]
}
"get the attribute strategy for ANDed attributes" in {
val fs = "attr2 >= 11 AND attr2 < 20"
getStrategy(fs) must beAnInstanceOf[AttributeIdxStrategy]
}
"partition a query by selecting the best filter" >> {
val sftName = "attributeQuerySplitTest"
val spec = "name:String:index=true:cardinality=high," +
"age:Integer:index=true:cardinality=low," +
"weight:Double:index=false," +
"height:Float:index=false:cardinality=unknown," +
"count:Integer:index=true:cardinality=low," +
"*geom:Point:srid=4326"
val sft = SimpleFeatureTypes.createType(sftName, spec)
val ff = CommonFactoryFinder.getFilterFactory(null)
val ageFilter = ff.equals(ff.property("age"), ff.literal(21))
val nameFilter = ff.equals(ff.literal("foo"), ff.property("name"))
val heightFilter = ff.equals(ff.property("height"), ff.literal(12.0D))
val weightFilter = ff.equals(ff.literal(21.12D), ff.property("weight"))
val hints = new UserDataStrategyHints()
"when best is first" >> {
val filter = ff.and(Seq(nameFilter, heightFilter, weightFilter, ageFilter))
val primary = Seq(nameFilter)
val secondary = ff.and(Seq(heightFilter, weightFilter, ageFilter))
val query = new Query(sft.getTypeName, filter)
val strats = QueryStrategyDecider.chooseStrategies(sft, query, hints, None)
strats must haveLength(1)
strats.head.filter.strategy mustEqual StrategyType.ATTRIBUTE
strats.head.filter.primary mustEqual primary
strats.head.filter.secondary must beSome(secondary)
}
"when best is in the middle" >> {
val filter = ff.and(Seq[Filter](ageFilter, nameFilter, heightFilter, weightFilter))
val primary = Seq(nameFilter)
val secondary = ff.and(Seq(heightFilter, weightFilter, ageFilter))
val query = new Query(sft.getTypeName, filter)
val strats = QueryStrategyDecider.chooseStrategies(sft, query, hints, None)
strats must haveLength(1)
strats.head.filter.strategy mustEqual StrategyType.ATTRIBUTE
strats.head.filter.primary mustEqual primary
strats.head.filter.secondary must beSome(secondary)
}
"when best is last" >> {
val filter = ff.and(Seq[Filter](ageFilter, heightFilter, weightFilter, nameFilter))
val primary = Seq(nameFilter)
val secondary = ff.and(Seq(heightFilter, weightFilter, ageFilter))
val query = new Query(sft.getTypeName, filter)
val strats = QueryStrategyDecider.chooseStrategies(sft, query, hints, None)
strats must haveLength(1)
strats.head.filter.strategy mustEqual StrategyType.ATTRIBUTE
strats.head.filter.primary mustEqual primary
strats.head.filter.secondary must beSome(secondary)
}
"use best indexable attribute if like and retain all children for > 2 filters" in {
val filter = ECQL.toFilter("name LIKE 'baddy' AND age=21 AND count<5")
val query = new Query(sft.getTypeName, filter)
val strats = QueryStrategyDecider.chooseStrategies(sft, query, hints, None)
strats must haveLength(1)
strats.head.filter.strategy mustEqual StrategyType.ATTRIBUTE
strats.head.filter.primary mustEqual Seq(ECQL.toFilter("name LIKE 'baddy'"))
strats.head.filter.secondary must beSome(ECQL.toFilter("age=21 AND count<5"))
}
}
}
"Attribute filters" should {
"get the record strategy if not catalog" in {
getRecordStrategy("attr1 ILIKE '2nd1%'")
}
}
"Id filters" should {
"get the attribute equals strategy" in {
val fs = "IN ('val56')"
getStrategy(fs) must beAnInstanceOf[RecordIdxStrategy]
}
}
"Id and Spatio-temporal filters" should {
"get the records strategy" in {
val fs = "IN ('val56') AND INTERSECTS(geom, POLYGON ((45 23, 48 23, 48 27, 45 27, 45 23)))"
getStrategy(fs) must beAnInstanceOf[RecordIdxStrategy]
}
}
"Id and Attribute filters" should {
"get the records strategy" in {
val fs = "IN ('val56') AND attr2 = val56"
getStrategy(fs) must beAnInstanceOf[RecordIdxStrategy]
}
}
"Really complicated Id AND * filters" should {
"get the records strategy" in {
val fsFragment1="INTERSECTS(geom, POLYGON ((45 23, 48 23, 48 27, 45 27, 45 23)))"
val fsFragment2="AND IN ('val56','val55') AND attr2 = val56 AND IN('val59','val54') AND attr2 = val60"
val fs = s"$fsFragment1 $fsFragment2"
getStrategy(fs) must beAnInstanceOf[RecordIdxStrategy]
}
}
"IS NOT NULL filters" should {
"get the attribute strategy if attribute is indexed" in {
val fs = "attr2 IS NOT NULL"
getStrategy(fs) must beAnInstanceOf[AttributeIdxStrategy]
}
"get the stidx strategy if attribute is not indexed" in {
getRecordStrategy("attr1 IS NOT NULL")
}
}
"Anded Attribute filters" should {
"get the STIdx strategy with stIdxStrategyPredicates" in {
forall(stIdxStrategyPredicates) { getStStrategy }
}
"get the stidx strategy with attributeAndGeometricPredicates" in {
forall(attributeAndGeometricPredicates) { getStStrategy }
}
"get the record strategy for non-indexed queries" in {
forall(idPredicates ++ nonIndexedPredicates) { getRecordStrategy }
}
"get the z3 strategy with spatio-temporal queries" in {
forall(spatioTemporalPredicates) { getZ3Strategy }
val morePredicates = temporalPredicates.drop(1).flatMap(p => goodSpatialPredicates.map(_ + " AND " + p))
forall(morePredicates) { getZ3Strategy }
val withAttrs = temporalPredicates.drop(1).flatMap(p => attributeAndGeometricPredicates.map(_ + " AND " + p))
forall(withAttrs) { getZ3Strategy }
val wholeWorld = "BBOX(geom,-180,-90,180,90) AND dtg DURING 2010-08-08T00:00:00.000Z/2010-08-08T23:59:59.000Z"
getZ3Strategy(wholeWorld)
}
"get the z3 strategy with temporal queries" in {
forall(z3Predicates) { getZ3Strategy }
}
"get the stidx strategy with non-bounded time intervals" in {
val predicates = Seq(
"bbox(geom, 35, 59, 45, 70) AND dtg before 2010-05-12T12:00:00.000Z",
"bbox(geom, 35, 59, 45, 70) AND dtg after 2010-05-12T12:00:00.000Z",
"bbox(geom, 35, 59, 45, 70) AND dtg < '2010-05-12T12:00:00.000Z'",
"bbox(geom, 35, 59, 45, 70) AND dtg <= '2010-05-12T12:00:00.000Z'",
"bbox(geom, 35, 59, 45, 70) AND dtg > '2010-05-12T12:00:00.000Z'",
"bbox(geom, 35, 59, 45, 70) AND dtg >= '2010-05-12T12:00:00.000Z'"
)
forall(predicates) { getStStrategy }
}
"get the attribute strategy with attrIdxStrategyPredicates" in {
forall(attrIdxStrategyPredicates) { getAttributeIdxStrategy }
}
"respect high cardinality attributes regardless of order" in {
val attr = "high = 'test'"
val geom = "BBOX(geom, -10,-10,10,10)"
getStrategy(s"$attr AND $geom") must beAnInstanceOf[AttributeIdxStrategy]
getStrategy(s"$geom AND $attr") must beAnInstanceOf[AttributeIdxStrategy]
}
"respect low cardinality attributes regardless of order" in {
val attr = "low = 'test'"
val geom = "BBOX(geom, -10,-10,10,10)"
getStrategy(s"$attr AND $geom") must beAnInstanceOf[STIdxStrategy]
getStrategy(s"$geom AND $attr") must beAnInstanceOf[STIdxStrategy]
}
"respect cardinality with multiple attributes" in {
val attr1 = "low = 'test'"
val attr2 = "high = 'test'"
val geom = "BBOX(geom, -10,-10,10,10)"
getStrategy(s"$geom AND $attr1 AND $attr2") must beAnInstanceOf[AttributeIdxStrategy]
getStrategy(s"$geom AND $attr2 AND $attr1") must beAnInstanceOf[AttributeIdxStrategy]
getStrategy(s"$attr1 AND $attr2 AND $geom") must beAnInstanceOf[AttributeIdxStrategy]
getStrategy(s"$attr2 AND $attr1 AND $geom") must beAnInstanceOf[AttributeIdxStrategy]
getStrategy(s"$attr1 AND $geom AND $attr2") must beAnInstanceOf[AttributeIdxStrategy]
getStrategy(s"$attr2 AND $geom AND $attr1") must beAnInstanceOf[AttributeIdxStrategy]
}
}
"QueryStrategyDecider" should {
"handle complex filters" in {
skipped("debugging")
implicit val ff = CommonFactoryFinder.getFilterFactory2
val filter = ECQL.toFilter("BBOX(geom,-180,-90,180,90) AND " +
"dtg DURING 2010-08-08T00:00:00.000Z/2010-08-08T23:59:59.000Z")
println(filter)
println(org.locationtech.geomesa.filter.rewriteFilterInDNF(filter))
success
}
}
}
|
AndrewAnnex/geomesa
|
geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/index/QueryStrategyDeciderTest.scala
|
Scala
|
apache-2.0
| 13,851
|
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.language
import com.ibm.icu.text.SimpleDateFormat
import com.ibm.icu.util.{TimeZone, ULocale}
import java.time.{LocalDate, LocalDateTime, ZoneId}
import javax.inject.Inject
import play.api.i18n.{Lang, Langs, Messages, MessagesApi}
import play.api.mvc._
import play.api.{Configuration, Play}
/** This object provides access to common language utilities.
*
* This object contains language codes for English and Welsh and a
* function to return the current language based on a request header.
*
* Additionally, a Dates object is provided which provides helper
* functions to return correctly formatted dates in both English
* and Welsh.
*/
class LanguageUtils @Inject() (langs: Langs, configuration: Configuration)(implicit messagesApi: MessagesApi) {
/** Returns the current language as a Lang object.
*
* This function returns the current language as an i18n Lang object. It first checks
* that the PLAY_LANG cookie exists from the request object and then gets the value from it.
* If it does not exist then it returns the accepted language from the request object. If there
* is no Play application then it just defaults to return the accepted language in the request or
* use the default language.
*
* @param request The RequestHeader object to extract the language information from.
    * @return Lang object containing the current language.
*/
def getCurrentLang(implicit request: RequestHeader): Lang = {
val maybeLangFromCookie = request.cookies.get(Play.langCookieName).flatMap(c => Lang.get(c.value))
maybeLangFromCookie.getOrElse(langs.preferred(request.acceptLanguages))
}
/** Returns true if the lang passed exists within `play.i18n.langs` config value
*
* @param lang The language to check against
    * @return A boolean indicating whether this language is supported in the current application
*/
def isLangAvailable(lang: Lang): Boolean =
configuration.get[Seq[String]]("play.i18n.langs").exists(_.contains(lang.code))
/** Filters a Map of languages against what languages are enabled in the current application
*
* This function returns a filtered Map containing only languages which are enabled in the `play.i18n.langs`
* configuration value. This function is to be used to dynamically populate which languages should be displayed
* on the applications language switcher.
*
* @param langMap List of all supported languages
* @return filtered list of enabled languages
*/
def onlyAvailableLanguages(langMap: Map[String, Lang]): Map[String, Lang] =
langMap.filter(t => isLangAvailable(t._2))
/** Helper object to correctly display and format dates in both English and Welsh.
*
* This object provides a default implementation of the Dates trait in order to provide
* support for Welsh and English dates.
*/
object Dates extends Dates {
override def defaultTimeZone: TimeZone = TimeZone.getTimeZone("Europe/London")
override def to(implicit messages: Messages): String = messages("language.to")
override def singular(implicit messages: Messages): String = messages("language.day.singular")
override def plural(implicit messages: Messages): String = messages("language.day.plural")
}
/**
* A trait that correctly displays and formats dates in multiple languages.
*
* This object contains helper methods to correctly format dates in any language supported
* by the IBM ICU library.
*
* This trait requires a default timezone to be defined, as well as String values for the English words:
* - to
* - day
* - days
*
* These values should come from a Messages file for each language that needs to be supported.
*/
trait Dates {
/** The timezone to use when formatting dates */
def defaultTimeZone: TimeZone
/** The value of the word 'to' * */
def to(implicit messages: Messages): String
/** The value of the singular of the word 'day' * */
def singular(implicit messages: Messages): String
/** The value of the plural of the word 'day' * */
def plural(implicit messages: Messages): String
/** The java.time.ZoneId of the com.ibm.icu.util.TimeZone */
private val zoneId: ZoneId = ZoneId.of(defaultTimeZone.getID)
/** Helper methods to format dates using various patterns * */
private def dateFormat(implicit messages: Messages) = createDateFormatForPattern("d MMMM y")
private def dateFormatAbbrMonth(implicit messages: Messages) = createDateFormatForPattern("d MMM y")
private def shortDateFormat(implicit messages: Messages) = createDateFormatForPattern("yyyy-MM-dd")
private def easyReadingDateFormat(implicit messages: Messages) = createDateFormatForPattern("EEEE d MMMM yyyy")
private def easyReadingTimestampFormat(implicit messages: Messages) = createDateFormatForPattern("h:mmaa")
/**
* Function that returns a simple date format object based on the locale defined in the Lang object.
*
* If the lang does not contain a value that is supported by the IBM ICU library then the default
* Locale is used instead.
*
* @param pattern - The date format pattern as a String.
* @param messages - The implicit lang object.
* @return - The SimpleDateFormat configured using the current language and pattern.
*/
private def createDateFormatForPattern(pattern: String)(implicit messages: Messages): SimpleDateFormat = {
val uLocale = new ULocale(messages.lang.code)
val validLang: Boolean = ULocale.getAvailableLocales.contains(uLocale)
val locale: ULocale = if (validLang) uLocale else ULocale.getDefault
val sdf = new SimpleDateFormat(pattern, locale)
sdf.setTimeZone(defaultTimeZone)
sdf
}
/**
* Converts a java.time.LocalDate object into a String with the format "D MMMM Y".
*
* This function will return a translated string based on the implicit lang object
* that is passed through with it.
*
* Lang("en") example: 25 January 2015
* Lang("cy") example: 25 Ionawr 2015
*
* @param date The java.time.LocalDate object to convert.
* @param messages The implicit lang object.
* @return The date as a "D MMMM Y" formatted string.
*/
def formatDate(date: LocalDate)(implicit messages: Messages): String = dateFormat.format(toMilli(date))
/**
* Converts an Option java.time.LocalDate object into a String with the format "D MMMM Y"
*
* This function will return a translated string based on the implicit lang object
* that is passed through with it. If the option is None then the default value is
* returned back to the caller.
*
* Lang("en") example: 25 January 2015
* Lang("cy") example: 25 Ionawr 2015
* None example: default
*
* @param date The Optional java.time.LocalDate object to convert.
* @param default A default value to return if the date option is not set.
* @param messages The implicit lang object.
* @return Either the date as a "D MMMM Y" formatted string or the default value if not set.
*/
def formatDate(date: Option[LocalDate], default: String)(implicit messages: Messages): String =
date match {
case Some(d) => formatDate(d)
case None => default
}
/**
* Converts a java.time.LocalDate object into a human readable String with the format "D MMM Y"
*
* This function will return a translated string based on the implicit lang object
* that is passed through with it.
*
* Lang("en") example: 25 Jan 2015
* Lang("cy") example: 25 Ion 2015
*
* @param date The java.time.LocalDate object to convert.
* @param messages The implicit lang object.
* @return The date as a "D MMM Y" formatted string.
*/
def formatDateAbbrMonth(date: LocalDate)(implicit messages: Messages): String =
dateFormatAbbrMonth.format(toMilli(date))
/**
* Converts an optional DateTime object into a human readable String with the format: "h:mmaa, EEEE d MMMM yyyy"
*
* This function will return a translated string based on the implicit lang object
* that is passed through with it. If the option is None then the default value is
* returned back to the caller.
*
* Lang("en") example: "3:45am, Sunday 25 January 2015"
* Lang("cy" example: "3:45am, Dydd Sul 25 Ionawr 2015"
*
* @param date The optional java.time.LocalDateTime object to convert.
* @param default The default value to return if the date is missing.
* @param messages The implicit lang object.
* @return The date and time as a "h:mmaa, EEEE d MMMM yyyy" formatted string.
*/
def formatEasyReadingTimestamp(date: Option[LocalDateTime], default: String)(implicit messages: Messages): String =
date match {
case Some(d) =>
val time = easyReadingTimestampFormat.format(toMilli(d)).toLowerCase
val date = easyReadingDateFormat.format(toMilli(d))
s"$time, $date"
case None => default
}
/**
* Converts a java.time.LocalDate object into a human readable String with the format: "yyyy-MM-dd"
*
* This function will return a translated string based on the implicit lang object
* that is passed through with it.
*
* Lang("en") example: 2015-01-25
* Lang("cy") example: 2015-01-25
*
* @param date - The java.time.LocalDate object to be converted.
* @param messages - The implicit language object.
* @return The date as a "yyyy-MM-dd" formatted string.
*/
def shortDate(date: LocalDate)(implicit messages: Messages): String = shortDateFormat.format(toMilli(date))
/**
* Converts two java.time.LocalDate objects into a human readable String to show a date range.
*
* This function will return a translated string based on the implicit lang object.
*
* Lang("en") example: "25 January 2015 to 25 January 2015"
* Lang("cy") example: "25 Ionawr 2015 i 25 Ionawr 2015"
*
* @param startDate The first date.
* @param endDate The second date.
* @param messages The implicit lang value.
* @return A string in the format of "D MMMM Y to D MMMM Y"
*/
def formatDateRange(startDate: LocalDate, endDate: LocalDate)(implicit messages: Messages): String =
Seq(formatDate(startDate), to, formatDate(endDate)).mkString(" ")
/**
* Converts an Int into a string appended by 'days'.
*
* This function will return a translated string based on the implicit lang object.
* It checks to see if the number of days is equal to 1 or not, and then responds with
* the correct plural or singular value for the word "day".
*
* 1, Lang("en") example: 1 day
* 5, Lang("en") example: 5 days
*
* @param numberOfDays - The number of days.
* @param messages - The implicit language object.
* @return A string denoting "x" days.
*/
def formatDays(numberOfDays: Int)(implicit messages: Messages): String = {
val dayOrDays = if (numberOfDays == 1) singular else plural
s"$numberOfDays $dayOrDays"
}
private def toMilli(localDate: LocalDate): Long =
localDate.atStartOfDay(zoneId).toInstant.toEpochMilli
private def toMilli(localDateTime: LocalDateTime): Long =
localDateTime.atZone(zoneId).toInstant.toEpochMilli
}
}
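A brief usage sketch for the Dates helper above. Resolving Messages from the request via MessagesApi.preferred is one common approach in a Play controller or service and is an assumption about the host application; the injected types come from the imports of the file above.
// Hypothetical usage sketch: format a date range and a day count in the request's language.
class DateFormattingExample @Inject() (languageUtils: LanguageUtils, messagesApi: MessagesApi) {
  def summary(request: RequestHeader): String = {
    implicit val messages: Messages = messagesApi.preferred(request)
    val range = languageUtils.Dates.formatDateRange(LocalDate.of(2015, 1, 1), LocalDate.of(2015, 1, 25))
    val days = languageUtils.Dates.formatDays(24)
    s"$range ($days)" // e.g. "1 January 2015 to 25 January 2015 (24 days)" for Lang("en")
  }
}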
|
hmrc/play-language
|
src/main/scala/uk/gov/hmrc/play/language/LanguageUtils.scala
|
Scala
|
apache-2.0
| 12,254
|
/*
* Copyright 2010 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.naggati
import org.jboss.netty.buffer.ChannelBuffer
/**
* The next step for processing after this stage.
* - Incomplete: Need more data; call the same stage again later when more data arrives.
* - GoToStage(stage): Finished with this stage; continue with another stage.
* - Emit(obj): Complete protocol object decoded; return to the first stage and start a new object.
*/
sealed trait NextStep
case object Incomplete extends NextStep
case class GoToStage(stage: Stage) extends NextStep
case class Emit(obj: AnyRef) extends NextStep
/**
* A decoder stage.
*/
trait Stage {
def apply(buffer: ChannelBuffer): NextStep
}
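A minimal decoder sketch built from the three NextStep outcomes above: a stage that reads a four-byte length prefix, waits until the full payload is buffered, and then emits the payload bytes. It is illustrative only and not part of the naggati codebase.
// Hypothetical stage: length-prefixed frames.
object ReadLengthPrefixedFrame extends Stage {
  def apply(buffer: ChannelBuffer): NextStep = {
    if (buffer.readableBytes < 4) {
      Incomplete // not enough data yet; called again when more bytes arrive
    } else {
      buffer.markReaderIndex()
      val length = buffer.readInt()
      if (buffer.readableBytes < length) {
        buffer.resetReaderIndex() // keep the prefix in place for the next attempt
        Incomplete
      } else {
        val payload = new Array[Byte](length)
        buffer.readBytes(payload)
        Emit(payload) // decoded object; processing restarts at the first stage
      }
    }
  }
}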
|
taihsun/Gitest
|
src/main/scala/com/twitter/naggati/Stage.scala
|
Scala
|
apache-2.0
| 1,247
|
package com.rocketfuel.sdbc.base
/**
* A `Select` is an operation on a resource that produces some values, T.
* @tparam Connection is the resource that produces values.
* @tparam T is the values.
*/
abstract class Select[Connection, T] {
self: Logging =>
def iterator()(implicit connection: Connection): Iterator[T]
def option()(implicit connection: Connection): Option[T]
}
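A toy implementation of the abstraction above, treating an in-memory Seq as the "connection". It assumes the Logging trait from this package can simply be mixed in to satisfy the self-type; names are illustrative only.
// Hypothetical sketch: a Select whose resource is just an in-memory sequence of values.
class SeqSelect[T] extends Select[Seq[T], T] with Logging {
  override def iterator()(implicit connection: Seq[T]): Iterator[T] = connection.iterator
  override def option()(implicit connection: Seq[T]): Option[T] = connection.headOption
}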
|
wdacom/sdbc
|
base/src/main/scala/com/rocketfuel/sdbc/base/Select.scala
|
Scala
|
bsd-3-clause
| 389
|
package com.sksamuel.scapegoat.inspections.collections
import com.sksamuel.scapegoat._
/**
* @author
* Stephen Samuel
*/
class PredefSeqIsMutable
extends Inspection(
text = "Predef.Seq is mutable",
defaultLevel = Levels.Info,
description = "Checks for use of mutable Seq.",
explanation = "Predef.Seq aliases scala.collection.mutable.Seq. Did you intend to use an immutable Seq?"
) {
override def isEnabled: Boolean = !isScala213
def inspector(context: InspectionContext): Inspector =
new Inspector(context) {
override def postTyperTraverser: context.Traverser =
new context.Traverser {
import context.global._
override def inspect(tree: Tree): Unit = {
tree match {
case DefDef(_, _, _, _, _, _) if tree.symbol.isAccessor =>
case TypeTree() if tree.tpe.erasure.toString() == "Seq[Any]" =>
context.warn(tree.pos, self)
case _ => continue(tree)
}
}
}
}
}
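For reference, the kind of code the inspection above reacts to on pre-2.13 Scala, where an unqualified Seq is the general collection alias, together with one way to avoid the warning. The snippet is illustrative only.
// Illustrative only; assumes a pre-2.13 compiler where this inspection is enabled.
object PredefSeqExamples {
  def flagged: Seq[Any] = Seq(1, "two", 3.0)    // unqualified Seq[Any]: reported by PredefSeqIsMutable
  def explicit: List[Any] = List(1, "two", 3.0) // a concrete immutable type: not reported
}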
|
sksamuel/scapegoat
|
src/main/scala/com/sksamuel/scapegoat/inspections/collections/PredefSeqIsMutable.scala
|
Scala
|
apache-2.0
| 1,041
|
package controllers
import play.api.Play.current
import play.api.db._
import com.lucidchart.open.relate._
import com.lucidchart.open.relate.Query._
object RelateTests {
val nums = (1 to 100)
val numsLong = (1L to 100L)
val numsInt = nums
val numsDouble = numsLong.map { _.toDouble}
val numsString = numsLong.map { _.toString}
val numsInt_1 = (1 to 1000)
val numsLong_1 = (1L to 1000L)
val numsDouble_1 = numsInt_1.map{_.toDouble}
val numsString_1 = numsInt_1.map{_.toString}
val numsInt_2 = (1 to 10000)
val numsLong_2 = (1L to 10000L)
val numsDouble_2 = numsInt_2.map{_.toDouble}
val numsString_2 = numsInt_2.map{_.toString}
val rows = 250000
val rowsString = (1 to rows).map{_.toString}
val rowsDouble = (1 to rows).map{_.toDouble}
def select1_int(nums:Seq[Int]) = {
DB.withConnection { implicit connection =>
SQL("SELECT `col14` FROM `sel_50` WHERE `col44` IN ({nums}) ").expand { implicit ex =>
commaSeparated("nums", nums.size)
}.on { implicit stmt =>
ints("nums",nums)
}.asList(RowParser {row =>
row.int("col14")
})
}
}
def select1_long(nums:Seq[Long]) = {
DB.withConnection { implicit connection =>
SQL("SELECT `col14` FROM `sel_50` WHERE `col44` IN ({nums}) ").expand { implicit ex =>
commaSeparated("nums", nums.size)
}.on { implicit stmt =>
longs("nums",nums)
}.asList(RowParser { row =>
row.long("col14")
})
}
}
def select1_string(nums:Seq[String]) = {
DB.withConnection { implicit connection =>
SQL("SELECT `col14` FROM `sel_50` WHERE `col44` IN ({nums}) ").expand { implicit ex =>
commaSeparated("nums", nums.size)
}.on { implicit stmt =>
strings("nums",nums)
}.asList(RowParser { row =>
row.string("col14")
})
}
}
def select1_double(nums:Seq[Double]) = {
DB.withConnection { implicit connection =>
SQL("SELECT `col14` FROM `sel_50` WHERE `col44` IN ({nums}) ").expand { implicit ex =>
commaSeparated("nums", nums.size)
}.on { implicit stmt =>
doubles("nums",nums)
}.asList( RowParser {row => row.double("col14")
} )
}
}
def select2_long(nums :Seq[Long]) = {
DB.withConnection { implicit connection =>
SQL("SELECT `col45`,`col46`,`col47`,`col48`,`col49`,`col50`,`col1`,`col2`,`col3`,`col4`,`col5` FROM `sel_50` WHERE `col18` IN ({nums}) ").expand { implicit ex =>
commaSeparated("nums", nums.size)
}.on { implicit stmt =>
longs("nums",nums)
}
.asList( RowParser {row =>
row.long("col45")
row.long("col46")
row.long("col47")
row.long("col48")
row.long("col49")
row.long("col50")
row.long("col1")
row.long("col2")
row.long("col3")
row.long("col4")
row.long("col5")
}
)
}
}
def select2_int(nums :Seq[Int]) = {
DB.withConnection { implicit connection =>
SQL("SELECT `col45`,`col46`,`col47`,`col48`,`col49`,`col50`,`col1`,`col2`,`col3`,`col4`,`col5` FROM `sel_50` WHERE `col18` IN ({nums}) ").expand { implicit ex =>
commaSeparated("nums", nums.size)
}.on { implicit stmt =>
ints("nums",nums)
}
.asList( RowParser {row =>
row.int("col45")
row.int("col46")
row.int("col47")
row.int("col48")
row.int("col49")
row.int("col50")
row.int("col1")
row.int("col2")
row.int("col3")
row.int("col4")
row.int("col5")
}
)
}
}
def select2_string(nums :Seq[String]) = {
DB.withConnection { implicit connection =>
SQL("SELECT `col45`,`col46`,`col47`,`col48`,`col49`,`col50`,`col1`,`col2`,`col3`,`col4`,`col5` FROM `sel_50` WHERE `col18` IN ({nums}) ").expand { implicit ex =>
commaSeparated("nums", nums.size)
}.on { implicit stmt =>
strings("nums",nums)
}
.asList( RowParser {row =>
row.string("col45")
row.string("col46")
row.string("col47")
row.string("col48")
row.string("col49")
row.string("col50")
row.string("col1")
row.string("col2")
row.string("col3")
row.string("col4")
row.string("col5")
}
)
}
}
def select2_double(nums :Seq[Double]) = {
DB.withConnection { implicit connection =>
SQL("SELECT `col45`,`col46`,`col47`,`col48`,`col49`,`col50`,`col1`,`col2`,`col3`,`col4`,`col5` FROM `sel_50` WHERE `col18` IN ({nums}) ").expand { implicit ex =>
commaSeparated("nums", nums.size)
}.on { implicit stmt =>
doubles("nums",numsDouble)
}
.asList( RowParser {row =>
row.long("col45")
row.long("col46")
row.long("col47")
row.long("col48")
row.long("col49")
row.long("col50")
row.long("col1")
row.long("col2")
row.long("col3")
row.long("col4")
row.long("col5")
}
)
}
}
def select3_int(nums:Seq[Int]) = {
DB.withConnection { implicit connection =>
SQL("SELECT `col1`,`col2`,`col3`,`col4`,`col5`,`col6`,`col7`,`col8`,`col9`,`col10`,`col11`,`col12`,`col13`,`col14`,`col15`,`col16`,`col17`,`col18`,`col19`,`col20`,`col21`,`col22`,`col23`,`col24`,`col25` FROM `sel_50` WHERE `col18` IN ({nums}) ").expand { implicit ex =>
commaSeparated("nums", nums.size)
}.on { implicit stmt =>
ints("nums",nums)
}.asList( RowParser {row =>
row.int("col1")
row.int("col2")
row.int("col3")
row.int("col4")
row.int("col5")
row.int("col6")
row.int("col7")
row.int("col8")
row.int("col9")
row.int("col10")
row.int("col11")
row.int("col12")
row.int("col13")
row.int("col14")
row.int("col15")
row.int("col16")
row.int("col17")
row.int("col18")
row.int("col19")
row.int("col20")
row.int("col21")
row.int("col22")
row.int("col23")
row.int("col24")
row.int("col25")
})
}
}
def select3_long(nums:Seq[Long]) = {
DB.withConnection { implicit connection =>
SQL("SELECT `col1`,`col2`,`col3`,`col4`,`col5`,`col6`,`col7`,`col8`,`col9`,`col10`,`col11`,`col12`,`col13`,`col14`,`col15`,`col16`,`col17`,`col18`,`col19`,`col20`,`col21`,`col22`,`col23`,`col24`,`col25` FROM `sel_50` WHERE `col18` IN ({nums}) ").expand { implicit ex =>
commaSeparated("nums", nums.size)
}.on { implicit stmt =>
longs("nums",nums)
}.asList( RowParser {row =>
row.long("col1")
row.long("col2")
row.long("col3")
row.long("col4")
row.long("col5")
row.long("col6")
row.long("col7")
row.long("col8")
row.long("col9")
row.long("col10")
row.long("col11")
row.long("col12")
row.long("col13")
row.long("col14")
row.long("col15")
row.long("col16")
row.long("col17")
row.long("col18")
row.long("col19")
row.long("col20")
row.long("col21")
row.long("col22")
row.long("col23")
row.long("col24")
row.long("col25")
})
}
}
def select3_string(nums:Seq[String]) = {
DB.withConnection { implicit connection =>
SQL("SELECT `col1`,`col2`,`col3`,`col4`,`col5`,`col6`,`col7`,`col8`,`col9`,`col10`,`col11`,`col12`,`col13`,`col14`,`col15`,`col16`,`col17`,`col18`,`col19`,`col20`,`col21`,`col22`,`col23`,`col24`,`col25` FROM `sel_50` WHERE `col18` IN ({nums}) ").expand { implicit ex =>
commaSeparated("nums", nums.size)
}.on { implicit stmt =>
strings("nums",nums)
}.asList( RowParser {row =>
row.string("col1")
row.string("col2")
row.string("col3")
row.string("col4")
row.string("col5")
row.string("col6")
row.string("col7")
row.string("col8")
row.string("col9")
row.string("col10")
row.string("col11")
row.string("col12")
row.string("col13")
row.string("col14")
row.string("col15")
row.string("col16")
row.string("col17")
row.string("col18")
row.string("col19")
row.string("col20")
row.string("col21")
row.string("col22")
row.string("col23")
row.string("col24")
row.string("col25")
})
}
}
def select3_double(nums:Seq[Double]) = {
DB.withConnection { implicit connection =>
SQL("SELECT `col1`,`col2`,`col3`,`col4`,`col5`,`col6`,`col7`,`col8`,`col9`,`col10`,`col11`,`col12`,`col13`,`col14`,`col15`,`col16`,`col17`,`col18`,`col19`,`col20`,`col21`,`col22`,`col23`,`col24`,`col25` FROM `sel_50` WHERE `col18` IN ({nums}) ").expand { implicit ex =>
commaSeparated("nums", nums.size)
}.on { implicit stmt =>
doubles("nums",nums)
}.asList( RowParser {row =>
row.double("col1")
row.double("col2")
row.double("col3")
row.double("col4")
row.double("col5")
row.double("col6")
row.double("col7")
row.double("col8")
row.double("col9")
row.double("col10")
row.double("col11")
row.double("col12")
row.double("col13")
row.double("col14")
row.double("col15")
row.double("col16")
row.double("col17")
row.double("col18")
row.double("col19")
row.double("col20")
row.double("col21")
row.double("col22")
row.double("col23")
row.double("col24")
row.double("col25")
})
}
}
def insert1_int(rows:Seq[Int]) = {
DB.withConnection { connection =>
rows.foreach { row =>
SQL("""INSERT INTO `ins_10` VALUES ({a1},{a2},{a3},{a4},{a5},{a6},{a7},{a8},{a9},{a10})""")
.on { implicit query =>
int("a1",row)
int("a2",row)
int("a3",row)
int("a4",row)
int("a5",row)
int("a6",row)
int("a7",row)
int("a8",row)
int("a9",row)
int("a10",row)
}.executeUpdate()(connection)
}
}
}
def insert1_long(rows:Seq[Long]) = {
DB.withConnection { connection =>
rows.foreach { row =>
SQL("""INSERT INTO `ins_10` VALUES ({a1},{a2},{a3},{a4},{a5},{a6},{a7},{a8},{a9},{a10})""")
.on { implicit query =>
long("a1",row)
long("a2",row)
long("a3",row)
long("a4",row)
long("a5",row)
long("a6",row)
long("a7",row)
long("a8",row)
long("a9",row)
long("a10",row)
}.executeUpdate()(connection)
}
}
}
def insert1_double(rows:Seq[Double]) = {
DB.withConnection { connection =>
rows.foreach { row =>
SQL("""INSERT INTO `ins_10` VALUES ({a1},{a2},{a3},{a4},{a5},{a6},{a7},{a8},{a9},{a10})""")
.on { implicit query =>
double("a1",row)
double("a2",row)
double("a3",row)
double("a4",row)
double("a5",row)
double("a6",row)
double("a7",row)
double("a8",row)
double("a9",row)
double("a10",row)
}.executeUpdate()(connection)
}
}
}
def insert1_string(rows:Seq[String]) = {
DB.withConnection { connection =>
rows.foreach { row =>
SQL("""INSERT INTO `ins_10` VALUES ({a1},{a2},{a3},{a4},{a5},{a6},{a7},{a8},{a9},{a10})""")
.on { implicit query =>
string("a1",row)
string("a2",row)
string("a3",row)
string("a4",row)
string("a5",row)
string("a6",row)
string("a7",row)
string("a8",row)
string("a9",row)
string("a10",row)
}.executeUpdate()(connection)
}
}
}
def insert2_int(rows:Seq[Int]) = {
DB.withConnection { connection =>
rows.foreach { row =>
SQL("""INSERT INTO `ins_50` VALUES ({a1},{a2},{a3},{a4},{a5},{a6},{a7},{a8},{a9},{a10},{a11},{a12},{a13},{a14},{a15},{a16},{a17},{a18},{a19},{a20},{a21},{a22},{a23},{a24},{a25},{a26},{a27},{a28},{a29},{a30},{a31},{a32},{a33},{a34},{a35},{a36},{a37},{a38},{a39},{a40},{a41},{a42},{a43},{a44},{a45},{a46},{a47},{a48},{a49},{a50})""")
.on { implicit query =>
int("a1",row)
int("a2",row)
int("a3",row)
int("a4",row)
int("a5",row)
int("a6",row)
int("a7",row)
int("a8",row)
int("a9",row)
int("a10",row)
int("a11",row)
int("a12",row)
int("a13",row)
int("a14",row)
int("a15",row)
int("a16",row)
int("a17",row)
int("a18",row)
int("a19",row)
int("a20",row)
int("a21",row)
int("a22",row)
int("a23",row)
int("a24",row)
int("a25",row)
int("a26",row)
int("a27",row)
int("a28",row)
int("a29",row)
int("a30",row)
int("a31",row)
int("a32",row)
int("a33",row)
int("a34",row)
int("a35",row)
int("a36",row)
int("a37",row)
int("a38",row)
int("a39",row)
int("a40",row)
int("a41",row)
int("a42",row)
int("a43",row)
int("a44",row)
int("a45",row)
int("a46",row)
int("a47",row)
int("a48",row)
int("a49",row)
int("a50",row)
}.executeUpdate()(connection)
}
}
}
def insert2_long(rows:Seq[Long]) = {
DB.withConnection { connection =>
rows.foreach { row =>
SQL("""INSERT INTO `ins_50` VALUES ({a1},{a2},{a3},{a4},{a5},{a6},{a7},{a8},{a9},{a10},{a11},{a12},{a13},{a14},{a15},{a16},{a17},{a18},{a19},{a20},{a21},{a22},{a23},{a24},{a25},{a26},{a27},{a28},{a29},{a30},{a31},{a32},{a33},{a34},{a35},{a36},{a37},{a38},{a39},{a40},{a41},{a42},{a43},{a44},{a45},{a46},{a47},{a48},{a49},{a50})""")
.on { implicit query =>
long("a1",row)
long("a2",row)
long("a3",row)
long("a4",row)
long("a5",row)
long("a6",row)
long("a7",row)
long("a8",row)
long("a9",row)
long("a10",row)
long("a11",row)
long("a12",row)
long("a13",row)
long("a14",row)
long("a15",row)
long("a16",row)
long("a17",row)
long("a18",row)
long("a19",row)
long("a20",row)
long("a21",row)
long("a22",row)
long("a23",row)
long("a24",row)
long("a25",row)
long("a26",row)
long("a27",row)
long("a28",row)
long("a29",row)
long("a30",row)
long("a31",row)
long("a32",row)
long("a33",row)
long("a34",row)
long("a35",row)
long("a36",row)
long("a37",row)
long("a38",row)
long("a39",row)
long("a40",row)
long("a41",row)
long("a42",row)
long("a43",row)
long("a44",row)
long("a45",row)
long("a46",row)
long("a47",row)
long("a48",row)
long("a49",row)
long("a50",row)
}.executeUpdate()(connection)
}
}
}
def insert2_double(rows:Seq[Double]) = {
DB.withConnection { connection =>
rows.foreach { row =>
SQL("""INSERT INTO `ins_50` VALUES ({a1},{a2},{a3},{a4},{a5},{a6},{a7},{a8},{a9},{a10},{a11},{a12},{a13},{a14},{a15},{a16},{a17},{a18},{a19},{a20},{a21},{a22},{a23},{a24},{a25},{a26},{a27},{a28},{a29},{a30},{a31},{a32},{a33},{a34},{a35},{a36},{a37},{a38},{a39},{a40},{a41},{a42},{a43},{a44},{a45},{a46},{a47},{a48},{a49},{a50})""")
.on { implicit query =>
double("a1",row)
double("a2",row)
double("a3",row)
double("a4",row)
double("a5",row)
double("a6",row)
double("a7",row)
double("a8",row)
double("a9",row)
double("a10",row)
double("a11",row)
double("a12",row)
double("a13",row)
double("a14",row)
double("a15",row)
double("a16",row)
double("a17",row)
double("a18",row)
double("a19",row)
double("a20",row)
double("a21",row)
double("a22",row)
double("a23",row)
double("a24",row)
double("a25",row)
double("a26",row)
double("a27",row)
double("a28",row)
double("a29",row)
double("a30",row)
double("a31",row)
double("a32",row)
double("a33",row)
double("a34",row)
double("a35",row)
double("a36",row)
double("a37",row)
double("a38",row)
double("a39",row)
double("a40",row)
double("a41",row)
double("a42",row)
double("a43",row)
double("a44",row)
double("a45",row)
double("a46",row)
double("a47",row)
double("a48",row)
double("a49",row)
double("a50",row)
}.executeUpdate()(connection)
}
}
}
def insert2_string(rows:Seq[String]) = {
DB.withConnection { connection =>
rows.foreach { row =>
SQL("""INSERT INTO `ins_50` VALUES ({a1},{a2},{a3},{a4},{a5},{a6},{a7},{a8},{a9},{a10},{a11},{a12},{a13},{a14},{a15},{a16},{a17},{a18},{a19},{a20},{a21},{a22},{a23},{a24},{a25},{a26},{a27},{a28},{a29},{a30},{a31},{a32},{a33},{a34},{a35},{a36},{a37},{a38},{a39},{a40},{a41},{a42},{a43},{a44},{a45},{a46},{a47},{a48},{a49},{a50})""")
.on { implicit query =>
string("a1",row)
string("a2",row)
string("a3",row)
string("a4",row)
string("a5",row)
string("a6",row)
string("a7",row)
string("a8",row)
string("a9",row)
string("a10",row)
string("a11",row)
string("a12",row)
string("a13",row)
string("a14",row)
string("a15",row)
string("a16",row)
string("a17",row)
string("a18",row)
string("a19",row)
string("a20",row)
string("a21",row)
string("a22",row)
string("a23",row)
string("a24",row)
string("a25",row)
string("a26",row)
string("a27",row)
string("a28",row)
string("a29",row)
string("a30",row)
string("a31",row)
string("a32",row)
string("a33",row)
string("a34",row)
string("a35",row)
string("a36",row)
string("a37",row)
string("a38",row)
string("a39",row)
string("a40",row)
string("a41",row)
string("a42",row)
string("a43",row)
string("a44",row)
string("a45",row)
string("a46",row)
string("a47",row)
string("a48",row)
string("a49",row)
string("a50",row)
}.executeUpdate()(connection)
}
}
}
def insert3_int(rows:Seq[Int]) = {
DB.withConnection { connection =>
rows.foreach { row =>
SQL("""INSERT INTO `ins_100` VALUES ({a1},{a2},{a3},{a4},{a5},{a6},{a7},{a8},{a9},{a10},{a11},{a12},{a13},{a14},{a15},{a16},{a17},{a18},{a19},{a20},{a21},{a22},{a23},{a24},{a25},{a26},{a27},{a28},{a29},{a30},{a31},{a32},{a33},{a34},{a35},{a36},{a37},{a38},{a39},{a40},{a41},{a42},{a43},{a44},{a45},{a46},{a47},{a48},{a49},{a50},{a51},{a52},{a53},{a54},{a55},{a56},{a57},{a58},{a59},{a60},{a61},{a62},{a63},{a64},{a65},{a66},{a67},{a68},{a69},{a70},{a71},{a72},{a73},{a74},{a75},{a76},{a77},{a78},{a79},{a80},{a81},{a82},{a83},{a84},{a85},{a86},{a87},{a88},{a89},{a90},{a91},{a92},{a93},{a94},{a95},{a96},{a97},{a98},{a99},{a100})""")
.on { implicit query =>
int("a1",row)
int("a2",row)
int("a3",row)
int("a4",row)
int("a5",row)
int("a6",row)
int("a7",row)
int("a8",row)
int("a9",row)
int("a10",row)
int("a11",row)
int("a12",row)
int("a13",row)
int("a14",row)
int("a15",row)
int("a16",row)
int("a17",row)
int("a18",row)
int("a19",row)
int("a20",row)
int("a21",row)
int("a22",row)
int("a23",row)
int("a24",row)
int("a25",row)
int("a26",row)
int("a27",row)
int("a28",row)
int("a29",row)
int("a30",row)
int("a31",row)
int("a32",row)
int("a33",row)
int("a34",row)
int("a35",row)
int("a36",row)
int("a37",row)
int("a38",row)
int("a39",row)
int("a40",row)
int("a41",row)
int("a42",row)
int("a43",row)
int("a44",row)
int("a45",row)
int("a46",row)
int("a47",row)
int("a48",row)
int("a49",row)
int("a50",row)
int("a51",row)
int("a52",row)
int("a53",row)
int("a54",row)
int("a55",row)
int("a56",row)
int("a57",row)
int("a58",row)
int("a59",row)
int("a60",row)
int("a61",row)
int("a62",row)
int("a63",row)
int("a64",row)
int("a65",row)
int("a66",row)
int("a67",row)
int("a68",row)
int("a69",row)
int("a70",row)
int("a71",row)
int("a72",row)
int("a73",row)
int("a74",row)
int("a75",row)
int("a76",row)
int("a77",row)
int("a78",row)
int("a79",row)
int("a80",row)
int("a81",row)
int("a82",row)
int("a83",row)
int("a84",row)
int("a85",row)
int("a86",row)
int("a87",row)
int("a88",row)
int("a89",row)
int("a90",row)
int("a91",row)
int("a92",row)
int("a93",row)
int("a94",row)
int("a95",row)
int("a96",row)
int("a97",row)
int("a98",row)
int("a99",row)
int("a100",row)
}.executeUpdate()(connection)
}
}
}
def insert3_long(rows:Seq[Long]) = {
DB.withConnection { connection =>
rows.foreach { row =>
SQL("""INSERT INTO `ins_100` VALUES ({a1},{a2},{a3},{a4},{a5},{a6},{a7},{a8},{a9},{a10},{a11},{a12},{a13},{a14},{a15},{a16},{a17},{a18},{a19},{a20},{a21},{a22},{a23},{a24},{a25},{a26},{a27},{a28},{a29},{a30},{a31},{a32},{a33},{a34},{a35},{a36},{a37},{a38},{a39},{a40},{a41},{a42},{a43},{a44},{a45},{a46},{a47},{a48},{a49},{a50},{a51},{a52},{a53},{a54},{a55},{a56},{a57},{a58},{a59},{a60},{a61},{a62},{a63},{a64},{a65},{a66},{a67},{a68},{a69},{a70},{a71},{a72},{a73},{a74},{a75},{a76},{a77},{a78},{a79},{a80},{a81},{a82},{a83},{a84},{a85},{a86},{a87},{a88},{a89},{a90},{a91},{a92},{a93},{a94},{a95},{a96},{a97},{a98},{a99},{a100})""")
.on { implicit query =>
long("a1",row)
long("a2",row)
long("a3",row)
long("a4",row)
long("a5",row)
long("a6",row)
long("a7",row)
long("a8",row)
long("a9",row)
long("a10",row)
long("a11",row)
long("a12",row)
long("a13",row)
long("a14",row)
long("a15",row)
long("a16",row)
long("a17",row)
long("a18",row)
long("a19",row)
long("a20",row)
long("a21",row)
long("a22",row)
long("a23",row)
long("a24",row)
long("a25",row)
long("a26",row)
long("a27",row)
long("a28",row)
long("a29",row)
long("a30",row)
long("a31",row)
long("a32",row)
long("a33",row)
long("a34",row)
long("a35",row)
long("a36",row)
long("a37",row)
long("a38",row)
long("a39",row)
long("a40",row)
long("a41",row)
long("a42",row)
long("a43",row)
long("a44",row)
long("a45",row)
long("a46",row)
long("a47",row)
long("a48",row)
long("a49",row)
long("a50",row)
long("a51",row)
long("a52",row)
long("a53",row)
long("a54",row)
long("a55",row)
long("a56",row)
long("a57",row)
long("a58",row)
long("a59",row)
long("a60",row)
long("a61",row)
long("a62",row)
long("a63",row)
long("a64",row)
long("a65",row)
long("a66",row)
long("a67",row)
long("a68",row)
long("a69",row)
long("a70",row)
long("a71",row)
long("a72",row)
long("a73",row)
long("a74",row)
long("a75",row)
long("a76",row)
long("a77",row)
long("a78",row)
long("a79",row)
long("a80",row)
long("a81",row)
long("a82",row)
long("a83",row)
long("a84",row)
long("a85",row)
long("a86",row)
long("a87",row)
long("a88",row)
long("a89",row)
long("a90",row)
long("a91",row)
long("a92",row)
long("a93",row)
long("a94",row)
long("a95",row)
long("a96",row)
long("a97",row)
long("a98",row)
long("a99",row)
long("a100",row)
}.executeUpdate()(connection)
}
}
}
def insert3_double(rows:Seq[Double]) = {
DB.withConnection { connection =>
rows.foreach { row =>
SQL("""INSERT INTO `ins_100` VALUES ({a1},{a2},{a3},{a4},{a5},{a6},{a7},{a8},{a9},{a10},{a11},{a12},{a13},{a14},{a15},{a16},{a17},{a18},{a19},{a20},{a21},{a22},{a23},{a24},{a25},{a26},{a27},{a28},{a29},{a30},{a31},{a32},{a33},{a34},{a35},{a36},{a37},{a38},{a39},{a40},{a41},{a42},{a43},{a44},{a45},{a46},{a47},{a48},{a49},{a50},{a51},{a52},{a53},{a54},{a55},{a56},{a57},{a58},{a59},{a60},{a61},{a62},{a63},{a64},{a65},{a66},{a67},{a68},{a69},{a70},{a71},{a72},{a73},{a74},{a75},{a76},{a77},{a78},{a79},{a80},{a81},{a82},{a83},{a84},{a85},{a86},{a87},{a88},{a89},{a90},{a91},{a92},{a93},{a94},{a95},{a96},{a97},{a98},{a99},{a100})""")
.on { implicit query =>
double("a1",row)
double("a2",row)
double("a3",row)
double("a4",row)
double("a5",row)
double("a6",row)
double("a7",row)
double("a8",row)
double("a9",row)
double("a10",row)
double("a11",row)
double("a12",row)
double("a13",row)
double("a14",row)
double("a15",row)
double("a16",row)
double("a17",row)
double("a18",row)
double("a19",row)
double("a20",row)
double("a21",row)
double("a22",row)
double("a23",row)
double("a24",row)
double("a25",row)
double("a26",row)
double("a27",row)
double("a28",row)
double("a29",row)
double("a30",row)
double("a31",row)
double("a32",row)
double("a33",row)
double("a34",row)
double("a35",row)
double("a36",row)
double("a37",row)
double("a38",row)
double("a39",row)
double("a40",row)
double("a41",row)
double("a42",row)
double("a43",row)
double("a44",row)
double("a45",row)
double("a46",row)
double("a47",row)
double("a48",row)
double("a49",row)
double("a50",row)
double("a51",row)
double("a52",row)
double("a53",row)
double("a54",row)
double("a55",row)
double("a56",row)
double("a57",row)
double("a58",row)
double("a59",row)
double("a60",row)
double("a61",row)
double("a62",row)
double("a63",row)
double("a64",row)
double("a65",row)
double("a66",row)
double("a67",row)
double("a68",row)
double("a69",row)
double("a70",row)
double("a71",row)
double("a72",row)
double("a73",row)
double("a74",row)
double("a75",row)
double("a76",row)
double("a77",row)
double("a78",row)
double("a79",row)
double("a80",row)
double("a81",row)
double("a82",row)
double("a83",row)
double("a84",row)
double("a85",row)
double("a86",row)
double("a87",row)
double("a88",row)
double("a89",row)
double("a90",row)
double("a91",row)
double("a92",row)
double("a93",row)
double("a94",row)
double("a95",row)
double("a96",row)
double("a97",row)
double("a98",row)
double("a99",row)
double("a100",row)
}.executeUpdate()(connection)
}
}
}
def insert3_string(rows:Seq[String]) = {
DB.withConnection { connection =>
rows.foreach { row =>
SQL("""INSERT INTO `ins_100` VALUES ({a1},{a2},{a3},{a4},{a5},{a6},{a7},{a8},{a9},{a10},{a11},{a12},{a13},{a14},{a15},{a16},{a17},{a18},{a19},{a20},{a21},{a22},{a23},{a24},{a25},{a26},{a27},{a28},{a29},{a30},{a31},{a32},{a33},{a34},{a35},{a36},{a37},{a38},{a39},{a40},{a41},{a42},{a43},{a44},{a45},{a46},{a47},{a48},{a49},{a50},{a51},{a52},{a53},{a54},{a55},{a56},{a57},{a58},{a59},{a60},{a61},{a62},{a63},{a64},{a65},{a66},{a67},{a68},{a69},{a70},{a71},{a72},{a73},{a74},{a75},{a76},{a77},{a78},{a79},{a80},{a81},{a82},{a83},{a84},{a85},{a86},{a87},{a88},{a89},{a90},{a91},{a92},{a93},{a94},{a95},{a96},{a97},{a98},{a99},{a100})""")
.on { implicit query =>
string("a1",row)
string("a2",row)
string("a3",row)
string("a4",row)
string("a5",row)
string("a6",row)
string("a7",row)
string("a8",row)
string("a9",row)
string("a10",row)
string("a11",row)
string("a12",row)
string("a13",row)
string("a14",row)
string("a15",row)
string("a16",row)
string("a17",row)
string("a18",row)
string("a19",row)
string("a20",row)
string("a21",row)
string("a22",row)
string("a23",row)
string("a24",row)
string("a25",row)
string("a26",row)
string("a27",row)
string("a28",row)
string("a29",row)
string("a30",row)
string("a31",row)
string("a32",row)
string("a33",row)
string("a34",row)
string("a35",row)
string("a36",row)
string("a37",row)
string("a38",row)
string("a39",row)
string("a40",row)
string("a41",row)
string("a42",row)
string("a43",row)
string("a44",row)
string("a45",row)
string("a46",row)
string("a47",row)
string("a48",row)
string("a49",row)
string("a50",row)
string("a51",row)
string("a52",row)
string("a53",row)
string("a54",row)
string("a55",row)
string("a56",row)
string("a57",row)
string("a58",row)
string("a59",row)
string("a60",row)
string("a61",row)
string("a62",row)
string("a63",row)
string("a64",row)
string("a65",row)
string("a66",row)
string("a67",row)
string("a68",row)
string("a69",row)
string("a70",row)
string("a71",row)
string("a72",row)
string("a73",row)
string("a74",row)
string("a75",row)
string("a76",row)
string("a77",row)
string("a78",row)
string("a79",row)
string("a80",row)
string("a81",row)
string("a82",row)
string("a83",row)
string("a84",row)
string("a85",row)
string("a86",row)
string("a87",row)
string("a88",row)
string("a89",row)
string("a90",row)
string("a91",row)
string("a92",row)
string("a93",row)
string("a94",row)
string("a95",row)
string("a96",row)
string("a97",row)
string("a98",row)
string("a99",row)
string("a100",row)
}.executeUpdate()(connection)
}
}
}
def update1_int(nums:Seq[Int]) = {
DB.withConnection { connection =>
SQL("""UPDATE `sel_50` SET `col13`=16 ,`col33`=19 where `col22` IN ({nums})""")
.expand { implicit ex => commaSeparated("nums",nums.size)}
.on { implicit query =>
ints("nums",nums)
}.executeUpdate()(connection)
}
}
def update1_long(nums:Seq[Long]) = {
DB.withConnection { connection =>
SQL("""UPDATE `sel_50` SET `col13`=16 ,`col33`=19 where `col22` IN ({nums})""")
.expand { implicit ex => commaSeparated("nums",nums.size)}
.on { implicit query =>
longs("nums",nums)
}.executeUpdate()(connection)
}
}
def update1_string(nums:Seq[String]) = {
DB.withConnection { connection =>
SQL("""UPDATE `sel_50` SET `col13`='16' ,`col33`='19' where `col22` IN ({nums})""")
.expand { implicit ex => commaSeparated("nums",nums.size)}
.on { implicit query =>
strings("nums",nums)
}.executeUpdate()(connection)
}
}
def update1_double(nums:Seq[Double]) = {
DB.withConnection { connection =>
SQL("""UPDATE `sel_50` SET `col13`=16 ,`col33`=19 where `col22` IN ({nums})""")
.expand { implicit ex => commaSeparated("nums",nums.size)}
.on { implicit query =>
doubles("nums",nums)
}.executeUpdate()(connection)
}
}
def update2_int(nums:Seq[Int]) = {
DB.withConnection { connection =>
SQL("""UPDATE `sel_50` SET `col1`=16 ,`col43`=19, `col45`=14, `col32`=45, `col26`=254,`col3`=34, `col5`=235, `col12`=5, `col29`=234, `col17`=34 where `col12` IN ({nums})""")
.expand { implicit ex => commaSeparated("nums",nums.size)}
.on { implicit query =>
ints("nums",nums)
}.executeUpdate()(connection)
}
}
def update2_long(nums:Seq[Long]) = {
DB.withConnection { connection =>
SQL("""UPDATE `sel_50` SET `col1`=16 ,`col43`=19, `col45`=14, `col32`=45, `col26`=254,`col3`=34, `col5`=235, `col12`=5, `col29`=234, `col17`=34 where `col12` IN ({nums})""")
.expand { implicit ex => commaSeparated("nums",nums.size)}
.on {
implicit query =>
longs("nums",nums)
}.executeUpdate()(connection)
}
}
def update2_string(nums:Seq[String]) = {
DB.withConnection { connection =>
SQL("""UPDATE `sel_50` SET `col1`='16' ,`col43`='19', `col45`='14', `col32`='45', `col26`='254',`col3`='34', `col5`='235', `col12`='5', `col29`='234', `col17`='34' where `col12` IN ({nums})""")
.expand { implicit ex => commaSeparated("nums",nums.size)}
.on { implicit query =>
strings("nums",nums)
}.executeUpdate()(connection)
}
}
def update2_double(nums:Seq[Double]) = {
DB.withConnection { connection =>
SQL("""UPDATE `sel_50` SET `col1`=16 ,`col43`=19, `col45`=14, `col32`=45, `col26`=254,`col3`=34, `col5`=235, `col12`=5, `col29`=234, `col17`=34 where `col12` IN ({nums})""")
.expand { implicit ex => commaSeparated("nums",nums.size)}
.on {
implicit query =>
doubles("nums",nums)
}.executeUpdate()(connection)
}
}
def update3_int(nums:Seq[Int]) = {
DB.withConnection { connection =>
SQL("""UPDATE `sel_50` SET `col1`=16 ,`col43`=19, `col45`=14, `col32`=45, `col26`=254,`col3`=34, `col5`=235, `col12`=5, `col29`=234,`col17`=34 ,`col18`=423, `col19`=341, `col27`=3243, `col28`=2315, `col25`=1234, `col33` = 34126, `col34` = 23425 ,`col35`=342 ,`col1`=234, `col49`=3576 where `col12` IN ({nums})""")
.expand { implicit ex => commaSeparated("nums",nums.size)}
.on { implicit query =>
ints("nums",nums)
}.executeUpdate()(connection)
}
}
def update3_long(nums:Seq[Long]) = {
DB.withConnection { connection =>
SQL("""UPDATE `sel_50` SET `col1`=16 ,`col43`=19, `col45`=14, `col32`=45, `col26`=254,`col3`=34, `col5`=235, `col12`=5, `col29`=234, `col17`=34,`col18`=423, `col19`=341, `col27`=3243, `col28`=2315, `col25`=1234, `col33` = 34126, `col34` = 23425 ,`col35`=342 ,`col1`=234, `col49`=3576 where `col12` IN ({nums})""")
.expand { implicit ex => commaSeparated("nums",nums.size)}
.on {
implicit query =>
longs("nums",nums)
}.executeUpdate()(connection)
}
}
def update3_string(nums:Seq[String]) = {
DB.withConnection { connection =>
SQL("""UPDATE `sel_50` SET `col1`='16' ,`col43`='19', `col45`='14', `col32`='45', `col26`='254',`col3`='34', `col5`='235', `col12`='5', `col29`='234', `col17`='34',`col18`=423, `col19`=341, `col27`=3243, `col28`=2315, `col25`=1234, `col33` = 34126, `col34` = 23425, `col35`=342 ,`col1`=234, `col49`=3576 where `col12` IN ({nums})""")
.expand { implicit ex => commaSeparated("nums",nums.size)}
.on { implicit query =>
strings("nums",nums)
}.executeUpdate()(connection)
}
}
def update3_double(nums:Seq[Double]) = {
DB.withConnection { connection =>
SQL("""UPDATE `sel_50` SET `col1`=16 ,`col43`=19, `col45`=14, `col32`=45, `col26`=254,`col3`=34, `col5`=235, `col12`=5, `col29`=234, `col17`=34,,`col18`=423, `col19`=341. `col27`=3243, `col28`=2315, `col25`=1234, `col33` = 34126, `col34` = 23425 ,`col35`=342 ,`col1`=234, `col49`=3576 where `col12` IN ({nums})""")
.expand { implicit ex => commaSeparated("nums",nums.size)}
.on {
implicit query =>
doubles("nums",nums)
}.executeUpdate()(connection)
}
}
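  // Illustrative only: a minimal sketch of how the helpers above might be exercised
  // in one round. The ids below are made-up sample data, not part of the benchmark.
  def sampleRound() = {
    val ids = Seq(1, 2, 3)
    insert1_int(ids)                // one row of ten identical int columns per id
    val picked = select1_int(ids)   // col14 values whose col44 is in `ids`
    update1_int(ids)                // set col13/col33 where col22 is in `ids`
    picked
  }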
}
|
lucidsoftware/relate-benchmarks
|
app/controllers/RelateTests.scala
|
Scala
|
mit
| 42,841
|
package com.github.ekroth.spotify
object SpotifyExamples {
val Image = """
{
"height" : null,
"url" : "http://profile-images.scdn.co/artists/default/d4f208d4d49c6f3e1363765597d10c4277f5b74f",
"width" : null
}
"""
val Followers = """
{
"href" : null,
"total" : 4561
}
"""
val Category = """
{
"href" : "https://api.spotify.com/v1/browse/categories/party",
"icons" : [ {
"height" : 274,
"url" : "https://datsnxq1rwndn.cloudfront.net/media/derived/party-274x274_73d1907a7371c3bb96a288390a96ee27_0_0_274_274.jpg",
"width" : 274
} ],
"id" : "party",
"name" : "Party"
}
"""
val Copyright = """
{
"text" : "(C) 2013 Universal Island Records, a division of Universal Music Operations Limited",
"type" : "C"
}
"""
val Error = """
{
"status": 202,
"message": "this is bad"
}
"""
val ExternalID = """
{
"isrc": "USR",
"kalo": "POP"
}
"""
val ExternalURL = """
{
"spotify": "https://url",
"spowtify": "https://orl"
}
"""
val AlbumSimplified = """
{
"album_type": "album",
"available_markets": [ "AD", "AR" ],
"external_urls": {
"spotify": "https://open.spotify.com/album/34EYk8vvJHCUlNrpGxepea"
},
"href": "https://api.spotify.com/v1/albums/34EYk8vvJHCUlNrpGxepea",
"id": "34EYk8vvJHCUlNrpGxepea",
"images": [ {
"height": 640,
"url": "https://i.scdn.co/image/6324fe377dcedf110025527873dafc9b7ee0bb34",
"width": 640
}, {
"height": 300,
"url": "https://i.scdn.co/image/d2e2148023e8a87b7a2f8d2abdfa936154e470b8",
"width": 300
} ],
"name": "Elvis 75 - Good Rockin' Tonight",
"type": "album",
"uri": "spotify:album:34EYk8vvJHCUlNrpGxepea"
}
"""
val ArtistSimplified = """
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/43ZHCT0cAZBISjO8DG9PnE"
},
"href": "https://api.spotify.com/v1/artists/43ZHCT0cAZBISjO8DG9PnE",
"id": "43ZHCT0cAZBISjO8DG9PnE",
"name": "Elvis Presley",
"type": "artist",
"uri": "spotify:artist:43ZHCT0cAZBISjO8DG9PnE"
}
"""
val PagingArrayTrackSimplified = """
{
"href" : "https://api.spotify.com/v1/albums/0sNOF9WDwhWunNAHPD3Baj/tracks?offset=0&limit=50",
"items" : [ {
"artists" : [ ],
"available_markets" : [ "AD" ],
"disc_number" : 1,
"duration_ms" : 305560,
"explicit" : false,
"external_urls" : {
"spotify" : "https://open.spotify.com/track/3f9zqUnrnIq0LANhmnaF0V"
},
"href" : "https://api.spotify.com/v1/tracks/3f9zqUnrnIq0LANhmnaF0V",
"id" : "3f9zqUnrnIq0LANhmnaF0V",
"name" : "Money Changes Everything",
"preview_url" : "https://p.scdn.co/mp3-preview/01bb2a6c9a89c05a4300aea427241b1719a26b06",
"track_number" : 1,
"type" : "track",
"uri" : "spotify:track:3f9zqUnrnIq0LANhmnaF0V"
} ],
"limit" : 50,
"next" : null,
"offset" : 0,
"previous" : null,
"total" : 13
}
"""
val AlbumFull = s"""
{
"album_type" : "album",
"artists" : [ {
"external_urls" : {
"spotify" : "https://open.spotify.com/artist/2BTZIqw0ntH9MvilQ3ewNY"
},
"href" : "https://api.spotify.com/v1/artists/2BTZIqw0ntH9MvilQ3ewNY",
"id" : "2BTZIqw0ntH9MvilQ3ewNY",
"name" : "Cyndi Lauper",
"type" : "artist",
"uri" : "spotify:artist:2BTZIqw0ntH9MvilQ3ewNY"
} ],
"available_markets" : [ "AD" ],
"copyrights" : [ {
"text" : "(P) 2000 Sony Music Entertainment Inc.",
"type" : "P"
} ],
"external_ids" : {
"upc" : "5099749994324"
},
"external_urls" : {
"spotify" : "https://open.spotify.com/album/0sNOF9WDwhWunNAHPD3Baj"
},
"genres" : [ ],
"href" : "https://api.spotify.com/v1/albums/0sNOF9WDwhWunNAHPD3Baj",
"id" : "0sNOF9WDwhWunNAHPD3Baj",
"images" : [ {
"height" : 640,
"url" : "https://i.scdn.co/image/07c323340e03e25a8e5dd5b9a8ec72b69c50089d",
"width" : 640
}, {
"height" : 300,
"url" : "https://i.scdn.co/image/8b662d81966a0ec40dc10563807696a8479cd48b",
"width" : 300
}, {
"height" : 64,
"url" : "https://i.scdn.co/image/54b3222c8aaa77890d1ac37b3aaaa1fc9ba630ae",
"width" : 64
} ],
"name" : "She's So Unusual",
"popularity" : 39,
"release_date" : "1983",
"release_date_precision" : "year",
"tracks" : $PagingArrayTrackSimplified,
"type" : "album",
"uri" : "spotify:album:0sNOF9WDwhWunNAHPD3Baj"
}
"""
val ArtistFull = """
{
"external_urls" : {
"spotify" : "https://open.spotify.com/artist/0OdUWJ0sBjDrqHygGUXeCF"
},
"followers" : {
"href" : null,
"total" : 306565
},
"genres" : [ "indie folk", "indie pop" ],
"href" : "https://api.spotify.com/v1/artists/0OdUWJ0sBjDrqHygGUXeCF",
"id" : "0OdUWJ0sBjDrqHygGUXeCF",
"images" : [ ],
"name" : "Band of Horses",
"popularity" : 59,
"type" : "artist",
"uri" : "spotify:artist:0OdUWJ0sBjDrqHygGUXeCF"
}
"""
val PlaylistFull = """
{
"collaborative" : false,
"description" : "Having friends over for dinner? Here´s the perfect playlist.",
"external_urls" : {
"spotify" : "http://open.spotify.com/user/spotify/playlist/59ZbFPES4DQwEjBpWHzrtC"
},
"followers" : {
"href" : null,
"total" : 143350
},
"href" : "https://api.spotify.com/v1/users/spotify/playlists/59ZbFPES4DQwEjBpWHzrtC",
"id" : "59ZbFPES4DQwEjBpWHzrtC",
"images" : [ ],
"name" : "Dinner with Friends",
"owner" : {
"external_urls" : {
"spotify" : "http://open.spotify.com/user/spotify"
},
"href" : "https://api.spotify.com/v1/users/spotify",
"id" : "spotify",
"type" : "user",
"uri" : "spotify:user:spotify"
},
"public" : null,
"snapshot_id" : "bNLWdmhh+HDsbHzhckXeDC0uyKyg4FjPI/KEsKjAE526usnz2LxwgyBoMShVL+z+",
"tracks" : {
"href" : "https://api.spotify.com/v1/users/spotify/playlists/59ZbFPES4DQwEjBpWHzrtC/tracks",
"items" : [ ],
"limit" : 100,
"next" : "https://api.spotify.com/v1/users/spotify/playlists/59ZbFPES4DQwEjBpWHzrtC/tracks?offset=100&limit=100",
"offset" : 0,
"previous" : null,
"total" : 105
},
"type" : "playlist",
"uri" : "spotify:user:spotify:playlist:59ZbFPES4DQwEjBpWHzrtC"
}
"""
val TrackLink = s"""
{
"external_urls": $ExternalURL,
"href": "localhost",
"id": "123",
"type": "track",
"uri": "spotify://spot"
}
"""
val UserPrivate = """
{
"birthdate": "1937-06-01",
"country": "SE",
"display_name": "JM Wizzler",
"email": "email@example.com",
"external_urls": {
"spotify": "https://open.spotify.com/user/wizzler"
},
"followers" : {
"href" : null,
"total" : 3829
},
"href": "https://api.spotify.com/v1/users/wizzler",
"id": "wizzler",
"images": [
{
"height": null,
"url": "https://fbcdn-profile-a.akamaihd.net/hprofile-ak-frc3/t1.0-1/1970403_10152215092574354_1798272330_n.jpg",
"width": null
}
],
"product": "premium",
"type": "user",
"uri": "spotify:user:wizzler"
}
"""
val UserPublic = """
{
"display_name" : "Lilla Namo",
"external_urls" : {
"spotify" : "https://open.spotify.com/user/tuggareutangranser"
},
"followers" : {
"href" : null,
"total" : 4561
},
"href" : "https://api.spotify.com/v1/users/tuggareutangranser",
"id" : "tuggareutangranser",
"images" : [ {
"height" : null,
"url" : "http://profile-images.scdn.co/artists/default/d4f208d4d49c6f3e1363765597d10c4277f5b74f",
"width" : null
} ],
"type" : "user",
"uri" : "spotify:user:tuggareutangranser"
}
"""
val UserPublicOther = """
{
"external_urls" : {
"spotify" : "https://open.spotify.com/user/tuggareutangranser"
},
"href" : "https://api.spotify.com/v1/users/tuggareutangranser",
"id" : "tuggareutangranser",
"type" : "user",
"uri" : "spotify:user:tuggareutangranser"
}
"""
val TrackSimplified = """
{
"artists": [ {
"external_urls": {
"spotify": "https://open.spotify.com/artist/08td7MxkoHQkXnWAYD8d6Q"
},
"href": "https://api.spotify.com/v1/artists/08td7MxkoHQkXnWAYD8d6Q",
"id": "08td7MxkoHQkXnWAYD8d6Q",
"name": "Tania Bowra",
"type": "artist",
"uri": "spotify:artist:08td7MxkoHQkXnWAYD8d6Q"
} ],
"available_markets": [ "AD", "AR", "AT", "AU", "BE", "BG", "BO", "BR", "CA", "CH", "CL", "CO", "CR", "CY", "CZ", "DE", "DK", "DO", "EC", "EE", "ES", "FI", "FR", "GB", "GR", "GT", "HK", "HN", "HU", "IE", "IS", "IT", "LI", "LT", "LU", "LV", "MC", "MT", "MX", "MY", "NI", "NL", "NO", "NZ", "PA", "PE", "PH", "PL", "PT", "PY", "RO", "SE", "SG", "SI", "SK", "SV", "TR", "TW", "US", "UY" ],
"disc_number": 1,
"duration_ms": 276773,
"explicit": false,
"external_urls": {
"spotify": "https://open.spotify.com/track/2TpxZ7JUBn3uw46aR7qd6V"
},
"href": "https://api.spotify.com/v1/tracks/2TpxZ7JUBn3uw46aR7qd6V",
"id": "2TpxZ7JUBn3uw46aR7qd6V",
"name": "All I Want",
"preview_url": "https://p.scdn.co/mp3-preview/6d00206e32194d15df329d4770e4fa1f2ced3f57",
"track_number": 1,
"type": "track",
"uri": "spotify:track:2TpxZ7JUBn3uw46aR7qd6V"
}
"""
val PagingEmpty = """
{
"href" : "https://api.spotify.com/v1/browse/categories/party/playlists?country=BR&offset=0&limit=20",
"items" : [ ],
"limit" : 20,
"next" : "https://api.spotify.com/v1/browse/categories/party/playlists?country=BR&offset=20&limit=20",
"offset" : 0,
"previous" : null,
"total" : 148
}
"""
val PlaylistSimplified = s"""
{
"collaborative" : false,
"external_urls" : {
"spotify" : "http://open.spotify.com/user/spotifybrazilian/playlist/4k7EZPI3uKMz4aRRrLVfen"
},
"href" : "https://api.spotify.com/v1/users/spotifybrazilian/playlists/4k7EZPI3uKMz4aRRrLVfen",
"id" : "4k7EZPI3uKMz4aRRrLVfen",
"images" : [ {
"height" : 300,
"url" : "https://i.scdn.co/image/bf6544c213532e9650088dfef76c8521093d970e",
"width" : 300
} ],
"name" : "Noite Eletrônica",
"owner" : $UserPublic,
"public" : null,
"tracks" : {
"href" : "https://api.spotify.com/v1/users/spotifybrazilian/playlists/4k7EZPI3uKMz4aRRrLVfen/tracks",
"total" : 80
},
"type" : "playlist",
"uri" : "spotify:user:spotifybrazilian:playlist:4k7EZPI3uKMz4aRRrLVfen"
}
"""
val Tracks = """
{
"href" : "https://api.spotify.com/v1/users/spotifybrazilian/playlists/4k7EZPI3uKMz4aRRrLVfen/tracks",
"total" : 80
}
"""
val TrackFull = """
{
"album": {
"album_type": "album",
"available_markets": [ "AD", "AR", "AT", "AU", "BE", "BG", "BO", "BR", "CA", "CH", "CL", "CO", "CR", "CY", "CZ", "DE", "DK", "DO", "EC", "EE", "ES", "FI", "FR", "GB", "GR", "GT", "HK", "HN", "HU", "IE", "IS", "IT", "LI", "LT", "LU", "LV", "MC", "MT", "MX", "MY", "NI", "NL", "NO", "NZ", "PA", "PE", "PH", "PL", "PT", "PY", "RO", "SE", "SG", "SI", "SK", "SV", "TR", "TW", "US", "UY" ],
"external_urls": {
"spotify": "https://open.spotify.com/album/34EYk8vvJHCUlNrpGxepea"
},
"href": "https://api.spotify.com/v1/albums/34EYk8vvJHCUlNrpGxepea",
"id": "34EYk8vvJHCUlNrpGxepea",
"images": [ ],
"name": "Elvis 75 - Good Rockin' Tonight",
"type": "album",
"uri": "spotify:album:34EYk8vvJHCUlNrpGxepea"
},
"artists": [ {
"external_urls": {
"spotify": "https://open.spotify.com/artist/43ZHCT0cAZBISjO8DG9PnE"
},
"href": "https://api.spotify.com/v1/artists/43ZHCT0cAZBISjO8DG9PnE",
"id": "43ZHCT0cAZBISjO8DG9PnE",
"name": "Elvis Presley",
"type": "artist",
"uri": "spotify:artist:43ZHCT0cAZBISjO8DG9PnE"
} ],
"available_markets": [ "AD", "AR", "AT", "AU", "BE", "BG", "BO", "BR", "CA", "CH", "CL", "CO", "CR", "CY", "CZ", "DE", "DK", "DO", "EC", "EE", "ES", "FI", "FR", "GB", "GR", "GT", "HK", "HN", "HU", "IE", "IS", "IT", "LI", "LT", "LU", "LV", "MC", "MT", "MX", "MY", "NI", "NL", "NO", "NZ", "PA", "PE", "PH", "PL", "PT", "PY", "RO", "SE", "SG", "SI", "SK", "SV", "TR", "TW", "US", "UY" ],
"disc_number": 3,
"duration_ms": 260973,
"explicit": false,
"external_ids": {
"isrc": "USRC16901355"
},
"external_urls": {
"spotify": "https://open.spotify.com/track/6fgjU6IfBOXHI3OKtndEeE"
},
"href": "https://api.spotify.com/v1/tracks/6fgjU6IfBOXHI3OKtndEeE",
"id": "6fgjU6IfBOXHI3OKtndEeE",
"name": "Suspicious Minds",
"popularity": 70,
"preview_url": "https://p.scdn.co/mp3-preview/3742af306537513a4f446d7c8f9cdb1cea6e36d1",
"track_number": 19,
"type": "track",
"uri": "spotify:track:6fgjU6IfBOXHI3OKtndEeE"
}
"""
val SavedTrack = s"""
{
"added_at": "2014-07-08T14:05:27Z",
"track": $TrackFull
}
"""
val PlaylistTrack = s"""
{
"added_at" : "2014-09-01T04:21:28Z",
"added_by" : {
"external_urls" : { },
"href" : "https://api.spotify.com/v1/users/spotify",
"id" : "spotify",
"type" : "user",
"uri" : "spotify:user:spotify"
},
"track" : $TrackFull
}
"""
val PagingSavedTrack = """
"href": "https://api.spotify.com/v1/me/tracks?offset=0&limit=20",
"items": [
{
"added_at": "2014-07-08T14:05:27Z",
"track": {
"album": {
"album_type": "album",
"available_markets": [
"AD",
"AR",
"AT",
"TR",
"TW",
"UY"
],
"external_urls": {
"spotify": "https://open.spotify.com/album/4kbE34G5bxaxwuCqz0NEw4"
},
"href": "https://api.spotify.com/v1/albums/4kbE34G5bxaxwuCqz0NEw4",
"id": "4kbE34G5bxaxwuCqz0NEw4",
"images": [
{
"height": 635,
"url": "https://i.scdn.co/image/5ac900806189613a98ce8d2a979265dabd3f7347",
"width": 640
},
{
"height": 298,
"url": "https://i.scdn.co/image/e531cef3541f3d9d7fef9dbede8f19223e2f1497",
"width": 300
},
{
"height": 64,
"url": "https://i.scdn.co/image/4be3ce9447365df0b8653f941058ab3fd7177b25",
"width": 64
}
],
"name": "The Best Of Me",
"type": "album",
"uri": "spotify:album:4kbE34G5bxaxwuCqz0NEw4"
},
"artists": [
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/3Z02hBLubJxuFJfhacLSDc"
},
"href": "https://api.spotify.com/v1/artists/3Z02hBLubJxuFJfhacLSDc",
"id": "3Z02hBLubJxuFJfhacLSDc",
"name": "Bryan Adams",
"type": "artist",
"uri": "spotify:artist:3Z02hBLubJxuFJfhacLSDc"
}
],
"available_markets": [
"AD",
"AR",
"AT",
"TR",
"TW",
"UY"
],
"disc_number": 1,
"duration_ms": 265933,
"explicit": false,
"external_ids": {
"isrc": "USAM19774904"
},
"external_urls": {
"spotify": "https://open.spotify.com/track/1XjKmqLHqnzNLYqYSRBIZK"
},
"href": "https://api.spotify.com/v1/tracks/1XjKmqLHqnzNLYqYSRBIZK",
"id": "1XjKmqLHqnzNLYqYSRBIZK",
"name": "Back To You - MTV Unplugged Version",
"popularity": 43,
"preview_url": "https://p.scdn.co/mp3-preview/abeb349e0ea95846b4e4e01b115fcdbd5e9a991a",
"track_number": 11,
"type": "track",
"uri": "spotify:track:1XjKmqLHqnzNLYqYSRBIZK"
}
}
],
"limit": 20,
"next": "https://api.spotify.com/v1/me/tracks?offset=20&limit=20",
"offset": 0,
"previous": null,
"total": 53
"""
val TestCase1 = """
{
"added_at" : "2014-10-26T13:26:56Z",
"track" : {
"album" : {
"album_type" : "album",
"available_markets" : [ "AD", "AR", "AT", "BE", "BG", "BO", "BR", "CH", "CL", "CO", "CR", "CY", "CZ", "DE", "DK", "DO", "EC", "EE", "ES", "FI", "FR", "GB", "GR", "GT", "HK", "HN", "HU", "IE", "IS", "IT", "LI", "LT", "LU", "LV", "MC", "MT", "MY", "NI", "NL", "NO", "PA", "PE", "PH", "PL", "PT", "PY", "RO", "SE", "SG", "SI", "SK", "SV", "TR", "TW", "UY" ],
"external_urls" : {
"spotify" : "https://open.spotify.com/album/0JGCvHdtAPddamH80fV4Z6"
},
"href" : "https://api.spotify.com/v1/albums/0JGCvHdtAPddamH80fV4Z6",
"id" : "0JGCvHdtAPddamH80fV4Z6",
"images" : [ {
"height" : 640,
"url" : "https://i.scdn.co/image/3c6fd318d65fa1db702bf7fb2509c52e041596e1",
"width" : 640
}, {
"height" : 300,
"url" : "https://i.scdn.co/image/8081e470a8eb2ca14dd61102a06f8696aad2c2d2",
"width" : 300
}, {
"height" : 64,
"url" : "https://i.scdn.co/image/57a4a77fa7539212e03d17a9bc997ea8701c7c41",
"width" : 64
} ],
"name" : "In Rolling Waves",
"type" : "album",
"uri" : "spotify:album:0JGCvHdtAPddamH80fV4Z6"
},
"artists" : [ {
"external_urls" : {
"spotify" : "https://open.spotify.com/artist/3Qy1IxDSU8SLpUUOfbOpxM"
},
"href" : "https://api.spotify.com/v1/artists/3Qy1IxDSU8SLpUUOfbOpxM",
"id" : "3Qy1IxDSU8SLpUUOfbOpxM",
"name" : "The Chain Gang Of 1974",
"type" : "artist",
"uri" : "spotify:artist:3Qy1IxDSU8SLpUUOfbOpxM"
} ],
"available_markets" : [ ],
"disc_number" : 2,
"duration_ms" : 254400,
"explicit" : false,
"external_ids" : {
"isrc" : "USWB11401589"
},
"external_urls" : {
"spotify" : "https://open.spotify.com/track/4z7Ufu7CrBFyTHT3ymBdVi"
},
"href" : "https://api.spotify.com/v1/tracks/4z7Ufu7CrBFyTHT3ymBdVi",
"id" : "4z7Ufu7CrBFyTHT3ymBdVi",
"name" : "What We Want",
"popularity" : 0,
"preview_url" : null,
"track_number" : 9,
"type" : "track",
"uri" : "spotify:track:4z7Ufu7CrBFyTHT3ymBdVi"
}
}
"""
}
|
ekroth/play-spotify
|
src/test/scala/com/github/ekroth/spotify/SpotifyExamples.scala
|
Scala
|
mit
| 17,990
|
/*
* ScalaRay - Ray tracer based on pbrt (see http://pbrt.org) written in Scala
* Copyright (C) 2009, 2010, 2011 Jesper de Jong
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.jesperdj.scalaray
import org.jesperdj.scalaray.common._
package object vecmath {
// Implicit conversion for scaling vectors by multiplying a numeric type with a vector
implicit def implicitScaleVector[@specialized(Int, Double) T <% Double](f: T) = new MultipliableSame[Vector] {
@inline def *(v: Vector) = v * f
}
// Implicit conversion for scaling normals by multiplying a numeric type with a normal
implicit def implicitScaleNormal[@specialized(Int, Double) T <% Double](f: T) = new MultipliableSame[Normal] {
@inline def *(n: Normal) = n * f
}
// Implicit conversion for multiplying a point by a weight
implicit def implicitScalePoint[@specialized(Int, Double) T <% Double](f: T) = new MultipliableSame[Point] {
@inline def *(p: Point) = p * f
}
// Implicit conversion for scaling quaternions by multiplying a numeric type with a quaternion
implicit def implicitScaleQuaternion[@specialized(Int, Double) T <% Double](f: T) = new MultipliableSame[Quaternion] {
@inline def *(q: Quaternion) = q * f
}
// Implicit conversion to enable Vector to be used in interpolate()
implicit def vectorToInterpolatable(v: Vector) = new Interpolatable[Vector] {
@inline def *(f: Double): Vector = v * f
@inline def +(w: Vector): Vector = v + w
}
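  // Sketch of the intended use of the scaling conversions above (illustrative only;
  // it assumes this package's Vector(x, y, z) constructor, so it is kept as a comment):
  //
  //   val v = Vector(1.0, 2.0, 3.0)
  //   val doubled = 2.0 * v      // resolved through implicitScaleVector
  //   val tripled = 3 * v        // Int is admitted by the T <% Double view bound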
}
|
jesperdj/scalaray
|
src/main/scala/org/jesperdj/scalaray/vecmath/package.scala
|
Scala
|
gpl-3.0
| 2,090
|
package org.jetbrains.plugins.scala
package findUsages
package compilerReferences
import java.util.concurrent.TimeUnit
import java.util.concurrent.locks.{Condition, Lock, ReentrantLock}
import com.intellij.openapi.compiler.CompilerMessageCategory
import com.intellij.openapi.module.Module
import com.intellij.psi.PsiClass
import com.intellij.testFramework.fixtures.JavaCodeInsightFixtureTestCase
import com.intellij.testFramework.{CompilerTester, PsiTestUtil}
import org.jetbrains.plugins.scala.base.ScalaSdkOwner
import org.jetbrains.plugins.scala.base.libraryLoaders.{HeavyJDKLoader, LibraryLoader, ScalaSDKLoader}
import org.jetbrains.plugins.scala.debugger.ScalaCompilerTestBase
import org.jetbrains.plugins.scala.project._
import org.junit.Assert.{assertNotSame, fail}
import org.junit.experimental.categories.Category
import scala.jdk.CollectionConverters._
import scala.collection.mutable
import scala.reflect.ClassTag
import scala.util.control.NonFatal
@Category(Array(classOf[SlowTests]))
abstract class ScalaCompilerReferenceServiceFixture extends JavaCodeInsightFixtureTestCase with ScalaSdkOwner {
override protected def supportedIn(version: ScalaVersion): Boolean = version >= LatestScalaVersions.Scala_2_12
override protected def librariesLoaders: Seq[LibraryLoader] =
Seq(
HeavyJDKLoader(),
ScalaSDKLoader(includeScalaReflectIntoCompilerClasspath = true),
)
private[this] val compilerIndexLock: Lock = new ReentrantLock()
private[this] val indexReady: Condition = compilerIndexLock.newCondition()
@volatile private[this] var indexReadyPredicate: Boolean = false
protected var compiler: CompilerTester = _
private[this] val myLoaders = mutable.Set.empty[LibraryLoader]
override def setUp(): Unit = {
super.setUp()
try {
setUpLibrariesFor(getModule)
PsiTestUtil.addSourceRoot(getModule, myFixture.getTempDirFixture.findOrCreateDir("src"), true)
val project = getProject
compiler = new CompilerTester(project, project.modules.asJava, null)
} catch {
case NonFatal(e) => fail(e.getMessage)
}
}
override def tearDown(): Unit =
try {
disposeLibraries(getModule)
compiler.tearDown()
ScalaCompilerTestBase.stopAndWait()
} finally {
compiler = null
super.tearDown()
}
def setUpLibrariesFor(modules: Module*): Unit =
for {
module <- modules
loader <- librariesLoaders
} {
loader.init(module, version)
myLoaders += loader
}
override protected def disposeLibraries(implicit module: Module): Unit = {
for {
module <- getProject.modules
loader <- myLoaders
} loader.clean(module)
myLoaders.clear()
}
protected def buildProject(): Unit = {
getProject.getMessageBus
.connect(getProject.unloadAwareDisposable)
.subscribe(CompilerReferenceServiceStatusListener.topic, new CompilerReferenceServiceStatusListener {
override def onIndexingPhaseFinished(success: Boolean): Unit = compilerIndexLock.locked {
indexReadyPredicate = true
indexReady.signal()
}
})
compiler
.rebuild
.asScala
.foreach(m => assertNotSame(m.getMessage, CompilerMessageCategory.ERROR, m.getCategory))
compilerIndexLock.locked {
indexReady.await(30, TimeUnit.SECONDS)
      if (!indexReadyPredicate) fail("Failed to update compiler index.")
indexReadyPredicate = false
}
}
protected def findClass[T](implicit tag: ClassTag[T]): PsiClass =
myFixture.findClass(tag.runtimeClass.getCanonicalName)
}
|
JetBrains/intellij-scala
|
scala/scala-impl/test/org/jetbrains/plugins/scala/findUsages/compilerReferences/ScalaCompilerReferenceServiceFixture.scala
|
Scala
|
apache-2.0
| 3,617
|
package reopp.common.examples
import java.util.concurrent.Semaphore
import reopp.common.{Predicate, Utils, OptionSol, Function}
import reopp.common.guardedcommands.{GCSolution, GCConnector}
import reopp.common.guardedcommands.dataconnectors.ConnectorGen._
import Utils._
/**
* Created by jose on 14/04/15.
*/
object SungExperiments extends App {
///// SIZE OF CONNECTORS
val NUMSYNC = 50
val NUMTRANS = 50
val NUMFILTERS = 200
/**
* Uses a semaphore (not yet shared) to block execution while waiting for an update.
*/
class InputPortImpl(port: String, sem: Semaphore) extends GCConnector(List(port)) {
private var data: Option[Any] = None
def getConstraints = data match {
case None => !port
case Some(v) => port --> (port :== v)
}
override def update(s: OptionSol[GCSolution]) {
if (s.isDefined && (s.get hasFlowOnPort port)) {
data = None
sem.release()
}
}
def put(d:Any): Unit = {
data = Some(d)
sem.acquire()
}
}
// counts the time to execute an action
def time(fun: => OptionSol[GCSolution]): Long = {
val t = System.currentTimeMillis()
//val res = cons.solveChocoSat
fun
val spent = System.currentTimeMillis() - t
println(s"took $spent secs.")
spent
}
////// sequence of syncs ///////
val myInputPort = new InputPortImpl("1", new Semaphore(1))
def syncs(k: Int): GCConnector = {
var res: GCConnector = myInputPort
for (i <- 1 to k)
res ++= sync(i.toString,(i+1).toString)
res
}
myInputPort.put(())
val conn = syncs(NUMSYNC)
val const = conn.getConstraints
println("---- syncs ----")
time(const.solveXZ3)
time(const.quickDataSolveSAT4J)
time(const.quickDataSolveZ3)
time(const.solveChocoDyn)
time(const.solveChocoPredAbstVarOrdered)
// val sol1 = const.solveXZ3
// val sol2 = const.quickDataSolveSAT4J
// val sol3 = const.quickDataSolveZ3
// val sol4 = const.solveChocoDyn
// val sol5 = const.solveChocoPredAbstVarOrdered
//
// println(s"sol1:\n$sol1")
// println(s"sol2:\n$sol2")
// println(s"sol3:\n$sol3")
// println(s"sol4:\n$sol4")
// println(s"sol5:\n$sol5")
////// sequence of transformers ///////
val myInputPort2 = new InputPortImpl("1", new Semaphore(1))
myInputPort2.put(0)
val succ = Function("succ") {
case i:Int => i+1
case x =>
println(s"UNKNOWN x received - $x : ${x.getClass}")
42
}
def transfs(k: Int): GCConnector = {
var res: GCConnector = myInputPort2
for (i <- 1 to k)
res ++= transf(i.toString,(i+1).toString,succ)
res
}
// myInputPort.put(())
val conn2 = transfs(NUMTRANS)
val const2 = conn2.getConstraints
println("---- transformers ----")
// time(const2.solveXZ3) // NOT CALLING EXTERNAL! wrong.
time(const2.quickDataSolveSAT4J)
time(const2.quickDataSolveZ3)
time(const2.solveChocoDyn)
time(const2.solveChocoPredAbstVarOrdered)
// val sol21 = const2.solveXZ3 // buggy...
// val sol22 = const2.quickDataSolveSAT4J
// val sol23 = const2.quickDataSolveZ3
// val sol24 = const2.solveChocoDyn
// val sol25 = const2.solveChocoPredAbstVarOrdered
// println(s"sol21:\n$sol21")
// println(s"sol22:\n$sol22")
// println(s"sol23:\n$sol23")
// println(s"sol24:\n$sol24")
// println(s"sol25:\n$sol25")
////// sequence of filters - only the last fails ///////
val myInputPort3 = new InputPortImpl("1", new Semaphore(1))
myInputPort3.put(0)
def is(v:Int) = Predicate("is-"+v) { // ATTENTION: name must be unique for each v! (except for chocoDyn)
case i:Int => i != v
case x =>
println(s"UNKNOWN x received - $x : ${x.getClass}")
false
}
def filters(k: Int): GCConnector = {
var res: GCConnector = myInputPort3
for (i <- 1 to k)
res ++= filter(i.toString,(i+1).toString,is(k-i)) // fail only on the last filter
res
}
val conn3 = filters(NUMFILTERS)
val const3 = conn3.getConstraints
println("---- filters ----")
  // time(const3.solveXZ3) // NOT CALLING EXTERNAL! wrong.
time(const3.quickDataSolveSAT4J)
time(const3.quickDataSolveZ3)
time(const3.solveChocoDyn)
time(const3.solveChocoPredAbstVarOrdered)
// val sol31 = const3.solveXZ3 // buggy (not calling external)
// println(s"sol31:\n$sol31")
// val sol32 = const3.quickDataSolveSAT4J
// println(s"sol32:\n$sol32")
// val sol33 = const3.quickDataSolveZ3
// println(s"sol33:\n$sol33")
// val sol34 = const3.solveChocoDyn
// println(s"sol34:\n$sol34")
// val sol35 = const3.solveChocoPredAbstVarOrdered
// println(s"sol35:\n$sol35")
}
|
joseproenca/ip-constraints
|
code/src/main/scala/reopp/common/examples/SungExperiments.scala
|
Scala
|
mit
| 4,637
|
package com.advancedtelematic.feed
import org.joda.time.DateTime
import play.api.libs.json.{Json, JsValue, Writes}
case class TraceEntry(id: String, timestamp: DateTime, lat: BigDecimal, lng: BigDecimal, isOccupied: Boolean) {
def toCsv() : String = s"$id $lat $lng $isOccupied ${timestamp.getMillis / 1000}"
}
object TraceEntry {
def parse(id: String)(str: String) : TraceEntry = {
val fields = str.split(" ")
TraceEntry(
id = id,
timestamp = new DateTime( fields(3).toLong * 1000 ),
lat = BigDecimal( fields(0) ),
lng = BigDecimal( fields(1) ),
isOccupied = fields(2) == "1" || fields(2) == "true"
)
}
implicit val TraceAndSpeedWrites = new Writes[(TraceEntry, BigDecimal)] {
override def writes(value: (TraceEntry, BigDecimal)): JsValue = {
val (entry, speed) = value
Json.obj(
"vin" -> entry.id,
"timestamp" -> entry.timestamp.getMillis,
"data" -> Json.arr(
Json.obj( "channel" -> "location", "value" -> Json.obj( "lat" -> entry.lat, "lon" -> entry.lng ) ),
Json.obj( "channel" -> "occupancy", "value" -> (if(entry.isOccupied) 1 else 0) ),
Json.obj( "channel" -> "speed", "value" -> speed)
)
)
}
}
}
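// Minimal usage sketch (not part of the original file): parse one line of the
// space-separated trace format and render it back to CSV. The id and coordinates
// below are invented for illustration.
object TraceEntryExample extends App {
  val entry = TraceEntry.parse("cab-1")("37.75134 -122.39488 1 1213084687")
  println(entry.toCsv()) // cab-1 37.75134 -122.39488 true 1213084687
}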
|
PDXostc/rvi_big-data_datafeeds
|
src/main/scala/com/advancedtelematic/feed/TraceEntry.scala
|
Scala
|
mpl-2.0
| 1,249
|
package scalaz.stream
package examples
import scalaz.concurrent.Task
import org.scalacheck._
import Prop._
object WritingAndLogging extends Properties("writing-and-logging") {
/*
A `Writer[F,W,O]` is a `Process[F, W \/ O]`. See
`WriterSyntax` for convenience functions
for working with either the written values (the `W`)
or the output values (the `O`).
This is useful for logging or other situations where we
want to emit some values 'on the side' while doing something
else with the main output of a `Process`.
Let's look at an example:
*/
property("writer") = secure {
/* Give this a short name since we'll be using it a lot. */
val P = Process
/* For testing - we'll be accumulating into this buffer. */
val buf = new collection.mutable.ArrayBuffer[String]
/*
A full example, which we'll break down line by line
in a minute. For each number in 0 to 10, this writes
messages to the mutable `buf`:
Got input: 1
Got input: 2
...
The integers are still available for further transforms.
*/
val ex: Process[Task,Int] =
Process.range(0,10)
.flatMap(i => P.tell("Got input: " + i) ++ P.emitO(i))
.toSource
.observeW(io.fillBuffer(buf))
.stripW
/* This will have the side effect of filling `buf`. */
ex.run.run
/* Let's break this down. */
/* The original input. */
val step0: Process[Task,Int] = P.range(0,10)
/*
Log some output using `W.tell`, and echo the original
input with `W.emitO` (`O` for 'output').
*/
val step1: Writer[Task,String,Int] =
step0.flatMap { i => P.tell("Got input: " + i) ++ P.emitO(i) }
/*
A `Sink` which as a side effect writes to a mutable
`Buffer`. This is more useful for testing.
*/
val snk: Sink[Task,String] = io.fillBuffer(buf)
/*
Another `Sink` we could use for our `Writer`, if
we want to log the writes to standard out, with
a newline after each `String`.
Of course, rather than picking `snk` or `snk2`,
we could also take the `Sink` to use for logging
as an argument, if we want our code to be agnostic
to the logging strategy.
*/
val snk2: Sink[Task,String] = io.stdOutLines
/*
The `observeW` function observes the write values of
a `Writer` using some `Sink`, and then the `stripW`
function discards the write side of the writer to get
back an ordinary `Process`. Notice the `Int` output
is still available for further transformation.
*/
val step2: Process[Task,Int] =
step1.observeW(snk).stripW
/* Make sure all values got written to the buffer. */
buf.toList == List.range(0,10).map("Got input: " + _)
}
}
|
scalaz/scalaz-stream
|
src/test/scala/scalaz/stream/examples/WritingAndLogging.scala
|
Scala
|
mit
| 2,762
|
package org.jetbrains.sbt
package project.settings
import com.intellij.openapi.components._
import com.intellij.openapi.externalSystem.service.project.PlatformFacade
import com.intellij.openapi.externalSystem.settings.AbstractExternalSystemLocalSettings
import com.intellij.openapi.project.Project
import org.jetbrains.sbt.project.SbtProjectSystem
import scala.beans.BeanProperty
/**
* @author Pavel Fatin
*/
@State(
name = "SbtLocalSettings",
storages = Array(
new Storage(file = StoragePathMacros.WORKSPACE_FILE)
)
)
class SbtLocalSettings(platformFacade: PlatformFacade, project: Project)
extends AbstractExternalSystemLocalSettings(SbtProjectSystem.Id, project, platformFacade)
with PersistentStateComponent[SbtLocalSettingsState] {
var sbtSupportSuggested = false
var lastUpdateTimestamp = 0L
def getState = {
val state = new SbtLocalSettingsState
fillState(state)
state.setSbtSupportSuggested(sbtSupportSuggested)
state.setLastUpdateTimestamp(lastUpdateTimestamp)
state
}
def loadState(state: SbtLocalSettingsState) {
super[AbstractExternalSystemLocalSettings].loadState(state)
sbtSupportSuggested = state.getSbtSupportSuggested
lastUpdateTimestamp = state.getLastUpdateTimestamp
}
}
class SbtLocalSettingsState extends AbstractExternalSystemLocalSettings.State {
@BeanProperty
var sbtSupportSuggested: Boolean = false
@BeanProperty
var lastUpdateTimestamp: Long = 0
}
object SbtLocalSettings {
def getInstance(project: Project) = ServiceManager.getService(project, classOf[SbtLocalSettings])
}
|
triggerNZ/intellij-scala
|
src/org/jetbrains/sbt/project/settings/SbtLocalSettings.scala
|
Scala
|
apache-2.0
| 1,579
|
package org.scalatra
package object validation {
}
|
dozed/scalatra
|
commands/src/main/scala/org/scalatra/validation/package.scala
|
Scala
|
bsd-2-clause
| 53
|
package com.example.models
import javax.xml.bind.annotation.XmlRootElement
import javax.xml.bind.annotation.XmlRegistry
import org.neo4j.graphdb.GraphDatabaseService
/**
* Example of using a model object which can be automatically serialized/unserialized
* to/from JSON using JAXB. Unfortunately with this method you have to write a lot of
* Java-esque boilerplate code, so unless you have existing JAXB objects you may find
* it easier to do the JSON conversion explicitly, or use <tt>NeoJsonConverter</tt>
* (see <tt>NeoResource</tt> for an example).
*/
/** A simple model of a cow. */
@XmlRootElement
class Moo(var colourOfSpots: java.lang.String) {
/** Constructor which loads the colourOfSpots property from the Neo reference node */
def this(neo: GraphDatabaseService) =
this(neo.getReferenceNode.getProperty("cowColour", "brown").asInstanceOf[String])
/** Save this model object to Neo */
def save(neo: GraphDatabaseService) = neo.getReferenceNode.setProperty("cowColour", colourOfSpots)
/** Zero-argument constructor is required */
private [models] def this() = this(null.asInstanceOf[String])
/** Sorry, JAXB expects Java-style getters and setters :-( */
def getColourOfSpots = colourOfSpots
def setColourOfSpots(c: String) { colourOfSpots = c }
}
/**
* This is needed so that JAXB can find the Moo class.
* <tt>ObjectFactory</tt> is a magic name.
*/
@XmlRegistry
class ObjectFactory {
def createMoo = new Moo
}
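/*
 * Editor's sketch, not part of the original file: a typical round trip with
 * the model above (the object and method names are ours).
 */
object MooExample {
  def repaint(neo: GraphDatabaseService, colour: String): Moo = {
    val cow = new Moo(neo)        // loads colourOfSpots from the Neo reference node
    cow.setColourOfSpots(colour)
    cow.save(neo)                 // persists the new colour back to Neo4j
    cow
  }
}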
|
ept/neo4j-scala-template
|
src/main/scala/com/example/models/Moo.scala
|
Scala
|
mit
| 1,466
|
package com.es.scala.chapter07
abstract class IntTree {
def contains(t: IntTree, v: Int): Boolean = t match {
case EmptyTree => false
case Node(elem, left, right) => elem == v || contains(left, v) || contains(right, v)
}
}
case object EmptyTree extends IntTree
case class Node(elem: Int, left: IntTree, right: IntTree) extends IntTree
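// Editor's sketch, not part of the original file: building a small tree and
// querying it (the values and the object name are ours).
object IntTreeExample extends App {
  val tree: IntTree = Node(5, Node(2, EmptyTree, EmptyTree), Node(9, EmptyTree, EmptyTree))
  println(tree.contains(tree, 9)) // true
  println(tree.contains(tree, 4)) // false
}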
|
elephantscale/learning-scala
|
ScalaByExample/src/main/scala/com/es/scala/chapter07/IntTree.scala
|
Scala
|
apache-2.0
| 348
|
package com.doanduyhai.elevator.actors
import java.io.PrintStream
import akka.actor.{ActorRef, Actor, ActorLogging}
import scala.collection.immutable.{Queue}
class ControlSystemActor(val expectedElevatorCount: Int,
private[actors] var orderQueue:Queue[Pickup] = Queue.empty[Pickup],
val maxQueueSize:Int = 10,
val printStream: PrintStream = System.out) extends Actor with ActorLogging with StatusPrinter {
private[actors] var elevatorsStatus: Map[Int, (ElevatorStatus, Option[Pickup])] = Map()
private[actors] var elevatorById:Map[Int, ActorRef] = Map()
def availableElevator:Option[Int] = elevatorsStatus.
filter{case(id,(status,scheduledOrder)) => (!status.isMoving) && scheduledOrder.isEmpty}.
map{case(id,_) => id}.headOption match {
case head @ Some(_) => head //First check all free elevators
    case None => elevatorsStatus. // Then check elevators that have no scheduled order
filter{case(_,(_,scheduledOrder)) => scheduledOrder.isEmpty}.
map{case(id,_) => id}.
headOption
}
def receive: Receive = {
case UpdateStatus(elevatorId, status, scheduledOrder) => {
elevatorsStatus += ((elevatorId, (status, scheduledOrder)))
elevatorById = elevatorById.updated(elevatorId, sender())
if(elevatorsStatus.size >= expectedElevatorCount) {
printOrderQueue(this.orderQueue)
printElevatorsStatus(elevatorsStatus)
if(this.orderQueue.size > 0) {
availableElevator match {
case Some(freeElevator) => dequeueAnOrder(freeElevator)
case None => //Do nothing
}
}
}
}
case pickupOrder @ Pickup(_) => {
if(elevatorsStatus.size >= expectedElevatorCount) {
printPickup(pickupOrder)
availableElevator match {
case Some(freeElevator) =>
elevatorById(freeElevator) ! pickupOrder
proactivelyUpdateElevatorStatus(freeElevator, pickupOrder)
case None => enqueueAnOrder(pickupOrder)
}
} else {
enqueueAnOrder(pickupOrder)
}
}
case unknown @ _ => log.error(s"ControlSystemActor receiving unknown message $unknown")
}
def enqueueAnOrder(pickupOrder: Pickup): Unit = {
if (this.orderQueue.size < maxQueueSize) {
this.orderQueue = this.orderQueue.enqueue(pickupOrder)
} else {
log.error(s"Cannot enqueue order $pickupOrder because the queue is full")
}
}
def dequeueAnOrder(freeElevator: Int): Unit = {
val (pickup, tail) = this.orderQueue.dequeue
elevatorById(freeElevator) ! pickup
this.orderQueue = tail
proactivelyUpdateElevatorStatus(freeElevator, pickup)
printDequeueOperation(freeElevator, pickup.direction)
}
def proactivelyUpdateElevatorStatus(freeElevator: Int, pickup: Pickup): Unit = {
//Pro-actively update elevator status in map before getting the update from the elevator itself
val (status, _) = elevatorsStatus(freeElevator)
if (status.isMoving)
elevatorsStatus += ((freeElevator, (status, Some(pickup))))
else
elevatorsStatus += ((freeElevator, (pickup.direction, None)))
}
}
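/*
 * Editor's sketch, not part of the original file (the ids and states are ours):
 * how `availableElevator` chooses. Given
 *   1 -> (moving, Some(pickup))   // busy and already booked
 *   2 -> (moving, None)           // busy but not yet booked
 *   3 -> (idle,   None)           // idle and unbooked
 * elevator 3 is picked first; without it, elevator 2 would be; if every
 * elevator already has a scheduled order, the pickup is enqueued
 * (up to maxQueueSize orders).
 */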
|
doanduyhai/elevator-control-system
|
src/main/scala/com/doanduyhai/elevator/actors/ControlSystemActor.scala
|
Scala
|
apache-2.0
| 3,228
|
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.consumers
import monix.execution.Callback
import monix.execution.Ack.Stop
import monix.execution.{Ack, Scheduler}
import monix.execution.cancelables.AssignableCancelable
import monix.reactive.Consumer
import monix.reactive.observers.Subscriber
/** Implementation for [[monix.reactive.Consumer.headOption]] */
private[reactive] final class HeadOptionConsumer[A] extends Consumer.Sync[A, Option[A]] {
override def createSubscriber(
cb: Callback[Throwable, Option[A]],
s: Scheduler): (Subscriber.Sync[A], AssignableCancelable) = {
val out = new Subscriber.Sync[A] {
implicit val scheduler = s
private[this] var isDone = false
def onNext(elem: A): Ack = {
isDone = true
cb.onSuccess(Some(elem))
Stop
}
def onComplete(): Unit =
if (!isDone) {
isDone = true
cb.onSuccess(None)
}
def onError(ex: Throwable): Unit =
if (!isDone) {
isDone = true
cb.onError(ex)
}
}
(out, AssignableCancelable.dummy)
}
}
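// Editor's sketch, not part of the original file: how this consumer is reached
// through the public API. `Observable(1, 2, 3).consumeWith(Consumer.headOption)`
// yields a Task that completes with Some(1); an empty Observable yields None.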
|
alexandru/monifu
|
monix-reactive/shared/src/main/scala/monix/reactive/internal/consumers/HeadOptionConsumer.scala
|
Scala
|
apache-2.0
| 1,763
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import java.io._
import java.util.concurrent.Executors
import java.util.concurrent.RejectedExecutionException
import org.apache.hadoop.fs.Path
import org.apache.hadoop.conf.Configuration
import org.apache.spark.Logging
import org.apache.spark.io.CompressionCodec
import org.apache.spark.util.MetadataCleaner
private[streaming]
class Checkpoint(@transient ssc: StreamingContext, val checkpointTime: Time)
extends Logging with Serializable {
val master = ssc.sc.master
val framework = ssc.sc.appName
val sparkHome = ssc.sc.sparkHome
val jars = ssc.sc.jars
val environment = ssc.sc.environment
val graph = ssc.graph
val checkpointDir = ssc.checkpointDir
val checkpointDuration = ssc.checkpointDuration
val pendingTimes = ssc.scheduler.jobManager.getPendingTimes()
val delaySeconds = MetadataCleaner.getDelaySeconds
def validate() {
assert(master != null, "Checkpoint.master is null")
assert(framework != null, "Checkpoint.framework is null")
assert(graph != null, "Checkpoint.graph is null")
assert(checkpointTime != null, "Checkpoint.checkpointTime is null")
logInfo("Checkpoint for time " + checkpointTime + " validated")
}
}
/**
* Convenience class to speed up the writing of graph checkpoint to file
*/
private[streaming]
class CheckpointWriter(checkpointDir: String) extends Logging {
val file = new Path(checkpointDir, "graph")
// The file to which we actually write - and then "move" to file.
private val writeFile = new Path(file.getParent, file.getName + ".next")
private val bakFile = new Path(file.getParent, file.getName + ".bk")
private var stopped = false
val conf = new Configuration()
var fs = file.getFileSystem(conf)
val maxAttempts = 3
val executor = Executors.newFixedThreadPool(1)
private val compressionCodec = CompressionCodec.createCodec()
  // Code that validated there is only one CheckpointWriter per path 'file' was removed
  // since no errors were observed - consider reintroducing it.
class CheckpointWriteHandler(checkpointTime: Time, bytes: Array[Byte]) extends Runnable {
def run() {
var attempts = 0
val startTime = System.currentTimeMillis()
while (attempts < maxAttempts) {
attempts += 1
try {
logDebug("Saving checkpoint for time " + checkpointTime + " to file '" + file + "'")
          // This is inherently thread-unsafe, so we alleviate it by writing to the '.next' file and then renaming it into place, which should be fast.
val fos = fs.create(writeFile)
fos.write(bytes)
fos.close()
if (fs.exists(file) && fs.rename(file, bakFile)) {
logDebug("Moved existing checkpoint file to " + bakFile)
}
// paranoia
fs.delete(file, false)
fs.rename(writeFile, file)
val finishTime = System.currentTimeMillis()
logInfo("Checkpoint for time " + checkpointTime + " saved to file '" + file +
"', took " + bytes.length + " bytes and " + (finishTime - startTime) + " milliseconds")
return
} catch {
case ioe: IOException =>
logWarning("Error writing checkpoint to file in " + attempts + " attempts", ioe)
}
}
logError("Could not write checkpoint for time " + checkpointTime + " to file '" + file + "'")
}
}
def write(checkpoint: Checkpoint) {
val bos = new ByteArrayOutputStream()
val zos = compressionCodec.compressedOutputStream(bos)
val oos = new ObjectOutputStream(zos)
oos.writeObject(checkpoint)
oos.close()
bos.close()
try {
executor.execute(new CheckpointWriteHandler(checkpoint.checkpointTime, bos.toByteArray))
} catch {
case rej: RejectedExecutionException =>
logError("Could not submit checkpoint task to the thread pool executor", rej)
}
}
def stop() {
synchronized {
if (stopped) {
return
}
stopped = true
}
executor.shutdown()
val startTime = System.currentTimeMillis()
val terminated = executor.awaitTermination(10, java.util.concurrent.TimeUnit.SECONDS)
val endTime = System.currentTimeMillis()
logInfo("CheckpointWriter executor terminated ? " + terminated + ", waited for " + (endTime - startTime) + " ms.")
}
}
private[streaming]
object CheckpointReader extends Logging {
def read(path: String): Checkpoint = {
val fs = new Path(path).getFileSystem(new Configuration())
val attempts = Seq(new Path(path, "graph"), new Path(path, "graph.bk"), new Path(path), new Path(path + ".bk"))
val compressionCodec = CompressionCodec.createCodec()
attempts.foreach(file => {
if (fs.exists(file)) {
logInfo("Attempting to load checkpoint from file '" + file + "'")
try {
val fis = fs.open(file)
// ObjectInputStream uses the last defined user-defined class loader in the stack
        // to find classes, which may be the wrong class loader. Hence, an inherited version
        // of ObjectInputStream is used to explicitly use the current thread's context class
        // loader to find and load classes. This is a well-known Java issue and has popped up
// in other places (e.g., http://jira.codehaus.org/browse/GROOVY-1627)
val zis = compressionCodec.compressedInputStream(fis)
val ois = new ObjectInputStreamWithLoader(zis, Thread.currentThread().getContextClassLoader)
val cp = ois.readObject.asInstanceOf[Checkpoint]
ois.close()
fs.close()
cp.validate()
logInfo("Checkpoint successfully loaded from file '" + file + "'")
logInfo("Checkpoint was generated at time " + cp.checkpointTime)
return cp
} catch {
case e: Exception =>
logError("Error loading checkpoint from file '" + file + "'", e)
}
} else {
logWarning("Could not read checkpoint from file '" + file + "' as it does not exist")
}
})
throw new Exception("Could not read checkpoint from path '" + path + "'")
}
}
private[streaming]
class ObjectInputStreamWithLoader(inputStream_ : InputStream, loader: ClassLoader)
extends ObjectInputStream(inputStream_) {
override def resolveClass(desc: ObjectStreamClass): Class[_] = {
try {
return loader.loadClass(desc.getName())
} catch {
case e: Exception =>
}
return super.resolveClass(desc)
}
}
|
mkolod/incubator-spark
|
streaming/src/main/scala/org/apache/spark/streaming/Checkpoint.scala
|
Scala
|
apache-2.0
| 7,275
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.kernel.interpreter.sparkr
import java.util.concurrent.{Semaphore, TimeUnit}
import org.apache.toree.interpreter.broker.BrokerService
import org.apache.toree.kernel.interpreter.sparkr.SparkRTypes.{Code, CodeResults}
import org.slf4j.LoggerFactory
import scala.concurrent.Future
import scala.tools.nsc.interpreter._
/**
* Represents the service that provides the high-level interface between the
* JVM and R.
*
* @param rBackend The backend to start to communicate between the JVM and R
* @param sparkRBridge The bridge to use for communication between the JVM and R
* @param sparkRProcessHandler The handler used for events that occur with the
* SparkR process
*/
class SparkRService(
private val rBackend: ReflectiveRBackend,
private val sparkRBridge: SparkRBridge,
private val sparkRProcessHandler: SparkRProcessHandler
) extends BrokerService {
private val logger = LoggerFactory.getLogger(this.getClass)
@volatile private var rBackendPort: Int = -1
@volatile private var _isRunning: Boolean = false
override def isRunning: Boolean = _isRunning
/** Represents the process used to execute R code via the bridge. */
private lazy val sparkRProcess: SparkRProcess = {
val p = new SparkRProcess(
sparkRBridge,
sparkRProcessHandler,
rBackendPort
)
// Update handlers to correctly reset and restart the process
sparkRProcessHandler.setResetMethod(message => {
p.stop()
sparkRBridge.state.reset(message)
})
sparkRProcessHandler.setRestartMethod(() => p.start())
p
}
/** Starts the SparkR service. */
override def start(): Unit = {
logger.debug("Initializing statically-accessible SparkR bridge")
SparkRBridge.sparkRBridge = sparkRBridge
val initialized = new Semaphore(0)
val classLoader = SparkRBridge.getClass.getClassLoader
import scala.concurrent.ExecutionContext.Implicits.global
val rBackendRun = Future {
logger.debug("Initializing RBackend")
rBackendPort = rBackend.init(classLoader)
logger.debug(s"RBackend running on port $rBackendPort")
initialized.release()
logger.debug("Running RBackend")
rBackend.run()
logger.debug("RBackend has finished")
}
// Wait for backend to start before starting R process to connect
val backendTimeout =
sys.env.getOrElse("SPARKR_BACKEND_TIMEOUT", "120").toInt
if (initialized.tryAcquire(backendTimeout, TimeUnit.SECONDS)) {
// Start the R process used to execute code
logger.debug("Launching process to execute R code")
sparkRProcess.start()
_isRunning = true
} else {
// Unable to initialize, so throw an exception
throw new SparkRException(
s"Unable to initialize R backend in $backendTimeout seconds!")
}
}
/**
* Submits code to the SparkR service to be executed and return a result.
*
* @param code The code to execute
*
* @return The result as a future to eventually return
*/
override def submitCode(code: Code, kernelOutputStream: Option[OutputStream]): Future[CodeResults] = {
sparkRBridge.state.pushCode(code, kernelOutputStream)
}
/** Stops the running SparkR service. */
override def stop(): Unit = {
// Stop the R process used to execute code
sparkRProcess.stop()
// Stop the server used as an entrypoint for R
rBackend.close()
// Clear the bridge
SparkRBridge.reset()
_isRunning = false
}
}
|
poplav/incubator-toree
|
sparkr-interpreter/src/main/scala/org/apache/toree/kernel/interpreter/sparkr/SparkRService.scala
|
Scala
|
apache-2.0
| 4,315
|
package org.vitrivr.adampro.query.query
import org.vitrivr.adampro.shared.catalog.CatalogManager
import org.vitrivr.adampro.data.datatypes.vector.Vector._
import org.vitrivr.adampro.data.entity.Entity
import org.vitrivr.adampro.data.entity.Entity.AttributeName
import org.vitrivr.adampro.distribution.partitioning.Partitioning.PartitionID
import org.vitrivr.adampro.data.index.Index
import org.vitrivr.adampro.process.SharedComponentContext
import org.vitrivr.adampro.query.distance.DistanceFunction
/**
* adamtwo
*
* Ivan Giangreco
* November 2015
*/
//TODO: use query class
class Query(qo: Seq[QueryObject], queryID: Option[String] = Some(java.util.UUID.randomUUID().toString)) extends Serializable
abstract class QueryObject(queryID: Option[String] = Some(java.util.UUID.randomUUID().toString)) extends Serializable {}
/**
* Boolean query parameters.
*
 * @param where sequence of predicates that are AND-ed together; each predicate names an attribute,
 *              an optional SQL operator (a '=' is assumed if none is given) and the values to compare against
*/
case class FilteringQuery(
where: Seq[Predicate],
queryID: Option[String] = Some(java.util.UUID.randomUUID().toString))
extends QueryObject(queryID) {
override def equals(that: Any): Boolean = {
that match {
case that: FilteringQuery =>
this.where.equals(that.where)
case _ =>
false
}
}
override def hashCode(): Int = {
val prime = 31
var result = 1
result = prime * result + where.hashCode()
result
}
}
case class Predicate(attribute : String, operator : Option[String], values : Seq[Any]){
  /**
   * Renders this predicate as a SQL fragment: multiple values become an IN / NOT IN clause,
   * a single value uses the given operator (a '=' is assumed if none is given); see the examples after this class.
   */
lazy val sqlString = {
val adjustedValues = values.map(value => {
if(value.isInstanceOf[String]){
"'" + value + "'"
} else {
value
}
})
if(adjustedValues.length > 1 && operator.getOrElse("=").equals("=")){
"(" + attribute + " IN " + adjustedValues.mkString("(", ",", ")") + ")"
} else if (adjustedValues.length > 1 && operator.getOrElse("=").equals("!=")){
"(" + attribute + " NOT IN " + adjustedValues.mkString("(", ",", ")") + ")"
} else if(adjustedValues.length == 1){
"(" + attribute + " " + operator.getOrElse(" = ") + " " + adjustedValues.head + ")"
} else {
""
}
}
}
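/*
 * Editor's sketch, not part of the original file (attribute names and values
 * are ours): how `sqlString` renders, ignoring exact whitespace.
 *   Predicate("colour", None, Seq("red"))    -> (colour = 'red')
 *   Predicate("id", None, Seq(1, 2))         -> (id IN (1,2))
 *   Predicate("id", Some("!="), Seq(1, 2))   -> (id NOT IN (1,2))
 */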
/**
* Nearest neighbour query parameters.
*
* @param attribute name of attribute to perform query on
* @param q query vector
* @param distance distance function
* @param k number of elements to retrieve
* @param indexOnly if set to true, then only the index is scanned and the results are result candidates only
* and may contain false positives
* @param partitions partitions to query (if not set all partitions are queried)
* @param options options to pass to handler
*/
case class RankingQuery(
attribute: AttributeName,
q: MathVector,
weights: Option[MathVector],
distance: DistanceFunction,
k: Int,
indexOnly: Boolean = false,
options: Map[String, String] = Map[String, String](),
partitions: Option[Set[PartitionID]] = None,
queryID: Option[String] = Some(java.util.UUID.randomUUID().toString))
extends QueryObject(queryID) {
def isConform(entity: Entity)(implicit ac: SharedComponentContext): Boolean = {
if (options.getOrElse("nochecks", "false").equals("true")) {
true
} else {
//check if attribute exists
val attributeExists = entity.schema(Some(Seq(attribute))).nonEmpty
//check if feature data exists and dimensionality is correct
val featureData = if (entity.getFeatureData.isDefined) {
var ndims = ac.catalogManager.getAttributeOption(entity.entityname, attribute, Some("ndims")).get.get("ndims")
if(ndims.isEmpty){
ndims = Some(entity.getFeatureData.get.select(attribute).head().getAs[DenseSparkVector](attribute).length.toString)
ac.catalogManager.updateAttributeOption(entity.entityname, attribute, "ndims", ndims.get)
}
ndims.get.toInt == q.length
} else {
false
}
attributeExists && featureData
}
}
def isConform(index: Index): Boolean = {
if (options.getOrElse("nochecks", "false").equals("true")) {
true
} else {
index.isQueryConform(this)
}
}
override def equals(that: Any): Boolean = {
that match {
case that: RankingQuery =>
this.attribute.equals(that.attribute) &&
this.q.equals(that.q) &&
this.weights.isDefined == that.weights.isDefined &&
        this.weights.map(w1 => that.weights.exists(w2 => w1.equals(w2))).getOrElse(true) &&
this.distance.getClass.equals(that.distance.getClass) &&
this.k == that.k &&
this.indexOnly == that.indexOnly &&
this.partitions.isDefined == that.partitions.isDefined &&
this.partitions.map(p1 => that.partitions.exists(p2 => p1.equals(p2))).getOrElse(true)
case _ =>
false
}
}
override def hashCode(): Int = {
val prime = 31
var result = 1
result = prime * result + attribute.hashCode
result = prime * result + q.hashCode()
result = prime * result + weights.map(_.hashCode()).getOrElse(0)
result = prime * result + distance.getClass.hashCode()
result = prime * result + k
result = prime * result + indexOnly.hashCode()
result = prime * result + partitions.map(_.hashCode()).getOrElse(0)
result
}
}
|
dbisUnibas/ADAMpro
|
src/main/scala/org/vitrivr/adampro/query/query/QueryObject.scala
|
Scala
|
mit
| 5,898
|
package com.dwolla.cloudflare
import cats._
import cats.syntax.all._
import com.dwolla.cloudflare.domain.model.accesscontrolrules._
import com.dwolla.cloudflare.domain.model.{AccountId, ZoneId, tagAccountId, tagZoneId}
import io.circe.syntax._
import io.circe._
import io.circe.optics.JsonPath._
import fs2._
import com.dwolla.cloudflare.domain.model.Exceptions.UnexpectedCloudflareErrorException
import org.http4s.Method._
import org.http4s._
import org.http4s.circe._
import org.http4s.client.dsl.Http4sClientDsl
trait AccessControlRuleClient[F[_]] {
def list(level: Level, mode: Option[String] = None): Stream[F, AccessControlRule]
def getById(level: Level, ruleId: String): Stream[F, AccessControlRule]
def create(level: Level, rule: AccessControlRule): Stream[F, AccessControlRule]
def update(level: Level, rule: AccessControlRule): Stream[F, AccessControlRule]
def delete(level: Level, ruleId: String): Stream[F, AccessControlRuleId]
def getByUri(uri: String): Stream[F, AccessControlRule] = parseUri(uri).fold(Stream.empty.covaryAll[F, AccessControlRule]) {
case (level, ruleId) => getById(level, ruleId)
}
def parseUri(uri: String): Option[(Level, AccessControlRuleId)] = uri match {
case AccessControlRuleClient.accountLevelUriRegex(accountId, ruleId) => Option((Level.Account(tagAccountId(accountId)), tagAccessControlRuleId(ruleId)))
case AccessControlRuleClient.zoneLevelUriRegex(zoneId, ruleId) => Option((Level.Zone(tagZoneId(zoneId)), tagAccessControlRuleId(ruleId)))
case _ => None
}
def buildUri(level: Level, ruleId: AccessControlRuleId): Uri =
buildBaseUrl(level) / ruleId
def buildBaseUrl(level: Level): Uri = {
val baseUrlWithLevel = level match {
case Level.Account(id) => BaseUrl / "accounts" / id
case Level.Zone(id) => BaseUrl / "zones" / id
}
baseUrlWithLevel / "firewall" / "access_rules" / "rules"
}
}
object AccessControlRuleClient {
def apply[F[_] : ApplicativeThrow](executor: StreamingCloudflareApiExecutor[F]): AccessControlRuleClient[F] = new AccessControlRuleClientImpl[F](executor)
val accountLevelUriRegex = """https://api.cloudflare.com/client/v4/accounts/(.+?)/firewall/access_rules/rules/(.+)""".r
val zoneLevelUriRegex = """https://api.cloudflare.com/client/v4/zones/(.+?)/firewall/access_rules/rules/(.+)""".r
}
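/*
 * Editor's sketch, not part of the original file (the ids are ours): what
 * `parseUri` extracts from a rule URL.
 *   "https://api.cloudflare.com/client/v4/accounts/abc123/firewall/access_rules/rules/rule42"
 *     -> Some((Level.Account("abc123"), "rule42"))
 *   "https://api.cloudflare.com/client/v4/zones/zone99/firewall/access_rules/rules/rule42"
 *     -> Some((Level.Zone("zone99"), "rule42"))
 *   anything else -> None, in which case `getByUri` returns an empty Stream.
 */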
class AccessControlRuleClientImpl[F[_] : ApplicativeThrow](executor: StreamingCloudflareApiExecutor[F]) extends AccessControlRuleClient[F] with Http4sClientDsl[F] {
private def fetch(req: Request[F]): Stream[F, AccessControlRule] =
executor.fetch[AccessControlRule](req)
override def list(level: Level, mode: Option[String] = None): Stream[F, AccessControlRule] = {
fetch(GET(mode.toSeq.foldLeft(buildBaseUrl(level))((uri: Uri, param: String) => uri.withQueryParam("mode", param))))
}
override def getById(level: Level, ruleId: String): Stream[F, AccessControlRule] =
fetch(GET(buildBaseUrl(level) / ruleId))
override def create(level: Level, rule: AccessControlRule): Stream[F, AccessControlRule] =
fetch(POST(rule.asJson, buildBaseUrl(level)))
override def update(level: Level, rule: AccessControlRule): Stream[F, AccessControlRule] =
// TODO it would really be better to do this check at compile time by baking the identification question into the types
if (rule.id.isDefined)
fetch(PATCH(rule.copy(id = None).asJson, buildBaseUrl(level) / rule.id.get))
else
Stream.raiseError[F](CannotUpdateUnidentifiedAccessControlRule(rule))
override def delete(level: Level, ruleId: String): Stream[F, AccessControlRuleId] =
for {
json <- executor.fetch[Json](DELETE(buildBaseUrl(level) / ruleId)).last.recover {
case ex: UnexpectedCloudflareErrorException if ex.errors.flatMap(_.code.toSeq).exists(notFoundCodes.contains) =>
None
}
} yield tagAccessControlRuleId(json.flatMap(deletedRecordLens).getOrElse(ruleId))
private val deletedRecordLens: Json => Option[String] = root.id.string.getOption
private val notFoundCodes = List(10001)
}
case class CannotUpdateUnidentifiedAccessControlRule(rule: AccessControlRule) extends RuntimeException(s"Cannot update unidentified access control rule $rule")
sealed trait Level
object Level {
case class Account(accountId: AccountId) extends Level
case class Zone(zoneId: ZoneId) extends Level
}
|
Dwolla/scala-cloudflare
|
client/src/main/scala/com/dwolla/cloudflare/AccessControlRuleClient.scala
|
Scala
|
mit
| 4,369
|
package org.freeour.app
import org.freeour.app.controllers.MainController
import org.scalatra.test.specs2._
// For more on Specs2, see http://etorreborre.github.com/specs2/guide/org.specs2.guide.QuickStart.html
class MainServletSpec extends ScalatraSpec {
def is =
"GET / on ProtectedServlet" ^
"should return status 200" ! root302 ^
end
addServlet(classOf[MainController], "/*")
def root302 = get("/") {
status must_== 302
}
}
|
ideaalloc/freeour
|
src/test/scala/org/freeour/app/MainServletSpec.scala
|
Scala
|
gpl-2.0
| 460
|
/*
* Copyright: Copyright (C) 2016, ATS Advanced Telematic Systems GmbH
* License: MPL-2.0
*/
package org.genivi.sota.monitoring
import java.util.concurrent.TimeUnit
import com.codahale.metrics.jvm.{GarbageCollectorMetricSet, MemoryUsageGaugeSet}
import scala.collection.JavaConverters._
import com.codahale.metrics._
import org.genivi.sota.db.DatabaseConfig
import slick.jdbc.hikaricp.HikariCPJdbcDataSource
object MetricsSupport {
lazy val metricRegistry = new MetricRegistry()
val JvmFilter = new MetricFilter {
override def matches(name: String, metric: Metric): Boolean = name.startsWith("jvm")
}
val DbFilter = new MetricFilter {
override def matches(name: String, metric: Metric): Boolean = name.startsWith("database")
}
}
trait MetricsSupport {
lazy val metricRegistry = MetricsSupport.metricRegistry
private lazy val reporter = Slf4jReporter.forRegistry(metricRegistry)
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.filter(MetricsSupport.DbFilter)
.build()
private def registerAll(registry: MetricRegistry, prefix: String, metricSet: MetricSet): Unit = {
metricSet.getMetrics.asScala.foreach {
case (metricPrefix, set: MetricSet) =>
registerAll(registry, prefix + "." + metricPrefix, set)
case (metricPrefix, metric) =>
registry.register(prefix + "." + metricPrefix, metric)
}
}
registerAll(metricRegistry, "jvm.gc", new GarbageCollectorMetricSet())
registerAll(metricRegistry, "jvm.memory", new MemoryUsageGaugeSet())
reporter.start(1, TimeUnit.MINUTES)
}
trait DatabaseMetrics {
self: MetricsSupport with DatabaseConfig =>
db.source.asInstanceOf[HikariCPJdbcDataSource].ds.setMetricRegistry(metricRegistry)
}
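// Editor's sketch, not part of the original file (the class name is ours): a
// service that wants connection-pool metrics mixes in all three traits, e.g.
//   class DeviceService extends MetricsSupport with DatabaseConfig with DatabaseMetrics
// The Slf4j reporter above then logs, once a minute, any registered metrics
// whose names start with "database" (the DbFilter).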
|
PDXostc/rvi_sota_server
|
common/src/main/scala/org/genivi/sota/monitoring/MetricsSupport.scala
|
Scala
|
mpl-2.0
| 1,758
|
package scalapoi
package dragons
import org.apache.poi.hssf.usermodel._
import java.io.{File, FileOutputStream}
import scala.collection.JavaConverters._
import utils._
object Document {
// totes unsafe - should at least return an either
def create(name: String): Workbook = {
val file = new FileOutputStream(new File(name))
val wb = new HSSFWorkbook()
wb.write(file)
file.close()
Workbook(wb, name)
}
}
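/*
 * Editor's sketch, not part of the original file: the Either-returning variant
 * that the comment above asks for (the object and method names are ours).
 */
object SafeDocument {
  def create(name: String): Either[Throwable, Workbook] =
    try Right(Document.create(name))
    catch { case e: Exception => Left(e) }
}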
|
hamishdickson/scalapoi
|
src/main/scala/scalapoi/dragons/Document.scala
|
Scala
|
mit
| 432
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.jdbc
import java.sql.Connection
import java.util.Properties
import org.apache.spark.sql.Column
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.expressions.Literal
import org.apache.spark.sql.types.{ArrayType, DecimalType, FloatType, ShortType}
import org.apache.spark.tags.DockerTest
@DockerTest
class PostgresIntegrationSuite extends DockerJDBCIntegrationSuite {
override val db = new DatabaseOnDocker {
override val imageName = "postgres:11.4"
override val env = Map(
"POSTGRES_PASSWORD" -> "rootpass"
)
override val usesIpc = false
override val jdbcPort = 5432
override def getJdbcUrl(ip: String, port: Int): String =
s"jdbc:postgresql://$ip:$port/postgres?user=postgres&password=rootpass"
override def getStartupProcessName: Option[String] = None
}
override def dataPreparation(conn: Connection): Unit = {
conn.prepareStatement("CREATE DATABASE foo").executeUpdate()
conn.setCatalog("foo")
conn.prepareStatement("CREATE TYPE enum_type AS ENUM ('d1', 'd2')").executeUpdate()
conn.prepareStatement("CREATE TABLE bar (c0 text, c1 integer, c2 double precision, c3 bigint, "
+ "c4 bit(1), c5 bit(10), c6 bytea, c7 boolean, c8 inet, c9 cidr, "
+ "c10 integer[], c11 text[], c12 real[], c13 numeric(2,2)[], c14 enum_type, "
+ "c15 float4, c16 smallint, c17 numeric[])").executeUpdate()
conn.prepareStatement("INSERT INTO bar VALUES ('hello', 42, 1.25, 123456789012345, B'0', "
+ "B'1000100101', E'\\\\\\\\xDEADBEEF', true, '172.16.0.42', '192.168.0.0/16', "
+ """'{1, 2}', '{"a", null, "b"}', '{0.11, 0.22}', '{0.11, 0.22}', 'd1', 1.01, 1, """
+ "'{111.2222, 333.4444}')"
).executeUpdate()
conn.prepareStatement("INSERT INTO bar VALUES (null, null, null, null, null, "
+ "null, null, null, null, null, "
+ "null, null, null, null, null, null, null, null)"
).executeUpdate()
conn.prepareStatement("CREATE TABLE ts_with_timezone " +
"(id integer, tstz TIMESTAMP WITH TIME ZONE, ttz TIME WITH TIME ZONE)")
.executeUpdate()
conn.prepareStatement("INSERT INTO ts_with_timezone VALUES " +
"(1, TIMESTAMP WITH TIME ZONE '2016-08-12 10:22:31.949271-07', " +
"TIME WITH TIME ZONE '17:22:31.949271+00')")
.executeUpdate()
conn.prepareStatement("CREATE TABLE st_with_array (c0 uuid, c1 inet, c2 cidr," +
"c3 json, c4 jsonb, c5 uuid[], c6 inet[], c7 cidr[], c8 json[], c9 jsonb[])")
.executeUpdate()
conn.prepareStatement("INSERT INTO st_with_array VALUES ( " +
"'0a532531-cdf1-45e3-963d-5de90b6a30f1', '172.168.22.1', '192.168.100.128/25', " +
"""'{"a": "foo", "b": "bar"}', '{"a": 1, "b": 2}', """ +
"ARRAY['7be8aaf8-650e-4dbb-8186-0a749840ecf2'," +
"'205f9bfc-018c-4452-a605-609c0cfad228']::uuid[], ARRAY['172.16.0.41', " +
"'172.16.0.42']::inet[], ARRAY['192.168.0.0/24', '10.1.0.0/16']::cidr[], " +
"""ARRAY['{"a": "foo", "b": "bar"}', '{"a": 1, "b": 2}']::json[], """ +
"""ARRAY['{"a": 1, "b": 2, "c": 3}']::jsonb[])"""
)
.executeUpdate()
}
test("Type mapping for various types") {
val df = sqlContext.read.jdbc(jdbcUrl, "bar", new Properties)
val rows = df.collect().sortBy(_.toString())
assert(rows.length == 2)
// Test the types, and values using the first row.
val types = rows(0).toSeq.map(x => x.getClass)
assert(types.length == 18)
assert(classOf[String].isAssignableFrom(types(0)))
assert(classOf[java.lang.Integer].isAssignableFrom(types(1)))
assert(classOf[java.lang.Double].isAssignableFrom(types(2)))
assert(classOf[java.lang.Long].isAssignableFrom(types(3)))
assert(classOf[java.lang.Boolean].isAssignableFrom(types(4)))
assert(classOf[Array[Byte]].isAssignableFrom(types(5)))
assert(classOf[Array[Byte]].isAssignableFrom(types(6)))
assert(classOf[java.lang.Boolean].isAssignableFrom(types(7)))
assert(classOf[String].isAssignableFrom(types(8)))
assert(classOf[String].isAssignableFrom(types(9)))
assert(classOf[Seq[Int]].isAssignableFrom(types(10)))
assert(classOf[Seq[String]].isAssignableFrom(types(11)))
assert(classOf[Seq[Double]].isAssignableFrom(types(12)))
assert(classOf[Seq[BigDecimal]].isAssignableFrom(types(13)))
assert(classOf[String].isAssignableFrom(types(14)))
assert(classOf[java.lang.Float].isAssignableFrom(types(15)))
assert(classOf[java.lang.Short].isAssignableFrom(types(16)))
assert(classOf[Seq[BigDecimal]].isAssignableFrom(types(17)))
assert(rows(0).getString(0).equals("hello"))
assert(rows(0).getInt(1) == 42)
assert(rows(0).getDouble(2) == 1.25)
assert(rows(0).getLong(3) == 123456789012345L)
assert(!rows(0).getBoolean(4))
// BIT(10)'s come back as ASCII strings of ten ASCII 0's and 1's...
assert(java.util.Arrays.equals(rows(0).getAs[Array[Byte]](5),
Array[Byte](49, 48, 48, 48, 49, 48, 48, 49, 48, 49)))
assert(java.util.Arrays.equals(rows(0).getAs[Array[Byte]](6),
Array[Byte](0xDE.toByte, 0xAD.toByte, 0xBE.toByte, 0xEF.toByte)))
assert(rows(0).getBoolean(7))
assert(rows(0).getString(8) == "172.16.0.42")
assert(rows(0).getString(9) == "192.168.0.0/16")
assert(rows(0).getSeq(10) == Seq(1, 2))
assert(rows(0).getSeq(11) == Seq("a", null, "b"))
assert(rows(0).getSeq(12).toSeq == Seq(0.11f, 0.22f))
assert(rows(0).getSeq(13) == Seq("0.11", "0.22").map(BigDecimal(_).bigDecimal))
assert(rows(0).getString(14) == "d1")
assert(rows(0).getFloat(15) == 1.01f)
assert(rows(0).getShort(16) == 1)
assert(rows(0).getSeq(17) ==
Seq("111.222200000000000000", "333.444400000000000000").map(BigDecimal(_).bigDecimal))
// Test reading null values using the second row.
assert(0.until(16).forall(rows(1).isNullAt(_)))
}
test("Basic write test") {
val df = sqlContext.read.jdbc(jdbcUrl, "bar", new Properties)
// Test only that it doesn't crash.
df.write.jdbc(jdbcUrl, "public.barcopy", new Properties)
// Test that written numeric type has same DataType as input
assert(sqlContext.read.jdbc(jdbcUrl, "public.barcopy", new Properties).schema(13).dataType ==
ArrayType(DecimalType(2, 2), true))
// Test write null values.
df.select(df.queryExecution.analyzed.output.map { a =>
Column(Literal.create(null, a.dataType)).as(a.name)
}: _*).write.jdbc(jdbcUrl, "public.barcopy2", new Properties)
}
test("Creating a table with shorts and floats") {
sqlContext.createDataFrame(Seq((1.0f, 1.toShort)))
.write.jdbc(jdbcUrl, "shortfloat", new Properties)
val schema = sqlContext.read.jdbc(jdbcUrl, "shortfloat", new Properties).schema
assert(schema(0).dataType == FloatType)
assert(schema(1).dataType == ShortType)
}
test("SPARK-20557: column type TIMESTAMP with TIME ZONE and TIME with TIME ZONE " +
"should be recognized") {
// When using JDBC to read the columns of TIMESTAMP with TIME ZONE and TIME with TIME ZONE
// the actual types are java.sql.Types.TIMESTAMP and java.sql.Types.TIME
val dfRead = sqlContext.read.jdbc(jdbcUrl, "ts_with_timezone", new Properties)
val rows = dfRead.collect()
val types = rows(0).toSeq.map(x => x.getClass.toString)
assert(types(1).equals("class java.sql.Timestamp"))
assert(types(2).equals("class java.sql.Timestamp"))
}
test("SPARK-22291: Conversion error when transforming array types of " +
"uuid, inet and cidr to StingType in PostgreSQL") {
val df = sqlContext.read.jdbc(jdbcUrl, "st_with_array", new Properties)
val rows = df.collect()
assert(rows(0).getString(0) == "0a532531-cdf1-45e3-963d-5de90b6a30f1")
assert(rows(0).getString(1) == "172.168.22.1")
assert(rows(0).getString(2) == "192.168.100.128/25")
    assert(rows(0).getString(3) == "{\"a\": \"foo\", \"b\": \"bar\"}")
    assert(rows(0).getString(4) == "{\"a\": 1, \"b\": 2}")
assert(rows(0).getSeq(5) == Seq("7be8aaf8-650e-4dbb-8186-0a749840ecf2",
"205f9bfc-018c-4452-a605-609c0cfad228"))
assert(rows(0).getSeq(6) == Seq("172.16.0.41", "172.16.0.42"))
assert(rows(0).getSeq(7) == Seq("192.168.0.0/24", "10.1.0.0/16"))
assert(rows(0).getSeq(8) == Seq("""{"a": "foo", "b": "bar"}""", """{"a": 1, "b": 2}"""))
assert(rows(0).getSeq(9) == Seq("""{"a": 1, "b": 2, "c": 3}"""))
}
test("query JDBC option") {
val expectedResult = Set(
(42, 123456789012345L)
).map { case (c1, c3) =>
Row(Integer.valueOf(c1), java.lang.Long.valueOf(c3))
}
val query = "SELECT c1, c3 FROM bar WHERE c1 IS NOT NULL"
// query option to pass on the query string.
val df = spark.read.format("jdbc")
.option("url", jdbcUrl)
.option("query", query)
.load()
assert(df.collect.toSet === expectedResult)
// query option in the create table path.
sql(
s"""
|CREATE OR REPLACE TEMPORARY VIEW queryOption
|USING org.apache.spark.sql.jdbc
|OPTIONS (url '$jdbcUrl', query '$query')
""".stripMargin.replaceAll("\\n", " "))
assert(sql("select c1, c3 from queryOption").collect.toSet == expectedResult)
}
test("write byte as smallint") {
sqlContext.createDataFrame(Seq((1.toByte, 2.toShort)))
.write.jdbc(jdbcUrl, "byte_to_smallint_test", new Properties)
val df = sqlContext.read.jdbc(jdbcUrl, "byte_to_smallint_test", new Properties)
val schema = df.schema
assert(schema.head.dataType == ShortType)
assert(schema(1).dataType == ShortType)
val rows = df.collect()
assert(rows.length === 1)
assert(rows(0).getShort(0) === 1)
assert(rows(0).getShort(1) === 2)
}
}
|
pgandhi999/spark
|
external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/PostgresIntegrationSuite.scala
|
Scala
|
apache-2.0
| 10,464
|
package com.googlecode.kanbanik.model
import org.bson.types.ObjectId
import com.googlecode.kanbanik.db.HasMidAirCollisionDetection
import com.googlecode.kanbanik.db.HasMongoConnection
import com.mongodb.DBObject
import com.mongodb.casbah.commons.MongoDBObject
import com.googlecode.kanbanik.dtos.WorkflowitemType
import com.googlecode.kanbanik.commons._
case class Workflowitem(
val id: Option[ObjectId],
val name: String,
val wipLimit: Int,
val verticalSize: Int,
val itemType: String,
val version: Int,
val nestedWorkflow: Workflow,
val _parentWorkflow: Option[Workflow])
extends HasMongoConnection
with HasMidAirCollisionDetection with Equals {
def this(id: Option[ObjectId],
name: String,
wipLimit: Int,
verticalSize: Int,
itemType: String,
version: Int,
nestedWorkflow: Workflow) = this(id, name, wipLimit, verticalSize, itemType, version, nestedWorkflow, None)
def parentWorkflow: Workflow = _parentWorkflow.getOrElse(loadWorkflow)
private def loadWorkflow(): Workflow = {
val parentBoard = Board.all(false).find(board => board.workflow.containsItem(this)).getOrElse(throw new IllegalArgumentException("The workflowitem '" + id + "' does not exist on any board!"))
parentBoard.workflow.findItem(this).get.parentWorkflow
}
def asDbObject(): DBObject = {
MongoDBObject(
Workflowitem.Fields.id.toString() -> id.getOrElse(new ObjectId),
Workflowitem.Fields.name.toString() -> name,
Workflowitem.Fields.wipLimit.toString() -> wipLimit,
Workflowitem.Fields.verticalSize.toString() -> verticalSize,
Workflowitem.Fields.itemType.toString() -> itemType,
Workflowitem.Fields.version.toString() -> version,
Workflowitem.Fields.nestedWorkflow.toString() -> nestedWorkflow.asDbObject)
}
def canEqual(other: Any) = {
other.isInstanceOf[com.googlecode.kanbanik.model.Workflowitem]
}
override def equals(other: Any) = {
other match {
case that: Workflowitem => that.canEqual(Workflowitem.this) && id == that.id
case _ => false
}
}
override def hashCode() = {
val prime = 41
prime + id.hashCode
}
override def toString = id.toString
}
object Workflowitem extends HasMongoConnection {
object Fields extends DocumentField {
val wipLimit = Value("wipLimit")
val verticalSize = Value("verticalSize")
val itemType = Value("itemType")
val nestedWorkflow = Value("nestedWorkflow")
}
def apply() = new Workflowitem(Some(new ObjectId()), "", -1, -1, WorkflowitemType.HORIZONTAL.toString, 1, Workflow(), None)
def apply(id: String) = new Workflowitem(Some(new ObjectId(id)), "", -1, -1, WorkflowitemType.HORIZONTAL.toString, 1, Workflow(), None)
def apply(id: ObjectId) = new Workflowitem(Some(id), "", -1, -1, WorkflowitemType.HORIZONTAL.toString, 1, Workflow(), None)
def apply(id: String, parent: Workflow) =
new Workflowitem(
Some(new ObjectId(id)),
"",
-1,
-1,
WorkflowitemType.HORIZONTAL.toString,
1,
Workflow(),
Some(parent)
)
def asEntity(dbObject: DBObject, workflow: Option[Workflow]): Workflowitem = {
new Workflowitem(
Some(dbObject.get(Fields.id.toString()).asInstanceOf[ObjectId]),
dbObject.get(Fields.name.toString()).asInstanceOf[String],
dbObject.get(Fields.wipLimit.toString()).asInstanceOf[Int],
dbObject.getWithDefault[Int](Fields.verticalSize, -1),
dbObject.get(Fields.itemType.toString()).asInstanceOf[String],
dbObject.get(Fields.version.toString()).asInstanceOf[Int],
{
val nestedWorkflow = dbObject.get(Fields.nestedWorkflow.toString()).asInstanceOf[DBObject]
Workflow.asEntity(nestedWorkflow)
},
workflow)
}
def asEntity(dbObject: DBObject): Workflowitem = {
asEntity(dbObject, None)
}
}
|
aymenhs/kanbanik
|
kanbanik-server/src/main/scala/com/googlecode/kanbanik/model/Workflowitem.scala
|
Scala
|
apache-2.0
| 3,839
|
package com.ignition.stream
import scala.xml.{ Elem, Node }
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema
import org.apache.spark.streaming.kafka.KafkaUtils
import org.json4s.JValue
import org.json4s.JsonDSL._
import org.json4s.jvalue2monadic
import com.ignition.types.{ fieldToStructType, string }
import com.ignition.util.JsonUtils.RichJValue
import com.ignition.util.XmlUtils.RichNodeSeq
import kafka.serializer.StringDecoder
/**
* Creates a text stream from Apache Kafka.
*
* @author Vlad Orzhekhovskiy
*/
case class KafkaInput(brokers: Iterable[String], topics: Iterable[String],
kafkaProperties: Map[String, String] = Map.empty, field: String = "payload") extends StreamProducer {
import KafkaInput._
def brokers(hosts: String*): KafkaInput = copy(brokers = hosts)
  def brokers(hosts: String): KafkaInput = copy(brokers = hosts.split(",\\s*"))
def topics(t: String*): KafkaInput = copy(topics = t)
  def topics(t: String): KafkaInput = copy(topics = t.split(",\\s*"))
def properties(prop: (String, String)*): KafkaInput = copy(kafkaProperties = prop.toMap)
val schema = string(field)
private val kafkaParams = Map("metadata.broker.list" -> brokers.mkString(",")) ++ kafkaProperties
protected def compute(implicit runtime: SparkStreamingRuntime): DataStream = {
val raw = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics.toSet)
raw map {
case (_, value) => new GenericRowWithSchema(Array(value), schema).asInstanceOf[Row]
}
}
def toXml: Elem =
<node>
<field>{ field }</field>
<brokers>
{ brokers map (b => <broker>{ b }</broker>) }
</brokers>
<topics>
{ topics map (t => <topic>{ t }</topic>) }
</topics>
{
if (!kafkaProperties.isEmpty)
<kafkaProperties>
{
kafkaProperties map {
case (name, value)=> <property name={ name }>{ value }</property>
}
}
</kafkaProperties>
}
</node>.copy(label = tag)
def toJson: JValue = {
val props = if (kafkaProperties.isEmpty) None else Some(kafkaProperties.map {
case (name, value) => ("name" -> name) ~ ("value" -> value)
})
("tag" -> tag) ~ ("field" -> field) ~ ("brokers" -> brokers) ~ ("topics" -> topics) ~ ("kafkaProperties" -> props)
}
}
/**
* Kafka Input companion object.
*/
object KafkaInput {
val tag = "stream-kafka-input"
def apply(): KafkaInput = apply(Nil, Nil)
def fromXml(xml: Node) = {
    val field = xml \ "field" asString
    val brokers = xml \\ "broker" map (_.asString)
    val topics = xml \\ "topic" map (_.asString)
    val properties = xml \ "kafkaProperties" \ "property" map { node =>
      val name = node \ "@name" asString
      val value = node.child.head asString
      name -> value
    } toMap
apply(brokers, topics, properties, field)
}
def fromJson(json: JValue) = {
    val field = json \ "field" asString
    val brokers = (json \ "brokers" asArray) map (_.asString)
    val topics = (json \ "topics" asArray) map (_.asString)
    val properties = (json \ "kafkaProperties" asArray) map { item =>
      val name = item \ "name" asString
      val value = item \ "value" asString
      name -> value
} toMap
apply(brokers, topics, properties, field)
}
}
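// Editor's sketch, not part of the original file (broker, topic and property
// values are ours): configuring the step fluently before wiring it into a flow.
//   val kafka = KafkaInput()
//     .brokers("host1:9092, host2:9092")
//     .topics("events")
//     .properties("group.id" -> "ignition")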
|
uralian/ignition
|
src/main/scala/com/ignition/stream/KafkaInput.scala
|
Scala
|
apache-2.0
| 3,438
|