code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn
import com.intel.analytics.bigdl.dllib.nn.Graph.ModuleNode
import com.intel.analytics.bigdl.dllib.nn.abstractnn.{AbstractModule, Activity, TensorModule}
import com.intel.analytics.bigdl.dllib.optim.Regularizer
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.{T, Table}
import scala.reflect.ClassTag
/**
* Long Short Term Memory architecture with peephole.
* Ref. A.: http://arxiv.org/pdf/1303.5778v1 (blueprint for this module)
* B. http://web.eecs.utk.edu/~itamar/courses/ECE-692/Bobby_paper1.pdf
* C. http://arxiv.org/pdf/1503.04069v1.pdf
* D. https://github.com/wojzaremba/lstm
*
* @param inputSize the size of each input vector
* @param hiddenSize Hidden unit size in the LSTM
* @param p is used for [[Dropout]] probability. For more details about
* RNN dropouts, please refer to
* [RnnDrop: A Novel Dropout for RNNs in ASR]
* (http://www.stat.berkeley.edu/~tsmoon/files/Conference/asru2015.pdf)
* [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks]
* (https://arxiv.org/pdf/1512.05287.pdf)
* @param wRegularizer: instance of [[Regularizer]]
* (eg. L1 or L2 regularization), applied to the input weights matrices.
* @param uRegularizer: instance of [[Regularizer]]
* (eg. L1 or L2 regularization), applied to the recurrent weights matrices.
* @param bRegularizer: instance of [[Regularizer]]
* applied to the bias.
*/
@SerialVersionUID(- 7566757838561436619L)
class LSTMPeephole[T : ClassTag] (
val inputSize: Int,
val hiddenSize: Int,
val p: Double = 0.0,
var wRegularizer: Regularizer[T] = null,
var uRegularizer: Regularizer[T] = null,
var bRegularizer: Regularizer[T] = null
)
(implicit ev: TensorNumeric[T])
extends Cell[T](
// Two hidden states are carried between steps: the output h and the cell state c,
// both of size hiddenSize.
hiddensShape = Array(hiddenSize, hiddenSize),
regularizers = Array(wRegularizer, uRegularizer, bRegularizer)
) {
// Graph nodes produced as side effects of the build* methods below; buildCell()
// must run before buildOutputGate() because the latter reads cellLayer.
var inputGate: ModuleNode[T] = _
var forgetGate: ModuleNode[T] = _
var outputGate: ModuleNode[T] = _
var hiddenLayer: ModuleNode[T] = _
var cellLayer: ModuleNode[T] = _
// Feature dimension of the (batch, feature) input; Narrow slices along it.
val featDim = 2
// The cell flattens the nested {input, (h, c)} table to 3 elements, runs the LSTM
// graph (which emits 3 outputs), then re-nests them into {output, (h, c)}.
override var cell: AbstractModule[Activity, Activity, T] =
Sequential()
.add(FlattenTable())
.add(buildLSTM())
.add(ConcatTable()
.add(SelectTable(1))
.add(NarrowTable(2, 2)))
// Without dropout (p == 0), the four input-to-hidden projections are fused into one
// Linear(inputSize, 4 * hiddenSize) applied before the recurrence; each gate then
// Narrows its slice out of that result. With dropout each gate owns its Linear so a
// per-gate dropout mask can be applied, hence no preTopology.
override var preTopology: TensorModule[T] = if (p != 0) {
null
} else {
Linear(inputSize, 4 * hiddenSize,
wRegularizer = wRegularizer, bRegularizer = bRegularizer)
}
// preTopology widens the input to 4 stacked gate projections.
override def hiddenSizeOfPreTopo: Int = hiddenSize * 4
/**
 * Builds one sigmoid gate with a peephole connection:
 * Sigmoid(i2g(input1) + U * input2 + w .* input3).
 *
 * @param dimension dimension Narrow slices on when the projections are fused (p == 0)
 * @param offset 1-based offset of this gate's slice inside the fused projection
 * @param length width of the slice (hiddenSize)
 * @param input1 current input (already projected by preTopology when p == 0)
 * @param input2 previous hidden state h
 * @param input3 previous cell state c (peephole input)
 */
def buildGate(dimension: Int, offset: Int, length: Int)
(input1: ModuleNode[T], input2: ModuleNode[T], input3: ModuleNode[T])
: ModuleNode[T] = {
/**
* f(input1 + U * input2)
*/
var i2g: ModuleNode[T] = null
var h2g: ModuleNode[T] = null
if (p != 0) {
// Dropout variant: separate input projection per gate, dropout applied before it.
val input1Drop = Dropout(p).inputs(input1)
i2g = Linear(inputSize, hiddenSize, wRegularizer = wRegularizer,
bRegularizer = bRegularizer).inputs(input1Drop)
val input2Drop = Dropout(p).inputs(input2)
h2g = Linear(hiddenSize, hiddenSize, withBias = false,
wRegularizer = uRegularizer).inputs(input2Drop)
} else {
// preTopology already computed all four projections; take this gate's slice.
i2g = Narrow(dimension, offset, length).inputs(input1)
h2g = Linear(hiddenSize, hiddenSize,
withBias = false, wRegularizer = uRegularizer).inputs(input2)
}
// Peephole: element-wise learned weight on the previous cell state.
val cMul = CMul(Array(hiddenSize)).inputs(input3)
val cadd = CAddTable().inputs(i2g, h2g, cMul)
val sigmoid = Sigmoid().inputs(cadd)
sigmoid
}
// Input gate uses the first hiddenSize-wide slice of the fused projection.
def buildInputGate()
(input1: ModuleNode[T], input2: ModuleNode[T], input3: ModuleNode[T])
: ModuleNode[T] = {
inputGate = buildGate(featDim, 1, hiddenSize)(input1, input2, input3)
inputGate
}
// Forget gate uses the second slice.
def buildForgetGate()
(input1: ModuleNode[T], input2: ModuleNode[T], input3: ModuleNode[T])
: ModuleNode[T] = {
forgetGate =
buildGate(featDim, 1 + hiddenSize, hiddenSize)(input1, input2, input3)
forgetGate
}
// Output gate uses the fourth slice; its peephole input is the NEW cell state,
// so it must be built after buildCell().
def buildOutputGate()
(input1: ModuleNode[T], input2: ModuleNode[T], input3: ModuleNode[T])
: ModuleNode[T] = {
outputGate =
buildGate(featDim, 1 + 3 * hiddenSize, hiddenSize)(input1, input2, input3)
outputGate
}
/**
 * Builds the candidate hidden value g = Tanh(i2h(input1) + W * input2).
 * Uses the third slice of the fused projection when p == 0; no peephole here.
 */
def buildHidden()
(input1: ModuleNode[T], input2: ModuleNode[T])
: ModuleNode[T] = {
/**
* f(input1 + W * input2)
*/
var i2h: ModuleNode[T] = null
var h2h: ModuleNode[T] = null
if (p != 0) {
val input1Drop = Dropout(p).inputs(input1)
i2h = Linear(inputSize, hiddenSize, wRegularizer = wRegularizer,
bRegularizer = bRegularizer).inputs(input1Drop)
val input2Drop = Dropout(p).inputs(input2)
h2h = Linear(hiddenSize, hiddenSize, withBias = false,
wRegularizer = uRegularizer).inputs(input2Drop)
} else {
i2h = Narrow(featDim, 1 + 2 * hiddenSize, hiddenSize).inputs(input1)
h2h = Linear(hiddenSize, hiddenSize, withBias = false,
wRegularizer = uRegularizer).inputs(input2)
}
val cadd = CAddTable().inputs(i2h, h2h)
val tanh = Tanh().inputs(cadd)
this.hiddenLayer = tanh
tanh
}
/**
 * Builds the new cell state c' = forgetGate .* c + inputGate .* g,
 * where input3 is the previous cell state c. Also populates inputGate,
 * forgetGate, hiddenLayer and cellLayer as side effects.
 */
def buildCell()
(input1: ModuleNode[T], input2: ModuleNode[T], input3: ModuleNode[T])
: ModuleNode[T] = {
buildInputGate()(input1, input2, input3)
buildForgetGate()(input1, input2, input3)
buildHidden()(input1, input2)
val forgetLayer = CMulTable().inputs(forgetGate, input3)
val inputLayer = CMulTable().inputs(inputGate, hiddenLayer)
val cellLayer = CAddTable().inputs(forgetLayer, inputLayer)
this.cellLayer = cellLayer
cellLayer
}
/**
 * Assembles the full LSTM graph. Inputs: (projected) input, previous h,
 * previous c. Outputs: new h (twice, as output and hidden state) and new c.
 */
def buildLSTM(): Graph[T] = {
val input1 = Input()
val input2 = Input()
val input3 = Input()
/**
* f: sigmoid
* g: tanh
* forgetLayer = input3 * f(input1 + U1 * input2)
* inputLayer = f(input1 + U2 * input2) * g(input1 + U3 * input2)
* cellLayer = forgetLayer + inputLayer
*/
buildCell()(input1, input2, input3)
// Output gate peeps at the freshly computed cell state, not the previous one.
buildOutputGate()(input1, input2, cellLayer)
val tanh = Tanh().inputs(cellLayer)
val cMul = CMulTable().inputs(outputGate, tanh)
val out1 = Identity().inputs(cMul)
val out2 = Identity().inputs(cMul)
val out3 = cellLayer
/**
* out1 = outputGate * g(cellLayer)
* out2 = out1
* out3 = cellLayer
*/
Graph(Array(input1, input2, input3), Array(out1, out2, out3))
}
override def canEqual(other: Any): Boolean = other.isInstanceOf[LSTMPeephole[T]]
// Equality covers the structural hyper-parameters only; learned weights are not compared.
override def equals(other: Any): Boolean = other match {
case that: LSTMPeephole[T] =>
super.equals(that) &&
(that canEqual this) &&
inputSize == that.inputSize &&
hiddenSize == that.hiddenSize &&
p == that.p
case _ => false
}
override def hashCode(): Int = {
val state = Seq(super.hashCode(), inputSize, hiddenSize, p)
state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
}
// Re-initializes all trainable parameters of the internal graph.
override def reset(): Unit = {
super.reset()
cell.reset()
}
override def toString: String = s"LSTMPeephole($inputSize, $hiddenSize, $p)"
}
/**
 * Factory for [[LSTMPeephole]]. Parameters mirror the class constructor;
 * see the class documentation for their meaning.
 */
object LSTMPeephole {
  def apply[@specialized(Float, Double) T: ClassTag](
      inputSize: Int = 4,
      hiddenSize: Int = 3,
      p: Double = 0.0,
      wRegularizer: Regularizer[T] = null,
      uRegularizer: Regularizer[T] = null,
      bRegularizer: Regularizer[T] = null
  )(implicit ev: TensorNumeric[T]): LSTMPeephole[T] =
    new LSTMPeephole[T](
      inputSize = inputSize,
      hiddenSize = hiddenSize,
      p = p,
      wRegularizer = wRegularizer,
      uRegularizer = uRegularizer,
      bRegularizer = bRegularizer)
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/nn/LSTMPeephole.scala | Scala | apache-2.0 | 8,390 |
package com.github.j5ik2o.forseti.domain.support
import com.github.j5ik2o.forseti.domain.exception.EntityNotFoundException
import scala.concurrent.{ExecutionContext, Future}
import scalaz._
import scalaz.std.scalaFuture._
import scalaz.syntax.either._
/**
 * Read-side access to entities of type E identified by ID.
 *
 * Implementors supply [[resolveOptById]]; [[resolveById]] is derived from it and
 * turns an absent entity into a failed result.
 */
trait EntityReader[ID <: EntityId, E <: Entity[ID]] {

  /** Looks the entity up, yielding `Maybe.empty` when it does not exist. */
  def resolveOptById(id: ID)(implicit ec: ExecutionContext): EitherT[Future, Exception, Maybe[E]]

  /**
   * Like [[resolveOptById]], but an absent entity becomes a left
   * [[EntityNotFoundException]] instead of an empty value.
   */
  def resolveById(id: ID)(implicit ec: ExecutionContext): EitherT[Future, Exception, E] =
    resolveOptById(id).flatMap { maybeEntity =>
      val resolved: Exception \/ E =
        maybeEntity
          .map(_.right[Exception])
          .getOrElse(new EntityNotFoundException(s"Not found entity: id = ${id.value}").left[E])
      EitherT(Future.successful(resolved))
    }
}
| j5ik2o/forseti | domain/src/main/scala/com/github/j5ik2o/forseti/domain/support/EntityReader.scala | Scala | mit | 800 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.mkldnn
import com.intel.analytics.bigdl.mkl._
import com.intel.analytics.bigdl.nn.{InitializationMethod, RandomUniform, VariableFormat, Zeros}
import com.intel.analytics.bigdl.nn.abstractnn.{Activity, Initializable}
import com.intel.analytics.bigdl.tensor.{DnnTensor, Tensor}
import scala.collection.mutable.ArrayBuffer
/**
* @param mode : the type of RNN cell (LSTM / GRU)
* @param inputSize : the size of input vector
* @param hiddenSize : the size of hidden state
* @param f : the type of output activation function
* (AlgKind.EltwiseTanh or AlgKind.EltwiseRelu)
* @param direction : the direction to run RNN
* (e.g. Direction.UnidirectionalLeft2Right or Direction.BidirectionalConcat)
* @param layers : the number of RNN layers
*/
class RNN(
val mode: Int,
val inputSize: Int,
val hiddenSize: Int,
val f: Int,
val direction: Int,
val layers: Int = 1,
val flags: Int = RNNCellFlags.RNNCellWithRelu,
val alpha: Float = 0F,
val clipping: Float = 0F,
private val initWeight: Tensor[Float] = null,
private val initWeightIter: Tensor[Float] = null,
private val initBias: Tensor[Float] = null
) extends MklDnnLayer with Initializable {
// Primitive handles and pre-bound tensor arrays for the forward pass,
// lazily assembled on the first updateOutput call.
private var updateOutputMemoryPrimitives: Array[Long] = _
private var updateOutputTensors: Array[Tensor[Float]] = _
// Same for the backward (gradInput + gradient accumulation) pass.
private var updateGradInputMemoryPrimitives: Array[Long] = _
private var updateGradInputTensors: Array[Tensor[Float]] = _
// Forward primitive descriptor; also consumed when creating the backward descriptor.
private var fwdPD: Long = _
private var rnnCellDesc : Long = 0L
// Parameters and their gradients, mirrored between heap (dense) and native MKL-DNN memory.
private[mkldnn] var weight: TensorMMap = _
private[mkldnn] var weight_i: TensorMMap = _
private[mkldnn] var bias: TensorMMap = _
private[mkldnn] var gradWeight: TensorMMap = _
private[mkldnn] var gradWeight_i: TensorMMap = _
private[mkldnn] var gradBias: TensorMMap = _
// Workspace shared between the training forward primitive and the backward primitive.
private var workSpaceFormat: MemoryData = _
private var workSpace : Tensor[Float] = _
@transient private lazy val reorderManager = new ReorderManager
// Copies of the weights reordered into the layout the backward primitive queries for.
private var weightForBackward: DnnTensor[Float] = _
private var weightForBackwardMemoryData: MemoryData = _
private var weightIterForBackward: DnnTensor[Float] = _
private var weightIterForBackwardMemoryData: MemoryData = _
// Derived from the runtime input format in initFwdPrimitives.
private var batchSize: Int = _
private var stepSize: Int = _
private var inputShape: Array[Int] = _
private var outputShape: Array[Int] = _
private var weightShape: Array[Int] = _
private var weightIterShape: Array[Int] = _
private var biasShape: Array[Int] = _
private var commonIterShape: Array[Int] = _
// Zero-filled initial/final iteration-state tensors (hidden/cell states at sequence edges).
private var src_i: DnnTensor[Float] = _
private var dst_i: DnnTensor[Float] = _
private var gradsrc_i: DnnTensor[Float] = _
private var graddst_i: DnnTensor[Float] = _
// Stacked layers feed each layer's output into the next, so sizes must line up.
if(layers > 1) {
require(inputSize == hiddenSize,
"If layer number of RNN is more than 1, the input size and the hidden size should equal.\\n"
+ "inputSize: " + inputSize + '\\n'
+ "hiddenSize: " + hiddenSize)
}
// Gates and recurrent states per cell type: LSTM has 4 gates / 2 states (h, c),
// GRU has 3 gates / 1 state.
var (ngates, nstates) = mode match {
case AlgKind.VanillaLstm => (4, 2)
case AlgKind.VanillaGru => (3, 1)
case _ =>
throw new UnsupportedOperationException("Not support such RNN Cell. Cell type: " + mode)
}
/** TODO: Multi-layer Bidirectional Sum RNN is available in MKLDNN,
* TODO: but the current version of BigDL BLAS does not support it.
*/
// outputSizeFactor widens the output feature dimension when both directions are concatenated.
val (numOfDirections, outputSizeFactor) = direction match {
case Direction.UnidirectionalLeft2Right
| Direction.UnidirectionalRight2Left => (1, 1)
case Direction.BidirectionalConcat =>
require(layers == 1, "Bidirectional Concat RNN does not support multiple layers. " +
"layers = " + layers)
(2, 2)
case Direction.BidirectionalSum => (2, 1)
case _ => throw new UnsupportedOperationException("Not support such direction")
}
/**
* Gate order matching between MKLDNN LSTM and nn/LSTM:
* MKLDNN Gate 1 -> nn/LSTM Gate 1 (input gate)
* MKLDNN Gate 2 -> nn/LSTM Gate 3 (forget gate)
* MKLDNN Gate 3 -> nn/LSTM Gate 2 (hidden)
* MKLDNN Gate 4 -> nn/LSTM Gate 4 (output gate)
*
* Gate order matching between MKLDNN GRU and nn/GRU:
* MKLDNN Gate 1 -> nn/GRU Gate 2
* MKLDNN Gate 2 -> nn/GRU Gate 1
* MKLDNN Gate 3 -> nn/GRU Gate 3
*/
// 5-D parameter shapes follow MKL-DNN's RNN conventions:
// weights are (layers, directions, input, gates, hidden) and bias is
// (layers, directions, gates, hidden).
weightShape = Array(layers, numOfDirections, inputSize, ngates, hiddenSize)
weightIterShape = Array(layers, numOfDirections, hiddenSize, ngates, hiddenSize)
biasShape = Array(layers, numOfDirections, ngates, hiddenSize)
weight = new TensorMMap(weightShape)
weight_i = new TensorMMap(weightIterShape)
bias = new TensorMMap(biasShape)
gradWeight = new TensorMMap(weightShape)
gradWeight_i = new TensorMMap(weightIterShape)
gradBias = new TensorMMap(biasShape)
{
// Default initialization: uniform weights scaled by hidden size, zero bias.
val stdv = 1.0 / math.sqrt(hiddenSize)
val wInit: InitializationMethod = RandomUniform(-stdv, stdv)
val bInit: InitializationMethod = Zeros
setInitMethod(wInit, bInit)
}
// (Re)initializes parameters, preferring explicitly supplied initial tensors.
override def reset(): Unit = {
if (initWeight == null) {
weightInitMethod.init(weight.dense, VariableFormat.Default)
} else {
weight.dense.copy(initWeight)
}
if (initWeightIter == null) {
weightInitMethod.init(weight_i.dense, VariableFormat.Default)
} else {
weight_i.dense.copy(initWeightIter)
}
if (initBias == null) {
biasInitMethod.init(bias.dense, VariableFormat.Default)
} else {
bias.dense.copy(initBias)
}
}
// Builds the forward primitive, lets MKL-DNN pick optimal memory layouts, and
// binds parameter/state tensors to those layouts.
override private[mkldnn] def initFwdPrimitives(inputs: Array[MemoryData], phase: Phase) = {
// Training mode additionally produces a workspace needed by the backward pass.
val kind = if (!isTraining()) {
PropKind.ForwardInference
} else {
PropKind.ForwardTraining
}
/**
* TODO: The default format of input is TNC
* Batch size of input is needed by creating memory descriptors of src iter and dst iter.
* Step size of input is needed by creating memory descriptor of dst layer.
* By default, batch size of input is the second element of inputShape
* and step size is the first element of inputShape.
*/
inputs(0).layout match {
case Memory.Format.tnc =>
batchSize = inputs(0).shape(1)
stepSize = inputs(0).shape(0)
case Memory.Format.ntc =>
batchSize = inputs(0).shape(0)
stepSize = inputs(0).shape(1)
case _ =>
throw new UnsupportedOperationException("Unsupported input format: " +
inputs(0).layout)
}
inputShape = Array(stepSize, batchSize, inputSize)
outputShape = Array(stepSize, batchSize, outputSizeFactor * hiddenSize)
commonIterShape = Array(layers, numOfDirections, nstates, batchSize, hiddenSize)
// Format.any lets the primitive descriptor choose the layouts it prefers.
val src_layer = NativeData(inputShape, Memory.Format.any)
val src_iter = NativeData(commonIterShape, Memory.Format.any)
val wei_layer = NativeData(weightShape, Memory.Format.any)
val wei_iter = NativeData(weightIterShape, Memory.Format.any)
val bis = NativeData(biasShape, Memory.Format.any)
val dst_layer = NativeData(outputShape, Memory.Format.any)
val dst_iter = NativeData(commonIterShape, Memory.Format.any)
val src_layer_MD = src_layer.getMemoryDescription()
val src_iter_MD = src_iter.getMemoryDescription()
val weights_layer_MD = wei_layer.getMemoryDescription()
val weights_iter_MD = wei_iter.getMemoryDescription()
val bis_MD = bis.getMemoryDescription()
val dist_layer_MD = dst_layer.getMemoryDescription()
val dist_iter_MD = dst_iter.getMemoryDescription()
rnnCellDesc = mode match {
case AlgKind.VanillaLstm =>
MklDnnMemory.RNNCellDescInit(AlgKind.VanillaLstm, f, flags, alpha, clipping)
case AlgKind.VanillaGru =>
MklDnnMemory.RNNCellDescInit(AlgKind.VanillaGru, f, flags, alpha, clipping)
case _ => throw new UnsupportedOperationException("Not support such RNN cell. " +
"Cell type: " + mode)
}
val description = MklDnnMemory.RNNForwardDescInit(kind, rnnCellDesc, direction, src_layer_MD,
src_iter_MD, weights_layer_MD, weights_iter_MD, bis_MD, dist_layer_MD, dist_iter_MD)
fwdPD = MklDnnMemory.PrimitiveDescCreate(description, runtime.engine, 0L)
// Query the layouts the primitive actually wants for every argument.
val realSrc = MemoryData.operationWant(fwdPD, Query.SrcPd, 0)
val realSrc_iter = MemoryData.operationWant(fwdPD, Query.SrcPd, 1)
val realWei = MemoryData.operationWant(fwdPD, Query.WeightsPd, 0)
val realWei_iter = MemoryData.operationWant(fwdPD, Query.WeightsPd, 1)
val realBias = MemoryData.operationWant(fwdPD, Query.WeightsPd, 2)
val realDst = MemoryData.operationWant(fwdPD, Query.DstPd, 0)
val realDst_iter = MemoryData.operationWant(fwdPD, Query.DstPd, 1)
require(weight.size().product == realWei.shape.product,
s"${getName} weight shape is not correct.")
require(weight_i.size().product == realWei_iter.shape.product,
s"${getName} weight iter shape is not correct.")
require(bias.size().product == realBias.shape.product,
s"${getName} bias shape is not correct.")
// Heap-side copies use the canonical ldigo/ldgo layouts; sync pushes them to native memory.
weight.setMemoryData(HeapData(weightShape, Memory.Format.ldigo), realWei, runtime)
weight_i.setMemoryData(HeapData(weightIterShape, Memory.Format.ldigo), realWei_iter, runtime)
bias.setMemoryData(HeapData(biasShape, Memory.Format.ldgo), realBias, runtime)
weight.sync()
weight_i.sync()
bias.sync()
// Initial and final iteration states start as zeros.
src_i = initTensor(realSrc_iter).asInstanceOf[DnnTensor[Float]]
dst_i = initTensor(realDst_iter).asInstanceOf[DnnTensor[Float]]
src_i.zero()
dst_i.zero()
// Argument order here must match the tensor order built in updateOutput.
val srcs = Array(realSrc.getPrimitive(runtime), realSrc_iter.getPrimitive(runtime),
realWei.getPrimitive(runtime), realWei_iter.getPrimitive(runtime),
realBias.getPrimitive(runtime))
val indexes = Array.fill(srcs.length)(0)
if (isTraining()) {
workSpaceFormat = MemoryData.operationWant(fwdPD, Query.WorkspacePd, 0)
workSpace = initTensor(workSpaceFormat).asInstanceOf[Tensor[Float]]
}
val dsts = if (isTraining()) {
Array(realDst.getPrimitive(runtime),
realDst_iter.getPrimitive(runtime),
workSpaceFormat.getPrimitive(runtime))
}
else {
Array(realDst.getPrimitive(runtime),
realDst_iter.getPrimitive(runtime))
}
val primitive = MklDnnMemory.PrimitiveCreate2(fwdPD, srcs, indexes,
srcs.length, dsts, dsts.length)
updateOutputMemoryPrimitives = srcs ++ dsts
updateOutputPrimitives = Array(primitive)
output = initTensor(realDst)
_inputFormats = Array(realSrc)
_outputFormats = Array(realDst)
(_inputFormats, _outputFormats)
}
// Runs the forward primitive; tensor order must mirror srcs ++ dsts above.
override def updateOutput(input: Activity): Activity = {
if (updateOutputTensors == null) {
val buffer = new ArrayBuffer[Tensor[Float]]()
buffer.append(input.asInstanceOf[Tensor[Float]])
buffer.append(src_i)
buffer.append(weight.native)
buffer.append(weight_i.native)
buffer.append(bias.native)
buffer.append(output.asInstanceOf[Tensor[Float]])
buffer.append(dst_i)
if (isTraining()) {
buffer.append(workSpace)
}
updateOutputTensors = buffer.toArray
}
if (isTraining()) {
// Push possibly updated heap-side parameters into native memory before executing.
weight.sync()
weight_i.sync()
bias.sync()
}
updateWithNewTensor(updateOutputTensors, 0, input)
MklDnnOps.streamSubmit(runtime.stream, 1, updateOutputPrimitives, updateOutputPrimitives.length,
updateOutputMemoryPrimitives, updateOutputTensors)
output
}
// Builds the backward primitive and reorders the weights into its preferred layouts.
override private[mkldnn] def initBwdPrimitives(grad: Array[MemoryData], phase: Phase) = {
reorderManager.setRuntime(runtime)
val src_layer_bw = NativeData(inputShape, Memory.Format.any)
val src_iter_bw = NativeData(commonIterShape, Memory.Format.any)
val wei_layer_bw = NativeData(weightShape, Memory.Format.any)
val wei_iter_bw = NativeData(weightIterShape, Memory.Format.any)
val bis_bw = NativeData(biasShape, Memory.Format.any)
val dst_layer_bw = NativeData(outputShape, Memory.Format.any)
val dst_iter_bw = NativeData(commonIterShape, Memory.Format.any)
val diff_src_layer = NativeData(inputShape, Memory.Format.any)
val diff_src_iter = NativeData(commonIterShape, Memory.Format.any)
val diff_weights_layer = NativeData(weightShape, Memory.Format.ldigo)
// IMPORTANT : it has to be ldigo
val diff_weights_iter = NativeData(weightIterShape, Memory.Format.ldigo)
// IMPORTANT : it has to be ldigo
val diff_bias = NativeData(biasShape, Memory.Format.any)
val diff_dist_layer = NativeData(outputShape, Memory.Format.any)
val diff_dist_iter = NativeData(commonIterShape, Memory.Format.any)
val src_layer_bw_MD = src_layer_bw.getMemoryDescription()
val src_iter_bw_MD = src_iter_bw.getMemoryDescription()
val weights_layer_bw_MD = wei_layer_bw.getMemoryDescription()
val weights_iter_bw_MD = wei_iter_bw.getMemoryDescription()
val bis_bw_MD = bis_bw.getMemoryDescription()
val dist_layer_bw_MD = dst_layer_bw.getMemoryDescription()
val dist_iter_bw_MD = dst_iter_bw.getMemoryDescription()
val diff_src_layer_MD = diff_src_layer.getMemoryDescription()
val diff_src_iter_MD = diff_src_iter.getMemoryDescription()
val diff_weights_layer_MD = diff_weights_layer.getMemoryDescription()
val diff_weights_iter_MD = diff_weights_iter.getMemoryDescription()
val diff_bis_MD = diff_bias.getMemoryDescription()
val diff_dist_layer_MD = diff_dist_layer.getMemoryDescription()
val diff_dist_iter_MD = diff_dist_iter.getMemoryDescription()
val description = MklDnnMemory.RNNBackwardDescInit(PropKind.Backward, rnnCellDesc,
direction, src_layer_bw_MD,
src_iter_bw_MD, weights_layer_bw_MD,
weights_iter_bw_MD, bis_bw_MD,
dist_layer_bw_MD, dist_iter_bw_MD,
diff_src_layer_MD, diff_src_iter_MD,
diff_weights_layer_MD, diff_weights_iter_MD,
diff_bis_MD, diff_dist_layer_MD,
diff_dist_iter_MD
)
// The backward descriptor is hinted with the forward one so layouts stay compatible.
val bwdPD = MklDnnMemory.PrimitiveDescCreate(description, runtime.engine, fwdPD)
val realSrc = MemoryData.operationWant(bwdPD, Query.SrcPd, 0)
val realSrc_iter = MemoryData.operationWant(bwdPD, Query.SrcPd, 1)
val realWei = MemoryData.operationWant(bwdPD, Query.WeightsPd, 0)
val realWei_iter = MemoryData.operationWant(bwdPD, Query.WeightsPd, 1)
val realBias = MemoryData.operationWant(bwdPD, Query.WeightsPd, 2)
val realDst = MemoryData.operationWant(bwdPD, Query.DstPd, 0)
val realDst_iter = MemoryData.operationWant(bwdPD, Query.DstPd, 1)
val realDiffDst = MemoryData.operationWant(bwdPD, Query.DiffDstPd, 0)
val realDiffDst_iter = MemoryData.operationWant(bwdPD, Query.DiffDstPd, 1)
val realDiffSrc = MemoryData.operationWant(bwdPD, Query.DiffSrcPd, 0)
val realDiffSrc_iter = MemoryData.operationWant(bwdPD, Query.DiffSrcPd, 1)
val realDiffWei = MemoryData.operationWant(bwdPD, Query.DiffWeightsPd, 0)
val realDiffWei_iter = MemoryData.operationWant(bwdPD, Query.DiffWeightsPd, 1)
val realDiffBias = MemoryData.operationWant(bwdPD, Query.DiffWeightsPd, 2)
// The backward primitive may want a different weight layout than forward;
// reorder the heap weights into dedicated backward copies.
weightForBackwardMemoryData = realWei
reorderManager.register(weight.heapData, realWei)
weightForBackward = reorderManager
.infer(Array(weight.heapData), Array(weightForBackwardMemoryData), weight.dense)
.asInstanceOf[DnnTensor[Float]]
weightIterForBackwardMemoryData = realWei_iter
reorderManager.register(weight_i.heapData, realWei_iter)
weightIterForBackward = reorderManager
.infer(Array(weight_i.heapData), Array(weightIterForBackwardMemoryData), weight_i.dense)
.asInstanceOf[DnnTensor[Float]]
gradWeight.setMemoryData(realDiffWei, HeapData(weightShape, Memory.Format.ldigo), runtime)
gradWeight_i.setMemoryData(realDiffWei_iter, HeapData(weightIterShape, Memory.Format.ldigo),
runtime)
gradBias.setMemoryData(realDiffBias, HeapData(biasShape, Memory.Format.ldgo), runtime)
gradWeight.zero()
gradWeight_i.zero()
gradBias.zero()
gradsrc_i = initTensor(realDiffSrc_iter).asInstanceOf[DnnTensor[Float]]
graddst_i = initTensor(realDiffDst_iter).asInstanceOf[DnnTensor[Float]]
gradsrc_i.zero()
graddst_i.zero()
// Argument order must mirror the tensor order built in updateGradInput.
val srcs = Array(realSrc.getPrimitive(runtime), realSrc_iter.getPrimitive(runtime),
realWei.getPrimitive(runtime), realWei_iter.getPrimitive(runtime),
realBias.getPrimitive(runtime), realDst.getPrimitive(runtime),
realDst_iter.getPrimitive(runtime), realDiffDst.getPrimitive(runtime),
realDiffDst_iter.getPrimitive(runtime), workSpaceFormat.getPrimitive(runtime))
val indexes = Array.fill(srcs.length)(0)
val dsts = Array(realDiffSrc.getPrimitive(runtime), realDiffSrc_iter.getPrimitive(runtime),
realDiffWei.getPrimitive(runtime), realDiffWei_iter.getPrimitive(runtime),
realDiffBias.getPrimitive(runtime)
)
val primitive = MklDnnMemory.PrimitiveCreate2(bwdPD, srcs, indexes, srcs.length,
dsts, dsts.length)
updateGradInputMemoryPrimitives = srcs ++ dsts
updateGradInputPrimitives = Array(primitive)
gradInput = initTensor(realDiffSrc)
_gradInputFormats = Array(realDiffSrc)
_gradOutputFormats = Array(realDiffDst)
(_gradOutputFormats, _gradInputFormats)
}
// Computes gradInput AND accumulates parameter gradients in one primitive execution.
override def updateGradInput(input: Activity, gradOutput: Activity): Activity = {
if (updateGradInputTensors == null) {
val buffer = new ArrayBuffer[Tensor[Float]]()
buffer.append(input.asInstanceOf[Tensor[Float]])
buffer.append(src_i)
buffer.append(weightForBackward)
buffer.append(weightIterForBackward)
buffer.append(bias.native)
buffer.append(output.asInstanceOf[Tensor[Float]])
buffer.append(dst_i)
buffer.append(gradOutput.asInstanceOf[Tensor[Float]])
buffer.append(graddst_i)
buffer.append(workSpace)
buffer.append(gradInput.asInstanceOf[Tensor[Float]])
buffer.append(gradsrc_i)
buffer.append(gradWeight.native)
buffer.append(gradWeight_i.native)
buffer.append(gradBias.native)
updateGradInputTensors = buffer.toArray
}
updateWithNewTensor(updateGradInputTensors, 0, input)
// Index 7 is gradOutput's slot in the buffer built above.
updateWithNewTensor(updateGradInputTensors, 7, gradOutput)
MklDnnOps.streamSubmit(runtime.stream, 1, updateGradInputPrimitives,
updateGradInputPrimitives.length, updateGradInputMemoryPrimitives, updateGradInputTensors)
// Pull the freshly computed native gradients back to the heap copies.
gradWeight.sync()
gradWeight_i.sync()
gradBias.sync()
gradInput
}
// Gradients are already produced by the backward primitive in updateGradInput.
override def accGradParameters(input: Activity, gradOutput: Activity): Unit = {
// Do nothing
}
override def parameters(): (Array[Tensor[Float]], Array[Tensor[Float]]) = {
(Array(weight.dense, bias.dense, weight_i.dense),
Array(gradWeight.dense, gradBias.dense, gradWeight_i.dense))
}
override def zeroGradParameters(): Unit = {
}
}
/**
 * Factory for the MKL-DNN [[RNN]] layer. Parameters mirror the class
 * constructor; see the class documentation for their meaning.
 */
object RNN {
  def apply(
      mode: Int,
      inputSize: Int,
      hiddenSize: Int,
      f: Int,
      direction: Int,
      layers: Int = 1,
      flags: Int = RNNCellFlags.RNNCellWithRelu,
      alpha: Float = 0F,
      clipping: Float = 0F,
      initWeight: Tensor[Float] = null,
      initWeightIter: Tensor[Float] = null,
      initBias: Tensor[Float] = null
  ): RNN = {
    new RNN(
      mode = mode,
      inputSize = inputSize,
      hiddenSize = hiddenSize,
      f = f,
      direction = direction,
      layers = layers,
      flags = flags,
      alpha = alpha,
      clipping = clipping,
      initWeight = initWeight,
      initWeightIter = initWeightIter,
      initBias = initBias)
  }
}
| wzhongyuan/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/mkldnn/RNN.scala | Scala | apache-2.0 | 19,759 |
package parser.json.providers
import play.api.Logger
import play.api.mvc.RequestHeader
import services.auth.providers.Facebook
import models.user.SkimboToken
import parser.json.GenericJsonParserUser
import play.api.libs.json.JsValue
import parser.json.GenericJsonParserParamHelper
import models.ParamHelper
/** Minimal projection of a Facebook user entry: the Facebook id and display name. */
case class FacebookUser(id: String, name: String)
// Parses Facebook JSON API responses into Skimbo's provider-user model.
object FacebookUser extends GenericJsonParserUser with GenericJsonParserParamHelper {
// Facebook wraps its result list in a top-level "data" field; descend into it first.
override def cut(json: JsValue) = super.cut(json \\ "data")
// Maps one Facebook user JSON object to a ProviderUser.
// NOTE(review): Facebook.getToken(idUser).get throws if no token is cached for
// this user — presumably callers only reach here when authenticated; confirm.
override def asProviderUser(idUser: String, json: JsValue): Option[models.user.ProviderUser] = {
val id = (json \\ "id").as[String]
val name = (json \\ "name").asOpt[String]
val picture = (json \\ "picture" \\ "data" \\ "url").asOpt[String]
Some(models.user.ProviderUser(
id,
Facebook.name,
Some(SkimboToken(Facebook.getToken(idUser).get.token, None)),
// name is used for both the username and display-name slots of ProviderUser.
name,
name,
None,
picture))
}
// Same parse, converted to the generic ParamHelper representation.
override def asParamHelper(idUser: String, json: JsValue) : Option[models.ParamHelper] = {
asProviderUser(idUser, json).map( user => ParamHelper.fromProviderUser(user))
}
} | Froggies/Skimbo | app/parser/json/providers/FacebookUser.scala | Scala | agpl-3.0 | 1,187 |
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rhttpc.client.subscription
import java.util.UUID
import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpecLike
import rhttpc.client.protocol.{Correlated, SuccessExchange}
class MessageDispatcherActorSpec
  extends TestKit(ActorSystem("MessageDispatcherActorSpec"))
  with ImplicitSender
  with AnyFlatSpecLike
  with Matchers {

  // Promise registered, subscription confirmed, correlated message delivered:
  // the sender must be acked only after the consumer acknowledges the reply.
  it should "ack after promise -> confirm -> reply -> consumed" in {
    val dispatcher = system.actorOf(Props[MessageDispatcherActor]())
    val subscription = SubscriptionOnResponse(UUID.randomUUID().toString)
    dispatcher ! RegisterSubscriptionPromise(subscription)
    val consumerProbe = TestProbe()
    dispatcher ! ConfirmOrRegisterSubscription(subscription, consumerProbe.ref)
    val senderProbe = TestProbe()
    senderProbe.send(dispatcher, Correlated(SuccessExchange("fooReq", "foo"), subscription.correlationId))
    consumerProbe.expectMsg(MessageFromSubscription("foo", subscription))
    // No ack may arrive before the consumer confirms consumption.
    senderProbe.expectNoMessage()
    consumerProbe.reply("ok")
    senderProbe.expectMsg("ok")
    ()
  }

  // Same flow, but the correlated message arrives before the subscription is
  // confirmed: the dispatcher must hold it until a consumer registers.
  it should "ack after promise -> reply -> confirm -> consumed" in {
    val dispatcher = system.actorOf(Props[MessageDispatcherActor]())
    val subscription = SubscriptionOnResponse(UUID.randomUUID().toString)
    dispatcher ! RegisterSubscriptionPromise(subscription)
    val senderProbe = TestProbe()
    senderProbe.send(dispatcher, Correlated(SuccessExchange("fooReq", "foo"), subscription.correlationId))
    val consumerProbe = TestProbe()
    dispatcher ! ConfirmOrRegisterSubscription(subscription, consumerProbe.ref)
    consumerProbe.expectMsg(MessageFromSubscription("foo", subscription))
    senderProbe.expectNoMessage()
    consumerProbe.reply("ok")
    senderProbe.expectMsg("ok")
    ()
  }
} | arkadius/reliable-http-client | rhttpc-client/src/test/scala/rhttpc/client/subscription/MessageDispatcherActorSpec.scala | Scala | apache-2.0 | 2,326 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.kafka010
import java.{util => ju}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{AnalysisException, SparkSession}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.execution.{QueryExecution, SQLExecution}
import org.apache.spark.sql.types.{BinaryType, StringType}
import org.apache.spark.util.Utils
/**
* The [[KafkaWriter]] class is used to write data from a batch query
* or structured streaming query, given by a [[QueryExecution]], to Kafka.
* The data is assumed to have a value column, and an optional topic and key
* columns. If the topic column is missing, then the topic must come from
* the 'topic' configuration option. If the key column is missing, then a
* null valued key field will be added to the
* [[org.apache.kafka.clients.producer.ProducerRecord]].
*/
private[kafka010] object KafkaWriter extends Logging {
  // Column names the writer recognizes in the input schema.
  val TOPIC_ATTRIBUTE_NAME: String = "topic"
  val KEY_ATTRIBUTE_NAME: String = "key"
  val VALUE_ATTRIBUTE_NAME: String = "value"

  override def toString: String = "KafkaWriter"

  /**
   * Validates that the schema can be written to Kafka: the topic (from the
   * "topic" column or the topic option) must be a String, the optional "key"
   * and the mandatory "value" columns must be String or binary.
   */
  def validateQuery(
      schema: Seq[Attribute],
      kafkaParameters: ju.Map[String, Object],
      topic: Option[String] = None): Unit = {
    // The topic comes either from a "topic" column or from the "topic" option.
    val topicExpr = schema.find(_.name == TOPIC_ATTRIBUTE_NAME).getOrElse {
      if (topic.isEmpty) {
        throw new AnalysisException(s"topic option required when no " +
          s"'$TOPIC_ATTRIBUTE_NAME' attribute is present. Use the " +
          s"${KafkaSourceProvider.TOPIC_OPTION_KEY} option for setting a topic.")
      } else {
        Literal(topic.get, StringType)
      }
    }
    if (topicExpr.dataType != StringType) {
      throw new AnalysisException(s"Topic type must be a String")
    }
    // The key is optional: a missing key column means a null key per record.
    val keyExpr = schema.find(_.name == KEY_ATTRIBUTE_NAME)
      .getOrElse(Literal(null, StringType))
    keyExpr.dataType match {
      case StringType | BinaryType => // acceptable key type
      case _ =>
        throw new AnalysisException(s"$KEY_ATTRIBUTE_NAME attribute type " +
          s"must be a String or BinaryType")
    }
    // The value column is mandatory.
    val valueExpr = schema.find(_.name == VALUE_ATTRIBUTE_NAME).getOrElse(
      throw new AnalysisException(s"Required attribute '$VALUE_ATTRIBUTE_NAME' not found")
    )
    valueExpr.dataType match {
      case StringType | BinaryType => // acceptable value type
      case _ =>
        throw new AnalysisException(s"$VALUE_ATTRIBUTE_NAME attribute type " +
          s"must be a String or BinaryType")
    }
  }

  /**
   * Writes the rows of the given query to Kafka, one producer task per
   * partition. The schema is validated before any task is launched; each
   * task's producer is closed even if the write fails.
   */
  def write(
      sparkSession: SparkSession,
      queryExecution: QueryExecution,
      kafkaParameters: ju.Map[String, Object],
      topic: Option[String] = None): Unit = {
    val schema = queryExecution.analyzed.output
    validateQuery(schema, kafkaParameters, topic)
    queryExecution.toRdd.foreachPartition { iter =>
      val task = new KafkaWriteTask(kafkaParameters, schema, topic)
      Utils.tryWithSafeFinally {
        task.execute(iter)
      } {
        task.close()
      }
    }
  }
}
| bravo-zhang/spark | external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaWriter.scala | Scala | apache-2.0 | 3,756 |
package xml.circumstances
import javax.xml.bind.DatatypeConverter
import gov.dwp.carers.security.encryption.EncryptorAES
import models.domain.{CircumstancesYourDetails, Claim}
import models.view.CachedChangeOfCircs
import org.specs2.mutable._
import utils.WithApplication
/**
* Created by neddakaltcheva on 3/14/14.
*/
class CareeSpec extends Specification {
  section("unit")
  "Careree" should {
    "generate Careree xml from a given circumstances" in new WithApplication {
      // Fixture: the reported change-of-circumstances details.
      val details = CircumstancesYourDetails(
        theirFirstName = "Phil",
        theirSurname = "Smith",
        theirRelationshipToYou = "Wife of civil partner"
      )
      val circsClaim = Claim(CachedChangeOfCircs.key).update(details)
      val careeXml = Caree.xml(circsClaim)

      (careeXml \\ "CareeDetails" \\ "OtherNames" \\ "Answer").text shouldEqual details.theirFirstName
      // Surname is stored AES-encrypted and base64-encoded; decrypt before comparing.
      val encodedSurname = (careeXml \\ "CareeDetails" \\ "Surname" \\ "Answer").text
      (new EncryptorAES).decrypt(DatatypeConverter.parseBase64Binary(encodedSurname)) shouldEqual details.theirSurname
      (careeXml \\ "CareeDetails" \\ "RelationToClaimant" \\ "Answer").text shouldEqual details.theirRelationshipToYou
    }
  }
  section("unit")
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/test/xml/circumstances/CareeSpec.scala | Scala | mit | 1,178 |
package pe.ambivalenta.roast.model
import scala.math.round
import pe.ambivalenta.roast.model.types._
object Calculator {
  import Doneness._
  import WeightUnit._

  // Grams per ounce; a pound is 16 ounces. Shared by the imperial conversions.
  private val GramsPerOunce = 28.35

  /**
   * Converts any supported weight into grams so all downstream calculations
   * can work in a single unit.
   */
  def normaliseWeight(weight: Weight): Weight = weight.units match {
    // force all weights into Grams to make life easier elsewhere.
    case WeightUnit.Grams => weight
    case WeightUnit.Kilograms => Weight(weight.quantity * 1000.0, WeightUnit.Grams)
    case WeightUnit.Ounces => Weight(weight.quantity * GramsPerOunce, WeightUnit.Grams)
    case WeightUnit.Pounds => Weight(weight.quantity * 16 * GramsPerOunce, WeightUnit.Grams)
  }

  /** The animals this calculator knows how to roast. */
  def animals(): Vector[String] = Vector("Beef", "Chicken", "Lamb", "Pork", "Venison", "Turkey")

  /**
   * 'Sizzle' time (minutes) on high heat (~220c). This will eventually need to support
   * Animal as an additional parameter as we want to deal with poultry. This time isn't
   * linear with the size of the joint being roasted, so it is clamped to [20, 40].
   */
  def calculateSizzle(weight: Weight): Long = {
    val scaled = normaliseWeight(weight).quantity / 100.0
    if (scaled < 20.0) 20
    else if (scaled > 40.0) 40
    else round(scaled)
  }

  /**
   * The 'main' longer, slower roast (e.g. at 180c), in minutes, computed from a
   * per-500g time that depends on the animal and requested doneness.
   */
  def calculateNormalCookingTime(animal: Animal, doneness: Doneness): Long = {
    val weightInGrams = normaliseWeight(animal.weight).quantity
    // Large joints (>= 5kg) get shorter per-500g times to prevent them from being toast.
    val isBig = weightInGrams >= 5000
    round((weightInGrams / 500.0) * (animal match {
      case _: Pork => doneness match {
        case Doneness.Well => 25.0
        case _ => 0.0 // Undercooking pork is probably a bad idea.
      }
      case poultry: Poultry => poultry match {
        case _: Chicken => 15.0
        case _: Turkey => 20.0
      }
      case _ => doneness match {
        case Doneness.Rare => if (isBig) 9.0 else 10.0
        case Doneness.Medium => if (isBig) 12.0 else 15.0
        case Doneness.Well => if (isBig) 18.0 else 20.0
      }
    }))
  }

  /** Returns (sizzle minutes, slow-roast minutes); poultry always gets a fixed 20-minute sizzle. */
  def calculateTotalCookingTimes(animal: Animal, doneness: Doneness): (Long, Long) = {
    val sizzle = animal match {
      case _: Poultry => 20
      case _ => calculateSizzle(animal.weight)
    }
    (sizzle, calculateNormalCookingTime(animal, doneness))
  }

  /** Celsius to Fahrenheit, rounded to the nearest whole degree. */
  def convertToFahrenheit(temp: Double): Double = round(temp * (9.0 / 5.0) + 32.0)
}
| AmbivalentApe/roastingtimes | src/main/scala/roasting/roast.scala | Scala | gpl-2.0 | 2,349 |
package edu.umass.cs.iesl.scalacommons
import collection.JavaConversions
import com.davidsoergel.dsutils.range.{ Interval, BasicInterval, MultiIntervalUnion }
import scala.Double
trait GenericIntervals[T] {

  /**
   * Returns the gaps between consecutive intervals, bounded by [min, max]:
   * degenerate (min, min) and (max, max) sentinels are added so that the space
   * before the first interval and after the last one are reported as holes too.
   */
  def invert(list: List[(T, T)], min: T, max: T): List[(T, T)] = invertIgnoreEdges(((min, min) :: list) :+ (max -> max))

  /** Subclasses decide what "sorted and non-overlapping" means for T. */
  def isSortedNonOverlapping(list: List[(T, T)]): Boolean

  /**
   * Returns the gaps between consecutive intervals, ignoring the edges (only the
   * holes between intervals are reported, not the space outside them).
   *
   * Fix: the previous implementation recursed on the tail and re-ran
   * `require(isSortedNonOverlapping(...))` on every suffix, making validation
   * O(n^2); the precondition is now checked exactly once up front, and the holes
   * are produced by pairing each interval with its successor.
   */
  def invertIgnoreEdges(list: List[(T, T)]): List[(T, T)] = {
    // require list is sorted and nonoverlapping
    require(isSortedNonOverlapping(list))
    list match {
      case Nil | _ :: Nil => Nil
      case _ => list.zip(list.tail).map { case (a, b) => (a._2, b._1) }
    }
  }
}
}
/**
* Because scala Double is actually java double, it's not obvious how to make things generic for Double and Float because there's no common superclass to
* provide the basic operators. Oh well, just cut and paste for now.
*/
object DoubleIntervals extends GenericIntervals[Double] {

  /** An ordered (min, max) pair; construction fails if max < min. */
  class DoubleInterval(val min: Double, val max: Double) extends Tuple2[Double, Double](min, max) {
    require(max >= min)
    def width = max - min
  }

  implicit def tupleToDoubleInterval(t: (Double, Double)): DoubleInterval =
    {
      new DoubleInterval(t._1, t._2)
    }

  /**
   * The widest gap between the given intervals, provided it is at least
   * `minimum` wide; None otherwise.
   */
  def largestHole(list: List[(Double, Double)], minimum: Int): Option[(Double, Double)] =
    {
      holesBySize(list) match {
        case widest :: _ if widest._2 - widest._1 >= minimum => Some(widest)
        case _ => None
      }
    }

  /**
   * Gaps between the given intervals, widest first. The type parameter T is
   * unused and retained only for source compatibility with existing callers.
   *
   * Fix: the input is now sorted by interval start before inversion; the
   * previous code sorted the input by descending width (x._1 - x._2), which
   * violated invertIgnoreEdges' sorted-by-position precondition for most inputs.
   */
  def holesBySize[T](list: List[(Double, Double)]): List[(Double, Double)] =
    {
      val holes = invertIgnoreEdges(list.sortBy(_._1))
      holes.sortBy[Double]((x: (Double, Double)) => x._1 - x._2) // note reverse sort: widest holes first
    }

  implicit def tupleToInterval(t: (Double, Double)): Interval[java.lang.Double] =
    {
      new BasicInterval[java.lang.Double](t._1, t._2, true, true)
    }

  implicit def intervalToTuple(i: Interval[java.lang.Double]): (Double, Double) = (i.getMin, i.getMax)

  /** Merges an arbitrary set of intervals into a sorted, non-overlapping union. */
  def union(intervals: Seq[(Double, Double)]): List[(Double, Double)] =
    {
      val i: Seq[Interval[java.lang.Double]] = intervals.map(tupleToInterval)
      val u: MultiIntervalUnion[java.lang.Double] = new MultiIntervalUnion[java.lang.Double](JavaConversions.setAsJavaSet(i.toSet))
      val r = JavaConversions.asScalaIterator[Interval[java.lang.Double]](u.iterator()).toList
      r.map(intervalToTuple)
    }

  /**
   * True when every interval is well-formed (max >= min) and consecutive
   * intervals are in start order and do not overlap (touching is allowed).
   *
   * Fixes two defects in the previous implementation:
   *  - sliding(2) emits a single undersized group for one-element lists, so
   *    `p.tail.head` threw NoSuchElementException;
   *  - the comparison was inverted (it flagged a *gap* between neighbours as
   *    bad, rejecting exactly the inputs the hole-finding methods operate on).
   */
  def isSortedNonOverlapping(tuples: List[(Double, Double)]): Boolean =
    {
      val wellFormed = tuples.forall(x => x._2 >= x._1)
      val ordered = tuples.zip(tuples.drop(1)).forall { case (a, b) => b._1 >= a._2 }
      wellFormed && ordered
    }
}
object FloatIntervals extends GenericIntervals[Float] {

  /** An ordered (min, max) pair; construction fails if max < min. */
  class FloatInterval(val min: Float, val max: Float) extends Tuple2[Float, Float](min, max) {
    require(max >= min)
    def width = max - min
  }

  implicit def tupleToFloatInterval(t: (Float, Float)): FloatInterval =
    {
      new FloatInterval(t._1, t._2)
    }

  /**
   * The widest gap between the given intervals, provided it is at least
   * `minimum` wide; None otherwise.
   */
  def largestHole(list: List[(Float, Float)], minimum: Int): Option[(Float, Float)] =
    {
      holesBySize(list) match {
        case widest :: _ if widest._2 - widest._1 >= minimum => Some(widest)
        case _ => None
      }
    }

  /**
   * Gaps between the given intervals, widest first.
   *
   * Fix: the input is now sorted by interval start before inversion; the
   * previous code sorted the input by descending width (x._1 - x._2), which
   * violated invertIgnoreEdges' sorted-by-position precondition for most inputs.
   */
  def holesBySize(list: List[(Float, Float)]): List[(Float, Float)] =
    {
      val holes = invertIgnoreEdges(list.sortBy(_._1))
      holes.sortBy[Float]((x: (Float, Float)) => x._1 - x._2) // note reverse sort: widest holes first
    }

  implicit def tupleToInterval(t: (Float, Float)): Interval[java.lang.Float] =
    {
      new BasicInterval[java.lang.Float](t._1, t._2, true, true)
    }

  implicit def intervalToTuple(i: Interval[java.lang.Float]): (Float, Float) = (i.getMin, i.getMax)

  /**
   * Convert an arbitrary set of intervals to a set of nonoverlapping intervals
   * @param intervals
   * @return
   */
  def union(intervals: Seq[(Float, Float)]): List[(Float, Float)] = {
    val i: Seq[Interval[java.lang.Float]] = intervals.map(tupleToInterval)
    val u: MultiIntervalUnion[java.lang.Float] = new MultiIntervalUnion[java.lang.Float](JavaConversions.setAsJavaSet(i.toSet))
    val r = JavaConversions.asScalaIterator[Interval[java.lang.Float]](u.iterator()).toList
    r.map(intervalToTuple)
  }

  /**
   * True when every interval is well-formed (max >= min) and consecutive
   * intervals are in start order and do not overlap (touching is allowed).
   *
   * Fixes two defects in the previous implementation:
   *  - sliding(2) emits a single undersized group for one-element lists, so
   *    `p.tail.head` threw NoSuchElementException;
   *  - the comparison was inverted (it flagged a *gap* between neighbours as
   *    bad, rejecting exactly the inputs the hole-finding methods operate on).
   */
  def isSortedNonOverlapping(tuples: List[(Float, Float)]): Boolean =
    {
      val wellFormed = tuples.forall(x => x._2 >= x._1)
      val ordered = tuples.zip(tuples.drop(1)).forall { case (a, b) => b._1 >= a._2 }
      wellFormed && ordered
    }
}
| iesl/scalacommons | src/main/scala/edu/umass/cs/iesl/scalacommons/DoubleIntervals.scala | Scala | apache-2.0 | 5,032 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.check
import io.gatling.commons.validation._
// Shared constants for validators.
object Validator {
  // Canonical failure used whenever a check extracted no value at all.
  val FoundNothingFailure: Failure = "found nothing".failure
}
// A named rule applied to the optionally-extracted value of a check.
trait Validator[A] {
  // Human-readable name used when reporting the check in failure messages.
  def name: String
  // Returns the (possibly absent) value on success, or a Failure describing why it was rejected.
  def apply(actual: Option[A]): Validation[Option[A]]
}
// Base class for validators that compare the extracted value against an expectation.
// Failure messages produced by doMatch are prefixed with "but actually " for readability.
abstract class Matcher[A] extends Validator[A] {
  // Subclasses implement the actual comparison and return a raw failure message on mismatch.
  def doMatch(actual: Option[A]): Validation[Option[A]]
  def apply(actual: Option[A]): Validation[Option[A]] =
    doMatch(actual).mapError(message => s"but actually $message")
}
// Succeeds only when a value was extracted and it equals `expected`.
class IsMatcher[A](expected: A) extends Matcher[A] {

  def name = s"is($expected)"

  def doMatch(actual: Option[A]): Validation[Option[A]] = actual match {
    case None => Validator.FoundNothingFailure
    case Some(value) if value == expected => actual.success
    case Some(value) => s"found $value".failure
  }
}
// Succeeds only when a value was extracted and that value is null.
class IsNullMatcher[A] extends Matcher[A] {

  def name = "isNull"

  def doMatch(actual: Option[A]): Validation[Option[A]] = actual match {
    case None => Validator.FoundNothingFailure
    case Some(value) if value == null => actual.success
    case Some(value) => s"found $value".failure
  }
}
// Succeeds when nothing was extracted, or when the extracted value differs from `expected`.
class NotMatcher[A](expected: A) extends Matcher[A] {

  def name = s"not($expected)"

  def doMatch(actual: Option[A]): Validation[Option[A]] = actual match {
    case None => NoneSuccess
    case Some(value) if value != expected => actual.success
    case Some(value) => s"unexpectedly found $value".failure
  }
}
// Succeeds when nothing was extracted, or when the extracted value is non-null.
class NotNullMatcher[A] extends Matcher[A] {

  def name = "notNull"

  def doMatch(actual: Option[A]): Validation[Option[A]] = actual match {
    case None => NoneSuccess
    case Some(value) if value != null => actual.success
    case Some(_) => "found null".failure
  }
}
// Succeeds only when a value was extracted and it belongs to the `expected` sequence.
class InMatcher[A](expected: Seq[A]) extends Matcher[A] {

  def name = expected.mkString("in(", ",", ")")

  def doMatch(actual: Option[A]): Validation[Option[A]] = actual match {
    case None => Validator.FoundNothingFailure
    case Some(value) if expected.contains(value) => actual.success
    case Some(value) => s"found $value".failure
  }
}
// Generic binary comparison matcher (e.g. lessThan, greaterThan) parameterized by
// its name, the failure wording, and the comparison function itself.
class CompareMatcher[A](val comparisonName: String, message: String, compare: (A, A) => Boolean, expected: A) extends Matcher[A] {

  def name = s"$comparisonName($expected)"

  def doMatch(actual: Option[A]): Validation[Option[A]] = actual match {
    case Some(value) if compare(value, expected) => actual.success
    case Some(value) => s"$value is not $message $expected".failure
    case _ => s"can't compare nothing and $expected".failure
  }
}
// Succeeds whenever any value at all was extracted.
class ExistsValidator[A] extends Validator[A] {

  val name = "exists"

  def apply(actual: Option[A]): Validation[Option[A]] =
    actual match {
      case None => Validator.FoundNothingFailure
      case _ => actual.success
    }
}
// Succeeds only when no value was extracted.
class NotExistsValidator[A] extends Validator[A] {

  val name = "notExists"

  def apply(actual: Option[A]): Validation[Option[A]] =
    actual match {
      case None => NoneSuccess
      case Some(value) => s"unexpectedly found $value".failure
    }
}
// Pass-through validator: accepts whatever was (or wasn't) extracted.
class NoopValidator[A] extends Validator[A] {
  val name = "noop"
  def apply(actual: Option[A]): Validation[Option[A]] = actual.success
}
| wiacekm/gatling | gatling-core/src/main/scala/io/gatling/core/check/Validator.scala | Scala | apache-2.0 | 3,947 |
package mesosphere.marathon
package api.v2.validation
import java.util.regex.Pattern
import com.wix.accord.Descriptions.{Generic, Path}
import com.wix.accord._
import com.wix.accord.dsl._
import mesosphere.marathon.api.v2.Validation.{featureEnabled, _}
import mesosphere.marathon.core.externalvolume.ExternalVolumes
import mesosphere.marathon.raml._
import mesosphere.marathon.state.{AppDefinition, PathId, ResourceRole}
import mesosphere.marathon.stream.Implicits._
import scala.util.Try
trait AppValidation {
import ArtifactValidation._
import EnvVarValidation._
import NetworkValidation._
import PathId.{empty => _, _}
import SchedulingValidation._
import SecretValidation._
val portDefinitionsValidator: Validator[Seq[PortDefinition]] = validator[Seq[PortDefinition]] {
portDefinitions =>
portDefinitions is elementsAreUniqueByOptional(_.name, "Port names must be unique.")
portDefinitions is elementsAreUniqueBy(_.port, "Ports must be unique.",
filter = { (port: Int) => port != AppDefinition.RandomPortValue })
}
val portMappingsIndependentOfNetworks = validator[Seq[ContainerPortMapping]] { portMappings =>
portMappings is elementsAreUniqueByOptional(_.name, "Port names must be unique.")
}
private def portMappingNetworkNameValidator(networkNames: Seq[String]) = validator[ContainerPortMapping] { portMapping =>
portMapping.networkNames is every(oneOf(networkNames: _*))
}
private def portMappingIsCompatibleWithNetworks(networks: Seq[Network]): Validator[ContainerPortMapping] = {
val hostPortRequiresNetworkName = isTrue[ContainerPortMapping](
AppValidationMessages.NetworkNameRequiredForMultipleContainerNetworks) { mapping =>
mapping.hostPort.isEmpty || mapping.networkNames.length == 1
}
implied(networks.count(_.mode == NetworkMode.Container) > 1)(hostPortRequiresNetworkName)
}
def portMappingsValidator(networks: Seq[Network]): Validator[Seq[ContainerPortMapping]] = validator { portMappings =>
portMappings is portMappingsIndependentOfNetworks
portMappings is every(portMappingIsCompatibleWithNetworks(networks))
portMappings is every(portMappingNetworkNameValidator(networks.flatMap(_.name)))
}
def dockerDockerContainerValidator(networks: Seq[Network]): Validator[Container] = {
val validDockerEngineSpec: Validator[DockerContainer] = validator[DockerContainer] { docker =>
docker.image is notEmpty
docker.pullConfig is isTrue("pullConfig is not supported with Docker containerizer")(_.isEmpty)
docker.portMappings is optional(portMappingsValidator(networks))
}
validator { (container: Container) =>
container.docker is definedAnd(validDockerEngineSpec)
}
}
def mesosDockerContainerValidator(enabledFeatures: Set[String], secrets: Map[String, SecretDef]): Validator[Container] = {
val validPullConfigSpec: Validator[DockerPullConfig] = validator[DockerPullConfig] { pullConfig =>
pullConfig is isTrue("pullConfig.secret must refer to an existing secret")(
config => secrets.contains(config.secret))
}
val validMesosEngineSpec: Validator[DockerContainer] = validator[DockerContainer] { docker =>
docker.image is notEmpty
docker.pullConfig is empty or featureEnabled(enabledFeatures, Features.SECRETS)
docker.pullConfig is optional(validPullConfigSpec)
}
validator { (container: Container) =>
container.docker is definedAnd(validMesosEngineSpec)
}
}
val mesosAppcContainerValidator: Validator[Container] = {
val prefix = "sha512-"
val validId: Validator[String] =
isTrue[String](s"id must begin with '$prefix',") { id =>
id.startsWith(prefix)
} and isTrue[String](s"id must contain non-empty digest after '$prefix'.") { id =>
id.length > prefix.length
}
val validMesosEngineSpec: Validator[AppCContainer] = validator[AppCContainer] { appc =>
appc.image is notEmpty
appc.id is optional(validId)
}
validator{ (container: Container) =>
container.appc is definedAnd(validMesosEngineSpec)
}
}
val mesosImagelessContainerValidator: Validator[Container] =
// placeholder, there is no additional validation to do for a non-image-based mesos container
new NullSafeValidator[Container](_ => true, _ => Failure(Set.empty))
val validOldContainerAPI: Validator[Container] = new Validator[Container] {
val forDockerContainerizer: Validator[Container] = {
val oldDockerDockerContainerAPI: Validator[DockerContainer] = validator[DockerContainer] { docker =>
docker.credential is empty // credentials aren't supported this way anymore
}
validator[Container] { container =>
container.docker is optional(oldDockerDockerContainerAPI)
}
}
val forMesosContainerizer: Validator[Container] = {
val oldMesosDockerContainerAPI: Validator[DockerContainer] = validator[DockerContainer] { docker =>
docker.credential is empty // credentials aren't supported this way anymore
docker.network is empty
docker.parameters is empty
docker.portMappings is empty
}
validator[Container] { container =>
container.docker is optional(oldMesosDockerContainerAPI)
}
}
override def apply(container: Container): Result = {
(container.docker, container.appc, container.`type`) match {
case (Some(_), None, EngineType.Docker) => validate(container)(forDockerContainerizer)
case (Some(_), None, EngineType.Mesos) => validate(container)(forMesosContainerizer)
case _ => Success // canonical validation picks up where we leave off
}
}
}
def validContainer(enabledFeatures: Set[String], networks: Seq[Network], secrets: Map[String, SecretDef]): Validator[Container] = {
// When https://github.com/wix/accord/issues/120 is resolved, we can inline this expression again
def secretVolumes(container: Container) =
container.volumes.filterPF { case _: AppSecretVolume => true }
def volumesValidator(container: Container): Validator[Seq[AppVolume]] =
isTrue("Volume names must be unique") { (vols: Seq[AppVolume]) =>
val names: Seq[String] = vols.collect{ case v: AppExternalVolume => v.external.name }.flatten
names.distinct.size == names.size
} and every(validVolume(container, enabledFeatures, secrets))
val validGeneralContainer: Validator[Container] = validator[Container] { container =>
container.portMappings is optional(portMappingsValidator(networks))
container.volumes is volumesValidator(container)
secretVolumes(container) is empty or featureEnabled(enabledFeatures, Features.SECRETS)
}
val mesosContainerImageValidator = new Validator[Container] {
override def apply(container: Container): Result = {
(container.docker, container.appc, container.`type`) match {
case (Some(_), None, EngineType.Mesos) => validate(container)(mesosDockerContainerValidator(enabledFeatures, secrets))
case (None, Some(_), EngineType.Mesos) => validate(container)(mesosAppcContainerValidator)
case (None, None, EngineType.Mesos) => validate(container)(mesosImagelessContainerValidator)
case _ => Failure(Set(RuleViolation(container, "mesos containers should specify, at most, a single image type")))
}
}
}
forAll(
validGeneralContainer,
{ c: Container => c.`type` == EngineType.Docker } -> dockerDockerContainerValidator(networks),
{ c: Container => c.`type` == EngineType.Mesos } -> mesosContainerImageValidator
)
}
def validVolume(container: Container, enabledFeatures: Set[String], secrets: Map[String, SecretDef]): Validator[AppVolume] = new Validator[AppVolume] {
import state.PathPatterns._
val validHostVolume = validator[AppHostVolume] { v =>
v.containerPath is notEmpty
v.hostPath is notEmpty
}
val validPersistentVolume = {
val notHaveConstraintsOnRoot = isTrue[PersistentVolumeInfo](
"Constraints on root volumes are not supported") { info =>
if (info.`type`.forall(_ == PersistentVolumeType.Root)) // default is Root, see AppConversion
info.constraints.isEmpty
else
true
}
val meetMaxSizeConstraint = isTrue[PersistentVolumeInfo]("Only mount volumes can have maxSize") { info =>
info.`type`.contains(PersistentVolumeType.Mount) || info.maxSize.isEmpty
}
val haveProperlyOrderedMaxSize = isTrue[PersistentVolumeInfo]("Max size must be larger than size") { info =>
info.maxSize.forall(_ > info.size)
}
val complyWithVolumeConstraintRules: Validator[Seq[String]] = new Validator[Seq[String]] {
override def apply(c: Seq[String]): Result = {
import Protos.Constraint.Operator._
(c.headOption, c.lift(1), c.lift(2)) match {
case (None, None, _) =>
Failure(Set(RuleViolation(c, "Missing field and operator")))
case (Some("path"), Some(op), Some(value)) =>
Try(Protos.Constraint.Operator.valueOf(op)).toOption.map {
case LIKE | UNLIKE =>
Try(Pattern.compile(value)).toOption.map(_ => Success).getOrElse(
Failure(Set(RuleViolation(c, "Invalid regular expression", Path(Generic(value)))))
)
case _ =>
Failure(Set(
RuleViolation(c, "Operator must be one of LIKE, UNLIKE")))
}.getOrElse(
Failure(Set(
RuleViolation(c, s"unknown constraint operator $op")))
)
case _ =>
Failure(Set(RuleViolation(c, s"Unsupported constraint ${c.mkString(",")}")))
}
}
}
val validPersistentInfo = validator[PersistentVolumeInfo] { info =>
info.size should be > 0L
info.constraints.each must complyWithVolumeConstraintRules
} and meetMaxSizeConstraint and notHaveConstraintsOnRoot and haveProperlyOrderedMaxSize
validator[AppPersistentVolume] { v =>
v.containerPath is notEqualTo("") and notOneOf(DotPaths: _*)
v.containerPath is matchRegexWithFailureMessage(NoSlashesPattern, "value must not contain \\"/\\"")
v.mode is equalTo(ReadMode.Rw) // see AppConversion, default is RW
v.persistent is validPersistentInfo
}
}
val validExternalVolume: Validator[AppExternalVolume] = {
import state.OptionLabelPatterns._
val validOptions = validator[Map[String, String]] { option =>
option.keys.each should matchRegex(OptionKeyRegex)
}
val validExternalInfo: Validator[ExternalVolumeInfo] = validator[ExternalVolumeInfo] { info =>
info.name is definedAnd(matchRegex(LabelRegex))
info.provider is definedAnd(matchRegex(LabelRegex))
info.options is validOptions
}
forAll(
validator[AppExternalVolume] { v =>
v.containerPath is notEmpty
v.external is validExternalInfo
},
{ v: AppExternalVolume => v.external.provider.nonEmpty } -> ExternalVolumes.validRamlVolume(container),
featureEnabled[AppVolume](enabledFeatures, Features.EXTERNAL_VOLUMES)
)
}
val validSecretVolume: Validator[AppSecretVolume] = {
isTrue("volume.secret must refer to an existing secret")(
vol => secrets.contains(vol.secret))
}
override def apply(v: AppVolume): Result = {
v match {
case v: AppHostVolume => validate(v)(validHostVolume)
case v: AppPersistentVolume => validate(v)(validPersistentVolume)
case v: AppExternalVolume => validate(v)(validExternalVolume)
case v: AppSecretVolume => validate(v)(validSecretVolume) // Validate that the secret reference is valid
case _ => Failure(Set(RuleViolation(v, "Unknown app volume type")))
}
}
}
def readinessCheckValidator(app: App): Validator[ReadinessCheck] = {
// we expect that the deprecated API has already been translated into canonical form
def namesFromDefinitions = app.portDefinitions.fold(Set.empty[String])(_.flatMap(_.name)(collection.breakOut))
def portNames = app.container.flatMap(_.portMappings).fold(namesFromDefinitions)(_.flatMap(_.name)(collection.breakOut))
def portNameExists = isTrue[String]{ name: String => s"No port definition reference for portName $name" } { name =>
portNames.contains(name)
}
validator[ReadinessCheck] { rc =>
rc.portName is portNameExists
rc.timeoutSeconds should be < rc.intervalSeconds
}
}
/**
* all validation that touches deprecated app API fields goes in here
*/
val validateOldAppAPI: Validator[App] = forAll(
validator[App] { app =>
app.container is optional(validOldContainerAPI)
app.container.flatMap(_.docker.flatMap(_.portMappings)) is optional(portMappingsIndependentOfNetworks)
app.ipAddress is optional(isTrue(
"ipAddress/discovery is not allowed for Docker containers") { (ipAddress: IpAddress) =>
!(app.container.exists(c => c.`type` == EngineType.Docker) && ipAddress.discovery.nonEmpty)
})
app.uris is optional(every(api.v2.Validation.uriIsValid) and isTrue(
"may not be set in conjunction with fetch") { (uris: Seq[String]) => !(uris.nonEmpty && app.fetch.nonEmpty) })
},
isTrue("must not specify both container.docker.network and networks") { app =>
!(app.container.exists(_.docker.exists(_.network.nonEmpty)) && app.networks.nonEmpty)
},
isTrue("must not specify both networks and ipAddress") { app =>
!(app.ipAddress.nonEmpty && app.networks.nonEmpty)
},
isTrue("ports must be unique") { (app: App) =>
val withoutRandom: Seq[Int] = app.ports.map(_.filterNot(_ == AppDefinition.RandomPortValue)).getOrElse(Nil)
withoutRandom.distinct.size == withoutRandom.size
},
isTrue("cannot specify both an IP address and port") { app =>
def appWithoutPorts = !(app.ports.exists(_.nonEmpty) || app.portDefinitions.exists(_.nonEmpty))
app.ipAddress.isEmpty || appWithoutPorts
},
isTrue("cannot specify both ports and port definitions") { app =>
def portDefinitionsIsEquivalentToPorts = app.portDefinitions.map(_.map(_.port)) == app.ports
app.ports.isEmpty || app.portDefinitions.isEmpty || portDefinitionsIsEquivalentToPorts
}
)
def validateCanonicalAppAPI(enabledFeatures: Set[String], defaultNetworkName: () => Option[String]): Validator[App] = forAll(
validBasicAppDefinition(enabledFeatures),
validator[App] { app =>
PathId(app.id) as "id" is (PathId.pathIdValidator and PathId.absolutePathValidator and PathId.nonEmptyPath)
app.dependencies.map(PathId(_)) as "dependencies" is every(valid)
app.networks is defaultNetworkNameValidator(defaultNetworkName)
},
isTrue("must not be root")(!_.id.toPath.isRoot),
isTrue("must not be an empty string")(_.cmd.forall { s => s.length() > 1 }),
isTrue("portMappings are not allowed with host-networking") { app =>
!(app.networks.exists(_.mode == NetworkMode.Host) && app.container.exists(_.portMappings.exists(_.nonEmpty)))
},
isTrue("portDefinitions are only allowed with host-networking") { app =>
!(app.networks.exists(_.mode != NetworkMode.Host) && app.portDefinitions.exists(_.nonEmpty))
}
)
def validateAppUpdateVersion: Validator[AppUpdate] = forAll(
isTrue("The 'version' field may only be combined with the 'id' field.") { update =>
def onlyVersionOrIdSet: Boolean = update.productIterator.forall {
case x: Some[Any] => x == update.version || x == update.id // linter:ignore UnlikelyEquality
case _ => true
}
update.version.isEmpty || onlyVersionOrIdSet
}
)
/** expects that app is already in canonical form and that someone else is (or will) handle basic app validation */
def validNestedApp(base: PathId): Validator[App] = validator[App] { app =>
PathId(app.id) as "id" is PathId.validPathWithBase(base)
}
def portIndices(app: App): Range = {
// should be kept in sync with AppDefinition.portIndices
app.container.withFilter(_.portMappings.nonEmpty)
.flatMap(_.portMappings).orElse(app.portDefinitions).getOrElse(Nil).indices
}
/** validate most canonical API fields */
private def validBasicAppDefinition(enabledFeatures: Set[String]): Validator[App] = validator[App] { app =>
app.container is optional(validContainer(enabledFeatures, app.networks, app.secrets))
app.portDefinitions is optional(portDefinitionsValidator)
app is containsCmdArgsOrContainer
app.healthChecks is every(portIndexIsValid(portIndices(app)))
app.healthChecks is every(complyWithIpProtocolRules(app.container))
app must haveAtMostOneMesosHealthCheck
app.fetch is every(valid)
app.secrets is { secrets: Map[String, SecretDef] =>
secrets.nonEmpty
} -> (featureEnabled(enabledFeatures, Features.SECRETS))
app.secrets is featureEnabledImplies(enabledFeatures, Features.SECRETS)(secretValidator)
app.env is envValidator(strictNameValidation = false, app.secrets, enabledFeatures)
app.acceptedResourceRoles is optional(ResourceRole.validAcceptedResourceRoles("app", app.residency.isDefined) and notEmpty)
app must complyWithGpuRules(enabledFeatures)
app must complyWithMigrationAPI
app must complyWithReadinessCheckRules
app must complyWithSingleInstanceLabelRules
app must complyWithUpgradeStrategyRules
app must complyWithDockerNetworkingRules
app must requireUnreachableDisabledForResidentTasks
app.constraints.each must complyWithAppConstraintRules
app.networks is ramlNetworksValidator
} and ExternalVolumes.validAppRaml
val requireUnreachableDisabledForResidentTasks =
conditional((app: App) => app.residency.isDefined && app.unreachableStrategy.isDefined)(
isTrue("unreachableStrategy must be disabled for resident tasks") { app =>
app.unreachableStrategy.collectFirst { case x: UnreachableDisabled => x }.isDefined
}
)
/**
* The Mesos docker containerizer implementation only supports a single CNI network.
*/
val complyWithDockerNetworkingRules: Validator[App] =
conditional((app: App) => app.container.fold(false)(_.`type` == EngineType.Docker))(
isTrue(AppValidationMessages.DockerEngineLimitedToSingleContainerNetwork){
_.networks.count(_.mode == NetworkMode.Container) <= 1
}
)
private val complyWithReadinessCheckRules: Validator[App] = validator[App] { app =>
app.readinessChecks.size should be <= 1
app.readinessChecks is every(readinessCheckValidator(app))
}
// TODO: migrate DCOS-specific things to plugins
private val complyWithMigrationAPI: Validator[App] =
isTrue("DCOS_PACKAGE_FRAMEWORK_NAME and DCOS_MIGRATION_API_PATH must be defined" +
" when using DCOS_MIGRATION_API_VERSION") { app =>
val understandsMigrationProtocol = app.labels.get(Apps.LabelDcosMigrationApiVersion).exists(_.nonEmpty)
// if the api version IS NOT set, we're ok
// if the api version IS set, we expect to see a valid version, a frameworkName and a path
def compliesWithMigrationApi =
app.labels.get(Apps.LabelDcosMigrationApiVersion).fold(true) { apiVersion =>
apiVersion == "v1" &&
app.labels.get(Apps.LabelDcosPackageFrameworkName).exists(_.nonEmpty) &&
app.labels.get(Apps.LabelDcosMigrationApiPath).exists(_.nonEmpty)
}
!understandsMigrationProtocol || (understandsMigrationProtocol && compliesWithMigrationApi)
}
private def complyWithGpuRules(enabledFeatures: Set[String]): Validator[App] =
conditional[App](_.gpus > 0) {
isTrue[App]("GPU resources only work with the Mesos containerizer") { app =>
!app.container.exists(_.`type` == EngineType.Docker)
} and featureEnabled(enabledFeatures, Features.GPU_RESOURCES)
}
private def portIndexIsValid(hostPortsIndices: Range): Validator[AppHealthCheck] = {
val marathonProtocols = Set(AppHealthCheckProtocol.Http, AppHealthCheckProtocol.Https, AppHealthCheckProtocol.Tcp)
isTrue("Health check port indices must address an element of the ports array or container port mappings.") { check =>
if (check.command.isEmpty && marathonProtocols.contains(check.protocol)) {
check.portIndex match {
case Some(idx) => hostPortsIndices.contains(idx)
case _ => check.port.nonEmpty || (hostPortsIndices.length == 1 && hostPortsIndices.headOption.contains(0))
}
} else {
true
}
}
}
private def complyWithIpProtocolRules(container: Option[Container]): Validator[AppHealthCheck] =
  isTrue(AppValidationMessages.HealthCheckIpProtocolLimitation) { healthCheck =>
    // Only Mesos-level http/https/tcp checks may use a non-default IP protocol.
    val mesosNetworkProtocols =
      Set(AppHealthCheckProtocol.MesosHttp, AppHealthCheckProtocol.MesosHttps, AppHealthCheckProtocol.MesosTcp)
    def isMesosNetworkHealthCheck = mesosNetworkProtocols.contains(healthCheck.protocol)
    def runsInDockerContainer = container.exists(_.`type` == EngineType.Docker)
    // IPv4 (the default) is always allowed; anything else additionally requires a
    // Docker container combined with a Mesos http/https/tcp check.
    healthCheck.ipProtocol == IpProtocol.Ipv4 || (isMesosNetworkHealthCheck && runsInDockerContainer)
  }
private val haveAtMostOneMesosHealthCheck: Validator[App] = {
  val mesosProtocols = Set(
    AppHealthCheckProtocol.Command,
    AppHealthCheckProtocol.MesosHttp,
    AppHealthCheckProtocol.MesosHttps,
    AppHealthCheckProtocol.MesosTcp)
  isTrue[App]("AppDefinition can contain at most one Mesos health check") { app =>
    // Command health checks are exempt from the limit: previous versions of Marathon
    // allowed saving app definitions with more than one command health check, and we
    // don't want to make them invalid. So only the non-command Mesos checks count.
    val nonCommandMesosChecks = app.healthChecks.count { check =>
      check.command.isEmpty && mesosProtocols.contains(check.protocol)
    }
    nonCommandMesosChecks <= 1
  }
}
private val containsCmdArgsOrContainer: Validator[App] =
  isTrue("AppDefinition must either contain one of 'cmd' or 'args', and/or a 'container'.") { app =>
    val hasCmd = app.cmd.nonEmpty
    val hasArgs = app.args.nonEmpty
    // A container only "counts" when its image spec is consistent with its engine:
    // a docker image with the Docker or Mesos engine, or an appc image with the Mesos engine.
    val hasUsableContainer = app.container.exists { ct =>
      val dockerImage = ct.docker.isDefined && ct.appc.isEmpty &&
        (ct.`type` == EngineType.Docker || ct.`type` == EngineType.Mesos)
      val appcImage = ct.docker.isEmpty && ct.appc.isDefined && ct.`type` == EngineType.Mesos
      dockerImage || appcImage
    }
    // cmd and args are mutually exclusive; a usable container alone is also sufficient.
    if (hasCmd && hasArgs) false
    else hasCmd || hasArgs || hasUsableContainer
  }
}
// Ready-to-use singleton instance of the AppValidation rules, for callers that
// don't need to mix the trait in themselves.
object AppValidation extends AppValidation
// User-facing validation error messages referenced by the validators above.
object AppValidationMessages {
  // Note: we should keep this in sync with PodsValidationMessages
  val NetworkNameRequiredForMultipleContainerNetworks =
    "networkNames must be a single item list when hostPort is specified and more than 1 container network is defined"
  val DockerEngineLimitedToSingleContainerNetwork =
    "may only specify a single container network when using the Docker container engine"
  val HealthCheckIpProtocolLimitation =
    "IPv6 can only be used for container type DOCKER and Mesos http/https/tcp health checks"
}
| gsantovena/marathon | src/main/scala/mesosphere/marathon/api/v2/validation/AppValidation.scala | Scala | apache-2.0 | 23,361 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.concurrent.TimeoutException
import akka.NotUsed
import akka.stream.scaladsl.Source
import akka.util.ByteString
import cmwell.domain._
import cmwell.formats.{FormatExtractor, Formatter}
import cmwell.fts._
import cmwell.tracking.PathStatus
import cmwell.util.collections._
import cmwell.util.concurrent.{SimpleScheduler, travset}
import cmwell.web.ld.cmw.CMWellRDFHelper
import cmwell.web.ld.exceptions.{UnretrievableIdentifierException, UnsupportedURIException}
import cmwell.ws.Settings
import cmwell.ws.util.{ExpandGraphParser, FieldNameConverter, PathGraphExpansionParser}
import com.typesafe.scalalogging.LazyLogging
import controllers.SpaMissingException
import filters.Attrs
import ld.cmw.PassiveFieldTypesCache
import ld.exceptions.{BadFieldTypeException, ConflictingNsEntriesException, ServerComponentNotAvailableException, TooManyNsRequestsException}
import logic.CRUDServiceFS
import org.joda.time.DateTime
import org.joda.time.format.ISODateTimeFormat
import play.api.http.{HttpChunk, HttpEntity}
import play.api.libs.json.Json
import play.api.mvc.Results._
import play.api.mvc.{Headers, Request, ResponseHeader, Result}
import play.utils.InvalidUriEncodingException
import scala.collection.breakOut
import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success, Try}
package object wsutil extends LazyLogging {
// Matches a 32-character lowercase-hex string (the textual form of an infoton uuid).
val Uuid = "([a-f0-9]{32})".r
// Epoch instant, used as a "zero"/sentinel timestamp.
val zeroTime = new DateTime(0L)
// Shared ISO-8601 date-time formatter.
lazy val dtf = ISODateTimeFormat.dateTime()
/**
* Normalize Path.
*
* {{{
* # Scala REPL style
* scala> wsutil.normalizePath("")
* res0: String = /
*
* scala> wsutil.normalizePath("/")
* res1: String = /
*
* scala> wsutil.normalizePath("/xyz/")
* res2: String = /xyz
*
* scala> wsutil.normalizePath("xyz/")
* res3: String = /xyz
*
* scala> wsutil.normalizePath("/x/yz/")
* res4: String = /x/yz
*
* scala> wsutil.normalizePath("x/yz/")
* res5: String = /x/yz
*
* scala> wsutil.normalizePath("/x//yz/")
* res6: String = /x/yz
*
* scala> wsutil.normalizePath("x//yz/")
* res7: String = /x/yz
*
* scala> wsutil.normalizePath("/x///yz/")
* res8: String = /x/yz
*
* scala> wsutil.normalizePath("x///yz/")
* res9: String = /x/yz
*
* scala> wsutil.normalizePath("/x/yz//////")
* res10: String = /x/yz
*
* scala> wsutil.normalizePath("/////x/yz//////")
* res11: String = /x/yz
*
* scala> wsutil.normalizePath("x/yz//////")
* res12: String = /x/yz
*
* scala> wsutil.normalizePath("/////x////yz//////")
* res13: String = /x/yz
*
* scala> wsutil.normalizePath("x////yz//////")
* res14: String = /x/yz
*
* scala> wsutil.normalizePath("/////x////y/z//////")
* res15: String = /x/y/z
*
* scala> wsutil.normalizePath("x////y/z//////")
* res16: String = /x/y/z
*
* scala> wsutil.normalizePath("/xyz")
* res17: String = /xyz
*
* scala> wsutil.normalizePath("xyz")
* res18: String = /xyz
*
* scala> wsutil.normalizePath("/x/yz")
* res19: String = /x/yz
*
* scala> wsutil.normalizePath("x/yz")
* res20: String = /x/yz
*
* scala> wsutil.normalizePath("/x//yz")
* res21: String = /x/yz
*
* scala> wsutil.normalizePath("x//yz")
* res22: String = /x/yz
*
* scala> wsutil.normalizePath("/x///yz")
* res23: String = /x/yz
*
* scala> wsutil.normalizePath("x///yz")
* res24: String = /x/yz
*
* scala> wsutil.normalizePath("/////x/yz")
* res25: String = /x/yz
*
* scala> wsutil.normalizePath("/////x////yz")
* res26: String = /x/yz
*
* scala> wsutil.normalizePath("x////yz")
* res27: String = /x/yz
*
* scala> wsutil.normalizePath("/////x////y/z")
* res28: String = /x/y/z
*
* scala> wsutil.normalizePath("x////y/z")
* res29: String = /x/y/z
*
* scala> wsutil.normalizePath("//x/y/z")
* res30: String = /x/y/z
* }}}
*
* OK, so this is an overkill. (thanks Mark...!!!)
* original code was:
* if(path.length > 1 && path.charAt(path.length-1) == '/') path.dropRight(1) else path
*
* but it wasn't good enough, so we changed it to the following idiomatic code:
* if (path.forall(_ == '/')) "/"
* else if (path.length > 1 && path.last == '/') path.dropTrailingChars('/')
* else path
*
* but then, Mark said it can be optimized further.
* now (after optimization challenge accepted),
* we resulted with the following bloated code.
   * if you read this whole comment, please add at least one doctest above.
*/
def normalizePath(path: String): String = {
  // Single pass over `path` that:
  //  * guarantees exactly one leading '/'
  //  * collapses every run of consecutive '/' into a single one
  //  * drops any trailing '/'
  //  * returns "/" for empty or all-slash input.
  // Fixes a defect in the previous hand-optimized state machine: for all-slash
  // inputs of length >= 2 (e.g. "//") it returned the input unchanged, whereas the
  // documented contract above (`if (path.forall(_ == '/')) "/"`) requires "/".
  val sb = new StringBuilder(path.length + 1)
  // Starts true so the first non-slash character is always preceded by a '/';
  // a run of slashes only ever sets it once, which collapses duplicates and
  // silently drops a trailing run.
  var pendingSlash = true
  var i = 0
  while (i < path.length) {
    val c = path.charAt(i)
    if (c == '/') pendingSlash = true
    else {
      if (pendingSlash) {
        sb += '/'
        pendingSlash = false
      }
      sb += c
    }
    i += 1
  }
  if (sb.isEmpty) "/" else sb.result()
}
implicit class StringExtensions(s: String) {
  /** Removes the longest suffix of `s` whose characters all satisfy `p`. */
  def dropRightWhile(p: (Char) => Boolean): String = {
    var end = s.length
    while (end > 0 && p(s.charAt(end - 1))) end -= 1
    s.substring(0, end)
  }
  /** Removes all trailing occurrences of any of the given characters. */
  def dropTrailingChars(c: Char*): String = {
    val trailing = c.toSet
    dropRightWhile(trailing.contains)
  }
}
// Resolves the response content-type header from the request, letting an explicit
// "override-mimetype" query parameter take precedence over `default`.
def overrideMimetype(default: String, req: Request[_]): (String, String) =
  overrideMimetype(default, req.getQueryString("override-mimetype"))
/** Returns the CONTENT_TYPE header pair, preferring the explicit override over `default`. */
def overrideMimetype(default: String, overrideMime: Option[String]): (String, String) =
  play.api.http.HeaderNames.CONTENT_TYPE -> overrideMime.getOrElse(default)
/**
 * Applies a single field filter to an infoton, in-memory.
 * Only the Contains / Equals value operators are supported (enforced by `require`).
 *
 * Must/Should semantics: the field must be present and at least one of its stored
 * values must satisfy ALL the filter's expected values (a missing field fails).
 * MustNot semantics: no stored value may satisfy ANY expected value (a missing
 * field passes).
 */
def filterInfoton(f: SingleFieldFilter, i: Infoton): Boolean = {
  require(f.valueOperator == Contains || f.valueOperator == Equals, s"unsupported ValueOperator: ${f.valueOperator}")
  // How one stored value is matched against one expected value; the require above
  // guarantees we only ever see Contains or Equals here.
  def matches(infotonValue: String, inputValue: String): Boolean =
    if (f.valueOperator == Contains) infotonValue.contains(inputValue)
    else infotonValue == inputValue
  val storedValues = i.fields.flatMap(_.get(f.name))
  f.fieldOperator match {
    case Should | Must =>
      storedValues.fold(false)(_.exists(fv => f.value.forall(v => matches(fv.value.toString, v))))
    case MustNot =>
      storedValues.fold(true)(_.forall(fv => !f.value.exists(v => matches(fv.value.toString, v))))
  }
}
// A field reference paired with its value operator; the field key is either still
// unresolved (Left: needs a namespace lookup) or directly addressable (Right).
type RawField[Op <: FieldValeOperator] = (Op, Either[UnresolvedFieldKey, DirectFieldKey])
// Aggregation filters as parsed from the request, BEFORE field keys are resolved.
// They are turned into concrete AggregationFilter instances by RawAggregationFilter.eval.
sealed trait RawAggregationFilter
// Numeric statistics (min/max/avg/...) over a single field.
case class RawStatsAggregationFilter(name: String = "Statistics Aggregation", field: RawField[FieldValeOperator])
  extends RawAggregationFilter
// Term buckets over a field, optionally nesting further aggregations per bucket.
case class RawTermAggregationFilter(name: String = "Term Aggregation",
                                    field: RawField[FieldValeOperator],
                                    size: Int = 10,
                                    subFilters: Seq[RawAggregationFilter] = Seq.empty)
  extends RawAggregationFilter
// Fixed-interval histogram buckets, with optional extended bounds and nested aggregations.
case class RawHistogramAggregationFilter(name: String = "Histogram Aggregation",
                                         field: RawField[FieldValeOperator],
                                         interval: Int,
                                         minDocCount: Int,
                                         extMin: Option[Long],
                                         extMax: Option[Long],
                                         subFilters: Seq[RawAggregationFilter] = Seq.empty)
  extends RawAggregationFilter
// "Significant terms" buckets, optionally against a background term, with nested aggregations.
case class RawSignificantTermsAggregationFilter(name: String = "Signigicant Terms Aggregation",
                                                field: RawField[FieldValeOperator],
                                                backgroundTerm: Option[(String, String)],
                                                minDocCount: Int,
                                                size: Int,
                                                subFilters: Seq[RawAggregationFilter] = Seq.empty)
  extends RawAggregationFilter
// Approximate distinct-value count of a field.
case class RawCardinalityAggregationFilter(name: String,
                                           field: RawField[FieldValeOperator],
                                           precisionThreshold: Option[Long])
  extends RawAggregationFilter
object RawAggregationFilter {
  // breakOut instance shared by all cases below: maps a Set[String] of resolved
  // field names straight into a List[AggregationFilter] without an intermediate Set.
  private[this] val lbo = scala.collection.breakOut[Set[String], AggregationFilter, List[AggregationFilter]]
  // When one raw field key resolves to SEVERAL internal field names, each produced
  // aggregation needs a distinct name; derive it from the field name's type prefix.
  private def uniq(fn: String, name: String) = {
    if (fn.length > 1 && fn.tail.head == '$') s"-${fn.head}- $name"
    else "-s- " + name
  }
  /**
   * Resolves the raw (request-level) aggregation filter into concrete
   * AggregationFilter instances. A single raw filter may fan out into several
   * filters, one per internal field name its field key resolves to; sub-filters
   * are resolved recursively. Each case below follows the same shape:
   * resolve the field key, then build one filter per resolved name.
   */
  def eval(af: RawAggregationFilter,
           cache: PassiveFieldTypesCache,
           cmwellRDFHelper: CMWellRDFHelper,
           timeContext: Option[Long])(implicit ec: ExecutionContext): Future[List[AggregationFilter]] = af match {
    case RawStatsAggregationFilter(name, (op, fk)) =>
      FieldKey.eval(fk, cache, cmwellRDFHelper, timeContext).map { fns =>
        fns.map { fn =>
          val uname = {
            if (fns.size == 1) name
            else uniq(fn, name)
          }
          StatsAggregationFilter(uname, Field(op, fn))
        }(lbo)
      }
    // term aggregation with nested sub-aggregations
    case RawTermAggregationFilter(name, (op, fk), size, rawSubFilters) if rawSubFilters.nonEmpty => {
      val ff = FieldKey.eval(fk, cache, cmwellRDFHelper, timeContext)
      Future.traverse(rawSubFilters)(eval(_, cache, cmwellRDFHelper, timeContext)).flatMap { subFilters =>
        ff.map { fns =>
          fns.map { fn =>
            val uname = {
              if (fns.size == 1) name
              else uniq(fn, name)
            }
            TermAggregationFilter(uname, Field(op, fn), size, subFilters.flatten)
          }(lbo)
        }
      }
    }
    case RawTermAggregationFilter(name, (op, fk), size, rawSubFilters) if rawSubFilters.isEmpty =>
      FieldKey.eval(fk, cache, cmwellRDFHelper, timeContext).map { fns =>
        fns.map { fn =>
          val uname = {
            if (fns.size == 1) name
            else uniq(fn, name)
          }
          TermAggregationFilter(uname, Field(op, fn), size)
        }(lbo)
      }
    // histogram aggregation with nested sub-aggregations
    case RawHistogramAggregationFilter(name, (op, fk), interval, minDocCount, extMin, extMax, rawSubFilters)
        if rawSubFilters.nonEmpty => {
      val ff = FieldKey.eval(fk, cache, cmwellRDFHelper, timeContext)
      Future.traverse(rawSubFilters)(eval(_, cache, cmwellRDFHelper, timeContext)).flatMap { subFilters =>
        ff.map { fns =>
          fns.map { fn =>
            val uname = {
              if (fns.size == 1) name
              else uniq(fn, name)
            }
            HistogramAggregationFilter(uname,
                                       Field(op, fn),
                                       interval,
                                       minDocCount,
                                       extMin,
                                       extMax,
                                       subFilters.flatten)
          }(lbo)
        }
      }
    }
    case RawHistogramAggregationFilter(name, (op, fk), interval, minDocCount, extMin, extMax, rawSubFilters)
        if rawSubFilters.isEmpty =>
      FieldKey.eval(fk, cache, cmwellRDFHelper, timeContext).map { fns =>
        fns.map { fn =>
          val uname = {
            if (fns.size == 1) name
            else uniq(fn, name)
          }
          HistogramAggregationFilter(uname, Field(op, fn), interval, minDocCount, extMin, extMax)
        }(lbo)
      }
    // significant-terms aggregation (no background term) with nested sub-aggregations
    case RawSignificantTermsAggregationFilter(name, (op, fk), None, minDocCount, size, rawSubFilters)
        if rawSubFilters.nonEmpty => {
      val ff = FieldKey.eval(fk, cache, cmwellRDFHelper, timeContext)
      Future.traverse(rawSubFilters)(eval(_, cache, cmwellRDFHelper, timeContext)).flatMap { subFilters =>
        ff.map { fns =>
          fns.map { fn =>
            val uname = {
              if (fns.size == 1) name
              else uniq(fn, name)
            }
            SignificantTermsAggregationFilter(uname, Field(op, fn), None, minDocCount, size, subFilters.flatten)
          }(lbo)
        }
      }
    }
    case RawSignificantTermsAggregationFilter(name, (op, fk), None, minDocCount, size, rawSubFilters)
        if rawSubFilters.isEmpty =>
      FieldKey.eval(fk, cache, cmwellRDFHelper, timeContext).map { fns =>
        fns.map { fn =>
          val uname = {
            if (fns.size == 1) name
            else uniq(fn, name)
          }
          SignificantTermsAggregationFilter(uname, Field(op, fn), None, minDocCount, size)
        }(lbo)
      }
    //TODO: backgroundTerms should also be unevaluated FieldKey. need to fix the parser.
    case RawSignificantTermsAggregationFilter(_, _, Some(_), _, _, _) => ???
    case RawCardinalityAggregationFilter(name, (op, fk), precisionThreshold) =>
      FieldKey.eval(fk, cache, cmwellRDFHelper, timeContext).map { fns =>
        fns.map { fn =>
          val uname = {
            if (fns.size == 1) name
            else uniq(fn, name)
          }
          CardinalityAggregationFilter(uname, Field(op, fn), precisionThreshold)
        }(lbo)
      }
  }
}
// A pattern selecting which internal field names an expansion step applies to.
trait FieldPattern
// A pattern selecting a namespace; either already hashed or still needing resolution.
trait NsPattern
// Namespace identified directly by its internal hash.
case class HashedNsPattern(hash: String) extends NsPattern
// Namespace patterns that must be resolved (asynchronously) to a hash first.
trait ResolvedNsPattern extends NsPattern {
  def resolve(cmwellRDFHelper: CMWellRDFHelper, timeContext: Option[Long])(
    implicit ec: ExecutionContext
  ): Future[String]
}
// Namespace given by its full URI; resolved via url->hash lookup.
case class NsUriPattern(nsUri: String) extends ResolvedNsPattern {
  override def resolve(cmwellRDFHelper: CMWellRDFHelper, timeContext: Option[Long])(implicit ec: ExecutionContext) =
    cmwellRDFHelper.urlToHashAsync(nsUri, timeContext)
}
// Namespace given by its prefix; resolved via prefix->identifier lookup.
case class PrefixPattern(prefix: String) extends ResolvedNsPattern {
  override def resolve(cmwellRDFHelper: CMWellRDFHelper, timeContext: Option[Long])(implicit ec: ExecutionContext) =
    cmwellRDFHelper.getIdentifierForPrefixAsync(prefix, timeContext)
}
// Matches every field.
case object JokerPattern extends FieldPattern
// Matches every field belonging to the given namespace.
case class NsWildCard(nsPattern: NsPattern) extends FieldPattern
// Matches a single (possibly still unresolved) field key.
case class FieldKeyPattern(fieldKey: Either[UnresolvedFieldKey, DirectFieldKey]) extends FieldPattern
// A field pattern optionally combined with a filter applied to the expanded infotons.
case class FilteredField[FP <: FieldPattern](fieldPattern: FP, rawFieldFilterOpt: Option[RawFieldFilter])
// One level of graph expansion: all the field patterns to follow at that level.
case class LevelExpansion(filteredFields: List[FilteredField[FieldPattern]])
// A single hop in a path expansion, either following outgoing or incoming references.
sealed abstract class DirectedExpansion
// Follow INCOMING references (who points at me) via the given fields.
case class ExpandUp(filteredFields: List[FilteredField[FieldKeyPattern]]) extends DirectedExpansion
// Follow OUTGOING references (whom do I point at) via the given fields.
case class ExpandIn(filteredFields: List[FilteredField[FieldPattern]]) extends DirectedExpansion
// A full path: an ordered sequence of directed hops.
case class PathExpansion(pathSegments: List[DirectedExpansion])
// Several alternative paths to expand/filter by.
case class PathsExpansion(paths: List[PathExpansion])
//Some convenience methods & types
// Reads the infoton stored at `path`; when nothing is stored there, a GhostInfoton
// placeholder is returned so downstream expansion code can treat "missing" uniformly.
def getByPath(protocol: String, path: String, crudServiceFS: CRUDServiceFS)(implicit ec: ExecutionContext): Future[Infoton] =
  crudServiceFS.irwService.readPathAsync(path, crudServiceFS.level).map(_.getOrElse(GhostInfoton.ghost(protocol, path)))
// A value paired with the (optional) raw field filters that should be applied to it.
type F[X] = (X, Option[List[RawFieldFilter]])
// Either an infoton still being fetched (Left) or one already at hand (Right),
// each carrying its filters.
type EFX = Either[F[Future[Infoton]], F[Infoton]]
/**
 * Tupled variant of [[filterByRawFieldFilters]]: the second tuple element being
 * `None` means "no filtering at all", so the infoton passes unconditionally.
 */
def filterByRawFieldFiltersTupled(
  cache: PassiveFieldTypesCache,
  cmwellRDFHelper: CMWellRDFHelper,
  timeContext: Option[Long]
)(tuple: (Infoton, Option[List[RawFieldFilter]]))(implicit ec: ExecutionContext): Future[Boolean] = {
  val (infoton, filtersOpt) = tuple
  filtersOpt.fold(Future.successful(true)) { filters =>
    filterByRawFieldFilters(cache, cmwellRDFHelper, timeContext)(infoton, filters)
  }
}
// Applies all `filters` to `infoton`, OR-ing the outcomes: completes with `true` as
// soon as ANY single filter matches (racing via the shared Promise), with `false`
// when the successful evaluations contain no match, and fails (MultipleFailures)
// only when every filter evaluation failed.
def filterByRawFieldFilters(
  cache: PassiveFieldTypesCache,
  cmwellRDFHelper: CMWellRDFHelper,
  timeContext: Option[Long]
)(infoton: Infoton, filters: List[RawFieldFilter])(implicit ec: ExecutionContext): Future[Boolean] = {
  val p = Promise[Boolean]()
  val futures = for {
    filter <- filters
    future = filterByRawFieldFilter(infoton, filter, cache, cmwellRDFHelper, timeContext)
  } yield
    future.andThen {
      // early exit: first filter to evaluate to true completes the promise
      case Success(true) if !p.isCompleted => p.trySuccess(true)
    }
  cmwell.util.concurrent.successes(futures).foreach {
    case Nil =>
      // not a single filter evaluation succeeded — surface all underlying errors
      Future.traverse(futures)(_.failed).foreach { err =>
        p.tryFailure(new cmwell.util.exceptions.MultipleFailures(err))
      }
    case list =>
      // some evaluations succeeded (possibly not all); OR their results unless the
      // early-exit above already won the race
      if (!p.isCompleted) {
        p.trySuccess(list.exists(identity))
      }
  }
  p.future
}
// //if needed: TODO: IMPLEMENT!!!!!
// def filterByRawFieldFilter(infoton: Infoton, filter: RawFieldFilter): Future[Boolean] = filter match {
// case RawSingleFieldFilter(fo,vo,fk,v) => fk.internalKey.map{ internalFieldName =>
// (fo,vo) match {
// case (Must|Should,op) => infoton.fields.exists(_.exists {
// case (fieldName,values) => fieldName == internalFieldName && v.fold(true){ value =>
// op match {
// case Equals => values.exists(_.toString == value)
// case Contains => values.exists(_.toString.contains(value))
// }
// }
// })
// }
// }
// }
// Evaluates a single raw field filter against `infoton` after resolving its field key.
// A NoSuchElementException during resolution (e.g. unknown field) is treated as
// "no match" (false) rather than a failure; any other failure propagates as-is.
def filterByRawFieldFilter(infoton: Infoton,
                           filter: RawFieldFilter,
                           cache: PassiveFieldTypesCache,
                           cmwellRDFHelper: CMWellRDFHelper,
                           timeContext: Option[Long])(implicit ec: ExecutionContext): Future[Boolean] =
  RawFieldFilter.eval(filter, cache, cmwellRDFHelper, timeContext).transform {
    case Success(ff) => Try(ff.filter(infoton).value)
    case Failure(_: NoSuchElementException) => Success(false)
    case failure => failure.asInstanceOf[Failure[Boolean]]
  }
// Expands `infotonsToExpand` one level OUTWARD: follows the FReference field values
// selected by `filteredFields`, retrieves the referenced infotons (from
// `infotonsRetrievedCache` when possible, otherwise from storage), applies the
// per-field raw filters, and returns (newly-retrieved infotons, cache-hit infotons).
def expandIn(filteredFields: List[FilteredField[FieldPattern]],
             infotonsToExpand: Seq[Infoton],
             infotonsRetrievedCache: Map[String, Infoton],
             cmwellRDFHelper: CMWellRDFHelper,
             cache: PassiveFieldTypesCache,
             timeContext: Option[Long])(implicit ec: ExecutionContext): Future[(Seq[Infoton], Seq[Infoton])] = {
  // Turn every field pattern into a predicate over internal field names, paired with
  // that pattern's optional raw filter; unresolved patterns resolve asynchronously.
  val expansionFuncsFut = Future.traverse(filteredFields) {
    case FilteredField(JokerPattern, rffo) =>
      Future.successful({ (internalFieldName: String) =>
        true
      } -> rffo)
    case FilteredField(FieldKeyPattern(Right(dfk)), rffo) => Future.successful((dfk.internalKey == _, rffo))
    case FilteredField(FieldKeyPattern(Left(rfk)), rffo) =>
      FieldKey.resolve(rfk, cmwellRDFHelper, timeContext).map(fk => (fk.internalKey == _, rffo))
    case FilteredField(NsWildCard(HashedNsPattern(hash)), rffo) =>
      Future.successful({ (internalFieldName: String) =>
        internalFieldName.endsWith(s".$hash")
      } -> rffo)
    case FilteredField(NsWildCard(rnp: ResolvedNsPattern), rffo) =>
      rnp
        .resolve(cmwellRDFHelper, timeContext)
        .map(hash => { (internalFieldName: String) =>
          internalFieldName.endsWith(s".$hash")
        } -> rffo)
    case x @ FilteredField(_, _) => logger.error(s"Unexpected input. Received: $x"); ???
  }
  expansionFuncsFut.flatMap { funs =>
    // all the infotons' fields
    val fieldsMaps = infotonsToExpand.map(_.fields).collect { case Some(m) => m }
    // maps reduced into 1 aggregated fields `Map`
    val fieldsReduced = if (fieldsMaps.nonEmpty) fieldsMaps.reduce { (m1, m2) =>
      m1.foldLeft(m2) {
        case (acc, (fieldName, values)) if acc.contains(fieldName) =>
          acc.updated(fieldName, acc(fieldName).union(values))
        case (acc, fieldNameValsTuple) => acc + fieldNameValsTuple
      }
    } else Map.empty[String, Set[FieldValue]]
    // build a list that pairs cmwell paths to retrieval with raw field filters to be applied on
    val cmwPathFilterOptionPairs = fieldsReduced.flatMap {
      case (fieldName, values) =>
        funs.flatMap {
          case (func, _) if !func(fieldName) => Nil
          case (_, rffo) =>
            values.collect {
              case fr: FReference => (fr.getProtocol -> normalizePath(fr.getCmwellPath)) -> rffo
            }(breakOut[Set[FieldValue], ((String,String), Option[RawFieldFilter]), List[((String,String), Option[RawFieldFilter])]])
        }
    }(
      breakOut[Map[String, Set[FieldValue]], ((String,String), Option[RawFieldFilter]), List[((String,String), Option[RawFieldFilter])]]
    )
    // value are `Option[List[...]]` because `None` means no filtering (pass all)
    // which is different from emptylist which means at least 1 filter should apply (i.e: block all)
    val pathToFiltersMap = cmwPathFilterOptionPairs.groupBy(_._1).mapValues(ps => Option.sequence(ps.unzip._2)).toSeq
    // get infotons from either `infotonsRetrievedCache` or from cassandra, and pair with filters option
    val (l, r) = partitionWith(pathToFiltersMap) {
      case ((protocol,path), rffso) => {
        infotonsRetrievedCache.get(path).fold[EFX](Left(getByPath(protocol, path, cmwellRDFHelper.crudServiceFS) -> rffso)) {
          i =>
            Right(i -> rffso)
        }
      }
    }
    // after retrieval, filter out what is not needed
    val lInfotonsFut = Future
      .traverse(l) {
        case (fi, None) => fi.map(Some.apply)
        case (fi, Some(filters)) =>
          fi.flatMap {
            case i =>
              filterByRawFieldFilters(cache, cmwellRDFHelper, timeContext)(i, filters).map {
                case true => Some(i)
                case false => None
              }
          }
      }
      .map(_.collect { case Some(i) => i })
    // also filter the infotons retrieved from "cache"
    val rInfotonsFut = Future
      .traverse(r) {
        case t @ (i, _) =>
          filterByRawFieldFiltersTupled(cache, cmwellRDFHelper, timeContext)(t).map {
            case true => Some(i)
            case false => None
          }
      }
      .map(_.collect { case Some(i) => i })
    // combine results
    lInfotonsFut.zip(rInfotonsFut)
  }
}
// Expands `population` one level INWARD: searches for infotons whose selected fields
// reference any member of `population` (chunked by `chunkSize` to bound query size),
// and returns (newly-fetched infotons, infotons already present in `cache`).
// `infotonsSample` / `pattern` are only used to enrich error messages.
def expandUp(filteredFields: List[FilteredField[FieldKeyPattern]],
             population: Seq[Infoton],
             cmwellRDFHelper: CMWellRDFHelper,
             cache: Map[String, Infoton],
             typesCache: PassiveFieldTypesCache,
             infotonsSample: Seq[Infoton],
             pattern: String,
             chunkSize: Int,
             timeContext: Option[Long])(implicit ec: ExecutionContext): Future[(Seq[Infoton], Seq[Infoton])] = {
  // Builds the search filter for one FilteredField: "field == any of the population's
  // URIs", optionally AND-ed with that field's raw filter.
  def mkFieldFilters2(ff: FilteredField[FieldKeyPattern],
                      outerFieldOperator: FieldOperator,
                      pathsAndProtocols: List[(String,String)]): Future[FieldFilter] = {
    val FilteredField(fkp, rffo) = ff
    val internalFieldNameFut = fkp match {
      case FieldKeyPattern(Right(dfk)) => Future.successful(dfk.internalKey)
      case FieldKeyPattern(Left(unfk)) => FieldKey.resolve(unfk, cmwellRDFHelper, timeContext).map(_.internalKey)
    }
    val filterFut: Future[FieldFilter] = internalFieldNameFut.map { internalFieldName =>
      if(pathsAndProtocols.isEmpty) {
        // an empty chunk should be impossible here — fail loudly with full context
        val sb = new StringBuilder
        sb ++= "empty urls in expandUp("
        sb ++= filteredFields.toString
        sb ++= ",population[size="
        sb ++= population.size.toString
        sb ++= "],cache[size="
        sb ++= cache.size.toString
        sb ++= "])\\nfor pattern: "
        sb ++= pattern
        sb ++= "\\nand infotons.take(3) = "
        sb += '['
        infotonsSample.headOption.foreach { i =>
          sb ++= i.toString
          infotonsSample.tail.foreach { j =>
            sb += ','
            sb ++= j.toString
          }
        }
        sb += ']'
        throw new IllegalStateException(sb.result())
      } else {
        // match the field against ANY uri form of ANY population member
        val shoulds = pathsAndProtocols.flatMap { case (path,protocol) => pathToUris(protocol, path) }.
          map(url => SingleFieldFilter(Should, Equals, internalFieldName, Some(url)))
        MultiFieldFilter(rffo.fold[FieldOperator](outerFieldOperator)(_ => Must), shoulds)
      }
    }
    rffo.fold[Future[FieldFilter]](filterFut) { rawFilter =>
      RawFieldFilter.eval(rawFilter, typesCache, cmwellRDFHelper, timeContext).flatMap { filter =>
        filterFut.map(ff => MultiFieldFilter(outerFieldOperator, List(ff, filter)))
      }
    }
  }
  Future
    .traverse(population.grouped(chunkSize)) { infotonsChunk =>
      val pathsAndProtocols: List[(String,String)] = infotonsChunk.map { i =>
        i.path -> i.protocol.getOrElse(cmwell.common.Settings.defaultProtocol)
      }(breakOut)
      val fieldFilterFut = filteredFields match {
        case Nil =>
          val i = infotonsSample.mkString("[", ",", "]")
          val c = cache.size
          val p = population.size
          throw new IllegalStateException(s"expandUp($filteredFields,population[size=$p],cache[size=$c])\\nfor pattern: $pattern\\nand infotons.take(3) = $i")
        case ff :: Nil => mkFieldFilters2(ff, Must, pathsAndProtocols)
        case _ => Future.traverse(filteredFields)(mkFieldFilters2(_, Should, pathsAndProtocols)).map(MultiFieldFilter(Must, _))
      }
      fieldFilterFut.transformWith {
        // an unresolvable field key simply means nothing can point via it — empty result
        case Failure(_: NoSuchElementException) => Future.successful(Nil -> Nil)
        case Success(ffs) =>
          cmwellRDFHelper.crudServiceFS
            .thinSearch(None,
                        Some(ffs),
                        None,
                        PaginationParams(0, Settings.expansionLimit),
                        withHistory = false,
                        NullSortParam,
                        debugInfo = false,
                        withDeleted = false)
            .flatMap(sr => {
              // fetch from storage only what the caller's cache doesn't already hold
              val (inCache, toFetch) = sr.thinResults.partition(i => cache.contains(i.path))
              cmwellRDFHelper.crudServiceFS.getInfotonsByUuidAsync(toFetch.map(_.uuid)).map {
                _ -> inCache.map(i => cache(i.path))
              }
            })
        case anotherFailures => Future.fromTry(anotherFailures.asInstanceOf[Try[(Seq[Infoton], Seq[Infoton])]])
      }
    }
    .map {
      // merge the per-chunk (fetched, cached) pairs
      case tuples if tuples.isEmpty => (Seq.empty, Seq.empty)
      case tuples =>
        tuples.reduce[(Seq[Infoton], Seq[Infoton])] {
          case ((la, ra), (lb, rb)) => (la ++ lb) -> (ra ++ rb)
        }
    }
}
// Performs "xg"-style multi-level outward graph expansion: parses `xgPattern` into
// per-level expansions and repeatedly applies expandIn. Returns (completed?, infotons);
// the Boolean is false when the walk was truncated by Settings.expansionLimit.
def deepExpandGraph(xgPattern: String,
                    infotons: Seq[Infoton],
                    cmwellRDFHelper: CMWellRDFHelper,
                    cache: PassiveFieldTypesCache,
                    timeContext: Option[Long])(implicit ec: ExecutionContext): Future[(Boolean, Seq[Infoton])] = {
  def expandDeeper(expanders: List[LevelExpansion],
                   infotonsToExpand: Seq[Infoton],
                   infotonsRetrievedCache: Map[String, Infoton]): Future[(Boolean, Seq[Infoton])] = expanders match {
    // done: drop the Ghost placeholders that stood in for missing paths
    case Nil => Future.successful(true -> infotonsRetrievedCache.values.filterNot(_.isInstanceOf[GhostInfoton]).toSeq)
    // over the limit: stop early and report the walk as truncated
    case f :: fs if infotonsRetrievedCache.size > Settings.expansionLimit =>
      Future.successful(false -> infotonsRetrievedCache.values.toSeq)
    case f :: fs => {
      expandIn(f.filteredFields, infotonsToExpand, infotonsRetrievedCache, cmwellRDFHelper, cache, timeContext)
        .flatMap {
          case (lInfotons, rInfotons) =>
            // note: only the newly retrieved (l) infotons are added to the cache here;
            // the r ones came FROM the cache in the first place
            expandDeeper(
              fs,
              lInfotons ++ rInfotons,
              infotonsRetrievedCache ++ lInfotons.map(i => i.path -> i)(
                scala.collection.breakOut[Seq[Infoton], (String, Infoton), Map[String, Infoton]]
              )
            )
        }
    }
  }
  val t = ExpandGraphParser.getLevelsExpansionFunctions(xgPattern).map { fs =>
    expandDeeper(fs, infotons, infotons.map(i => i.path -> i).toMap)
  }
  t match {
    case Success(future) => future
    case Failure(error) => Future.failed(error)
  }
}
// "gqp" filtering: keeps only those of `infotons` from which at least one of the
// paths described by `gqpPattern` can be fully traversed (hopping in/up through the
// graph). Unlike deepExpandGraph, the traversed infotons themselves are NOT returned
// — only the surviving members of the original population.
def gqpFilter(gqpPattern: String,
              infotons: Seq[Infoton],
              cmwellRDFHelper: CMWellRDFHelper,
              typesCache: PassiveFieldTypesCache,
              chunkSize: Int,
              timeContext: Option[Long])(implicit ec: ExecutionContext): Future[Seq[Infoton]] = {
  logger.trace(s"gqpFilter with infotons: [${infotons.map(_.path).mkString(", ")}]")
  // One hop for one candidate: (original infoton, current frontier) => same original
  // paired with the next frontier reached via the directed expansion.
  def filterByDirectedExpansion(
    dexp: DirectedExpansion
  )(iv: (Infoton, Vector[Infoton])): Future[(Infoton, Vector[Infoton])] = {
    logger.trace(
      s"filterByDirectedExpansion($dexp): with original[${iv._1.path}] and current-pop[${iv._2.map(_.path).mkString(", ")}]"
    )
    dexp match {
      case ExpandIn(filteredFields) =>
        expandIn(
          filteredFields,
          iv._2,
          iv._2.map(i => i.path -> i)(
            scala.collection.breakOut[Vector[Infoton], (String, Infoton), Map[String, Infoton]]
          ),
          cmwellRDFHelper,
          typesCache,
          timeContext
        ).map {
          case (l, r) =>
            iv._1 -> {
              val rv = l.toVector ++ r
              logger.trace(
                s"filterByDirectedExpansion($dexp): after expandIn($filteredFields), finished with result[${rv.map(_.path).mkString(", ")}]"
              )
              rv
            }
        }
      case ExpandUp(filteredFields) =>
        expandUp(
          filteredFields,
          iv._2,
          cmwellRDFHelper,
          iv._2.map(i => i.path -> i)(
            scala.collection.breakOut[Vector[Infoton], (String, Infoton), Map[String, Infoton]]
          ),
          typesCache,
          infotons.take(3),
          gqpPattern,
          chunkSize,
          timeContext
        ).map {
          case (l, r) =>
            iv._1 -> {
              val rv = l.toVector ++ r
              logger.trace(
                s"filterByDirectedExpansion($dexp): after expandIn($filteredFields), finished with result[${rv.map(_.path).mkString(", ")}]"
              )
              rv
            }
        }
    }
  }
  // Recursively walks the remaining hops; a candidate survives a hop only when its
  // frontier is still non-empty afterwards. Returns the surviving ORIGINAL infotons.
  def nextFilteringHop(dexp: DirectedExpansion,
                       dexps: List[DirectedExpansion],
                       survivors: Vector[(Infoton, Vector[Infoton])]): Future[Vector[Infoton]] = {
    Future
      .traverse(survivors)(filterByDirectedExpansion(dexp))
      .flatMap { s =>
        val newSurvivors = s.filter(_._2.nonEmpty)
        if (newSurvivors.isEmpty || dexps.isEmpty) Future.successful(newSurvivors.map(_._1))
        else nextFilteringHop(dexps.head, dexps.tail, newSurvivors)
      }
      .andThen {
        case Success(is) =>
          logger.trace(s"nextFilteringHop: finished with survivors[${is.map(_.path).mkString(", ")}]")
        case Failure(ex) => logger.error(s"nextFilteringHop($dexp,$dexps,$survivors)", ex)
      }
  }
  if (gqpPattern.isEmpty) Future.successful(infotons)
  else
    PathGraphExpansionParser.getGQPs(gqpPattern).map {
      case PathsExpansion(paths) =>
        // paths are alternatives: an infoton already admitted by a previous path is
        // not re-checked against the next one
        paths.foldLeft(Future.successful(Vector.empty[Infoton])) {
          case (vecFut, PathExpansion(segments)) if segments.isEmpty => vecFut
          case (vecFut, PathExpansion(segments)) =>
            vecFut.flatMap { vec =>
              val candidates: Vector[(Infoton, Vector[Infoton])] = infotons.collect {
                case i if !vec.contains(i) =>
                  i -> Vector(i)
              }(breakOut[Seq[Infoton], (Infoton, Vector[Infoton]), Vector[(Infoton, Vector[Infoton])]])
              logger.trace(s"appending: [${segments.mkString(", ")}] to vec[${vec
                .map(_.path)
                .mkString(", ")}] with candidates[${candidates.map(_._1.path).mkString(", ")}]")
              nextFilteringHop(segments.head, segments.tail, candidates).map(_ ++: vec)
            }
        }
    } match {
      case Success(future) => future
      case Failure(error) => Future.failed(error)
    }
}
// "yg"-style path expansion: parses `ygPattern` into several paths and walks all of
// them breadth-first over a shared cache. Returns (completed?, infotons); the Boolean
// is false when the walk was truncated by Settings.expansionLimit.
def pathExpansionParser(ygPattern: String,
                        infotons: Seq[Infoton],
                        chunkSize: Int,
                        cmwellRDFHelper: CMWellRDFHelper,
                        typesCache: PassiveFieldTypesCache,
                        timeContext: Option[Long])(implicit ec: ExecutionContext): Future[(Boolean, Seq[Infoton])] = {
  // The state of one path walk: current hop, remaining hops, and current population.
  type Expander = (DirectedExpansion, List[DirectedExpansion], Seq[Infoton])
  /**
   * Converts one hop's expansion result into the next walk state.
   *
   * @param infoCachedOrRetrieved (newly retrieved infotons, cache-hit infotons) of the hop just performed
   * @param tail the hops still remaining for this path
   * @return the next Expander (None when this path is exhausted) paired with
   *         the newly retrieved infotons, which must be added to the shared cache
   */
  def adjustResults(infoCachedOrRetrieved: (Seq[Infoton], Seq[Infoton]),
                    tail: List[DirectedExpansion]): (Option[Expander], Seq[Infoton]) = {
    val (retrieved, cached) = infoCachedOrRetrieved
    tail.headOption.map[Expander](h => (h, tail.tail, cached ++ retrieved)) -> retrieved
  }
  // Advances ALL paths one hop at a time, sharing the retrieved-infotons cache.
  def expandDeeper(expanders: List[Option[Expander]],
                   cache: Map[String, Infoton]): Future[(Boolean, Seq[Infoton])] = {
    if (expanders.forall(_.isEmpty))
      // all paths exhausted: done — drop the Ghost placeholders for missing paths
      Future.successful(true -> cache.values.filterNot(_.isInstanceOf[GhostInfoton]).toSeq)
    else if (cache.count(!_._2.isInstanceOf[GhostInfoton]) > Settings.expansionLimit)
      // too many real infotons accumulated: report truncated
      Future.successful(false -> cache.values.toSeq)
    else
      Future
        .traverse(expanders) {
          case None => Future.successful(None -> Seq.empty)
          case Some((ExpandIn(ffs), tail, population)) =>
            expandIn(ffs, population, cache, cmwellRDFHelper, typesCache, timeContext).map(
              newPop => adjustResults(newPop, tail)
            )
          case Some((ExpandUp(ffs), tail, population)) =>
            expandUp(ffs,
                     population,
                     cmwellRDFHelper,
                     cache,
                     typesCache,
                     infotons.take(3),
                     ygPattern,
                     chunkSize,
                     timeContext).map(newPop => adjustResults(newPop, tail))
        }
        .flatMap { expanderRetrievedInfotonPairs =>
          val (newExpanders, retrievedInfotons) = expanderRetrievedInfotonPairs.unzip
          val newCache = retrievedInfotons.foldLeft(cache) {
            case (accache, additions) => accache ++ additions.map(i => i.path -> i)
          }
          expandDeeper(newExpanders, newCache)
        }
  }
  PathGraphExpansionParser.getPathsExpansionFunctions(ygPattern).map {
    case PathsExpansion(paths) => {
      val perPathHeadTail = paths.map {
        case PathExpansion(segments) =>
          segments.headOption.map { head =>
            (head, segments.tail, infotons)
          }
      }
      expandDeeper(perPathHeadTail,
                   infotons.map(i => i.path -> i)(
                     scala.collection.breakOut[Seq[Infoton], (String, Infoton), Map[String, Infoton]]
                   ))
    }
  } match {
    case Success(future) => future
    case Failure(error) => Future.failed(error)
  }
}
/** A path addresses an external domain when its first segment (the host part) contains a dot. */
def isPathADomain(path: String): Boolean = {
  val host = path.dropWhile(_ == '/').takeWhile(_ != '/')
  host.indexOf('.') >= 0
}

/** Maps a cm-well path to the URI(s) it may be known by (domain paths get both schemes). */
def pathToUris(protocol: String, path: String): Seq[String] = {
  //TODO When it is safe to undo WombatUpdate2, return this: s"$protocol:/$path"
  if (!isPathADomain(path)) List(s"cmwell:/$path")
  else List(s"http:/$path", s"https:/$path")
}
/**
 * Maps a thrown exception to an HTTP error Result whose JSON body has the shape
 * {"success": false, "error": <message>}.
 *
 * Known, expected failure types get a specific status code; anything else is
 * logged and answered with 500 Internal Server Error.
 */
def exceptionToResponse(throwable: Throwable): Result = {
  // choose a status code plus a message-extraction function per exception type
  val (status, eHandler): (Status, Throwable => String) = throwable match {
    case _: ServerComponentNotAvailableException => ServiceUnavailable -> { _.getMessage }
    case _: TooManyNsRequestsException => ServiceUnavailable -> { _.getMessage }
    case _: ConflictingNsEntriesException => ExpectationFailed -> { _.getMessage }
    case _: TimeoutException => ServiceUnavailable -> { _.getMessage }
    case _: SpaMissingException => ServiceUnavailable -> { _.getMessage }
    case _: UnretrievableIdentifierException => UnprocessableEntity -> { _.getMessage }
    case _: security.UnauthorizedException => Forbidden -> { _.getMessage }
    case _: org.apache.jena.shared.JenaException => BadRequest -> { _.getMessage }
    case _: cmwell.web.ld.exceptions.ParsingException => BadRequest -> { _.getMessage }
    case _: IllegalArgumentException => BadRequest -> { _.getMessage }
    case _: UnsupportedURIException => BadRequest -> { _.getMessage }
    case _: InvalidUriEncodingException => BadRequest -> { _.getMessage }
    case _: BadFieldTypeException => BadRequest -> { _.getMessage }
    case _: com.datastax.driver.core.exceptions.InvalidQueryException => ExpectationFailed -> { _.getMessage }
    case _: com.datastax.driver.core.exceptions.DriverException => ServiceUnavailable -> { _.getMessage }
    // ES wraps the real cause in a RemoteTransportException; inspect the cause for these two
    case e: org.elasticsearch.transport.RemoteTransportException
        if e.getCause.isInstanceOf[org.elasticsearch.action.search.ReduceSearchPhaseException] =>
      ServiceUnavailable -> { _.getCause.getMessage }
    case e: org.elasticsearch.transport.RemoteTransportException
        if e.getCause.isInstanceOf[org.elasticsearch.action.search.SearchPhaseExecutionException] =>
      BadRequest -> { _.getCause.getMessage }
    case e: Throwable => {
      logger.error("unexpected error occurred", e)
      InternalServerError -> { _.getMessage }
    }
  }
  status(Json.obj("success" -> false, "error" -> eHandler(throwable)))
}
/**
 * Resolves the request's "fields" query parameter into the set of internal field
 * keys to keep in the response. Delegates to the String-based overload.
 */
def extractFieldsMask(req: Request[_],
                      cache: PassiveFieldTypesCache,
                      cmwellRDFHelper: CMWellRDFHelper,
                      timeContext: Option[Long])(implicit ec: ExecutionContext): Future[Set[String]] = {
  extractFieldsMask(req.getQueryString("fields"), cache, cmwellRDFHelper, timeContext)
}
/**
 * Resolves a raw fields-mask string into internal field keys.
 *
 * @return an empty set when no mask was supplied; a failed future when the mask
 *         string cannot be parsed or a field key cannot be resolved
 */
def extractFieldsMask(fieldsOpt: Option[String],
                      cache: PassiveFieldTypesCache,
                      cmwellRDFHelper: CMWellRDFHelper,
                      timeContext: Option[Long])(implicit ec: ExecutionContext): Future[Set[String]] = {
  fieldsOpt.map(FieldNameConverter.toActualFieldNames) match {
    case Some(Success(fields)) =>
      travset(fields) {
        // Right: already a concrete field key; Left: still needs resolution via RDF helper
        case Right(x) => Future.successful(x.internalKey)
        case Left(u) => FieldKey.resolve(u, cmwellRDFHelper, timeContext).map(_.internalKey)
      }
    case Some(Failure(e)) => Future.failed(e)
    case None => Future.successful(Set.empty[String])
  }
}
// formats whose output carries no infoton data unless explicitly requested;
// a Set[String] already is a String => Boolean, so no explicit `.apply` is needed
val noDataFormat: String => Boolean = Set("tsv", "tab", "text", "path")
/**
 * Infers the output format from the "format" query parameter (falling back to
 * `defaultFormat`) and whether infoton data should be included: "with-data"
 * defaults to true unless the format carries no data (tsv/tab/text/path), and
 * any explicit value other than "false" enables it.
 */
def extractInferredFormatWithData(req: Request[_], defaultFormat: String = "json"): (String, Boolean) = {
  val format = req.getQueryString("format").getOrElse(defaultFormat)
  val withData = req.getQueryString("with-data").fold(!noDataFormat(format))(_ != "false")
  format -> withData
}
// the platform-dependent line separator, pre-encoded once as a ByteString
val endln = ByteString(cmwell.util.os.Props.endln)
/**
 * Guards a future that may take long to complete by converting it into a Source
 * of filler elements.
 *
 * If `futureThatMayHang` completes within `initialGraceTime`, its value is simply
 * mapped with `backOnTime`. Otherwise a Source is started that emits
 * `prependInjections()` every `injectInterval` until the original future finally
 * completes, at which point the element produced by `injectOriginalFutureWith` is
 * emitted last and the Source ends; `continueWithSource` turns that Source into
 * the final result.
 *
 * @param futureThatMayHang the potentially slow computation
 * @param initialGraceTime  how long to wait before switching to the Source strategy
 * @param injectInterval    period between filler emissions once switched
 */
def guardHangingFutureByExpandingToSource[T, U, V](futureThatMayHang: Future[T],
                                                   initialGraceTime: FiniteDuration,
                                                   injectInterval: FiniteDuration)(
  backOnTime: T => V,
  prependInjections: () => U,
  injectOriginalFutureWith: T => U,
  continueWithSource: Source[U, NotUsed] => V
)(implicit ec: ExecutionContext): Future[V] = {
  val p1 = Promise[V]()
  // fast path: whoever completes p1 first wins — this races against the scheduler below
  p1.tryCompleteWith(futureThatMayHang.map(backOnTime))
  SimpleScheduler.schedule(initialGraceTime) {
    if (!futureThatMayHang.isCompleted) {
      //unfoldAsync(s: S)(f: S => Future[Option[(S,E)]]): Source[E,_]
      //
      // s: initial state
      // f: function that takes the current state to:
      //    a Future of:
      //      None (if it's the end of Source's output)
      //      Some of:
      //        S: next state to operate on
      //        E: next Element to output
      val source = Source.unfoldAsync(false) { isCompleted =>
        {
          if (isCompleted) Future.successful(None)
          else {
            // race per element: either the original future finishes (state -> true, terminal
            // element), or the interval elapses first and a filler element is emitted
            val p2 = Promise[Option[(Boolean, U)]]()
            p2.tryCompleteWith(futureThatMayHang.map(t => Some(true -> injectOriginalFutureWith(t))))
            SimpleScheduler.schedule(injectInterval) {
              p2.trySuccess(Some(false -> prependInjections()))
            }
            p2.future
          }
        }
      }
      p1.trySuccess(continueWithSource(source))
    }
  }
  p1.future
}
/**
 * @param response The original response to be wrapped
 * @return a new response that will drip "\\n" drops every 3 seconds,
 *         UNLESS the original Future[Result] is completed within the first 3 seconds.
 *         One can use the latter case for validation before long execution, and return 400 or so,
 *         prior to the decision of Chunked 200 OK.
 */
def keepAliveByDrippingNewlines(
  response: Future[Result],
  extraHeaders: Seq[(String, String)] = Nil,
  extraTrailers: Future[Seq[(String, String)]] = Future.successful(Nil),
  expandedResponseContentType: Option[String] = None
)(implicit ec: ExecutionContext): Future[Result] = {
  // true iff the trailers future is already completed successfully with an empty seq —
  // in that case no trailer chunk is needed and the plain ByteString path is used
  val isEmptySuccessfulFuture = extraTrailers.value.fold(false)(_.fold(err => false, _.isEmpty))
  if (isEmptySuccessfulFuture)
    // no trailers: drip raw newline ByteStrings, then stream the original body
    guardHangingFutureByExpandingToSource[Result, Source[ByteString, _], Result](response, 7.seconds, 3.seconds)(
      identity,
      () => Source.single(endln),
      _.body.dataStream,
      source => {
        val r = Ok.chunked(source.flatMapConcat(x => x)).withHeaders(extraHeaders: _*)
        expandedResponseContentType.fold(r)(r.as)
      }
    )
  else {
    // trailers present: use HttpChunk framing so a LastChunk with trailers can close the stream
    val prependInjections = () => Source.single(HttpChunk.Chunk(endln))
    val injectOriginalFutureWith: Result => Source[HttpChunk, _] = _.body.dataStream
      .map(HttpChunk.Chunk)
      .concat(
        Source
          .fromFuture(extraTrailers)
          .filter(_.nonEmpty)
          .map(nonEmptyTrailers => HttpChunk.LastChunk(new Headers(nonEmptyTrailers)))
      )
    val continueWithSource: Source[Source[HttpChunk, _], NotUsed] => Result = source => {
      Result(
        header = ResponseHeader(200),
        body = HttpEntity.Chunked(
          source.flatMapConcat(x => x),
          expandedResponseContentType
        )
      ).withHeaders(extraHeaders: _*)
    }
    guardHangingFutureByExpandingToSource[Result, Source[HttpChunk, _], Result](response, 7.seconds, 3.seconds)(
      identity,
      prependInjections,
      injectOriginalFutureWith,
      continueWithSource
    )
  }
}
/**
 * Renders a formattable via the given formatter into a UTF-8 ByteString,
 * guaranteeing exactly one trailing platform line separator.
 */
def formattableToByteString(formatter: Formatter)(i: Formattable) = {
  val body = formatter.render(i)
  if (body.endsWith(cmwell.util.os.Props.endln)) ByteString(body, "utf-8")
  else ByteString(body, "utf-8") ++ endln
}
// format: off
// scalastyle:off
/**
 * Builds a regex string matching a `/meta/<op>#` URI prefix, either under the
 * internal `cmwell:` scheme (no domain) or under `http`/`https` with a
 * syntactically valid domain and an optional port. See the step-by-step
 * breakdown in the comment below.
 *
 * @param metaOpType the meta namespace, e.g. "sys" or "ns"
 */
def metaOpRegex(metaOpType: String): String = {
  /* ******************* *
   * REGEX explanation:  *
   * ******************* *
  (                      1. start a group to capture protocol + optional domain
   (cmwell:/)            2. if protocol is `cmwell:` no need to specify domain
   |                     3. OR
   (https?://            4. if protocol is `http:` or `https:` (we need a domain)
    (                    5. start a group to capture multiple domain parts that end with a '.'
     (?!-)               6. domain part cannot begin with hyphen '-'
     [A-Za-z0-9-]{1,63}  7. domain part is composed of letters, digits & hyphen and can be in size of at least 1 but less than 64
     (?<!-)              8. domain part cannot end with an hyphen (look back)
    \\.)*                9. all parts except the last must end with a dot '.'
    (                   10. start same regex as before for the last domain part which must not end with a dot '.'
     (?!-)              11. same as 6
     [A-Za-z0-9-]{1,63} 12. same as 7
     (?<!-)             13. same as 8
    )                   14. end domain group
    (:\\d+)?            15. optional port
   )                    16. end http/https with domain and optional port group
  )                     17. end cmwell OR http group
  (/meta/$op#)          18. path must be `/meta/sys#` / `/meta/ns#` / etc'...
   */
  // FIX: use `https?://` (was `http://`) — the explanation above documents https
  // support, and pathToUris produces https URIs that must also match this pattern
  """((cmwell:/)|(https?://((?!-)[A-Za-z0-9-]{1,63}(?<!-)\\.)*((?!-)[A-Za-z0-9-]{1,63}(?<!-))(:\\d+)?))(/meta/""" + metaOpType + """#)"""
}
// scalastyle:on
// format: on
/**
 * Wraps a tracking PathStatus as a virtual infoton whose single field
 * "trackingStatus" carries the status name, so it can be rendered like any
 * other infoton.
 */
def pathStatusAsInfoton(ps: PathStatus): Infoton = {
  val PathStatus(path, status) = ps
  val fields: Option[Map[String, Set[FieldValue]]] = Some(Map("trackingStatus" -> Set(FString(status.toString))))
  VirtualInfoton(ObjectInfoton(path, Settings.dataCenter, None, fields = fields, protocol = None))
}
/**
 * Resolves a Formatter for the request's "format" query parameter, falling back
 * to `defaultFormat` when absent.
 *
 * @throws IllegalArgumentException when the requested format is not recognised
 */
def getFormatter(request: Request[_],
                 formatterManager: FormatterManager,
                 defaultFormat: String,
                 withoutMeta: Boolean = false): Formatter =
  request.getQueryString("format").getOrElse(defaultFormat) match {
    case FormatExtractor(formatType) =>
      formatterManager.getFormatter(
        format = formatType,
        timeContext = request.attrs.get(Attrs.RequestReceivedTimestamp),
        host = request.host,
        uri = request.uri,
        // "pretty" and "callback" are flag-style parameters taken straight off the query string
        pretty = request.queryString.keySet("pretty"),
        callback = request.queryString.get("callback").flatMap(_.headOption),
        withData = request.getQueryString("with-data"),
        withoutMeta = withoutMeta
      )
    case unknownFormat => throw new IllegalArgumentException(s"Format $unknownFormat is not supported")
  }
/**
 * Tells whether a (prefix, key) pair names one of cm-well's built-in system
 * properties (system/link/content metadata) rather than a user field.
 */
def isSystemProperty(firstDotLast: (String, String)): Boolean = {
  val systemPairs: Set[(String, String)] = Set(
    ("system.parent", "parent_hierarchy"),
    ("system.parent", "parent"),
    ("system", "lastModified"),
    ("system", "kind"),
    ("system", "path"),
    ("system", "uuid"),
    ("system", "quad"),
    ("system", "dc"),
    ("system", "indexTime"),
    ("system", "current"),
    ("system", "parent"),
    ("system", "protocol"),
    ("link", "to"),
    ("link", "kind"),
    ("content", "data"),
    ("content", "mimeType"),
    ("content", "length")
  )
  systemPairs(firstDotLast)
}
// synchronous recovery handler: maps any throwable to an HTTP error Result
val errorHandler: PartialFunction[Throwable, Result] = { case t => exceptionToResponse(t) }
// asynchronous variant of errorHandler: maps any throwable to an already-completed
// successful Future of the corresponding HTTP error Result
val asyncErrorHandler: PartialFunction[Throwable, Future[Result]] = {
  case t => Future.successful(exceptionToResponse(t))
}
}
| bryaakov/CM-Well | server/cmwell-ws/app/wsutil/package.scala | Scala | apache-2.0 | 51,146 |
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.iterators
import java.util.Date
import com.vividsolutions.jts.geom.Geometry
import org.geotools.feature.simple.SimpleFeatureBuilder
import org.geotools.filter.text.ecql.ECQL
import org.locationtech.geomesa.core._
import org.locationtech.geomesa.core.index._
import org.locationtech.geomesa.core.iterators.IteratorExtensions.OptionMap
import org.locationtech.geomesa.core.transform.TransformCreator
import org.locationtech.geomesa.feature.FeatureEncoding.FeatureEncoding
import org.locationtech.geomesa.feature.ScalaSimpleFeatureFactory
import org.locationtech.geomesa.feature.{FeatureEncoding, SimpleFeatureDecoder, SimpleFeatureEncoder}
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
/**
 * Defines common iterator functionality in traits that can be mixed-in to iterator implementations
 */
trait IteratorExtensions {

  // called once per iterator instance with the parsed feature type and the raw option map;
  // each stackable trait overrides this to perform its own configuration
  def init(featureType: SimpleFeatureType, options: OptionMap)
}
object IteratorExtensions {
  // raw iterator configuration as handed over by the iterator framework
  type OptionMap = java.util.Map[String, String]
}
/**
 * We need a concrete class to mix the traits into. This way they can share a common 'init' method
 * that will be called for each trait. See http://stackoverflow.com/a/1836619
 */
class HasIteratorExtensions extends IteratorExtensions {

  // no-op base case; each stackable trait calls super.init before its own setup
  override def init(featureType: SimpleFeatureType, options: OptionMap) = {}
}
/**
 * Provides a feature type based on the iterator config
 */
trait HasFeatureType {

  var featureType: SimpleFeatureType = null

  // feature type config
  def initFeatureType(options: OptionMap) = {
    // the type name defaults to the concrete iterator's class name when not configured
    val sftName = Option(options.get(GEOMESA_ITERATORS_SFT_NAME)).getOrElse(this.getClass.getSimpleName)
    featureType = SimpleFeatureTypes.createType(sftName,
      options.get(GEOMESA_ITERATORS_SIMPLE_FEATURE_TYPE))
    // restore user-data (e.g. schema hints) serialized alongside the spec
    featureType.decodeUserData(options, GEOMESA_ITERATORS_SIMPLE_FEATURE_TYPE)
  }
}
/**
 * Provides an index value decoder
 */
trait HasIndexValueDecoder extends IteratorExtensions {

  var indexEncoder: IndexValueEncoder = null

  // index value encoder/decoder
  abstract override def init(featureType: SimpleFeatureType, options: OptionMap) = {
    super.init(featureType, options)
    // the index-value schema travels in the options; it reuses the feature type's name
    val indexValues = SimpleFeatureTypes.createType(featureType.getTypeName,
      options.get(GEOMESA_ITERATORS_SFT_INDEX_VALUE))
    indexEncoder = IndexValueEncoder(indexValues)
  }
}
/**
 * Provides a feature builder and a method to create a feature from an index value
 */
trait HasFeatureBuilder extends HasFeatureType {

  import org.locationtech.geomesa.core.iterators.IteratorTrigger._

  private var featureBuilder: SimpleFeatureBuilder = null

  // attribute indices resolved lazily from the configured feature type
  private lazy val geomIdx = featureType.indexOf(featureType.getGeometryDescriptor.getLocalName)
  private lazy val sdtgIdx = featureType.startTimeName.map(featureType.indexOf).getOrElse(-1)
  private lazy val edtgIdx = featureType.endTimeName.map(featureType.indexOf).getOrElse(-1)
  private lazy val hasDtg  = sdtgIdx != -1 || edtgIdx != -1

  // scratch buffer reused across features (safe: the builder copies it)
  private lazy val attrArray = Array.ofDim[AnyRef](featureType.getAttributeCount)

  override def initFeatureType(options: OptionMap) = {
    super.initFeatureType(options)
    featureBuilder = ScalaSimpleFeatureFactory.featureBuilder(featureType)
  }

  // Builds a SimpleFeature directly from a decoded index value
  def encodeIndexValueToSF(value: DecodedIndexValue): SimpleFeature = {
    // Build and fill the Feature. This offers some performance gain over building and then setting the attributes.
    featureBuilder.buildFeature(value.id, attributeArray(featureBuilder.getFeatureType, value))
  }

  /**
   * Construct and fill an array of the SimpleFeature's attribute values
   */
  def attributeArray(sft: SimpleFeatureType, indexValue: DecodedIndexValue): Array[AnyRef] = {
    val attrArray = new Array[AnyRef](sft.getAttributeCount)
    indexValue.attributes.foreach { case (name, value) =>
      // attributes not present in the target type are silently skipped
      val index = sft.indexOf(name)
      if (index != -1) {
        attrArray.update(index, value.asInstanceOf[AnyRef])
      }
    }
    attrArray
  }

  /**
   * Construct and fill an array of the SimpleFeature's attribute values
   * Reuse attrArray as it is copied inside of feature builder anyway
   *
   * NOTE(review): this helper (and `hasDtg` above) appears unused within this
   * trait — possibly dead code or used via reflection/subclass; confirm before removing
   */
  private def fillAttributeArray(geomValue: Geometry, date: Option[Date]) = {
    // always set the mandatory geo element
    attrArray(geomIdx) = geomValue
    // if dtgDT exists, attempt to fill the elements corresponding to the start and/or end times
    date.foreach { time =>
      if (sdtgIdx != -1) attrArray(sdtgIdx) = time
      if (edtgIdx != -1) attrArray(edtgIdx) = time
    }
  }
}
/**
 * Provides a feature encoder and decoder
 */
trait HasFeatureDecoder extends IteratorExtensions {

  var featureDecoder: SimpleFeatureDecoder = null
  var featureEncoder: SimpleFeatureEncoder = null
  val defaultEncoding = org.locationtech.geomesa.core.data.DEFAULT_ENCODING.toString

  // feature encoder/decoder
  abstract override def init(featureType: SimpleFeatureType, options: OptionMap) = {
    super.init(featureType, options)
    // this encoder is for the source sft
    val encodingOpt = Option(options.get(FEATURE_ENCODING)).getOrElse(defaultEncoding)
    featureDecoder = SimpleFeatureDecoder(featureType, encodingOpt)
    featureEncoder = SimpleFeatureEncoder(featureType, encodingOpt)
  }
}
/**
 * Provides a spatio-temporal filter (date and geometry only) if the iterator config specifies one
 */
trait HasSpatioTemporalFilter extends IteratorExtensions {

  private var filterOption: Option[Filter] = None
  private var dateAttributeIndex: Option[Int] = None
  // reusable feature instance mutated per evaluation (iterators are single-threaded)
  private var testSimpleFeature: Option[SimpleFeature] = None

  // Predicate over (geometry, optional timestamp millis); None when no filter was
  // configured or the configured filter is the match-all Filter.INCLUDE
  lazy val stFilter: Option[(Geometry, Option[Long]) => Boolean] =
    for (filter <- filterOption.filterNot(_ == Filter.INCLUDE); feat <- testSimpleFeature) yield {
      (geom: Geometry, olong: Option[Long]) => {
        feat.setDefaultGeometry(geom)
        dateAttributeIndex.foreach { i =>
          olong.map(new Date(_)).foreach(feat.setAttribute(i, _))
        }
        filter.evaluate(feat)
      }
    }

  // spatio-temporal filter config
  abstract override def init(featureType: SimpleFeatureType, options: OptionMap) = {
    super.init(featureType, options)
    dateAttributeIndex = getDtgFieldName(featureType).map(featureType.indexOf)
    if (options.containsKey(ST_FILTER_PROPERTY_NAME)) {
      val filterString = options.get(ST_FILTER_PROPERTY_NAME)
      filterOption = Some(ECQL.toFilter(filterString))
      val sfb = new SimpleFeatureBuilder(featureType)
      testSimpleFeature = Some(sfb.buildFeature("test"))
    }
  }
}
/**
 * Provides an arbitrary ECQL filter if the iterator config specifies one.
 */
trait HasEcqlFilter extends IteratorExtensions {

  private var parsedFilter: Option[Filter] = None

  // predicate over a feature; None when no filter was configured or the configured
  // filter is the match-all Filter.INCLUDE
  lazy val ecqlFilter: Option[(SimpleFeature) => Boolean] =
    parsedFilter match {
      case Some(f) if f != Filter.INCLUDE => Some((sf: SimpleFeature) => f.evaluate(sf))
      case _                              => None
    }

  // parse the ECQL filter string out of the options, when supplied
  abstract override def init(featureType: SimpleFeatureType, options: OptionMap) = {
    super.init(featureType, options)
    if (options.containsKey(GEOMESA_ITERATORS_ECQL_FILTER))
      parsedFilter = Some(ECQL.toFilter(options.get(GEOMESA_ITERATORS_ECQL_FILTER)))
  }
}
/**
 * Provides a feature type transformation if the iterator config specifies one
 */
trait HasTransforms extends IteratorExtensions {

  import org.locationtech.geomesa.core.data.DEFAULT_ENCODING

  private var targetFeatureType: Option[SimpleFeatureType] = None
  private var transformString: Option[String] = None
  private var transformEncoding: FeatureEncoding = null

  // maps a source feature to the encoded bytes of its transformed form;
  // None unless both a target schema and a transform expression were configured
  lazy val transform: Option[(SimpleFeature) => Array[Byte]] =
    for { featureType <- targetFeatureType; string <- transformString } yield {
      val transform = TransformCreator.createTransform(featureType, transformEncoding, string)
      (sf: SimpleFeature) => transform(sf)
    }

  // feature type transforms
  abstract override def init(featureType: SimpleFeatureType, options: OptionMap) = {
    super.init(featureType, options)
    if (options.containsKey(GEOMESA_ITERATORS_TRANSFORM_SCHEMA)) {
      val transformSchema = options.get(GEOMESA_ITERATORS_TRANSFORM_SCHEMA)
      targetFeatureType = Some(SimpleFeatureTypes.createType(this.getClass.getCanonicalName, transformSchema))
      targetFeatureType.foreach(_.decodeUserData(options, GEOMESA_ITERATORS_TRANSFORM_SCHEMA))
      transformString = Option(options.get(GEOMESA_ITERATORS_TRANSFORM))
      transformEncoding = Option(options.get(FEATURE_ENCODING)).map(FeatureEncoding.withName(_))
        .getOrElse(DEFAULT_ENCODING)
    }
  }
}
/**
 * Provides deduplication if the iterator config specifies it
 */
trait HasInMemoryDeduplication extends IteratorExtensions {

  private var deduplicate: Boolean = false

  // each thread maintains its own (imperfect!) list of the unique identifiers it has seen
  private var maxInMemoryIdCacheEntries = 10000
  private var inMemoryIdCache: java.util.HashSet[String] = null

  /**
   * Returns a local estimate as to whether the current identifier
   * is likely to be a duplicate.
   *
   * Because we set a limit on how many unique IDs will be preserved in
   * the local cache, a TRUE response is always accurate, but a FALSE
   * response may not be accurate. (That is, this cache allows for false-
   * negatives, but no false-positives.) We accept this, because there is
   * a final, client-side filter that will eliminate all duplicate IDs
   * definitively. The purpose of the local cache is to reduce traffic
   * through the remainder of the iterator/aggregator pipeline as quickly as
   * possible.
   *
   * @return False if this identifier is in the local cache; True otherwise
   */
  lazy val checkUniqueId: Option[(String) => Boolean] =
    Some(deduplicate).filter(_ == true).map { _ =>
      (id: String) =>
        // once the cache is full, everything is treated as "unique" (None.forall == true)
        Option(id)
          .filter(_ => inMemoryIdCache.size < maxInMemoryIdCacheEntries)
          .forall(inMemoryIdCache.add(_))
    }

  abstract override def init(featureType: SimpleFeatureType, options: OptionMap) = {
    super.init(featureType, options)
    // check for dedupe - we don't need to dedupe for density queries
    if (!options.containsKey(GEOMESA_ITERATORS_IS_DENSITY_TYPE)) {
      deduplicate = IndexSchema.mayContainDuplicates(featureType)
      if (deduplicate) {
        if (options.containsKey(DEFAULT_CACHE_SIZE_NAME)) {
          maxInMemoryIdCacheEntries = options.get(DEFAULT_CACHE_SIZE_NAME).toInt
        }
        inMemoryIdCache = new java.util.HashSet[String](maxInMemoryIdCacheEntries)
      }
    }
  }
} | kevinwheeler/geomesa | geomesa-core/src/main/scala/org/locationtech/geomesa/core/iterators/IteratorExtensions.scala | Scala | apache-2.0 | 11,287 |
package org.scalacoin.script.interpreter.testprotocol
import org.scalacoin.protocol.script.{ScriptPubKey, ScriptSignature}
/**
 * Created by chris on 1/18/16.
 * This represents a core test case for valid and invalid scripts
 * the scripts can be seen in the ../script_valid.json and ../script_invalid.json
 * files.
 */
trait CoreTestCase {

  // the scriptSig column of the test case
  def scriptSig : ScriptSignatureCoreTestCase

  // the scriptPubKey column of the test case
  def scriptPubKey : ScriptPubKeyCoreTestCase

  // script verification flags, as a single string (see the json files)
  def flags : String

  // human-readable description carried by the test case
  def comments : String

  // the raw json line this test case was parsed from
  def raw : String
}
/** Plain data implementation of [[CoreTestCase]]. */
case class CoreTestCaseImpl(scriptSig : ScriptSignatureCoreTestCase,
  scriptPubKey: ScriptPubKeyCoreTestCase, flags : String, comments : String, raw : String) extends CoreTestCase
| TomMcCabe/scalacoin | src/test/scala/org/scalacoin/script/interpreter/testprotocol/CoreTestCase.scala | Scala | mit | 684 |
package scalacookbook.chapter10
/**
* Created by liguodong on 2016/7/26.
*/
// Demo script: walks through reduceLeft/foldLeft/reduceRight/scanLeft and their
// parallel (.par) counterparts, printing each step.
object WalkThroughCollectionWithReduceFold extends App {

  val a = Array(12, 6, 15, 2, 20, 9)
  val sum = a.reduceLeft(_ + _)
  println(sum)
  println(a.reduceLeft((x,y) => x + y))
  println(a.reduceLeft(_ * _))
  println(a.reduceLeft(_ min _)+" , "+a.reduceLeft(_ max _))
  println("---------------------")

  //Show each step in the process
  // returns the max of the two elements
  val findMax = (x: Int, y: Int) => {
    val winner = x max y
    println(s"compared $x to $y, $winner was larger")
    winner
  }
  a.reduceLeft(findMax)
  println("-------------------")

  // hand-rolled equivalent of reduceLeft, for illustration
  val seq = Array(2,3,5,10,6,7);
  def f(a:Int,b:Int):Int = {
    var max = a;
    if(b>max) max = b;
    max
  }
  // you provide the sequence 'seq' and the function 'f'
  var result = seq(0)
  for (i <- 1 until seq.length) {
    val next = seq(i)
    result = f(result, next)
  }
  println(result)
  println("----------------")

  //Working with other sequences and types
  val peeps = Vector("al", "hannah", "emily", "christina", "aleka")
  // longest
  val longStr = peeps.reduceLeft((x,y) => if (x.length > y.length) x else y)
  println(longStr)
  // shortest
  val shortStr = peeps.reduceLeft((x,y) => if (x.length < y.length) x else y)
  println(shortStr)

  //foldLeft, reduceRight, and foldRight
  val aa = Array(1, 2, 3)
  println(aa.reduceLeft(_ + _))
  println(aa.foldLeft(20)(_ + _))
  println(aa.foldLeft(100)(_ + _))

  //The difference between reduceLeft and reduceRight
  val divide = (x: Double, y: Double) => {
    val result = x / y
    println(s"divided $x by $y to yield $result")
    result
  }
  val b = Array(1.0, 2.0, 3.0)
  val leftDiv = b.reduceLeft(divide)
  // folds from the left: ((1.0 / 2.0) / 3.0)
  println(leftDiv)
  val rigthDiv = b.reduceRight(divide)
  // folds from the right: (1.0 / (2.0 / 3.0))
  println(rigthDiv)
  println("++++++++++++++++++++++")

  //scanLeft and scanRight
  val product = (x: Int, y: Int) => {
    val result = x * y
    println(s"multiplied $x by $y to yield $result")
    result
  }
  val c = Array(1, 2, 3)
  c.scanLeft(10)(product).foreach(x=>print(x+" "))
  println()
  val scan = c.scanLeft(10)(product)
  println(scan.toList)
  println("++++++++++++++++++++++")

  val findMax2 = (x: Int, y: Int) => {
    Thread.sleep(10)
    val winner = x max y
    println(s"compared $x to $y, $winner was larger")
    winner
  }
  // parallel collections via .par
  val a2 = Array.range(0,50)
  a2.par.reduce(findMax2) // elements are compared in a non-deterministic order
  println(a2.par)
  val sumPar = a.par.reduce(_ + _)
  println(sumPar)
  // collect the names of the threads participating in the (parallel) computation
  println((0 to 10000).collect{case _ => Thread.currentThread.getName}.distinct.toList)
  println((0 to 10000).par.collect{case _ => Thread.currentThread.getName}.distinct.toList)
}
| liguodongIOT/java-scala-mix-sbt | src/main/scala/scalacookbook/chapter10/WalkThroughCollectionWithReduceFold.scala | Scala | apache-2.0 | 2,824 |
package com.sksamuel.elastic4s.requests.alias
import com.sksamuel.elastic4s.Indexes
import com.sksamuel.exts.OptionImplicits._
/**
 * Request to fetch index aliases.
 *
 * @param indices           the indices to fetch aliases for
 * @param aliases           optional alias names to restrict the response to
 * @param ignoreUnavailable when set, controls the `ignore_unavailable` flag
 */
case class GetAliasesRequest(indices: Indexes, aliases: Seq[String] = Nil, ignoreUnavailable: Option[Boolean] = None) {

  /** Returns a copy of this request with `ignore_unavailable` set. */
  def ignoreUnavailable(ignore: Boolean): GetAliasesRequest = copy(ignoreUnavailable = Some(ignore))
}
| stringbean/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/alias/GetAliasesRequest.scala | Scala | apache-2.0 | 351 |
package ucesoft.cbm.cpu.asm
import util.parsing.combinator._
import scala.util.parsing.input.{Position, Positional}
/** AST node definitions and shared tables for the 65xx assembler parser. */
object AsmParser {
  import ucesoft.cbm.cpu.CPU65xx._
  import Mode._

  // every mnemonic, accepted in both lower and upper case
  private val OPCODES = Instruction.values.map { _.toString.toLowerCase } ++ (Instruction.values map { _.toString.toUpperCase })
  // mnemonics using relative addressing (branch instructions)
  private val BRANCHES = OP_MATRIX flatMap { r => r.filter { o => o._2 == REL } } map { _._1.toString }

  // Base of all parsed statements; carries source position and originating file name
  sealed trait Statement extends Positional {
    var fileName : Option[String] = None
  }
  sealed trait ASMStatement extends Statement

  // a label optionally qualified by the module it was declared in
  case class ModuleLabel(label:String,module:Option[String])
  case class EvalTarget(variable:ModuleLabel, fieldSelector: Option[List[String]])

  // Expressions
  sealed trait Expr extends Positional {
    // optional byte-size hint attached via the `expr : n` syntax
    var sizeHint : Option[Int] = None
  }
  case object Null extends Expr
  case class Str(value: String) extends Expr
  case class Value(value: Double) extends Expr
  case class ListValue(elements: List[Expr], isRange: Boolean = false) extends Expr
  case class MapValue(keyVals:List[ListValue]) extends Expr
  case class Label(label: String,module:Option[String]) extends Expr
  case class BinOp(op: String, op1: Expr, op2: Expr) extends Expr
  case class UnaryOp(op: String, op1: Expr, isPre: Boolean = true) extends Expr
  case class FunOp(fun: String,module:Option[String],args: List[Expr]) extends Expr
  case class NewStruct(name: String,module:Option[String],values: Array[Expr]) extends Expr
  case class FieldOrMethod(e:Expr,name:String,pars:List[Expr]) extends Expr

  // Statements
  case class LABELED(label: String) extends Statement
  case class CONST(name: String, value: Expr,isPrivate:Boolean) extends Statement
  case class VAR(name: String, initValue: Expr,isPrivate:Boolean) extends Statement
  case class PRINT(what: Expr) extends Statement
  case class EVAL(assignTo: Option[EvalTarget], value: Expr) extends Statement
  case class IF(cond: Expr, thenClause: List[Statement], elseClause: List[Statement]) extends Statement
  case class STRUCT(name: String, fields: List[String],isPrivate:Boolean) extends Statement
  case class ENUM(fields: List[(String, Option[Int])]) extends Statement
  case class WHILE(cond: Expr, body: List[Statement]) extends Statement
  case class FOR(vars: List[VAR], cond: Expr, post: List[Statement], body: List[Statement]) extends Statement
  case class MACRO(name:String,parameters:List[String],body:List[Statement]) extends Statement
  case class MACRO_CALL(name:String,actualParameters:List[Expr]) extends Statement
  case class FUNCTION(name:String,parameters:List[String],body:List[Statement],isPrivate:Boolean) extends Statement
  case class DECLARE_LABEL(name:String,value:Expr) extends Statement
  case object BREAK extends Statement
  case class DUP(times:Expr, body: List[Statement]) extends Statement
  case class ALIGN(alignTo:Expr) extends Statement
  case class ERROR(msg:Expr) extends Statement
  case class MODULE(name:String,body:List[Statement]) extends Statement
  case class INCLUDE(file:String) extends Statement
  case class DISCARDEXPR(expr:Expr) extends Statement

  // ASMStatement
  // how an ORG section emits bytes: normally, virtually (no emission) or patching
  sealed trait ORGType
  case object Default extends ORGType
  case object Virtual extends ORGType
  case object Patch extends ORGType
  case class ORG(org: Expr, sectionName: Option[Expr],virtual:ORGType = Default) extends ASMStatement
  case class WORD(values: List[Expr], isByte: Boolean = true) extends ASMStatement
  case class BYTE_LIST(list: Expr) extends ASMStatement
  case class FILL(size:Expr,value:Option[Expr]) extends ASMStatement
  case class WORD_GEN(gen: FunOp, isByte: Boolean = true) extends ASMStatement
  case class TEXT(text: Expr,enc:TEXTEncoding) extends ASMStatement
  case class ASM(opcode: String, var mode: Mode.MODE, operand: Option[Expr]) extends ASMStatement
  case class TEXTEncoding(upper:Boolean,screenCode:Boolean)

  // placeholder for an assembly line that was commented out; keeps only the line number
  case class COMMENTED_ASM(_line:Int) extends ASMStatement {
    setPos(new Position {
      override def line: Int = _line
      override def column: Int = 1
      override protected def lineContents: String = ""
    })
  }
}
class AsmParser(fileName:String) extends JavaTokenParsers {
import AsmParser._
import ucesoft.cbm.cpu.CPU65xx._
import Mode._
import Instruction._
private val other_opcodes: Parser[String] = OPCODES.drop(1).foldLeft(OPCODES.head: Parser[String]) { (p, b) => p | b }
override val whiteSpace = "[ \\t]+".r
// =================================================================
private def nl : Parser[String] = "[\\n\\r]*".r
private def lineComment: Parser[String] = "(//|;).*".r ^^ { s => if (s == ";") s.substring(1) else s.substring(2) }
private def multilineComment: Parser[String] = """/\\*[^\\*]*\\*(\\*|[^\\*/][^\\*]*\\*)*/""".r ^^ { c => c.substring(2, c.length - 2) }
private def comment: Parser[String] = lineComment | multilineComment
// =========================== EXPR ================================
private def label: Parser[String] = ident
private def moduleLabel : Parser[ModuleLabel] = label ~ opt("::" ~> label) ^^ {
case l ~ None => ModuleLabel(l,None)
case m ~ Some(l) => ModuleLabel(l,Some(m))
}
private def decNumber: Parser[Int] = "[0-9]{1,5}".r ^^ { i => i.toInt }
private def hexWord: Parser[Int] = "\\\\$[0-9A-Fa-f]{1,4}".r ^^ { i => Integer.parseInt(i.substring(1), 16) }
private def binWord: Parser[Int] = "%[01]{1,16}".r ^^ { i => Integer.parseInt(i.substring(1), 2) }
private def float: Parser[Double] = floatingPointNumber ^^ { _.toDouble }
// Numeric literal: a floating-point literal, or a decimal/hex/binary word.
// NOTE(review): the integer forms go through .toFloat, which loses precision
// above 2^24 — confirm that is acceptable for address-sized values.
private def number: Parser[Double] = float | (decNumber | hexWord | binWord) ^^ { _.toFloat }
// String literal with the surrounding quotes stripped.
private def string: Parser[String] = stringLiteral ^^ { s => s.substring(1, s.length - 1) }
//===========================================================================
// Top-level expression; an optional trailing ":<n>" attaches a size hint
// (e.g. to force the emitted operand width) to the parsed expression.
private def expr: Parser[Expr] = positioned {
expr2 ~ opt(":" ~> decNumber) ^^ {
case expr ~ None => expr
case expr ~ Some(size) =>
expr.sizeHint = Some(size)
expr
}
}
// Bitwise level: '^', '&', '|' folded left-associatively into BinOp nodes.
private def expr2: Parser[Expr] =
logical ~ rep("^" ~ logical | "&" ~ logical | "|" ~ logical) ^^ {
case s ~ l => l.foldLeft(s) { case (acc, op ~ f) => BinOp(op, acc, f) }
}
// Equality level ('==' / '!=') plus an optional "? then : else" ternary,
// which is desugared into a call to the built-in "if" function.
private def logical: Parser[Expr] = positioned {
logical2 ~ rep("==" ~ logical2 | "!=" ~ logical2) ~ opt("?" ~> (expr ~ (":" ~> expr))) ^^ {
case l1 ~ l ~ ifclause =>
val cond = l.foldLeft(l1) { case (acc, op ~ f) => BinOp(op, acc, f) }
ifclause match {
case None =>
cond
case Some(t ~ f) =>
FunOp("if",None,List(cond, t, f))
}
}
}
// Shift and relational operators, left-associative.
private def logical2: Parser[Expr] =
not ~ rep("<<" ~ not | ">>" ~ not | ">=" ~ not | "<=" ~ not | ">" ~ not | "<" ~ not) ^^ {
case s ~ l => l.foldLeft(s) { case (acc, op ~ f) => BinOp(op, acc, f) }
}
// Optional logical-not prefix.
private def not: Parser[Expr] =
opt("!") ~ sum ^^ {
case None ~ s => s
case Some(n) ~ s => UnaryOp(n, s)
}
// Additive level: '+' and '-'.
private def sum: Parser[Expr] =
term ~ rep("+" ~ term | "-" ~ term) ^^ {
case f ~ l => l.foldLeft(f) { case (acc, op ~ f) => BinOp(op, acc, f) }
}
// Multiplicative level: '*', '/', '%'.
private def term: Parser[Expr] = factorFieldOrMethod ~ rep("*" ~ factorFieldOrMethod | "/" ~ factorFieldOrMethod | "%" ~ factorFieldOrMethod) ^^ {
case f ~ l => l.foldLeft(f) { case (acc, op ~ f) => BinOp(op, acc, f) }
}
// Postfix ".field" / ".method(args)" chains, folded into FieldOrMethod nodes.
private def factorFieldOrMethod : Parser[Expr] =
factor ~ rep("." ~> fieldOrMethod) ^^ {
case e~Nil => e
case e~l =>
l.foldLeft(e) { (acc,f) =>
f match {
case Label(l,None) =>
FieldOrMethod(acc,l,Nil)
case FunOp(fn,None,pars) =>
FieldOrMethod(acc,fn,pars)
case _ => throw new IllegalArgumentException // TODO
}
}
}
// A member access is either a method call or a plain field label.
private def fieldOrMethod : Parser[Expr] = function | label ^^ { Label(_,None) }
// Atoms: null, list generators, maps, lists, '<'/'>' (lsb/msb of an operand),
// function calls, struct literals, string/number literals, parenthesised
// expressions, unary minus, and pre/post increment/decrement of labels.
private def factor: Parser[Expr] =
"null" ^^ { n => Null } |
generate |
map |
list |
"<" ~> factor ^^ { e => FunOp("lsb",None, e :: Nil) } |
">" ~> factor ^^ { e => FunOp("msb",None, e :: Nil) } |
function |
newStruct |
string ^^ { Str(_) } |
number ^^ { Value(_) } |
"(" ~> expr <~ ")" ^^ { e => e } |
"-" ~> expr ^^ { UnaryOp("-", _) } |
("++" | "--") ~ moduleLabel ^^ { case op ~ l => UnaryOp(op, Label(l.label,l.module)) } |
//structField |
moduleLabel ~ opt("++" | "--") ^^ {
case l ~ None => Label(l.label,l.module)
case l ~ Some(op) => UnaryOp(op, Label(l.label,l.module), false)
}
// Map literal: "#[ ... ]" containing a comma-separated sequence of lists.
private def map : Parser[Expr] = "#[" ~ nl ~ "]" ^^ { _ => MapValue(Nil) } |
"#[" ~> repsep(list, (nl ~ "," ~ nl)) <~ "]" ~ nl ^^ { MapValue(_) }
// List literal: empty "[]", a range "[from .. to, step]" (step defaults to 1),
// or an explicit comma-separated element list.
private def list: Parser[ListValue] = "[" ~ nl ~ "]" ^^ { _ => ListValue(Nil) } |
("[" ~> expr <~ "..") ~ expr ~ opt("," ~> expr) <~ "]" ^^ {
case (from ~ to) ~ step =>
ListValue(List(from, to, step.getOrElse(Value(1))), true)
} |
"[" ~> repsep(expr, (nl ~ "," ~ nl)) <~ "]" ~ nl ^^ { ListValue(_) }
// Function call: possibly module-qualified name followed by "(args)".
private def function: Parser[FunOp] = moduleLabel ~ ("(" ~> repsep(expr, ",") <~ ")") ^^ { case ModuleLabel(l,m) ~ args => FunOp(l,m, args) }
// Struct literal: possibly module-qualified struct name followed by "{values}".
private def newStruct: Parser[NewStruct] = moduleLabel ~ ("{" ~> repsep(expr, ",") <~ "}") ^^ { case ModuleLabel(l,m) ~ vals => NewStruct(l,m,vals.toArray) }
// List-comprehension style generator: "[x <- source || body]", desugared into
// a call to the built-in "gen" function.
private def generate: Parser[FunOp] = ("[" ~> label <~ "<-") ~ (expr ~ ("||" ~> expr <~ "]")) ^^ {
case v ~ (l ~ e) => FunOp("gen",None, List(Str(v), l, e))
}
// ================ ASM STATEMENT ==================================
// Origin directive: ".pc =", "* =" or "org"; an optional 'virtual'/'patch'
// keyword selects the ORG variant, an optional trailing expr is kept as-is.
private def org: Parser[ASMStatement] = ((".pc" | "*") ~ "=" | "org") ~> (expr ~ opt("virtual" | "patch") ~ opt(expr)) ^^ {
case addr ~ None ~ l => ORG(addr, l)
case addr ~ Some("virtual") ~ l => ORG(addr, l,Virtual)
case addr ~ Some("patch") ~ l => ORG(addr, l,Patch)
}
// Text directive with an optional encoding selector; the two TEXTEncoding
// booleans select upper-case and screen-code variants respectively.
private def text: Parser[ASMStatement] = (".text" | ".t" | "!text") ~> opt("ascii"|"screen"|"upper-ascii"|"upper-screen") ~ expr ^^ {
case enc~e => enc match {
case Some("ascii")|None => TEXT(e,TEXTEncoding(false,false))
case Some("screen") => TEXT(e,TEXTEncoding(false,true))
case Some("upper-screen") => TEXT(e,TEXTEncoding(true,true))
case Some("upper-ascii") => TEXT(e,TEXTEncoding(true,false))
case Some(e) => throw new CompilerException(s"Invalid encoding: $e",None)
}
}
// Byte data directives. NOTE(review): a plain expression list is wrapped in
// WORD(l, true) — the boolean presumably flags byte-sized emission; confirm.
private def bytes: Parser[ASMStatement] =
(".bytelist" | ".bl" | "!bytelist") ~> expr ^^ { BYTE_LIST(_) } |
(".byte" | ".b" | "!byte") ~> (
generate ^^ { g => WORD_GEN(g, true) } |
list ^^ { BYTE_LIST(_) }|
repsep(expr, ",") ^^ { l => WORD(l, true) }
)
// Word data directives (16-bit); boolean flag false selects word emission.
private def words: Parser[ASMStatement] = (".word" | ".w" | "!word") ~> (
generate ^^ { g => WORD_GEN(g, false) } |
repsep(expr, ",") ^^ { l => WORD(l, false) })
// Fill directive: a size expression and an optional fill-value expression.
private def fill: Parser[ASMStatement] = (".fill" | ".f" | "!fill") ~> expr ~ opt("," ~> expr) ^^ {
case size ~ None => FILL(size,None)
case size ~ (v@Some(_)) => FILL(size,v)
}
// 6502 addressing-mode parser. Modes that cannot be distinguished purely
// syntactically (absolute vs zero-page) are returned as UNKNOWN_* and
// resolved later in `asm`.
private def asmMode: Parser[(Mode.MODE, Expr)] =
"#" ~> expr ^^ { (IMM, _) } |
"(" ~> expr <~ ")" <~ ("," ~ "y|Y".r) ^^ { (IZY, _) } |
"(" ~> expr <~ ")" ^^ { (IND, _) } |
"(" ~> expr <~ ("," ~ "x|X".r) <~ ")" ^^ { (IZX, _) } |
expr ~ opt(("," ~> "x|X".r) | ("," ~> "y|Y".r)) ^^ {
case e ~ None => (UNKNOWN_ABS_OR_ZP, e) // we will decide later if it's ABS or ZP
case e ~ Some(index) =>
(UNKNOWN_ABX_OR_ZPX, e) else (UNKNOWN_ABY_OR_ZPY, e)
if (index.toUpperCase.endsWith("X")) (UNKNOWN_ABX_OR_ZPX, e) else (UNKNOWN_ABY_OR_ZPY, e)
}
// Full instruction: opcode, optional ".z"/".a" size hint, optional operand.
// Ambiguous ABS/ZP modes are resolved by the hint first, then by whether the
// opcode actually supports the zero-page form; branch opcodes force REL mode.
private def asm: Parser[ASMStatement] = other_opcodes ~ opt(".z" | ".a") ~ opt(asmMode) <~ opt(comment) <~ ("\\n" | "") ^^ {
case o ~ _ ~ None => // m is ignored, IMP forced
ASM(o, IMP, None)
case o ~ m ~ Some(mode) =>
var mm = m match {
case None => mode._1
case Some(hm) =>
(hm, mode._1) match {
case (".z", UNKNOWN_ABS_OR_ZP) => ZP
case (".a", UNKNOWN_ABS_OR_ZP) => ABS
case (".z", UNKNOWN_ABX_OR_ZPX) => ZPX
case (".a", UNKNOWN_ABX_OR_ZPX) => ABX
case (".z", UNKNOWN_ABY_OR_ZPY) => ZPY
case (".a", UNKNOWN_ABY_OR_ZPY) => ABY
case (_,m) => m
}
}
mm = mm match {
case UNKNOWN_ABS_OR_ZP =>
if (!hasMode(o,ZP)) ABS else mm
case UNKNOWN_ABX_OR_ZPX =>
if (!hasMode(o,ZPX)) ABX else mm
case UNKNOWN_ABY_OR_ZPY =>
if (!hasMode(o,ZPY)) ABY else mm
case m => m
}
BRANCHES.contains(o.toUpperCase) match {
case true =>
ASM(o, REL, Some(mode._2))
case false =>
ASM(o, mm, Some(mode._2))
}
}
// ===================== STATEMENT =================================
// enum { A, B = n, ... } — each entry is a label with an optional integer value.
private def enum: Parser[Statement] = ("enum" ~ "{") ~> nl ~> repsep(label ~ opt("=" ~> number), "," <~ nl) <~ nl <~ "}" ^^ {
case l => ENUM(l map { case e ~ n => (e, n map { _.toInt }) })
}
// struct <name> { field, field, ... }, optionally 'private'.
private def struct: Parser[Statement] = opt("private") ~ ("struct" ~> label) ~ ("{" ~> repsep(label, ",") <~ "}") ^^ {
case pr ~ name ~ fields => STRUCT(name, fields,pr.isDefined)
}
// val <name> = <expr> (immutable), optionally 'private'.
private def const: Parser[Statement] = opt("private") ~ ("val" ~> (label ~ ("=" ~> expr))) ^^ { case pr ~ (id ~ e) => CONST(id,e,pr.isDefined) }
// var <name> = <expr> (mutable), optionally 'private'.
private def variable: Parser[Statement] = opt("private") ~ ("var" ~> (label ~ ("=" ~> expr))) ^^ { case pr ~ (id ~ e) => VAR(id,e,pr.isDefined) }
// print <expr>
private def print: Parser[Statement] = "print" ~> expr ^^ { PRINT }
// Bare assignment: possibly module-qualified label (with field path) = expr.
private def assignment : Parser[Statement] = moduleLabel ~ rep("." ~> label) ~ ("=" ~> expr) ^^ {
case l ~ Nil ~ e => EVAL(Some(EvalTarget(l, None)), e)
case l ~ fs ~ e => EVAL(Some(EvalTarget(l, Some(fs))), e)
}
// eval [target =] <expr> — evaluates an expression, optionally storing it.
private def eval: Parser[Statement] = "eval" ~> opt(moduleLabel ~ rep("." ~> label) <~ "=") ~ expr ^^ {
case Some(l ~ Nil) ~ e => EVAL(Some(EvalTarget(l, None)), e)
case Some(l ~ fs) ~ e => EVAL(Some(EvalTarget(l, Some(fs))), e)
case None ~ e => EVAL(None, e)
}
// Block helpers: a block is either one statement or "{ ... }"; the boolean
// flags passed to statements() select which statement kinds are legal inside.
private def singleBlock: Parser[List[Statement]] = (asmStatement | statement) ^^ { List(_) }
private def multipleBlock: Parser[List[Statement]] = "{" ~> statements(false,false,false,false) <~ "}"
private def block: Parser[List[Statement]] = nl ~> (multipleBlock | singleBlock) <~ nl
private def macroBlock: Parser[List[Statement]] = nl ~> "{" ~> statements(true,false,false,false) <~ "}" <~ nl
private def funBlock: Parser[List[Statement]] = nl ~> "{" ~> statements(false,true,false,false) <~ "}" <~ nl
private def moduleBlock: Parser[List[Statement]] = nl ~> "{" ~> statements(false,true,false,true) <~ "}" <~ nl
// if (<cond>) <block> [else <block>]
private def ifStmt: Parser[Statement] = (("if" ~ "(") ~> expr <~ ")" <~ opt(comment)) ~ block ~ opt("else" ~> opt(comment) ~> block) ^^ {
case cond ~ t ~ None => IF(cond, t, Nil)
case cond ~ t ~ Some(e) => IF(cond, t, e)
}
// while (<cond>) <block>
private def whileStmt: Parser[Statement] = (("while" ~ "(") ~> expr <~ ")" <~ opt(comment)) ~ block ^^ { case c ~ b => WHILE(c, b) }
// for (v1 = e1, ... ; cond ; post, ...) <block> — init vars, condition, post statements.
private def forStmt: Parser[Statement] = ("for" ~ "(") ~> repsep((label <~ "=") ~ expr, ",") ~ (";" ~> expr <~ ";") ~ repsep(statement, ",") ~ (")" ~> opt(comment) ~> block) ^^ {
case vars ~ cond ~ post ~ body =>
val vs = vars map { case n ~ e => VAR(n,e,false) }
FOR(vs, cond, post, body)
}
// label <name> = <expr> — declares an assembly label bound to an address expr.
private def declareLabel : Parser[Statement] = ("label" ~> label <~ "=") ~ expr ^^ {
case l~e => DECLARE_LABEL(l,e)
}
private def break : Parser[Statement] = "break" ^^ { b => BREAK }
// dup <count> <block> — repeats the block <count> times.
private def dup : Parser[Statement] = "dup" ~> expr ~ block ^^ { case e~b => DUP(e,b) }
// align / .align <expr>
private def align : Parser[Statement] = ("align" | ".align") ~> expr ^^ { case e => ALIGN(e) }
// error <expr> — user-triggered compile error.
private def error : Parser[Statement] = "error" ~> expr ^^ { case e => ERROR(e) }
// A call syntax used as a statement is a macro invocation; module-qualified
// macro calls are not supported yet (see TODO).
private def macroCall : Parser[Statement] = function ^^ {
case FunOp(n,None,pars) => MACRO_CALL(n,pars)
case FunOp(n,_,pars) => throw new IllegalArgumentException // TODO
}
// def macro <name>(<params>) { ... }
private def macroStmt : Parser[Statement] = positioned {
("def" ~ "macro") ~> label ~ ("(" ~> repsep(label,",") <~ ")") ~ macroBlock ^^ {
case name~pars~body => MACRO(name,pars,body)
}
}
// [private] def <name>(<params>) { ... }
private def funStmt : Parser[Statement] = positioned {
opt("private") ~ ("def" ~> label ~ ("(" ~> repsep(label,",") <~ ")") ~ funBlock) ^^ {
case pr~(name~pars~body) => FUNCTION(name,pars,body,pr.isDefined)
}
}
// An expression used as a statement; its value is discarded.
private def exprStmt : Parser[Statement] = positioned {
expr ^^ { DISCARDEXPR(_) }
}
// module <name> { ... }
private def moduleStmt : Parser[Statement] = positioned {
"module" ~> label ~ moduleBlock ^^ { case l~b => MODULE(l,b) }
}
// source <filename-string> — includes another source file.
private def include : Parser[Statement] = "source" ~> expr ^? ({ case Str(fn) => INCLUDE(fn) }, _ => "Type mismatch: include statement expects a string as filename")
// Any assembler-level statement.
private def asmStatement: Parser[Statement] = positioned { asm | org | bytes | words | text | fill }
// Any language-level statement. Order matters: keyword-led alternatives must
// precede macroCall/assignment/exprStmt, which would otherwise match first.
private def statement: Parser[Statement] = positioned {
include | error | align | dup | break | const | variable | print | eval | ifStmt | struct | enum | whileStmt | forStmt | macroCall | declareLabel | assignment | exprStmt
}
// Flattens the optional statements produced by allStatements (comments parse to None).
private def statements(macroMode:Boolean, asmNotAllowed:Boolean, top:Boolean, module:Boolean): Parser[List[Statement]] = allStatements(macroMode,asmNotAllowed,top,module) ^^ { _.flatten }
// Parses a sequence of statements, restricting which kinds are permitted
// depending on the enclosing context (module body, top level, macro body,
// asm-forbidden blocks, or a plain block). Each parsed statement is tagged
// with the current fileName for error reporting.
private def allStatements(macroMode:Boolean, asmNotAllowed:Boolean, top:Boolean, module:Boolean) : Parser[List[Option[Statement]]] = {
rep(
nl ~>
(comment ^^ { _ => None } |
label <~ ":" ^^ { case l => Some(LABELED(l)) } |
(
(
if (module) (const | variable | struct | funStmt) | ((statement | asmStatement) <~ failure("In a module can be defined functions, vars/vals and structs only"))
else
if (top) (moduleStmt | macroStmt | funStmt| asmStatement | statement )
else
if (macroMode) (asmStatement|statement)|(macroStmt | funStmt)<~ err("In macros functions and other macros are not permitted")
else
if (asmNotAllowed) statement|asmStatement<~failure("Assembler statement are not allowed here")|macroStmt<~failure("Macros are not allowed here")|funStmt<~failure("Functions are not allowed here")
else
asmStatement | statement | macroStmt<~failure("Macros can be declared on outermost scope only")|funStmt<~failure("Functions can be declared on outermost scope only")
) ^^ { s => s.fileName = Some(fileName) ; s }
) ^^ {
case s => Some(s)
}
) <~ nl)
}
// Entry point: statements at the outermost (top) scope of a source file.
def topStatements : Parser[List[Statement]] = statements(false,false,true,false)
} | abbruzze/kernal64 | Kernal64/src/ucesoft/cbm/cpu/asm/AsmParser.scala | Scala | mit | 18,787 |
/** A function object usable wherever a `Function1[Int, Boolean]` is expected:
  * yields `true` exactly when its argument is even.
  */
object Foo extends Function1[Int, Boolean] {
  def apply(x: Int): Boolean = x % 2 == 0
}
/** Array helpers demonstrating functions passed as values. */
object ArrayUtils {
  /** Returns a new array with the elements of `xs` that satisfy `pred`,
    * preserving their original order. Empty input yields an empty array.
    *
    * Previously an unimplemented stub (`???`) that threw NotImplementedError.
    */
  def filter(xs: Array[Int], pred: Function1[Int,Boolean]): Array[Int] = xs.filter(x => pred(x))
}
/** Shows that the object `Foo` can be assigned to a `Function1`-typed value. */
object UseCase {
  // Foo implements Function1[Int, Boolean], so it is itself a function value.
  val predicate: Function1[Int, Boolean] = Foo
}
/** Local single-argument function abstraction (slide material); mirrors the
  * shape of the standard library's `scala.Function1`.
  *
  * @tparam T argument type
  * @tparam R result type
  */
trait Function1[T, R] {
  /** Applies this function to `x`. */
  def apply(x: T): R
}
| agconti/scala-school | 04-functions-as-values/slides/slide025.scala | Scala | mit | 312 |
import blah.*
// Verifies the LineNo given provided by an object-based source.
// NOTE(review): the expected value 4 is the source line recorded by the macro
// in the companion compilation unit — confirm against AsObject's definition.
def testO(): Unit = {
import AsObject.LineNo
assert(summon[LineNo].lineNo == 4)
}
// Verifies the LineNo given provided by a package-based source (expected line 9).
def testP(): Unit = {
import AsPackage.LineNo
assert(summon[LineNo].lineNo == 9)
}
// Entry point: runs both line-number checks.
@main def Test =
testO()
testP()
| dotty-staging/dotty | tests/run-macros/i6803/Test_2.scala | Scala | apache-2.0 | 227 |
/**
* @author ram
*
*/
package app
/** Application-wide configuration values, read once at object initialization
  * from the current Play application's configuration.
  *
  * The `.get` calls fail fast at startup if a mandatory key is missing.
  */
object MConfig {
  // Shorthand for the current Play application's configuration (the original
  // repeated this full lookup on every line).
  private def configuration = play.api.Play.application(play.api.Play.current).configuration
  val nsqurl = configuration.getString("nsq.url").get
  val mute_events = configuration.getBoolean("nsq.events.muted").getOrElse(false)
  val mute_emails = configuration.getStringList("nsq.events.muted_emails").get
  val cassandra_host = configuration.getString("cassandra.host").get
  val cassandra_keyspace = configuration.getString("cassandra.keyspace").get
  val cassandra_username = configuration.getString("cassandra.username").get
  val cassandra_password = configuration.getString("cassandra.password").get
  // BUG FIX: previously read "cassandra.password" again (copy-paste error),
  // so the SSL flag silently held the password value.
  // NOTE(review): confirm "cassandra.use_ssl" is the actual key name in conf;
  // unlike before, a missing key now fails at startup.
  val cassandra_use_ssl = configuration.getString("cassandra.use_ssl").get
  val org = configuration.getString("org").get
  val domain = configuration.getString("domain").get
  val master_key = configuration.getString("master.key").get
}
| indykish/vertice_gateway | app/MConfig.scala | Scala | mit | 1,367 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import cats.laws._
import cats.laws.discipline._
import monix.eval.Task
import monix.execution.Ack.Stop
import monix.execution.FutureUtils.extensions._
import monix.execution.Scheduler
import monix.execution.atomic.Atomic
import monix.reactive.{BaseTestSuite, Observable, Observer}
import scala.concurrent.{Future, Promise}
import scala.util.{Success, Try}
// Property-based checks that the Task-returning Observable operators (countL,
// existsL, firstL, ...) agree with their streaming counterparts and with the
// equivalent plain-List computations.
object ObservableOpsReturningTaskSuite extends BaseTestSuite {
// Reference implementation: subscribes to `obs` and completes the returned
// future with Success(Some(first element)), Success(None) on empty completion,
// or the failure. The subscription is stopped after the first element.
def first[A](obs: Observable[A])(implicit s: Scheduler): Future[Try[Option[A]]] = {
val p = Promise[Try[Option[A]]]()
obs.unsafeSubscribeFn(new Observer.Sync[A] {
def onNext(elem: A) = { p.trySuccess(Success(Some(elem))); Stop }
def onError(ex: Throwable): Unit = p.tryFailure(ex)
def onComplete(): Unit = p.trySuccess(Success(None))
})
p.future
}
test("runAsyncGetFirst works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
obs.runAsyncGetFirst.materialize <-> first(obs)
}
}
test("runAsyncGetLast works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
obs.runAsyncGetLast.materialize <-> first(obs.last)
}
}
// Aggregation operators: each is checked against the plain List result and/or
// against the streaming (F-suffixed) operator via the `first` helper.
test("countL works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result: Future[Try[Option[Long]]] =
obs.countL.map(Some.apply).materialize.runToFuture
result <-> Future.successful(Success(Some(list.length)))
}
}
test("countL is equivalent with countF") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result: Future[Try[Option[Long]]] =
obs.countL.map(Some.apply).materialize.runToFuture
result <-> first(obs.count)
}
}
test("existsL works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result: Future[Try[Option[Boolean]]] =
obs.existsL(_ % 3 == 0).map(Some.apply).materialize.runToFuture
result <-> Future.successful(Success(Some(list.exists(_ % 3 == 0))))
}
}
test("existsL is equivalent with existsF") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result: Future[Try[Option[Boolean]]] =
obs.existsL(_ % 3 == 0).map(Some.apply).materialize.runToFuture
result <-> first(obs.exists(_ % 3 == 0))
}
}
test("findL is equivalent with findF") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result: Future[Try[Option[Int]]] =
obs.findL(_ % 3 == 0).materialize.runToFuture
result <-> first(obs.find(_ % 3 == 0))
}
}
test("foldLeftL works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result: Future[Try[Option[Int]]] =
obs.foldLeftL(0)(_+_).map(Some.apply).materialize.runToFuture
result <-> Future.successful(Success(Some(list.sum)))
}
}
test("foldWhileL works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val sum1 = obs.foldLeftL(0)(_+_)
val sum2 = obs.foldWhileLeftL(0)((acc,e) => Left(acc + e))
sum1 <-> sum2
}
}
test("forAllL works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result: Future[Try[Option[Boolean]]] =
obs.forallL(_ >= 0).map(Some.apply).materialize.runToFuture
result <-> Future.successful(Success(Some(list.forall(_ >= 0))))
}
}
test("forAllL is equivalent with forAllF") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result: Future[Try[Option[Boolean]]] =
obs.forallL(_ >= 0).map(Some.apply).materialize.runToFuture
result <-> first(obs.forall(_ >= 0))
}
}
// head/first/last element accessors; error cases are mapped to the sentinel
// -101 so that empty-list failures become comparable values.
test("firstOptionL works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result = obs.firstOptionL
result <-> Task.now(list.headOption)
}
}
test("firstL works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result = obs.firstL.onErrorHandle(_ => -101)
result <-> Task.now(list.headOption.getOrElse(-101))
}
}
test("headL is equivalent with firstL") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
obs.headL.onErrorHandle(_ => -101) <-> obs.firstL.onErrorHandle(_ => -101)
}
}
test("headOptionL is equivalent with firstOptionL") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
obs.headOptionL <-> obs.firstOptionL
}
}
test("headOrElseL is equivalent with firstOrElseL") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
obs.map(Some.apply).headOrElseL(None) <->
obs.map(Some.apply).firstOrElseL(None)
}
}
test("lastOptionL is equivalent with lastF") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result: Future[Try[Option[Int]]] =
obs.lastOptionL.materialize.runToFuture
result <-> first(obs.last)
}
}
test("lastOrElseL is equivalent with lastF") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result: Future[Try[Option[Int]]] =
obs.map(Some.apply).lastOrElseL(None)
.materialize.runToFuture
result <-> first(obs.last)
}
}
test("lastL works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result = obs.lastL.onErrorHandle(_ => -101)
result <-> Task.now(list.lastOption.getOrElse(-101))
}
}
test("isEmptyL works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result = obs.isEmptyL
result <-> Task.now(list.isEmpty)
}
}
test("nonEmptyL works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result = obs.nonEmptyL
result <-> Task.now(list.nonEmpty)
}
}
test("maxL works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result = obs.maxL.map(_.getOrElse(-101))
result <-> Task.now(Try(list.max).getOrElse(-101))
}
}
test("maxByL works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result = obs.maxByL(identity).map(_.getOrElse(-101))
result <-> Task.now(Try(list.max).getOrElse(-101))
}
}
test("minL works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result = obs.minL.map(_.getOrElse(-101))
result <-> Task.now(Try(list.min).getOrElse(-101))
}
}
test("minByL works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result = obs.minByL(identity).map(_.getOrElse(-101))
result <-> Task.now(Try(list.min).getOrElse(-101))
}
}
test("sumL works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result = obs.sumL
result <-> Task.now(list.sum)
}
}
test("toListL works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val result = obs.toListL
result <-> Task.now(list)
}
}
// Side-effecting traversal: folds the elements into an atomic sum.
test("foreachL works") { implicit s =>
check1 { (list: List[Int]) =>
val obs = Observable.fromIterable(list)
val sumRef = Atomic(0)
val result: Future[Int] = obs.foreachL(sumRef.increment).runToFuture.map(_ => sumRef.get)
result <-> Future.successful(list.sum)
}
}
}
| Wogan/monix | monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/ObservableOpsReturningTaskSuite.scala | Scala | apache-2.0 | 8,706 |
package com.twitter.finatra.tests.utils
import com.twitter.finagle.http.{Response, Status}
import com.twitter.finatra.conversions.time._
import com.twitter.finatra.utils.{RetryPolicyUtils, RetryUtils}
import com.twitter.inject.Test
import com.twitter.util.{Await, Future}
// Exercises RetryUtils.retryFuture with both an exception-based retry policy
// and an HTTP-status-based retry policy.
class RetryUtilsTest extends Test {
// Retries up to 4 times with exponential backoff (10ms, x2) on non-fatal exceptions.
val nonFatalExponentialPolicy = RetryPolicyUtils.exponentialRetry(
start = 10.millis,
multiplier = 2,
numRetries = 4,
shouldRetry = RetryPolicyUtils.NonFatalExceptions)
// Retries up to 4 times with a constant 10ms delay on 4xx/5xx responses.
val constantHttpSuccessPolicy = RetryPolicyUtils.constantRetry(
start = 10.millis,
numRetries = 4,
shouldRetry = RetryPolicyUtils.Http4xxOr5xxResponses)
// Succeeds on the 3rd attempt, within the 4-retry budget.
"retry with futures succeeds" in {
var numRuns = 0
val result = RetryUtils.retryFuture(nonFatalExponentialPolicy) {
numRuns += 1
if (numRuns == 3)
Future(26)
else
throw new RuntimeException("fake failure")
}
Await.result(result) should be(26)
}
// Always fails: the final retry's exception is propagated to the caller.
"retry with futures fails" in {
val result = RetryUtils.retryFuture(nonFatalExponentialPolicy) {
throw new IllegalArgumentException("foo")
}
intercept[IllegalArgumentException] {
Await.result(result)
}
}
// 500 then 404 are retried; the 200 on the 3rd attempt is returned.
"HTTP retry with futures succeeds" in {
var numRuns = 0
val result = RetryUtils.retryFuture(constantHttpSuccessPolicy) {
numRuns += 1
if (numRuns == 1)
Future(Response(Status.InternalServerError))
else if (numRuns == 2)
Future(Response(Status.NotFound))
else if (numRuns == 3)
Future(Response(Status.Ok))
else
fail("shouldn't get here")
}
Await.result(result).status should be(Status.Ok)
}
// When every attempt yields 404, the last response is returned (not an error).
"HTTP retry with futures fails" in {
val result = RetryUtils.retryFuture(constantHttpSuccessPolicy) {
Future(Response(Status.NotFound))
}
Await.result(result).status should be(Status.NotFound)
}
}
| tempbottle/finatra | utils/src/test/scala/com/twitter/finatra/tests/utils/RetryUtilsTest.scala | Scala | apache-2.0 | 1,902 |
package com.github.tro2102
import org.scalatest.{Matchers, FlatSpec}
import org.scalatest.mock.MockitoSugar
/**
* Created on 8/28/14.
* @author Taylor Owens
*/
trait BaseSpec extends FlatSpec with MockitoSugar with Matchers
| tro2102/scala-euler | src/test/scala/com/github/tro2102/BaseSpec.scala | Scala | apache-2.0 | 229 |
import collection.immutable._
import org.scalacheck._
import Prop._
import Gen._
import Arbitrary._
import util._
object TreeSetTest extends Properties("TreeSet") {
// ScalaCheck generator: a TreeSet built from an arbitrary list of elements.
def genTreeSet[A: Arbitrary: Ordering]: Gen[TreeSet[A]] =
for {
elements <- listOf(arbitrary[A])
} yield TreeSet(elements: _*)
implicit def arbTreeSet[A : Arbitrary : Ordering]: Arbitrary[TreeSet[A]] = Arbitrary(genTreeSet)
// foreach and iterator must visit the same elements in the same order.
property("foreach/iterator consistency") = forAll { (subject: TreeSet[Int]) =>
val it = subject.iterator
var consistent = true
subject.foreach { element =>
consistent &&= it.hasNext && element == it.next()
}
consistent
}
property("worst-case tree height is iterable") = forAll(choose(0, 10), arbitrary[Boolean]) { (n: Int, even: Boolean) =>
/*
* According to "Ralf Hinze. Constructing red-black trees" [https://www.cs.ox.ac.uk/ralf.hinze/publications/#P5]
* you can construct a skinny tree of height 2n by inserting the elements [1 .. 2^(n+1) - 2] and a tree of height
* 2n+1 by inserting the elements [1 .. 3 * 2^n - 2], both in reverse order.
*
* Since we allocate a fixed size buffer in the iterator (based on the tree size) we need to ensure
* it is big enough for these worst-case trees.
*/
val highest = if (even) (1 << (n+1)) - 2 else 3*(1 << n) - 2
val values = (1 to highest).reverse
val subject = TreeSet(values: _*)
val it = subject.iterator
// Catching Throwable is deliberate here: any failure while draining the
// iterator (e.g. buffer exhaustion) must make the property fail, not crash.
try { while (it.hasNext) it.next(); true } catch { case _: Throwable => false }
}
// Iteration order is strictly ascending.
property("sorted") = forAll { (subject: TreeSet[Int]) => (subject.size >= 3) ==> {
subject.zip(subject.tail).forall { case (x, y) => x < y }
}}
property("contains all") = forAll { (elements: List[Int]) =>
val subject = TreeSet(elements: _*)
elements.forall(subject.contains)
}
property("size") = forAll { (elements: List[Int]) =>
val subject = TreeSet(elements: _*)
elements.distinct.size == subject.size
}
property("toSeq") = forAll { (elements: List[Int]) =>
val subject = TreeSet(elements: _*)
elements.distinct.sorted == subject.toSeq
}
property("head") = forAll { (elements: List[Int]) => elements.nonEmpty ==> {
val subject = TreeSet(elements: _*)
elements.min == subject.head
}}
property("last") = forAll { (elements: List[Int]) => elements.nonEmpty ==> {
val subject = TreeSet(elements: _*)
elements.max == subject.last
}}
// minAfter(e) must be the smallest element >= e, i.e. the min of rangeFrom(e).
property("minAfter") = forAll { (elements: List[Int]) => elements.nonEmpty ==> {
val half = elements.take(elements.size / 2)
val subject = TreeSet(half: _*)
elements.forall{e => {
val temp = subject.rangeFrom(e)
if (temp.isEmpty) subject.minAfter(e).isEmpty
else subject.minAfter(e).get == temp.min
}}
}}
// BUG FIX: this property was a copy-paste of the "minAfter" property and
// re-tested minAfter/rangeFrom, so maxBefore was never exercised.
// maxBefore(e) must return the largest element strictly less than e,
// i.e. the max of rangeUntil(e), or None when that range is empty.
property("maxBefore") = forAll { (elements: List[Int]) => elements.nonEmpty ==> {
val half = elements.take(elements.size / 2)
val subject = TreeSet(half: _*)
elements.forall{e => {
val temp = subject.rangeUntil(e)
if (temp.isEmpty) subject.maxBefore(e).isEmpty
else subject.maxBefore(e).get == temp.max
}}
}}
property("head/tail identity") = forAll { (subject: TreeSet[Int]) => subject.nonEmpty ==> {
subject == (subject.tail + subject.head)
}}
property("init/last identity") = forAll { (subject: TreeSet[Int]) => subject.nonEmpty ==> {
subject == (subject.init + subject.last)
}}
property("take") = forAll { (subject: TreeSet[Int]) =>
val n = choose(0, subject.size).sample.get
n == subject.take(n).size && subject.take(n).forall(subject.contains)
}
property("drop") = forAll { (subject: TreeSet[Int]) =>
val n = choose(0, subject.size).sample.get
(subject.size - n) == subject.drop(n).size && subject.drop(n).forall(subject.contains)
}
property("take/drop identity") = forAll { (subject: TreeSet[Int]) =>
val n = choose(-1, subject.size + 1).sample.get
subject == subject.take(n) ++ subject.drop(n)
}
property("splitAt") = forAll { (subject: TreeSet[Int]) =>
val n = choose(-1, subject.size + 1).sample.get
val (prefix, suffix) = subject.splitAt(n)
prefix == subject.take(n) && suffix == subject.drop(n)
}
// Generates a tree together with a valid [from, until] index pair for slice.
def genSliceParms = for {
tree <- genTreeSet[Int]
from <- choose(0, tree.size)
until <- choose(from, tree.size)
} yield (tree, from, until)
property("slice") = forAll(genSliceParms) { case (subject, from, until) =>
val slice = subject.slice(from, until)
slice.size == until - from && subject.toSeq == subject.take(from).toSeq ++ slice ++ subject.drop(until)
}
property("takeWhile") = forAll { (subject: TreeSet[Int]) =>
val result = subject.takeWhile(_ < 0)
result.forall(_ < 0) && result == subject.take(result.size)
}
property("dropWhile") = forAll { (subject: TreeSet[Int]) =>
val result = subject.dropWhile(_ < 0)
result.forall(_ >= 0) && result == subject.takeRight(result.size)
}
property("span identity") = forAll { (subject: TreeSet[Int]) =>
val (prefix, suffix) = subject.span(_ < 0)
prefix.forall(_ < 0) && suffix.forall(_ >= 0) && subject == prefix ++ suffix
}
property("from is inclusive") = forAll { (subject: TreeSet[Int]) => subject.nonEmpty ==> {
val n = choose(0, subject.size - 1).sample.get
val from = subject.drop(n).firstKey
subject.rangeFrom(from).firstKey == from && subject.rangeFrom(from).forall(_ >= from)
}}
property("to is inclusive") = forAll { (subject: TreeSet[Int]) => subject.nonEmpty ==> {
val n = choose(0, subject.size - 1).sample.get
val to = subject.drop(n).firstKey
subject.rangeTo(to).lastKey == to && subject.rangeTo(to).forall(_ <= to)
}}
// NOTE(review): the second conjunct uses `<= until`; the stronger check for
// exclusivity would be `< until` (the first conjunct does pin exclusivity).
property("until is exclusive") = forAll { (subject: TreeSet[Int]) => subject.size > 1 ==> {
val n = choose(1, subject.size - 1).sample.get
val until = subject.drop(n).firstKey
subject.rangeUntil(until).lastKey == subject.take(n).lastKey && subject.rangeUntil(until).forall(_ <= until)
}}
property("remove single") = forAll { (subject: TreeSet[Int]) => subject.nonEmpty ==> {
val element = oneOf(subject.toSeq).sample.get
val removed = subject - element
subject.contains(element) && !removed.contains(element) && subject.size - 1 == removed.size
}}
property("remove all") = forAll { (subject: TreeSet[Int]) =>
val result = subject.foldLeft(subject)((acc, elt) => acc - elt)
result.isEmpty
}
// The TreeSet constructor must reject a null Ordering eagerly.
property("ordering must not be null") =
throws(classOf[NullPointerException])(TreeSet.empty[Int](null))
}
| lrytz/scala | test/scalacheck/treeset.scala | Scala | apache-2.0 | 6,539 |
/*
* Copyright (c) 2014 Dufresne Management Consulting LLC.
*/
package com.nickelsoftware.bettercare4me.models;
import org.joda.time.LocalDate
import org.scalatestplus.play.OneAppPerSuite
import org.scalatestplus.play.PlaySpec
// Round-trip tests for CriteriaResultDetail's CSV (de)serialization, including
// quoting of fields that contain commas.
// NOTE(review): the expected strings embed a fixed UTC offset (-04:00), so
// these assertions depend on the JVM's default time zone — consider pinning
// the zone in the test setup to make them portable.
class PatientScorecardResultTestSpec extends PlaySpec {
"The CriteriaResultDetail class" must {
"serialize to a csv string" in {
val c1 = CriteriaResultDetail("claimId", "providerLName", "providerFName", LocalDate.parse("1962-07-27").toDateTimeAtStartOfDay(), "reason")
c1.toCSVString mustBe "claimId,providerLName,providerFName,1962-07-27T00:00:00.000-04:00,reason\r\n"
// A comma inside a field must be escaped with surrounding double quotes.
val c2 = CriteriaResultDetail("claimId", "provider, LName", "providerFName", LocalDate.parse("1962-07-27").toDateTimeAtStartOfDay(), "reason")
c2.toCSVString mustBe "claimId,\"provider, LName\",providerFName,1962-07-27T00:00:00.000-04:00,reason\r\n"
}
"de-serialize from a csv string" in {
val c1 = CriteriaResultDetail("claimId", "providerLName", "providerFName", LocalDate.parse("1962-07-27").toDateTimeAtStartOfDay(), "reason")
CriteriaResultDetail("claimId,providerLName,providerFName,1962-07-27T00:00:00.000-04:00,reason\r\n") mustEqual c1
val c2 = CriteriaResultDetail("claimId", "provider, LName", "providerFName", LocalDate.parse("1962-07-27").toDateTimeAtStartOfDay(), "reason")
CriteriaResultDetail("claimId,\"provider, LName\",providerFName,1962-07-27T00:00:00.000-04:00,reason\r\n") mustEqual c2
}
}
}
| reactivecore01/bettercare4.me | play/test/com/nickelsoftware/bettercare4me/models/PatientScorecardResultTestSpec.scala | Scala | apache-2.0 | 1,515 |
/*
* Copyright 2015 Stephan Rehfeld
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package test.scaladelray.material
import org.scalatest.FunSpec
import scaladelray.material.ReflectiveMaterial
import scaladelray.{Color, World}
import scaladelray.math.{Normal3, Vector3, Point3, Ray}
import test.scaladelray.geometry.GeometryTestAdapter
import scaladelray.texture.{SingleColorTexture, TexCoord2D}
import scaladelray.geometry.Hit
import scaladelray.light.PointLight
class ReflectiveMaterialSpec extends FunSpec {
describe( "A ReflectiveMaterial" ) {
// NOTE(review): test name has a typo ("retrieve to color") — it is a runtime
// string used as the test's name, so it is left unchanged here.
it( "should retrieve to color from the texture, using texture coordinate in the hit" ) {
// Three texture adapters: diffuse, specular, and reflection textures.
val t1 = new TextureTestAdapter()
val t2 = new TextureTestAdapter()
val t3 = new TextureTestAdapter()
val m = ReflectiveMaterial( t1, t2, 1, t3 )
val w = World( Color( 0, 0, 0 ), Set() )
val r = Ray( Point3(0,0,0), Vector3( 0, 0, -1 ) )
val g = new GeometryTestAdapter( m )
val tc = TexCoord2D( 1.0, 1.0 )
val h = new Hit( r, g, 1, Normal3( 0, 1, 0 ), tc )
m.colorFor( h, w, (_,_) => Color( 0, 0, 0 ) )
// All three textures must have been sampled at the hit's texture coordinate.
assert( t1.coordinates.isDefined )
assert( t1.coordinates.get == tc )
assert( t2.coordinates.isDefined )
assert( t2.coordinates.get == tc )
assert( t3.coordinates.isDefined )
assert( t3.coordinates.get == tc )
}
it( "should call the tracer with the reflected ray" ) {
val t1 = new TextureTestAdapter()
val t2 = new TextureTestAdapter()
val t3 = new TextureTestAdapter()
val m = ReflectiveMaterial( t1, t2, 1, t3 )
val w = World( Color( 0, 0, 0 ), Set() )
val r = Ray( Point3(0,0,0), Vector3( 0, 0, -1 ) )
val g = new GeometryTestAdapter( m )
val tc = TexCoord2D( 1.0, 1.0 )
// Normal at 45 degrees, so the incoming -z ray reflects into +y.
val h = new Hit( r, g, 1, Vector3( 0, 1, 1 ).normalized.asNormal, tc )
var called = false
val tracer = ( r: Ray, w : World) => {
assert( r.o == Point3( 0, 0, -1 ) )
assert( r.d =~= Vector3( 0, 1, 0 ) )
called = true
Color( 0, 0, 0 )
}
m.colorFor( h, w, tracer )
assert( called )
}
it( "should call createLight of the light" ) {
val illuminatesData = true :: Nil
val directionFromData = Vector3( 0, 1, 0 ) :: Nil
val intensityData = 1.0 :: Nil
val l1 = new LightTestAdapter( illuminatesData, directionFromData, intensityData )
val l2 = new LightTestAdapter( illuminatesData, directionFromData, intensityData )
val t1 = new TextureTestAdapter()
val t2 = new TextureTestAdapter()
val t3 = new TextureTestAdapter()
val m = ReflectiveMaterial( t1, t2, 1, t3 )
val w = World( Color( 0, 0, 0 ), Set(), Color( 0, 0, 0 ), l1 + l2 )
val r = Ray( Point3(0,0,0), Vector3( 0, 0, -1 ) )
val g = new GeometryTestAdapter( m )
val tc = TexCoord2D( 1.0, 1.0 )
val h = new Hit( r, g, 1, Normal3( 0, 1, 0 ), tc )
m.colorFor( h, w, (_,_) => Color( 0, 0, 0 ) )
// Shading must consult every light description in the world.
assert( l1.createLightCalled )
assert( l2.createLightCalled )
}
it( "should request if a light hits a point" ) {
val illuminatesData = true :: Nil
val directionFromData = Vector3( 0, 1, 0 ) :: Nil
val intensityData = 1.0 :: Nil
val l1 = new LightTestAdapter( illuminatesData, directionFromData, intensityData )
val l2 = new LightTestAdapter( illuminatesData, directionFromData, intensityData )
val t1 = new TextureTestAdapter()
val t2 = new TextureTestAdapter()
val t3 = new TextureTestAdapter()
val m = ReflectiveMaterial( t1, t2, 1, t3 )
val w = World( Color( 0, 0, 0 ), Set(), Color( 0, 0, 0 ), l1 + l2 )
val r = Ray( Point3(0,0,0), Vector3( 0, 0, -1 ) )
val g = new GeometryTestAdapter( m )
val tc = TexCoord2D( 1.0, 1.0 )
val h = new Hit( r, g, 1, Normal3( 0, 1, 0 ), tc )
m.colorFor( h, w, (_,_) => Color( 0, 0, 0 ) )
assert( l1.illuminatesPoint.isDefined )
assert( l1.illuminatesPoint.get == r( h.t ) )
assert( l1.illuminatesWorld.isDefined )
assert( l1.illuminatesWorld.get == w )
assert( l2.illuminatesPoint.isDefined )
assert( l2.illuminatesPoint.get == r( h.t ) )
assert( l2.illuminatesWorld.isDefined )
assert( l2.illuminatesWorld.get == w )
}
it( "should request the direction to the light" ) {
val illuminatesData = true :: Nil
val directionFromData = Vector3( 0, 1, 0 ) :: Nil
val intensityData = 1.0 :: Nil
val l1 = new LightTestAdapter( illuminatesData, directionFromData, intensityData )
val l2 = new LightTestAdapter( illuminatesData, directionFromData, intensityData )
val t1 = new TextureTestAdapter()
val t2 = new TextureTestAdapter()
val t3 = new TextureTestAdapter()
val m = ReflectiveMaterial( t1, t2, 1, t3 )
val w = World( Color( 0, 0, 0 ), Set(), Color( 0, 0, 0 ), l1 + l2 )
val r = Ray( Point3(0,0,0), Vector3( 0, 0, -1 ) )
val g = new GeometryTestAdapter( m )
val tc = TexCoord2D( 1.0, 1.0 )
val h = new Hit( r, g, 1, Normal3( 0, 1, 0 ), tc )
m.colorFor( h, w, (_,_) => Color( 0, 0, 0 ) )
assert( l1.directionPoint.isDefined )
assert( l1.directionPoint.get == r( h.t ) )
assert( l2.directionPoint.isDefined )
assert( l2.directionPoint.get == r( h.t ) )
}
it( "should request the intensity of the light" ) {
val illuminatesData = true :: Nil
val directionFromData = Vector3( 0, 1, 0 ) :: Nil
val intensityData = 1.0 :: Nil
val l1 = new LightTestAdapter( illuminatesData, directionFromData, intensityData )
val l2 = new LightTestAdapter( illuminatesData, directionFromData, intensityData )
val t1 = new TextureTestAdapter()
val t2 = new TextureTestAdapter()
val t3 = new TextureTestAdapter()
val m = ReflectiveMaterial( t1, t2, 1, t3 )
val w = World( Color( 0, 0, 0 ), Set(), Color( 0, 0, 0 ), l1 + l2 )
val r = Ray( Point3(0,0,0), Vector3( 0, 0, -1 ) )
val g = new GeometryTestAdapter( m )
val tc = TexCoord2D( 1.0, 1.0 )
val h = new Hit( r, g, 1, Normal3( 0, 1, 0 ), tc )
m.colorFor( h, w, (_,_) => Color( 0, 0, 0 ) )
assert( l1.intensityPoint.isDefined )
assert( l1.intensityPoint.get == r( h.t ) )
assert( l2.intensityPoint.isDefined )
assert( l2.intensityPoint.get == r( h.t ) )
}
it( "should use the color returned by the texture to calculate to color at the point on the surface" ) {
val l = new PointLight( Color( 1, 1, 1 ), Point3( 0, 0, 0 ) )
val t1 = new SingleColorTexture( Color( 1, 0, 0 ) )
val t2 = new SingleColorTexture( Color( 0, 1, 0 ) )
val t3 = new SingleColorTexture( Color( 0, 0, 1 ) )
val m = ReflectiveMaterial( t1, t2, 1, t3 )
val w = World( Color( 0, 0, 0 ), Set(), Color( 0, 0, 0 ), Set() + l )
val r = Ray( Point3( 0, 0, 0 ), Vector3( 0, 0, -1 ) )
val g = new GeometryTestAdapter( m )
val tc = TexCoord2D( 1.0, 1.0 )
val h = new Hit( r, g, 1, Normal3( 0, 0, 1 ), tc )
assert( m.colorFor( h, w, (_,_) => Color( 1, 1, 1 ) ) == Color( 1, 1, 1 ) )
}
it( "should use the normal of the hit to calculate the color" ) {
val l = new PointLight( Color( 1, 1, 1 ), Point3( 0, 0, 0 ) )
val t1 = new SingleColorTexture( Color( 1, 0, 0 ) )
val t2 = new SingleColorTexture( Color( 0, 1, 0 ) )
val t3 = new SingleColorTexture( Color( 0, 0, 1 ) )
val m = ReflectiveMaterial( t1, t2, 1, t3 )
val w = World( Color( 0, 0, 0 ), Set(), Color( 0, 0, 0 ), Set() + l )
val r = Ray( Point3( 0, 0, 0 ), Vector3( 0, 0, -1 ) )
val g = new GeometryTestAdapter( m )
val tc = TexCoord2D( 1.0, 1.0 )
val h = new Hit( r, g, 1, Vector3( 0, 1, 1 ).normalized.asNormal, tc )
assert( m.colorFor( h, w, (_,_) => Color( 1, 1, 1 ) ) == Color( 1 * Math.cos( Math.PI / 4 ) , Vector3( 0, 0, 1 ).reflectOn( h.n ) dot -r.d , 1 ) )
}
it( "should use the information if the light illuminates the surface to calculate the color" ) {
val illuminatesData = false :: Nil
val directionFromData = Vector3( 0, 0, 1 ) :: Nil
val intensityData = 1.0 :: Nil
val l = new LightTestAdapter( illuminatesData, directionFromData, intensityData )
val t1 = new SingleColorTexture( Color( 1, 0, 0 ) )
val t2 = new SingleColorTexture( Color( 0, 1, 0 ) )
val t3 = new SingleColorTexture( Color( 0, 0, 1 ) )
val m = ReflectiveMaterial( t1, t2, 1, t3 )
val w = World( Color( 0, 0, 0 ), Set(), Color( 0, 0, 0 ), Set() + l )
val r = Ray( Point3(0,0,0), Vector3( 0, 0, -1 ) )
val g = new GeometryTestAdapter( m )
val tc = TexCoord2D( 1.0, 1.0 )
val h = new Hit( r, g, 1, Normal3( 0, 0, 1 ), tc )
assert( m.colorFor( h, w, (_,_) => Color( 1, 1, 1 ) ) == Color( 0, 0, 1 ) )
}
it( "should use the intensity returned by the light to calculate to color" ) {
val illuminatesData = true :: Nil
val directionFromData = Vector3( 0, 0, 1 ) :: Nil
val intensityData = 0.5 :: Nil
val l = new LightTestAdapter( illuminatesData, directionFromData, intensityData )
val t1 = new SingleColorTexture( Color( 1, 0, 0 ) )
val t2 = new SingleColorTexture( Color( 0, 1, 0 ) )
val t3 = new SingleColorTexture( Color( 0, 0, 1 ) )
val m = ReflectiveMaterial( t1, t2, 1, t3 )
val w = World( Color( 0, 0, 0 ), Set(), Color( 0, 0, 0 ), Set() + l )
val r = Ray( Point3(0,0,0), Vector3( 0, 0, -1 ) )
val g = new GeometryTestAdapter( m )
val tc = TexCoord2D( 1.0, 1.0 )
val h = new Hit( r, g, 1, Normal3( 0, 0, 1 ), tc )
assert( m.colorFor( h, w, (_,_) => Color( 1, 1, 1 ) ) == Color( 0.5, 0.5, 1 ) )
}
it( "should use the direction returned by the light to calculate to color" ) {
val illuminatesData = true :: Nil
val directionFromData = Vector3( 0, 1, 1 ).normalized :: Nil
val intensityData = 1.0 :: Nil
val l = new LightTestAdapter( illuminatesData, directionFromData, intensityData )
val t1 = new SingleColorTexture( Color( 1, 0, 0 ) )
val t2 = new SingleColorTexture( Color( 0, 1, 0 ) )
val t3 = new SingleColorTexture( Color( 0, 0, 1 ) )
val m = ReflectiveMaterial( t1, t2, 1, t3 )
val w = World( Color( 0, 0, 0 ), Set(), Color( 0, 0, 0 ), Set() + l )
val r = Ray( Point3(0,0,0), Vector3( 0, 0, -1 ) )
val g = new GeometryTestAdapter( m )
val tc = TexCoord2D( 1.0, 1.0 )
val h = new Hit( r, g, 1, Normal3( 0, 0, 1 ), tc )
assert( m.colorFor( h, w, (_,_) => Color( 1, 1, 1 ) ) =~= Color( 1 * Math.cos( Math.PI / 4 ), 1 * Math.cos( Math.PI / 4 ), 1 ) )
}
}
}
| stephan-rehfeld/scaladelray | src/test/scala/test/scaladelray/material/ReflectiveMaterialSpec.scala | Scala | apache-2.0 | 11,311 |
package com.enkidu.lignum.parsers.ast.expression.types.templates
trait TemplateArgument extends Template
| marek1840/java-parser | src/main/scala/com/enkidu/lignum/parsers/ast/expression/types/templates/TemplateArgument.scala | Scala | mit | 107 |
package com
object Sequence {
  /**
   * Builds a reusable list filter from the given predicate: the returned
   * function keeps exactly the elements for which `filter` is true.
   */
  def filteringFunction[V](filter: V => Boolean): List[V] => List[V] =
    (elements: List[V]) => elements.filter(filter)
}
| yusuke2255/dotty | tests/pos/java-interop/t1711/Seq.scala | Scala | bsd-3-clause | 191 |
package org.scalex
package index
import scala.tools.nsc.CompilerCommand
/**
 * Command-line front end for the scalex indexer: a CompilerCommand that, in
 * addition to the files given on the command line, picks up every .scala
 * source found under the configured input directory.
 */
private[index] final class Command(
arguments: List[String],
settings: Settings) extends CompilerCommand(arguments, settings) {
// Explicit file arguments plus all sources discovered under settings.inputDir
// (skipped entirely when the option was left at its default).
override def files = super.files ++ {
if (settings.inputDir.isDefault) Nil
else findSources(new File(settings.inputDir.value)) map (_.getAbsolutePath)
}
// Recursively collects the .scala files below `dir`.
private def findSources(dir: File): List[File] = dir.listFiles.toList flatMap { file ⇒
if (file.isDirectory) findSources(file)
else (file.getName endsWith ".scala") ?? List(file)
}
override def cmdName = "scalex"
// Usage text: scalex-specific options first, then the standard scalac options.
override def usageMsg = (
createUsageMsg("where possible scalex", shouldExplain = false, x ⇒ x.isStandard && settings.isScalexSpecific(x.name)) +
"\\n\\nStandard scalac options also available:" +
createUsageMsg(x ⇒ x.isStandard && !settings.isScalexSpecific(x.name))
)
}
| kzys/scalex | src/main/scala/index/Command.scala | Scala | mit | 897 |
package org.raisercostin.jedi
import java.io.File
import java.io.InputStream
import scala.language.implicitConversions
import scala.language.reflectiveCalls
import scala.util.Try
import org.raisercostin.jedi.impl.ResourceUtil
import sun.net.www.protocol.file.FileURLConnection
import org.raisercostin.jedi.impl.SlfLogger
import org.raisercostin.jedi.impl.Http2
import org.raisercostin.jedi.impl.HttpRequest
import org.raisercostin.jedi.impl.DefaultConnectFunc
import scalaj.http.HttpConstants
import org.raisercostin.jedi.impl.QueryStringUrlFunc
import java.net.HttpURLConnection
import scala.util.Failure
import java.io.IOException
object HttpConfig {
  /** Default configuration: a desktop-browser User-Agent and a permissive Accept header. */
  val defaultConfig: HttpConfig = {
    val defaultHeaders = Map(
      "User-Agent" -> "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36",
      "Accept" -> "*/*")
    HttpConfig(header = defaultHeaders)
  }
}
/**
 * HTTP client settings used by [[UrlLocation]]: request headers, redirect
 * policy, socket timeouts and the choice of implementation.
 *
 * @param header            request headers sent with every request
 * @param allowedRedirects  maximum number of redirects that will be followed
 * @param connectionTimeout connect timeout in milliseconds
 * @param readTimeout       read timeout in milliseconds
 * @param useScalaJHttp     when true, use the scalaj-http based implementation; otherwise plain java.net
 */
case class HttpConfig(header: Map[String, String] = Map(), allowedRedirects: Int = 5, connectionTimeout: Int = 10000, readTimeout: Int = 15000, useScalaJHttp: Boolean = true) {
  /** True while at least one redirect may still be followed. */
  def followRedirects = allowedRedirects > 0
  /** Applies headers, redirect policy and timeouts to an opened connection. */
  def configureConnection(conn: HttpURLConnection): Unit = {
    header.foreach(element => conn.setRequestProperty(element._1, element._2))
    conn.setInstanceFollowRedirects(followRedirects)
    conn.setConnectTimeout(connectionTimeout)
    conn.setReadTimeout(readTimeout)
  }
  /**
   * Usually needed if a 403 is returned.
   * Bug fix: previously this built a brand-new `HttpConfig(header = ...)`,
   * silently resetting allowedRedirects, the timeouts and useScalaJHttp back
   * to their defaults. It now uses `copy`, consistent with withAgent /
   * withoutAgent, so only the headers change.
   */
  def withBrowserHeader: HttpConfig = this.copy(header = this.header ++ Map(
    "User-Agent" -> "curl/7.51.0",
    "Accept" -> "*/*"))
  //Other useful settings:
  //"Accept" -> "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
  //"Connection" -> "keep-alive"
  //User-Agent:Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36
  //val cookies = "osCsid=d9bbb1602e315dadfe4a5b6e07832053; MIsid=18d1653b970413564d19469c06c8ebad"
  //Set-Cookie: osCsid=d9bbb1602e315dadfe4a5b6e07832053; path=/;
  //val userAgent = "User-Agent" -> "Wget/1.9"
  //MIsid 20b3258abfd25dfda1d9a2a04088f577
  //Http.configure(_ setFollowRedirects true)(q OK as.String)
  //"Connection" -> "Keep-Alive", "Cookie" -> cookies)
  /** Switches to the plain java.net implementation. */
  def withJavaImpl: HttpConfig = this.copy(useScalaJHttp = false)
  /** Replaces (or adds) the User-Agent header. */
  def withAgent(newAgent: String) = this.copy(header = header + ("User-Agent" -> newAgent))
  /** Removes the User-Agent header entirely. */
  def withoutAgent = this.copy(header = header - "User-Agent")
}
object UrlLocation extends SlfLogger
/**
* See here for good behaviour: https://www.scrapehero.com/how-to-prevent-getting-blacklisted-while-scraping/
*/
/**
 * An InputLocation backed by a java.net.URL. Tracks the chain of redirects
 * already followed (`redirects`) and carries an HttpConfig controlling
 * headers, timeouts and the redirect budget.
 */
case class UrlLocation(url: java.net.URL, redirects: Seq[UrlLocation] = Seq(), config: HttpConfig = HttpConfig.defaultConfig) extends InputLocation with IsFile{ self =>
// override type MetaRepr = MemoryLocation
def exists = ???
/** The external form of the URL, e.g. "http://host/path". */
def raw = url.toExternalForm()
//TODO dump intermediate requests/responses
override def toUrl: java.net.URL = url
override def nameAndBefore: String = url.getPath
def toFile: File = ???
import java.net._
/** Content length taken from a HEAD request; throws if it cannot be determined. */
override def size: Long = lengthTry.get
//TODO sending the current etag as well and wait for 302 not modified? This will save one more connection. Maybe this should be managed in a CachedUrlLocation?
// ETag from a HEAD request, with surrounding quotes stripped; None on failure.
def etagFromHttpRequestHeader: Option[String] = headConnection { conn => conn.getHeaderField("ETag").stripPrefix("\\"").stripSuffix("\\"") }.toOption
// Opens a connection, issues a HEAD request (for HTTP), hands the open
// connection to `openedHeadConnection`, and guarantees cleanup afterwards.
def headConnection[T](openedHeadConnection: URLConnection => T): Try[T] = ResourceUtil.cleanly(url.openConnection()) {
case c: HttpURLConnection =>
c.disconnect()
case f: FileURLConnection =>
f.close()
} {
case conn: HttpURLConnection =>
config.configureConnection(conn)
conn.setRequestMethod("HEAD")
openedHeadConnection(conn)
case conn: FileURLConnection =>
openedHeadConnection(conn)
}
// Persists the metadata map into an in-memory location.
def metaLocation:Try[NavigableInOutLocation/*MetaRepr*/] = {
val out = Locations.memory("")
HierarchicalMultimap.save(meta.get,out).map(_=>out)
}
/**
 * InputLocations should have metadata. Worst case scenario in a separate file or other files in the filesystem.
 * See .svn, .csv, .git, dos navigator, .info files, nio meta/user attributes etc.
 */
// Request and response headers captured from a HEAD request; empty maps for file URLs.
override def meta: Try[HttpHMap] = headConnection { conn =>
conn match {
case conn: HttpURLConnection =>
if (conn.getResponseCode != 200)
throw new RuntimeException("A redirect is needed. Cannot compute size!")
import scala.collection.JavaConverters._
HttpHMap(conn.getRequestProperties.asScala.toMap.mapValues(_.asScala), conn.getHeaderFields.asScala.toMap.mapValues(_.asScala))
case conn: FileURLConnection =>
HttpHMap(Map(), Map())
}
}
// Content length from a HEAD request; fails on redirects or negative lengths.
def lengthTry: Try[Long] = headConnection { conn =>
conn match {
case conn: HttpURLConnection =>
if (conn.getResponseCode != 200)
throw new RuntimeException("A redirect is needed. Cannot compute size!")
val len = conn.getContentLengthLong()
if (len < 0) throw new RuntimeException("Invalid length " + len + " received!")
len
case conn: FileURLConnection =>
//conn.getInputStream
val len = conn.getContentLengthLong()
if (len < 0) throw new RuntimeException("Invalid length " + len + " received!")
len
}
}
// Dispatches to the scalaj-http or plain java.net implementation per config.
override def unsafeToInputStream: InputStream =
if (config.useScalaJHttp)
unsafeToInputStreamUsingScalaJHttp
else
unsafeToInputStreamUsingJava
// Builds a scalaj-http GET request carrying this location's headers,
// timeouts and redirect policy; logs request/response headers around connect.
private def createRequester(url: String) = {
import scalaj.http.Http
import scalaj.http.HttpOptions
HttpRequest(
url = raw,
method = "GET",
connectFunc = {
case (req: HttpRequest, conn: HttpURLConnection) =>
import scala.collection.JavaConverters._
UrlLocation.logger.debug(s"RequestHeaders for $raw:\\n " + conn.getRequestProperties.asScala.mkString("\\n "))
DefaultConnectFunc.apply(req, conn)
UrlLocation.logger.debug(s"ResponseHeaders for $raw:\\n " + conn.getHeaderFields.asScala.mkString("\\n "))
},
params = Nil,
headers = config.header.toSeq, //agent.map(ag=>Seq("User-Agent" -> ag)).getOrElse(Seq()),//"scalaj-http/1.0"),
options = HttpConstants.defaultOptions,
proxyConfig = None,
charset = HttpConstants.utf8,
sendBufferSize = 4096,
urlBuilder = QueryStringUrlFunc,
compress = true)
.option(_ setInstanceFollowRedirects config.followRedirects)
.option(HttpOptions.connTimeout(config.connectionTimeout))
.option(HttpOptions.readTimeout(config.readTimeout))
.headers(config.header)
}
// scalaj-http implementation: leaves the connection open and returns the body stream.
def unsafeToInputStreamUsingScalaJHttp: InputStream = {
createRequester(raw).withUnclosedConnection.exec {
case (code, map, stream) =>
handleCode(code, map.getOrElse("Location", Seq()).headOption.getOrElse(null), stream, Try { map })
stream
}.body
}
//protected override
// Plain java.net implementation; non-HTTP connections just return their stream.
def unsafeToInputStreamUsingJava: InputStream = {
url.openConnection() match {
case conn: HttpURLConnection =>
config.configureConnection(conn)
import scala.collection.JavaConverters._
UrlLocation.logger.info("header:\\n" + config.header.mkString("\\n "))
UrlLocation.logger.info(s"RequestHeaders for $raw:\\n " + conn.getRequestProperties.asScala.mkString("\\n "))
//if (UrlLocation.log.isDebugEnabled())
UrlLocation.logger.info(s"ResponseHeaders for $raw:\\n " + Try { conn.getHeaderFields.asScala.mkString("\\n ") })
handleCode(conn.getResponseCode, conn.getHeaderField("Location"), { conn.getInputStream }, Try { conn.getHeaderFields.asScala.toMap })
case conn =>
conn.getInputStream
}
}
// 200 -> return the stream; redirect with budget left -> recurse with the new
// URL appended to the redirect chain; anything else -> HttpStatusException.
def handleCode(code: Int, location: String, stream: => InputStream, map: => Try[Map[String, _]]): InputStream =
(code, location) match {
case (200, _) =>
stream
case (code, location) if config.allowedRedirects > redirects.size && location != null && location.nonEmpty && location != raw =>
//This is manual redirection. The connection should already do all the redirects if config.allowedRedirects is true
closeStream(stream)
UrlLocation(new java.net.URL(location), this +: redirects, config).unsafeToInputStream
case (code, _) =>
closeStream(stream)
throw new HttpStatusException(s"Got $code response from $this. A 200 code is needed to get an InputStream. The header is\\n " + map.getOrElse(Map()).mkString("\\n ")
+ " After " + redirects.size + " redirects:\\n " + redirects.mkString("\\n "), code, this)
}
/**
 * Shouldn't disconnect as it "Indicates that other requests to the server are unlikely in the near future."
 * We should just close() on the input/output/error streams
 * http://stackoverflow.com/questions/15834350/httpurlconnection-closing-io-streams
 */
def closeStream(stream: => InputStream) = Try {
if (stream != null)
stream.close
}.recover { case e => UrlLocation.logger.debug("Couldn't close input/error stream to " + this, e) }
// Convenience builders returning a copy with an adjusted HttpConfig.
def withSensibleAgent = withAgent("User-Agent:Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36")
def withAgent(newAgent: String) = this.copy(config = config.withAgent(newAgent))
def withoutAgent = this.copy(config = config.withoutAgent)
def withBrowserHeader = this.copy(config = config.withBrowserHeader)
def withoutRedirect = this.copy(config = config.copy(allowedRedirects = 0))
def resolved: ResolvedUrlLocation = ResolvedUrlLocation(this)
def withJavaImpl = this.copy(config = config.withJavaImpl)
// ETag exposed as a plain (possibly empty) string.
override def etag: String = etagFromHttpRequestHeader.getOrElse("")
}
//TODO add a resolved state where you can interrogate things like All redirects headers, status code and others.
/** Wrapper marking a UrlLocation as resolved (see the TODO above; currently holds no extra state). */
case class ResolvedUrlLocation(location: UrlLocation) {
}
/** IOException carrying the HTTP status `code` and the `url` that produced it. */
case class HttpStatusException(message: String, code: Int, url: UrlLocation) extends IOException(message)
| raisercostin/jedi-io | src/main/scala/org/raisercostin/jedi/UrlLocation.scala | Scala | apache-2.0 | 10,036 |
package com.github.mrpowers.spark.daria.sql
import utest._
import org.apache.spark.sql.functions._
import ColumnExt._
import com.github.mrpowers.spark.fast.tests.{ColumnComparer, DataFrameComparer}
import SparkSessionExt._
import org.apache.spark.sql.types.{BooleanType, IntegerType, StringType}
/**
 * Tests for the ColumnExt extension methods on Spark's Column: chain /
 * chainUDF for fluent function application, nullBetween for null-aware range
 * checks, the boolean predicates (isTrue/isFalse/isTruthy/isFalsy), the
 * null-or-blank checks, isNotIn, and evalString.
 */
object ColumnExtTest extends TestSuite with DataFrameComparer with ColumnComparer with SparkSessionTestWrapper {
val tests = Tests {
// chain: applies a Column => Column function fluently.
'chain - {
"chains sql functions" - {
val df = spark
.createDF(
List(
("Batman ", "batman"),
(" CATWOMAN", "catwoman"),
(" pikachu ", "pikachu")
),
List(
("word", StringType, true),
("expected", StringType, true)
)
)
.withColumn(
"cleaned_word",
col("word").chain(lower).chain(trim)
)
assertColumnEquality(
df,
"expected",
"cleaned_word"
)
}
"chains SQL functions with updated method signatures" - {
val df = spark
.createDF(
List(
("hi ", "hixxx"),
(" ok", "okxxx")
),
List(
("word", StringType, true),
("expected", StringType, true)
)
)
.withColumn(
"diff_word",
col("word")
.chain(trim)
.chain(
rpad(
_,
5,
"x"
)
)
)
assertColumnEquality(
df,
"expected",
"diff_word"
)
}
}
// chainUDF: same idea, but for functions registered as named UDFs.
'chainUDF - {
"allows user defined functions to be chained" - {
def appendZ(s: String): String = {
s"${s}Z"
}
spark.udf.register(
"appendZUdf",
appendZ _
)
def prependA(s: String): String = {
s"A${s}"
}
spark.udf.register(
"prependAUdf",
prependA _
)
val df = spark
.createDF(
List(
("dance", "AdanceZ"),
("sing", "AsingZ")
),
List(
("word", StringType, true),
("expected", StringType, true)
)
)
.withColumn(
"fun",
col("word").chainUDF("appendZUdf").chainUDF("prependAUdf")
)
assertColumnEquality(
df,
"expected",
"fun"
)
}
"also works with udfs that take arguments" - {
def appendZ(s: String): String = {
s"${s}Z"
}
spark.udf.register(
"appendZUdf",
appendZ _
)
def appendWord(s: String, word: String): String = {
s"${s}${word}"
}
spark.udf.register(
"appendWordUdf",
appendWord _
)
val df = spark
.createDF(
List(
("dance", "danceZcool"),
("sing", "singZcool")
),
List(
("word", StringType, true),
("expected", StringType, true)
)
)
.withColumn(
"fun",
col("word")
.chainUDF("appendZUdf")
.chainUDF(
"appendWordUdf",
lit("cool")
)
)
assertColumnEquality(
df,
"expected",
"fun"
)
}
}
// chain and chainUDF may be mixed in a single fluent expression.
'chainANDchainUDF - {
"works with both chain and chainUDF" - {
def appendZ(s: String): String = {
s"${s}Z"
}
spark.udf.register(
"appendZUdf",
appendZ _
)
val df = spark
.createDF(
List(
("Batman ", "batmanZ"),
(" CATWOMAN", "catwomanZ"),
(" pikachu ", "pikachuZ")
),
List(
("word", StringType, true),
("expected", StringType, true)
)
)
.withColumn(
"cleaned_word",
col("word").chain(lower).chain(trim).chainUDF("appendZUdf")
)
assertColumnEquality(
df,
"expected",
"cleaned_word"
)
}
}
// nullBetween: like between, but a null bound is treated as an open end
// instead of propagating null.
'nullBetween - {
"does a between operation factoring in null values" - {
val df = spark
.createDF(
List(
(17, null, 94, true),
(17, null, 10, false),
(null, 10, 5, true),
(null, 10, 88, false),
(10, 15, 11, true),
(null, null, 11, false),
(3, 5, null, false),
(null, null, null, false)
),
List(
("lower_age", IntegerType, true),
("upper_age", IntegerType, true),
("age", IntegerType, true),
("expected", BooleanType, true)
)
)
.withColumn(
"is_between",
col("age").nullBetween(
col("lower_age"),
col("upper_age")
)
)
assertColumnEquality(
df,
"expected",
"is_between"
)
}
"works with joins" - {
val playersDF = spark.createDF(
List(
("lil", "tball", 5),
("strawberry", "mets", 42),
("maddux", "braves", 45),
("frank", "noteam", null)
),
List(
("last_name", StringType, true),
("team", StringType, true),
("age", IntegerType, true)
)
)
val rangesDF = spark.createDF(
List(
(null, 20, "too_young"),
(21, 40, "prime"),
(41, null, "retired")
),
List(
("lower_age", IntegerType, true),
("upper_age", IntegerType, true),
("playing_status", StringType, true)
)
)
val actualDF = playersDF
.join(
broadcast(rangesDF),
playersDF("age").nullBetween(
rangesDF("lower_age"),
rangesDF("upper_age")
),
"leftouter"
)
.drop(
"lower_age",
"upper_age"
)
val expectedDF = spark.createDF(
List(
("lil", "tball", 5, "too_young"),
("strawberry", "mets", 42, "retired"),
("maddux", "braves", 45, "retired"),
("frank", "noteam", null, null)
),
List(
("last_name", StringType, true),
("team", StringType, true),
("age", IntegerType, true),
("playing_status", StringType, true)
)
)
assertSmallDataFrameEquality(
actualDF,
expectedDF
)
}
// Contrast with the built-in between, which returns null for null bounds.
"operates differently than the built-in between method" - {
val sourceDF =
spark.createDF(
List(
(10, 15, 11),
(17, null, 94),
(null, 10, 5)
),
List(
("lower_bound", IntegerType, true),
("upper_bound", IntegerType, true),
("age", IntegerType, true)
)
)
val actualDF = sourceDF
.withColumn(
"between",
col("age").between(
col("lower_bound"),
col("upper_bound")
)
)
.withColumn(
"nullBetween",
col("age").nullBetween(
col("lower_bound"),
col("upper_bound")
)
)
val expectedDF = spark.createDF(
List(
(10, 15, 11, true, true),
(17, null, 94, null, true),
(null, 10, 5, null, true)
),
List(
("lower_bound", IntegerType, true),
("upper_bound", IntegerType, true),
("age", IntegerType, true),
("between", BooleanType, true),
("nullBetween", BooleanType, true)
)
)
assertSmallDataFrameEquality(
actualDF,
expectedDF
)
}
}
// isTrue: true only for literal true; null maps to false.
'isTrue - {
"returns true when the column is true" - {
val df = spark
.createDF(
List(
(true, true),
(false, false),
(null, false)
),
List(
("is_fun", BooleanType, true),
("expected_is_fun_true", BooleanType, true)
)
)
.withColumn(
"is_fun_true",
col("is_fun").isTrue
)
assertColumnEquality(
df,
"expected_is_fun_true",
"is_fun_true"
)
}
}
// isFalse: true only for literal false; null maps to false.
'isFalse - {
"returns true when the column is false" - {
val df = spark
.createDF(
List(
(true, false),
(false, true),
(null, false)
),
List(
("is_fun", BooleanType, true),
("expected_is_fun_false", BooleanType, true)
)
)
.withColumn(
"is_fun_false",
col("is_fun").isFalse
)
assertColumnEquality(
df,
"expected_is_fun_false",
"is_fun_false"
)
}
}
// isTruthy: non-null, non-false values count as truthy.
'isTruthy - {
"returns true when the column is truthy and false otherwise" - {
val df = spark
.createDF(
List(
(true, true),
(false, false),
(null, false)
),
List(
("is_fun", BooleanType, true),
("expected", BooleanType, false)
)
)
.withColumn(
"is_fun_truthy",
col("is_fun").isTruthy
)
assertColumnEquality(
df,
"expected",
"is_fun_truthy"
)
}
"computes a truthy value for string columns" - {
val df = spark
.createDF(
List(
("dog", true),
("cat", true),
(null, false)
),
List(
("animal_type", StringType, true),
("expected", BooleanType, false)
)
)
.withColumn(
"animal_type_truthy",
col("animal_type").isTruthy
)
assertColumnEquality(
df,
"expected",
"animal_type_truthy"
)
}
}
// isFalsy: null and false count as falsy.
'isFalsy - {
"returns true when the column is falsy and false otherwise" - {
val df = spark
.createDF(
List(
(true, false),
(false, true),
(null, true)
),
List(
("is_fun", BooleanType, true),
("expected", BooleanType, false)
)
)
.withColumn(
"is_fun_falsy",
col("is_fun").isFalsy
)
assertColumnEquality(
df,
"expected",
"is_fun_falsy"
)
}
"computes a falsy value for string columns" - {
val df = spark
.createDF(
List(
("dog", false),
("cat", false),
(null, true)
),
List(
("animal_type", StringType, true),
("expected", BooleanType, false)
)
)
.withColumn(
"animal_type_falsy",
col("animal_type").isFalsy
)
assertColumnEquality(
df,
"expected",
"animal_type_falsy"
)
}
}
// isNullOrBlank: null, empty and whitespace-only strings all count as blank.
'isNullOrBlank - {
"returns true if a column is null or blank and false otherwise" - {
val df = spark
.createDF(
List(
("dog", false),
(null, true),
("", true),
(" ", true)
),
List(
("animal_type", StringType, true),
("expected", BooleanType, true)
)
)
.withColumn(
"animal_type_is_null_or_blank",
col("animal_type").isNullOrBlank
)
assertColumnEquality(
df,
"expected",
"animal_type_is_null_or_blank"
)
}
}
// isNotNullOrBlank: logical negation of isNullOrBlank.
'isNotNullOrBlank - {
"returns true if a column is not null or blank and false otherwise" - {
val df = spark
.createDF(
List(
("notnullhere", true),
(null, false),
("", false),
(" ", false)
),
List(
("testColumn", StringType, true),
("expected", BooleanType, true)
)
)
.withColumn(
"testColumn_is_not_null_or_blank",
col("testColumn").isNotNullOrBlank
)
assertColumnEquality(
df,
"expected",
"testColumn_is_not_null_or_blank"
)
}
}
// isNotIn: membership negation; null input yields null, not true.
'isNotIn - {
"returns true if the column element is not in the list" - {
val footwearRelated = Seq(
"laces",
"shoes"
)
val df = spark
.createDF(
List(
("dog", true),
("shoes", false),
("laces", false),
(null, null)
),
List(
("stuff", StringType, true),
("expected", BooleanType, true)
)
)
.withColumn(
"is_not_footwear_related",
col("stuff").isNotIn(footwearRelated: _*)
)
assertColumnEquality(
df,
"expected",
"is_not_footwear_related"
)
}
}
// evalString: evaluates a literal-only Column expression to a Scala String.
'evalString - {
"lowercases a string" - {
assert(lower(lit("HI THERE")).evalString() == "hi there")
}
}
}
}
| MrPowers/spark-daria | src/test/scala/com/github/mrpowers/spark/daria/sql/ColumnExtTest.scala | Scala | mit | 14,335 |
package vegas.render
import vegas.macros.ShowRenderMacros
import scala.language.experimental.macros
import vegas.DSL.SpecBuilder
/** Strategy for displaying a built Vegas spec; simply a SpecBuilder => Unit. */
trait ShowRender extends (SpecBuilder => Unit)
object ShowRender {
/** Wraps a plain function as a ShowRender instance. */
def using(f: SpecBuilder => Unit): ShowRender = new ShowRender {
def apply(sb: SpecBuilder) = f(sb)
}
// The default renderer is materialized at compile time by a macro.
implicit def default: ShowRender = macro ShowRenderMacros.materializeDefault
}
/** Renders the spec as a framed static-HTML string and hands it to `output`. */
case class ShowHTML(output: String => Unit) extends ShowRender {
def apply(sb: SpecBuilder): Unit = output(StaticHTMLRenderer(sb.toJson).frameHTML())
}
| vegas-viz/Vegas | core/src/main/scala/vegas/render/ShowRender.scala | Scala | mit | 547 |
package zillion
import org.scalatest._
import prop._
import org.scalacheck.{Arbitrary, Gen}
import Arbitrary.arbitrary
import spire.math.SafeLong
/** A bounded exponent used by the tests to build large powers of ten (10^value). */
case class Exponent(value: Int) {
require(0 <= value && value <= 3003)
}
object Exponent {
// ScalaCheck generator for exponents; the upper bound is reduced from 3003
// because of the scala.js issue noted below.
implicit val arbitraryExponent: Arbitrary[Exponent] = {
//val upper = 3003 // scala.js currently blows up with this
val upper = 300
Arbitrary(Gen.choose(0, upper).map(Exponent(_)))
}
}
// Runs the shared properties against the cardinal ("one, two, ...") spelling.
class CardinalTest extends GenericTest {
def render(n: SafeLong): String = cardinal(n)
}
// Runs the shared properties against the ordinal ("first, second, ...") spelling.
class OrdinalTest extends GenericTest {
def render(n: SafeLong): String = ordinal(n)
}
/**
 * Shared property-based checks for the number renderers. Concrete suites
 * supply `render` (cardinal vs ordinal above); every property here must hold
 * for both spellings.
 */
trait GenericTest extends PropSpec with Matchers with PropertyChecks {

  // ScalaCheck generator for arbitrary-precision SafeLong values.
  implicit lazy val arbitrarySafeLong: Arbitrary[SafeLong] =
    Arbitrary(arbitrary[BigInt].map(SafeLong(_)))

  /** Renders a number as words; provided by the concrete suite. */
  def render(n: SafeLong): String

  /**
   * Suffix consistency: when n.abs % k is non-zero, the rendering of `n`
   * must end with the rendering of that remainder.
   * (Fixed deprecated procedure syntax by adding an explicit `: Unit =`.)
   */
  def checkModK(n: SafeLong, k: Int): Unit = {
    val m = n.abs % k
    if (m != 0) render(n).endsWith(render(m)) shouldBe true
  }

  // Smoke test: rendering numbers near huge powers of ten must not throw.
  // (Removed an unused `b: Boolean` forAll parameter.)
  property("big numbers don't crash") {
    forAll { (k: Exponent, offset: SafeLong) =>
      val big = SafeLong(10).pow(k.value)
      render(big - offset)
      render(offset - big)
    }
  }

  // render(-n) must be exactly "negative " prepended to render(n), and the
  // only fixed point is zero.
  property("negative numbers are consistent") {
    forAll { (n: SafeLong) =>
      val (x, y) = if (n < 0) (-n, n) else (n, -n)
      val sx = render(x)
      val sy = render(y)
      if (sx == sy) n shouldBe 0
      else {
        sy.startsWith("negative ") shouldBe true
        sy.substring(9) shouldBe sx
      }
    }
  }

  // The teens (10-20) have irregular names, so they are excluded here.
  property("last digit") {
    forAll { (n: SafeLong) =>
      val m = n.abs % 100
      if (m < 10 || m > 20) checkModK(n, 10)
    }
  }

  property("last two digits") {
    forAll { (n: SafeLong) => checkModK(n, 100) }
  }

  property("last three digits") {
    forAll { (n: SafeLong) => checkModK(n, 1000) }
  }

  // The Int overload must agree with the SafeLong implementation.
  property("Int and SafeLong match") {
    forAll { (n: Int) => render(n) shouldBe render(SafeLong(n)) }
  }
}
| non/zillion | src/test/scala/zillion/test.scala | Scala | mit | 1,934 |
package algebra
package lattice
import scala.{specialized => sp}
/**
* A lattice is a set `A` together with two operations (meet and
* join). Both operations individually constitute semilattices (join-
* and meet-semilattices respectively): each operation is commutative,
* associative, and idempotent.
*
* Join can be thought of as finding a least upper bound (supremum),
* and meet can be thought of as finding a greatest lower bound
* (infimum).
*
* The join and meet operations are also linked by absorption laws:
*
* meet(a, join(a, b)) = join(a, meet(a, b)) = a
*/
trait Lattice[@sp(Int, Long, Float, Double) A] extends Any with JoinSemilattice[A] with MeetSemilattice[A] { self =>

/**
 * This is the lattice with meet and join swapped
 */
def dual: Lattice[A] = new Lattice[A] {
def meet(a: A, b: A) = self.join(a, b)
def join(a: A, b: A) = self.meet(a, b)
// Dualizing twice returns the original instance rather than a new wrapper.
override def dual = self
}
}
// Companion: exposes the join/meet helper functions for Lattice instances.
object Lattice extends JoinSemilatticeFunctions[Lattice] with MeetSemilatticeFunctions[Lattice] {

/**
 * Access an implicit `Lattice[A]`.
 */
@inline final def apply[@sp(Int, Long, Float, Double) A](implicit ev: Lattice[A]): Lattice[A] = ev
}
| sritchie/algebra | core/src/main/scala/algebra/lattice/Lattice.scala | Scala | mit | 1,184 |
/**
 * Exercises the Numbers abstraction with two implementations (CaseNums and
 * IntNums), checking extractor-based pattern matching, type tests and
 * manually expanded unapply calls.
 */
object Test {
def main(args: Array[String]): Unit = {
println("CaseNums")
test(CaseNums)
println()
println("IntNums")
test(IntNums)
}
// Runs the same battery of matches/divisions against any Numbers implementation.
def test(numbers: Numbers) = {
import numbers.*
// Build the small naturals 0..3 via the abstract constructors.
val zero: Nat = Zero()
val one: Nat = Succ(zero)
val two: Nat = Succ(one)
val three: Nat = Succ(two)
// Extractor patterns: Zero must match zero and not one, and vice versa.
zero match {
case Succ(p) => println("error")
case Zero(_) => println("ok") // extra argument removed by language extension
}
one match {
case Zero(_) => println("error") // extra argument removed by language extension
case Succ(p) => println("ok")
}
// Type-test patterns on the abstract member types (unchecked at runtime).
zero match {
case s: Succ => println("ok - unchecked error")
case z: Zero => println("ok - unchecked no error")
}
// Safe division: only divide when the divisor matches Succ (non-zero).
def divOpt(a: Nat, b: Nat): Option[(Nat, Nat)] = b match {
case s @ Succ(p) =>
Some(safeDiv(a, s.asInstanceOf[Succ])) // cast should not be needed with extension
case _ => None
}
println(divOpt(one, zero))
println(divOpt(three, two))
// The same logic with the pattern match manually expanded into unapply calls.
def divOptExpanded(a: Nat, b: Nat): Option[(Nat, Nat)] = {
val x0 = Succ.unapply(b)
if (!x0.isEmpty) {
val s = x0.refined
val p = x0.get
Some(safeDiv(a, s))
} else {
None
}
}
println(divOptExpanded(one, zero))
println(divOptExpanded(three, two))
}
}
/**
 * Abstract interface for Peano-style natural numbers. Implementations
 * supply the `Nat`/`Zero`/`Succ` types together with name-based
 * extractors, so callers can pattern match without knowing the
 * concrete representation.
 */
trait Numbers {
  type Nat
  type Zero <: Nat
  type Succ <: Nat
  val Zero: ZeroExtractor
  trait ZeroExtractor {
    def apply(): Zero
    def unapply(nat: Nat): ZeroOpt // check that ZeroOpt#Refined <: Nat
  }
  trait ZeroOpt {
    type Refined = Zero // optionally added by language extension
    def get: Null // Language extension should remove this
    def isEmpty: Boolean
  }
  val Succ: SuccExtractor
  trait SuccExtractor {
    def apply(nat: Nat): Succ
    def unapply(nat: Nat): SuccOpt { type Refined <: nat.type } // type could be inserted by the compiler if Refined is declared
  }
  trait SuccOpt {
    type Refined <: Singleton
    def refined: Refined & Succ // optionally added by language extension
    def get: Nat
    def isEmpty: Boolean
  }
  implicit def SuccDeco(succ: Succ): SuccAPI
  trait SuccAPI {
    def pred: Nat
  }
  // Division with a statically non-zero divisor; returns (quotient, remainder).
  def safeDiv(a: Nat, b: Succ): (Nat, Nat)
}
/**
 * `Numbers` implementation backed by an algebraic data type. The data
 * constructors (`ZeroObj`, `SuccClass`) also implement the extractor
 * result types (`ZeroOpt`, `SuccOpt`), so a successful `unapply` can
 * return the matched value itself.
 */
object CaseNums extends Numbers {
  trait NatClass
  // ZeroObj doubles as the successful result of Zero.unapply, hence isEmpty == false.
  case object ZeroObj extends NatClass with ZeroOpt {
    def get: Null = null // Should be removed by language extension
    def isEmpty: Boolean = false
    type Refined = this.type
    def refined = this
  }
  case class SuccClass(pred: NatClass) extends NatClass with SuccOpt {
    def get: NatClass = pred
    def isEmpty: Boolean = false
    type Refined = this.type
    def refined = this
  }
  // Failed-match results: isEmpty == true and the accessors must not be called.
  class EmptyZeroOpt extends ZeroOpt {
    def isEmpty: Boolean = true
    def get: Null = throw new Exception("empty")
  }
  class EmptySuccOpt extends SuccOpt {
    def isEmpty: Boolean = true
    def get: NatClass = throw new Exception("empty")
    type Refined = Nothing
    def refined = throw new Exception("empty")
  }
  type Nat = NatClass
  type Zero = ZeroObj.type
  type Succ = SuccClass
  object Zero extends ZeroExtractor {
    def apply(): Zero = ZeroObj
    def unapply(nat: Nat): ZeroOpt =
      if (nat == ZeroObj) ZeroObj
      else new EmptyZeroOpt
  }
  object Succ extends SuccExtractor {
    def apply(nat: Nat): Succ = SuccClass(nat)
    def unapply(nat: Nat) = nat match {
      case succ: SuccClass => succ.asInstanceOf[nat.type & SuccClass] // cast needed? looks like the type of succ was widden
      case _ => new EmptySuccOpt
    }
  }
  def SuccDeco(succ: Succ): SuccAPI = new SuccAPI {
    def pred: Nat = succ.pred
  }
  def safeDiv(a: Nat, b: Succ): (Nat, Nat) = {
    // Division by repeated subtraction; returns (quotient, remainder).
    // The recursion must stop only when the remainder is *strictly* smaller
    // than the divisor: the previous `lessOrEq(rem, b)` condition stopped
    // one step early when rem == b, reporting a remainder equal to the
    // divisor (e.g. 4 / 2 => (1, 2) instead of (2, 0)).
    def sdiv(div: Nat, rem: Nat): (Nat, Nat) =
      if (less(rem, b)) (div, rem)
      else sdiv(Succ(div), minus(rem, b))
    sdiv(Zero(), a)
  }
  // Strict comparison: true iff a < b.
  private def less(a: Nat, b: Nat): Boolean = (a, b) match {
    case (Succ(a1), Succ(b1)) => less(a1, b1)
    case (Zero(_), Succ(_)) => true // extra argument removed by language extension
    case _ => false
  }
  // assumes a >= b
  private def minus(a: Nat, b: Nat): Nat = (a, b) match {
    case (Succ(a1), Succ(b1)) => minus(a1, b1)
    case _ => a
  }
}
/**
 * `Numbers` implementation where naturals are plain Ints: Zero is 0 and
 * Succ(n) represents n + 1. Extractor results are allocated per
 * `unapply` call and close over the matched Int.
 */
object IntNums extends Numbers {
  type Nat = Int
  type Zero = Int // 0
  type Succ = Int // n > 0
  object Zero extends ZeroExtractor {
    def apply(): Int = 0
    def unapply(nat: Nat): ZeroOpt = new ZeroOpt {
      def isEmpty: Boolean = nat != 0
      def get: Null = null // language extension will remove this
    }
  }
  object Succ extends SuccExtractor {
    def apply(nat: Nat): Int = nat + 1
    def unapply(nat: Nat) = new SuccOpt {
      def isEmpty: Boolean = nat <= 0
      def get: Int = nat - 1
      type Refined = nat.type
      def refined = nat
    }
  }
  def SuccDeco(succ: Succ): SuccAPI = new SuccAPI {
    def pred: Int = succ - 1
  }
  // Int division is total here because Succ guarantees b > 0.
  def safeDiv(a: Nat, b: Succ): (Nat, Nat) = (a / b, a % b)
}
| dotty-staging/dotty | tests/run/fully-abstract-nat-5.scala | Scala | apache-2.0 | 4,975 |
package logcluster.alg
import scala.math._
/**
 * Normalized Levenshtein similarity between two token sequences with an
 * early-exit threshold: the computation aborts (returning 0) as soon as
 * the edit distance provably exceeds the bound implied by minSimilarity.
 */
object Levenshtein {
  /** Flat, row-major matrix of Ints used as the dynamic-programming table. */
  final class RealMatrix(val height: Int, val width: Int) {
    val v = Array.ofDim[Int](height * width)
    def apply(a: Int, b: Int) = v(a * width + b)
    def update(a: Int, b: Int, n: Int) { v(a * width + b) = n }
  }

  /**
   * Converts a normalized similarity in [0, 1] into the largest edit
   * distance that still satisfies it for sequences of length maxLen.
   */
  def unormalizeMinSimilarity(similarity: Double, maxLen: Int) = maxLen - maxLen * similarity

  /**
   * Fills DP cell (i, j) from its three neighbours and returns its value.
   * Cells outside the matrix yield Int.MaxValue so they never win the
   * running minimum taken by the caller.
   */
  def calc(i: Int, j: Int, tokens1: IndexedSeq[Any], tokens2: IndexedSeq[Any], d: RealMatrix) = {
    if (i <= tokens1.length && j <= tokens2.length) {
      val cost = if (tokens1(i-1) == tokens2(j-1)) 0 else 1
      val v1 = d(i-1,j ) + 1
      val v2 = d(i ,j-1) + 1
      val v3 = d(i-1,j-1) + cost
      d(i,j) = min(min(v1, v2), v3)
      d(i,j)
    } else {
      Int.MaxValue
    }
  }

  /**
   * Returns the normalized similarity of `a` and `b` in [0, 1], or 0 as
   * soon as the running edit distance exceeds the threshold implied by
   * `minSimilarity`. The matrix is filled in L-shaped bands (row i plus
   * column i) so each band's minimum can be checked against the stop
   * threshold before doing more work.
   *
   * Fixes over the previous version: the unused `qty` counter was removed,
   * and two empty inputs now yield similarity 1.0 instead of NaN (0.0/0).
   */
  def apply(a: IndexedSeq[Any], b: IndexedSeq[Any], minSimilarity: Double): Double = {
    val len1 = a.length
    val len2 = b.length
    val maxLength = max(len1, len2)
    if (maxLength == 0) return 1.0 // both sequences empty: identical
    val stopDistance = unormalizeMinSimilarity(minSimilarity, maxLength)
    val d = new RealMatrix(len1 + 1, len2 + 1)
    for (i <- 0 to len1) d(i,0) = i
    for (j <- 0 to len2) d(0,j) = j
    for (i <- 1 to maxLength) {
      var minDist = Int.MaxValue
      for (j <- 1 until i) {
        minDist = min(minDist, calc(i,j,a,b,d))
        minDist = min(minDist, calc(j,i,a,b,d))
      }
      // the diagonal cell depends on (i, i-1) and (i-1, i), so it must be
      // computed after the rest of the band
      minDist = min(minDist, calc(i,i,a,b,d))
      // every later cell is derived from this band, so the distance can
      // only grow: bail out once the threshold is exceeded
      if (minDist > stopDistance)
        return 0
    }
    val nnld = d(len1,len2)
    (maxLength - nnld).toDouble / maxLength
  }
}
package xitrum.handler.inbound
import java.io.File
import io.netty.channel.{ChannelHandler, SimpleChannelInboundHandler, ChannelHandlerContext}
import io.netty.handler.codec.http.{HttpMethod, HttpResponseStatus}
import ChannelHandler.Sharable
import HttpMethod._
import HttpResponseStatus._
import xitrum.Config
import xitrum.handler.HandlerEnv
import xitrum.handler.outbound.XSendFile
import xitrum.etag.NotModified
import xitrum.util.PathSanitizer
/**
 * Serves static files in "public" directory for GET/HEAD/OPTIONS requests.
 * See DefaultHttpChannelInitializer, this handler is put after XSendFile.
 * Anything it cannot serve is passed on to the next inbound handler.
 */
@Sharable
class PublicFileServer extends SimpleChannelInboundHandler[HandlerEnv] {
  override def channelRead0(ctx: ChannelHandlerContext, env: HandlerEnv): Unit = {
    val method = env.request.method
    if (method == GET || method == HEAD || method == OPTIONS) {
      val pathInfo = env.pathInfo.decoded
      if (Config.xitrum.staticFile.pathRegex.findFirstIn(pathInfo).isDefined)
        serveStatic(ctx, env, method, pathInfo)
      else
        ctx.fireChannelRead(env)
    } else {
      ctx.fireChannelRead(env)
    }
  }

  /** Resolves pathInfo to a file under "public" and writes the response. */
  private def serveStatic(
      ctx: ChannelHandlerContext, env: HandlerEnv, method: HttpMethod, pathInfo: String
  ): Unit = {
    val response = env.response
    sanitizedAbsStaticPath(pathInfo) match {
      case None =>
        // Path traversal or otherwise invalid path => 404
        XSendFile.set404Page(response, fromController = false)
        ctx.channel.writeAndFlush(env)

      case Some(abs) =>
        val file = new File(abs)
        if (file.isFile && file.exists) {
          response.setStatus(OK)
          if (method != OPTIONS) {
            if (!Config.xitrum.staticFile.revalidate)
              NotModified.setClientCacheAggressively(response)
            XSendFile.setHeader(response, abs, fromAction = false)
          }
          ctx.channel.writeAndFlush(env)
        } else {
          // Not a regular file; let later handlers try
          ctx.fireChannelRead(env)
        }
    }
  }

  /**
   * Sanitizes and returns absolute path.
   *
   * @param pathInfo Starts with "/"
   */
  private def sanitizedAbsStaticPath(pathInfo: String): Option[String] =
    PathSanitizer.sanitize(pathInfo).map(path => xitrum.root + "/public" + path)
}
| xitrum-framework/xitrum | src/main/scala/xitrum/handler/inbound/PublicFileServer.scala | Scala | mit | 2,080 |
import org.scalatest.{FunSuite, Matchers}
/**
* Created by inieto on 27/04/15.
*/
// Placeholder suite: the single unnamed test has an empty body, so it only
// verifies that the suite compiles and runs without errors.
class _33_EmptyValues extends FunSuite with Matchers {
  test("") {
  }
}
| inieto/scala-47deg | ScalaExercises/src/test/scala-2.11/_33_EmptyValues.scala | Scala | mit | 161 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2016 Algolia
* http://www.algolia.com/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package algolia
import java.util.concurrent.ExecutionException
import algolia.http._
import io.netty.channel.EventLoop
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.NioDatagramChannel
import io.netty.resolver.dns.{DnsNameResolver, DnsNameResolverBuilder}
import org.asynchttpclient._
import org.json4s._
import org.json4s.native.JsonMethods._
import org.slf4j.{Logger, LoggerFactory}
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success}
/**
 * Thin async HTTP layer used by the Algolia client: issues requests via
 * async-http-client with a dedicated non-blocking DNS resolver and
 * decodes JSON responses with json4s.
 */
case class AlgoliaHttpClient(
    configuration: AlgoliaClientConfiguration =
      AlgoliaClientConfiguration.default
) {
  // Timeouts and proxy behavior come straight from the client configuration.
  val asyncClientConfig: DefaultAsyncHttpClientConfig =
    new DefaultAsyncHttpClientConfig.Builder()
      .setConnectTimeout(configuration.httpConnectTimeoutMs)
      .setReadTimeout(configuration.httpReadTimeoutMs)
      .setRequestTimeout(configuration.httpRequestTimeoutMs)
      .setUseProxyProperties(configuration.useSystemProxy)
      .build
  val dnsEventLoop: EventLoop = new NioEventLoopGroup(1).next()
  val dnsNameResolver: DnsNameResolver =
    new DnsNameResolverBuilder(dnsEventLoop) //We only need 1 thread for DNS resolution
      .channelType(classOf[NioDatagramChannel])
      .queryTimeoutMillis(configuration.dnsTimeoutMs.toLong)
      .maxQueriesPerResolve(2)
      .build
  val logger: Logger = LoggerFactory.getLogger("algoliasearch")
  val _httpClient = new DefaultAsyncHttpClient(asyncClientConfig)
  implicit val formats: Formats = AlgoliaDsl.formats
  // Releases the DNS resolver, its event loop, and the HTTP client.
  def close(): Unit = {
    dnsNameResolver.close()
    dnsEventLoop.shutdownGracefully()
    _httpClient.close()
  }
  /**
   * Sends `payload` to `host` and decodes the JSON response body as `T`.
   * The returned future fails with `4XXAPIException` or
   * `UnexpectedResponseException` for non-2xx responses.
   */
  def request[T: Manifest](
      host: String,
      headers: Map[String, String],
      payload: HttpPayload
  )(implicit executor: ExecutionContext): Future[T] = {
    val request = payload(host, headers, dnsNameResolver)
    logger.debug(s"Trying $host")
    logger.debug(s"Query ${payload.toString(host)}")
    makeRequest(host, request, responseHandler)
  }
  // Maps the HTTP status class: 2xx => decode body, 4xx => API error with
  // the server-provided "message" field, anything else => unexpected.
  def responseHandler[T: Manifest]: AsyncCompletionHandler[T] =
    new AsyncCompletionHandler[T] {
      override def onCompleted(response: Response): T = {
        logger.debug("Response: {}", response.getResponseBody)
        response.getStatusCode / 100 match {
          case 2 =>
            val a = fromJson(response).extract[T]
            a
          case 4 =>
            throw `4XXAPIException`(
              response.getStatusCode,
              (fromJson(response) \ "message").extract[String]
            )
          case _ =>
            logger.debug(s"Got HTTP code ${response.getStatusCode}, no retry")
            throw UnexpectedResponseException(response.getStatusCode)
        }
      }
    }
  def fromJson(r: Response): JValue =
    parse(StringInput(r.getResponseBody), useBigDecimalForDouble = true)
  /**
   * Executes the request and adapts async-http-client's Java future into a
   * Scala Future. The listener runs only after completion, so the
   * `javaFuture.get()` call inside it does not block.
   */
  def makeRequest[T](host: String, request: Request, handler: AsyncHandler[T])(
      implicit executor: ExecutionContext
  ): Future[T] = {
    val javaFuture = _httpClient.executeRequest(request, handler)
    val promise = Promise[T]()
    val runnable = new java.lang.Runnable {
      def run(): Unit = {
        try {
          promise.complete(Success(javaFuture.get()))
        } catch {
          case e: ExecutionException =>
            // unwrap the original failure thrown inside the handler
            logger.debug(s"Failing to query $host", e)
            promise.complete(Failure(e.getCause))
          case f: Throwable =>
            logger.debug(s"Failing to query $host", f)
            promise.complete(Failure(f))
        }
      }
    }
    // bridge the caller's ExecutionContext to a java.util.concurrent.Executor
    val exec = new java.util.concurrent.Executor {
      def execute(runnable: Runnable): Unit = {
        executor.execute(runnable)
      }
    }
    javaFuture.addListener(runnable, exec)
    promise.future
  }
}
// Raised for HTTP 4xx responses; carries the status code and the API's error message.
case class `4XXAPIException`(code: Int, message: String)
    extends Exception(
      "Failure \"%s\", response status: %d".format(message, code)
    )
// Raised for any response status outside the 2xx/4xx classes.
case class UnexpectedResponseException(code: Int)
    extends Exception("Unexpected response status: %d".format(code))
| algolia/algoliasearch-client-scala | src/main/scala/algolia/AlgoliaHttpClient.scala | Scala | mit | 5,191 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.stream.table.stringexpr
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.planner.expressions.utils.Func1
import org.apache.flink.table.planner.plan.utils.JavaUserDefinedAggFunctions.{WeightedAvg, WeightedAvgWithRetract}
import org.apache.flink.table.planner.utils.TableTestBase
import org.junit.Test
/**
 * Verifies that OVER windows defined through the Scala expression DSL and
 * through the Java string-expression API produce equal table plans, across
 * partitioned/unpartitioned, bounded/unbounded, row/range, and
 * rowtime/proctime variants.
 */
class OverWindowStringExpressionTest extends TableTestBase {

  // partitioned, unbounded preceding, ROW interval
  @Test
  def testPartitionedUnboundedOverRow(): Unit = {
    val util = streamTestUtil()
    val t = util.addDataStream[(Long, Int, String, Int, Long)](
      "T1", 'a, 'b, 'c, 'd, 'e, 'rowtime.rowtime)
    val weightAvgFun = new WeightedAvg
    util.addFunction("weightAvgFun", weightAvgFun)
    val resScala = t
      .window(Over partitionBy 'a orderBy 'rowtime preceding UNBOUNDED_ROW as 'w)
      .select('a, 'b.sum over 'w as 'cnt, call("weightAvgFun", 'a, 'b) over 'w as 'myCnt)
    val resJava = t
      .window(Over.partitionBy("a").orderBy("rowtime").preceding("unbounded_row").as("w"))
      .select("a, SUM(b) OVER w as cnt, weightAvgFun(a, b) over w as myCnt")
    verifyTableEquals(resScala, resJava)
  }

  // unpartitioned, unbounded preceding / current row, ROW interval
  @Test
  def testUnboundedOverRow(): Unit = {
    val util = streamTestUtil()
    val t = util.addDataStream[(Long, Int, String, Int, Long)](
      "T1", 'a, 'b, 'c, 'd, 'e, 'rowtime.rowtime)
    val weightAvgFun = new WeightedAvg
    util.addFunction("weightAvgFun", weightAvgFun)
    val resScala = t
      .window(Over orderBy 'rowtime preceding UNBOUNDED_ROW following CURRENT_ROW as 'w)
      .select('a, 'b.sum over 'w, call("weightAvgFun", 'a, 'b) over 'w as 'myCnt)
    val resJava = t
      .window(Over.orderBy("rowtime").preceding("unbounded_row").following("current_row").as("w"))
      .select("a, SUM(b) OVER w, weightAvgFun(a, b) over w as myCnt")
    verifyTableEquals(resScala, resJava)
  }

  // partitioned on two fields, bounded by a row count
  @Test
  def testPartitionedBoundedOverRow(): Unit = {
    val util = streamTestUtil()
    val t = util.addDataStream[(Long, Int, String, Int, Long)](
      "T1", 'a, 'b, 'c, 'd, 'e, 'rowtime.rowtime)
    val weightAvgFun = new WeightedAvg
    util.addFunction("weightAvgFun", weightAvgFun)
    val resScala = t
      .window(Over partitionBy('a, 'd) orderBy 'rowtime preceding 10.rows as 'w)
      .select('a, 'b.sum over 'w, call("weightAvgFun", 'a, 'b) over 'w as 'myCnt)
    val resJava = t
      .window(Over.partitionBy("a, d").orderBy("rowtime").preceding("10.rows").as("w"))
      .select("a, SUM(b) OVER w, weightAvgFun(a, b) over w as myCnt")
    verifyTableEquals(resScala, resJava)
  }

  // unpartitioned, bounded by a row count
  @Test
  def testBoundedOverRow(): Unit = {
    val util = streamTestUtil()
    val t = util.addDataStream[(Long, Int, String, Int, Long)](
      "T1", 'a, 'b, 'c, 'd, 'e, 'rowtime.rowtime)
    val weightAvgFun = new WeightedAvg
    util.addFunction("weightAvgFun", weightAvgFun)
    val resScala = t
      .window(Over orderBy 'rowtime preceding 10.rows following CURRENT_ROW as 'w)
      .select('a, 'b.sum over 'w, call("weightAvgFun", 'a, 'b) over 'w as 'myCnt)
    val resJava = t
      .window(Over.orderBy("rowtime").preceding("10.rows").following("current_row").as("w"))
      .select("a, SUM(b) OVER w, weightAvgFun(a, b) over w as myCnt")
    verifyTableEquals(resScala, resJava)
  }

  // partitioned, unbounded preceding, RANGE interval
  @Test
  def testPartitionedUnboundedOverRange(): Unit = {
    val util = streamTestUtil()
    val t = util.addDataStream[(Long, Int, String, Int, Long)](
      "T1",'a, 'b, 'c, 'd, 'e, 'rowtime.rowtime)
    val weightAvgFun = new WeightedAvg
    util.addFunction("weightAvgFun", weightAvgFun)
    val resScala = t
      .window(Over partitionBy 'a orderBy 'rowtime preceding UNBOUNDED_RANGE as 'w)
      .select('a, 'b.sum over 'w, call("weightAvgFun", 'a, 'b) over 'w as 'myCnt)
    val resJava = t
      .window(Over.partitionBy("a").orderBy("rowtime").preceding("unbounded_range").as("w"))
      .select("a, SUM(b) OVER w, weightAvgFun(a, b) over w as myCnt")
    verifyTableEquals(resScala, resJava)
  }

  // rowtime RANGE; also checks that Over.orderBy alone defaults to the same window
  @Test
  def testRowTimeUnboundedOverRange(): Unit = {
    val util = streamTestUtil()
    val t = util.addDataStream[(Long, Int, String, Int, Long)](
      "T1", 'a, 'b, 'c, 'd, 'e, 'rowtime.rowtime)
    val weightAvgFun = new WeightedAvg
    util.addFunction("weightAvgFun", weightAvgFun)
    val resScala = t
      .window(Over orderBy 'rowtime preceding UNBOUNDED_RANGE following CURRENT_RANGE as 'w)
      .select('a, 'b.sum over 'w, call("weightAvgFun", 'a, 'b) over 'w as 'myCnt)
    val resJava = t
      .window(
        Over.orderBy("rowtime").preceding("unbounded_range").following("current_range").as("w"))
      .select("a, SUM(b) OVER w, weightAvgFun(a, b) over w as myCnt")
    val resJava2 = t
      .window(
        Over.orderBy("rowtime").as("w"))
      .select("a, SUM(b) OVER w, weightAvgFun(a, b) over w as myCnt")
    verifyTableEquals(resScala, resJava)
    verifyTableEquals(resScala, resJava2)
  }

  // proctime RANGE; also checks the Over.orderBy default window
  @Test
  def testProcTimeUnboundedOverRange(): Unit = {
    val util = streamTestUtil()
    val t = util.addDataStream[(Long, Int, String, Int, Long)](
      "T1", 'a, 'b, 'c, 'd, 'e, 'proctime.proctime)
    val weightAvgFun = new WeightedAvg
    util.addFunction("weightAvgFun", weightAvgFun)
    val resScala = t
      .window(Over orderBy 'proctime preceding UNBOUNDED_RANGE following CURRENT_RANGE as 'w)
      .select('a, 'b.sum over 'w, call("weightAvgFun", 'a, 'b) over 'w as 'myCnt)
    val resJava = t
      .window(
        Over.orderBy("proctime").preceding("unbounded_range").following("current_range").as("w"))
      .select("a, SUM(b) OVER w, weightAvgFun(a, b) over w as myCnt")
    val resJava2 = t
      .window(
        Over.orderBy("proctime").as("w"))
      .select("a, SUM(b) OVER w, weightAvgFun(a, b) over w as myCnt")
    verifyTableEquals(resScala, resJava)
    verifyTableEquals(resScala, resJava2)
  }

  // partitioned, bounded by a time interval
  @Test
  def testPartitionedBoundedOverRange(): Unit = {
    val util = streamTestUtil()
    val t = util.addDataStream[(Long, Int, String, Int, Long)](
      "T1", 'a, 'b, 'c, 'd, 'e, 'rowtime.rowtime)
    val weightAvgFun = new WeightedAvg
    util.addFunction("weightAvgFun", weightAvgFun)
    val resScala = t
      .window(Over partitionBy('a, 'c) orderBy 'rowtime preceding 10.minutes as 'w)
      .select('a, 'b.sum over 'w, call("weightAvgFun", 'a, 'b) over 'w as 'myCnt)
    val resJava = t
      .window(Over.partitionBy("a, c").orderBy("rowtime").preceding("10.minutes").as("w"))
      .select("a, SUM(b) OVER w, weightAvgFun(a, b) over w as myCnt")
    verifyTableEquals(resScala, resJava)
  }

  // unpartitioned, bounded by a time interval
  @Test
  def testBoundedOverRange(): Unit = {
    val util = streamTestUtil()
    val t = util.addDataStream[(Long, Int, String, Int, Long)](
      "T1", 'a, 'b, 'c, 'd, 'e, 'rowtime.rowtime)
    val weightAvgFun = new WeightedAvg
    util.addFunction("weightAvgFun", weightAvgFun)
    val resScala = t
      .window(Over orderBy 'rowtime preceding 4.hours following CURRENT_RANGE as 'w)
      .select('a, 'b.sum over 'w, call("weightAvgFun", 'a, 'b) over 'w as 'myCnt)
    val resJava = t
      .window(Over.orderBy("rowtime").preceding("4.hours").following("current_range").as("w"))
      .select("a, SUM(b) OVER w, weightAvgFun(a, b) over w as myCnt")
    verifyTableEquals(resScala, resJava)
  }

  // scalar functions, arrays, and arithmetic applied on top of OVER aggregates
  @Test
  def testScalarFunctionsOnOverWindow(): Unit = {
    val util = streamTestUtil()
    val t = util.addDataStream[(Long, Int, String, Int, Long)](
      "T1", 'a, 'b, 'c, 'd, 'e, 'rowtime.rowtime)
    val weightedAvg = new WeightedAvgWithRetract
    val plusOne = Func1
    util.addFunction("plusOne", plusOne)
    util.addFunction("weightedAvg", weightedAvg)
    val resScala = t
      .window(Over partitionBy 'a orderBy 'rowtime preceding UNBOUNDED_ROW as 'w)
      .select(
        array('a.sum over 'w, 'a.count over 'w),
        call("plusOne", 'b.sum over 'w as 'wsum) as 'd,
        ('a.count over 'w).exp(),
        (call("weightedAvg", 'a, 'b) over 'w) + 1,
        "AVG:".toExpr + (call("weightedAvg", 'a, 'b) over 'w))
    val resJava = t
      .window(Over.partitionBy("a").orderBy("rowtime").preceding("unbounded_row").as("w"))
      .select(
        s"""
           |ARRAY(SUM(a) OVER w, COUNT(a) OVER w),
           |plusOne(SUM(b) OVER w AS wsum) AS d,
           |EXP(COUNT(a) OVER w),
           |(weightedAvg(a, b) OVER w) + 1,
           |'AVG:' + (weightedAvg(a, b) OVER w)
         """.stripMargin)
    verifyTableEquals(resScala, resJava)
  }
}
| GJL/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/stream/table/stringexpr/OverWindowStringExpressionTest.scala | Scala | apache-2.0 | 9,280 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate.sample.springmvc.controller
import org.springframework.stereotype.Controller
import org.springframework.web.bind.annotation.RequestMapping
import org.springframework.web.servlet.ModelAndView
import sample.SomeClass
/**
 * Spring MVC controller demonstrating the Scalate view resolver: actions
 * may return a template path (String), a "render:" prefixed template, or
 * a ModelAndView.
 */
@Controller
class IndexController {

  /** Renders /index.scaml through the configured layout. */
  @RequestMapping(Array("/layout"))
  def layout = "/index.scaml"

  /** Returns a ModelAndView exposing a SomeClass instance as "it". */
  @RequestMapping(Array("/view"))
  def view: ModelAndView = {
    val mav = new ModelAndView
    mav.addObject("it", new SomeClass)
    mav // the last expression is the result; explicit `return` is unidiomatic
  }

  /** Renders /index.scaml directly via the "render:" prefix. */
  @RequestMapping(Array("/", "/render"))
  def render = "render:/index.scaml"
}
/*
* Copyright 2012 Eike Kettner
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eknet.publet.ext
import javax.mail.internet.InternetAddress
import org.eknet.squaremail._
import org.eknet.publet.web.util.PubletWeb
/**
 * Mixin providing a small fluent DSL for composing and sending mails via
 * the [[MailSender]] registered with [[PubletWeb]].
 *
 * @author Eike Kettner eike.kettner@gmail.com
 * @since 14.04.12 17:49
 */
trait MailSupport {
  import org.eknet.publet.ext.MailSupport
  import org.eknet.squaremail.Messages._
  // Converts a string to an address via Messages.parseAddress.
  implicit def email(str: String) = parseAddress(str)
  implicit def str2CharArray(str: String): Array[Char] = str.toCharArray
  implicit def mail2EasyMail(m: MailMessage) = new EasyMail(m)
  def newMail(from: InternetAddress) = new DefaulMailMessage(from)
  // Fluent builder around MailMessage; each setter returns this for chaining.
  class EasyMail(mail: MailMessage) {
    def send() {
      // NOTE(review): `.get` assumes a MailSender is always registered with
      // PubletWeb — verify, otherwise send() fails at runtime
      PubletWeb.instance[MailSender].get.send(mail)
    }
    def to(em: InternetAddress) = {
      mail.addTo(em)
      this
    }
    def subject(s: String) = {
      mail.setSubject(s)
      this
    }
    def text(t: String) = {
      mail.setText(t)
      this
    }
    def html(t: String) = {
      mail.setHtmlText(t)
      this
    }
  }
}
| eikek/publet | ext/src/main/scala/org/eknet/publet/ext/MailSupport.scala | Scala | apache-2.0 | 1,557 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.kafka
import com.stratio.crossdata.streaming.kafka.KafkaInput
import com.stratio.crossdata.streaming.test.{BaseSparkStreamingXDTest, CommonValues}
import org.apache.spark.sql.crossdata.models.ConnectionModel
import org.apache.spark.streaming.{Milliseconds, StreamingContext}
import org.apache.spark.{SparkContext, SparkConf}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import scala.collection.mutable
import scala.concurrent.duration._
import scala.language.postfixOps
// Integration tests: a Kafka broker + ZooKeeper are started in-process
// (KafkaTestUtils), messages are produced, and a Spark Streaming job built
// from the crossdata kafka options model must consume them.
@RunWith(classOf[JUnitRunner])
class KafkaStreamIT extends BaseSparkStreamingXDTest with CommonValues {
  val sparkConf = new SparkConf().setMaster("local[2]").setAppName(this.getClass.getSimpleName)
  val sc = SparkContext.getOrCreate(sparkConf)
  // recreated per test; stopped (without killing the shared SparkContext) in after
  var ssc: StreamingContext = _
  val kafkaTestUtils: KafkaTestUtils = new KafkaTestUtils
  kafkaTestUtils.setup()
  after {
    if (ssc != null) {
      ssc.stop(stopSparkContext = false, stopGracefully = false)
      ssc.awaitTerminationOrTimeout(3000)
      ssc = null
    }
  }
  override def afterAll : Unit = {
    kafkaTestUtils.teardown()
  }
  test("Kafka input stream with kafkaOptionsModel from Map of values") {
    ssc = new StreamingContext(sc, Milliseconds(1000))
    val valuesToSent = Map("a" -> 5, "b" -> 3, "c" -> 10)
    kafkaTestUtils.createTopic(TopicTest)
    kafkaTestUtils.sendMessages(TopicTest, valuesToSent)
    // rebuild the connection model with the ports actually chosen by the
    // embedded ZK/broker (they are picked at runtime)
    val consumerHostZK = connectionHostModel.zkConnection.head.host
    val consumerPortZK = kafkaTestUtils.zkAddress.split(":").last.toInt
    val producerHostKafka = connectionHostModel.kafkaConnection.head.host
    val producerPortKafka = kafkaTestUtils.brokerAddress.split(":").last
    val kafkaStreamModelZk = kafkaStreamModel.copy(
      connection = connectionHostModel.copy(
        zkConnection = Seq(ConnectionModel(consumerHostZK, consumerPortZK)),
        kafkaConnection = Seq(ConnectionModel(producerHostKafka, producerPortKafka.toInt))))
    val input = new KafkaInput(kafkaStreamModelZk)
    val stream = input.createStream(ssc)
    // NOTE(review): mutable.SynchronizedMap is deprecated (removed in
    // Scala 2.13); this test is pinned to the project's Scala version
    val result = new mutable.HashMap[String, Long]() with mutable.SynchronizedMap[String, Long]
    // accumulate per-key counts across micro-batches
    stream.map(_._2).countByValue().foreachRDD { rdd =>
      val ret = rdd.collect()
      ret.toMap.foreach { case (key, value) =>
        val count = result.getOrElseUpdate(key, 0) + value
        result.put(key, count)
      }
    }
    ssc.start()
    eventually(timeout(10000 milliseconds), interval(1000 milliseconds)) {
      assert(valuesToSent === result)
    }
  }
  test("Kafka input stream with kafkaOptionsModel from list of Strings") {
    ssc = new StreamingContext(sc, Milliseconds(500))
    val valuesToSent = Array("a", "b", "c")
    kafkaTestUtils.createTopic(TopicTestProject)
    kafkaTestUtils.sendMessages(TopicTestProject, valuesToSent)
    val consumerHostZK = connectionHostModel.zkConnection.head.host
    val consumerPortZK = kafkaTestUtils.zkAddress.split(":").last.toInt
    val producerHostKafka = connectionHostModel.kafkaConnection.head.host
    val producerPortKafka = kafkaTestUtils.brokerAddress.split(":").last
    val kafkaStreamModelZk = kafkaStreamModelProject.copy(
      connection = connectionHostModel.copy(
        zkConnection = Seq(ConnectionModel(consumerHostZK, consumerPortZK)),
        kafkaConnection = Seq(ConnectionModel(producerHostKafka, producerPortKafka.toInt))))
    val input = new KafkaInput(kafkaStreamModelZk)
    val stream = input.createStream(ssc)
    val result = new mutable.MutableList[String]()
    stream.map(_._2).foreachRDD { rdd =>
      val ret = rdd.collect()
      ret.foreach(value => result.+=(value))
    }
    ssc.start()
    eventually(timeout(10000 milliseconds), interval(1000 milliseconds)) {
      assert(valuesToSent === result)
    }
  }
}
| darroyocazorla/crossdata | streaming/src/test/scala/org/apache/spark/streaming/kafka/KafkaStreamIT.scala | Scala | apache-2.0 | 4,429 |
/**
* (c) Copyright 2013 WibiData, Inc.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kiji.schema.shell.ddl
import scala.collection.JavaConversions._
import org.kiji.annotations.ApiAudience
import org.kiji.schema.avro.TableLayoutDesc
import org.kiji.schema.shell.DDLException
import org.kiji.schema.shell.Environment
/** Add a locality group to a table. */
@ApiAudience.Private
final class AlterTableCreateLocalityGroupCommand(
    val env: Environment,
    val tableName: String,
    val locGroup: LocalityGroupClause) extends TableDDLCommand with NewLocalityGroup {

  /**
   * Checks that the target table exists, that no locality group with this
   * name is already defined, and that none of the new column families
   * clash with existing families or with each other.
   */
  override def validateArguments(): Unit = {
    checkTableExists()
    val layout = getInitialLayout()
    checkLocalityGroupMissing(layout, locGroup.name)
    val familyNames = locGroup.getFamilyNames()
    familyNames.foreach(checkColFamilyMissing(layout, _))
    if (familyNames.distinct.size != familyNames.size) {
      throw new DDLException("A column family is defined more than once in this locality group")
    }
  }

  /** Builds the new locality group and appends it to the table layout. */
  override def updateLayout(layout: TableLayoutDesc.Builder): Unit = {
    val groupBuilder = newLocalityGroup()
    locGroup.updateLocalityGroup(groupBuilder, CellSchemaContext.create(env, layout))
    layout.getLocalityGroups().add(groupBuilder.build())
  }
}
| kijiproject/kiji-schema-shell | src/main/scala/org/kiji/schema/shell/ddl/AlterTableCreateLocalityGroupCommand.scala | Scala | apache-2.0 | 2,110 |
package com.shorrockin.cascal.utils
import _root_.scala.io.Source
import java.io.{FileWriter, InputStream, FileOutputStream, File}
import java.util.concurrent.TimeUnit
/**
 * Common utility functions that don't fit elsewhere.
 */
object Utils extends Logging {
  /**
   * Recursively deletes a directory and all of its contents.
   *
   * @return true only if the target and everything beneath it was deleted
   */
  def delete(directory:File):Boolean = {
    if (directory.exists && directory.isDirectory) {
      // delete children first; keep going on failure so as much as possible is removed
      val result = directory.listFiles.foldLeft(true) { (left, right) => delete(right) && left }
      val out = directory.delete
      log.debug("deletion attempt on directory: " + directory.getCanonicalPath + " - " + out)
      // previously `result` was computed but ignored; a directory whose
      // children could not all be removed must be reported as not deleted
      result && out
    } else {
      val out = directory.delete
      log.debug("deletion attempt on file: " + directory.getCanonicalPath + " - " + out)
      out
    }
  }

  /**
   * Copies the contents of the input stream to the specified file,
   * closing both the stream and the file when done.
   */
  def copy(is:InputStream, file:File):File = {
    val out = new FileOutputStream(file)
    val buf = new Array[Byte](1024)
    var len = 0
    manage(out, is) {
      while (-1 != len) {
        len = is.read(buf, 0, buf.length)
        if (-1 != len) out.write(buf, 0, len)
      }
      out.flush
    }
    file
  }

  /**
   * Replaces all instances of the specified tokens with their replacements
   * in the source file, rewriting the file in place.
   *
   * NOTE(review): as before, lines are written back without line separators
   * (getLines strips them), so multi-line files end up on a single line —
   * callers appear to rely on this; confirm before changing.
   */
  def replace(file:File, replacements:(String, String)*):File = {
    // read everything first, and close the Source (it previously leaked)
    val source = Source.fromFile(file)
    val contents =
      try {
        source.getLines().toList.map { line =>
          replacements.foldLeft(line) { (current, r) => current.replace(r._1, r._2) }
        }
      } finally {
        ignore(source.close())
      }
    val writer = new FileWriter(file)
    manage(writer) {
      contents.foreach { writer.write(_) }
      writer.flush
    }
    file
  }

  /**
   * Runs the given block, ignoring any error which occurs.
   */
  def ignore(f: => Unit):Unit = try { f } catch { case e:Throwable => /* ignore */ }

  /**
   * Performs the close method on the specified object(s) after
   * the specified function has been called, even if it throws.
   */
  def manage(closeable:{ def close() }*)(f: => Unit) {
    try { f } finally {
      closeable.foreach { (c) => ignore(c.close()) }
    }
  }

  // Microsecond clock: System.currentTimeMillis only has millisecond
  // precision, so anchor an epoch base once and advance it with the
  // high-resolution nanoTime counter.
  private val epocBaseMicros = TimeUnit.MILLISECONDS.toMicros(System.currentTimeMillis)
  private val runBaseNanos = System.nanoTime
  def currentTimeMicros = epocBaseMicros + TimeUnit.NANOSECONDS.toMicros(System.nanoTime-runBaseNanos)

  // When enabled, `now` returns strictly increasing values even if the
  // platform timer does not advance between calls.
  var COMPENSATE_FOR_LOW_PRECISION_SYSTEM_TIME = System.getProperty("com.shorrockin.cascal.COMPENSATE_FOR_LOW_PRESCISION_SYSTEM_TIME", "false") == "true"
  private var previousNow = System.currentTimeMillis

  /**
   * Returns the current time in micro seconds.
   */
  def now = {
    var rc = currentTimeMicros
    // It's very possible the platform can issue repetitive calls to now faster
    // than the platform's timer can change.
    if( COMPENSATE_FOR_LOW_PRECISION_SYSTEM_TIME ) {
      Utils.synchronized {
        if( rc <= previousNow ) {
          rc = previousNow + 1
        }
        previousNow = rc
      }
    }
    rc
  }
}
package nest.sparkle.datastream
import nest.sparkle.util.PeriodWithZone
import org.joda.time.DateTime
import nest.sparkle.util.Period
import org.joda.time.{Interval => JodaInterval}
import spire.math.Numeric
import spire.implicits._
import com.github.nscala_time.time.Implicits._
object PeriodGroups {
/**
 * Splits the key space into period-sized joda Intervals, beginning at the
 * period boundary at or before `startKey`.
 *
 * NOTE(review): `startKey` is handed directly to the joda `DateTime`
 * constructor — presumably it is an epoch-millis value; confirm against callers.
 *
 * @param periodWithZone the partition period and the zone used to interpret keys
 * @param startKey key from which partitioning starts (rounded down to a boundary)
 * @return an iterator of consecutive period-sized intervals (endless unless
 *         the period is zero-length; see timePartitions)
 */
def jodaIntervals[K: Numeric](periodWithZone: PeriodWithZone, startKey: K): Iterator[JodaInterval] = {
  val PeriodWithZone(period, dateTimeZone) = periodWithZone
  // round the start down to a period boundary so intervals align with the period
  val startDate: DateTime = {
    val baseStartDate = new DateTime(startKey, dateTimeZone)
    period.roundDate(baseStartDate)
  }
  timePartitions(period, startDate)
}
/**
 * Returns an iterator over consecutive period-sized joda Intervals starting
 * at `startDate` (rounded to a period boundary).
 *
 * The returned iterator never terminates (`hasNext` is unconditionally true);
 * callers are expected to stop consuming it themselves (e.g. with takeWhile).
 * A zero-length period would produce degenerate intervals forever, so it
 * yields an empty iterator instead.
 */
private def timePartitions(period: Period, startDate: DateTime): Iterator[JodaInterval] = {
  val partPeriod = period.toJoda
  def nonEmptyParts(): Iterator[JodaInterval] = {
    // mutable cursor advanced by each call to next()
    var partStart = period.roundDate(startDate)
    var partEnd: DateTime = null
    new Iterator[JodaInterval] {
      // endless stream of intervals — see scaladoc above
      override def hasNext: Boolean = true
      override def next(): JodaInterval = {
        partEnd = partStart + partPeriod
        val interval = new JodaInterval(partStart, partEnd)
        partStart = partEnd
        interval
      }
    }
  }
  // guard against a period whose every field is zero (empty intervals)
  if (partPeriod.getValues.forall(_ == 0)) {
    Iterator.empty
  } else {
    nonEmptyParts()
  }
}
} | mighdoll/sparkle | sparkle/src/main/scala/nest/sparkle/datastream/PeriodGroups.scala | Scala | apache-2.0 | 1,412 |
package me.axiometry.blocknet.entity
import me.axiometry.blocknet.item.ItemStack
trait Enderman extends Monster {
// whether this enderman is currently provoked
// NOTE(review): the exact trigger semantics (attacked, stared at, ...) are
// defined by implementations — not visible here; confirm before relying on them
def aggravated: Boolean
/** the item stack this enderman is carrying, if any */
def heldItem: Option[ItemStack]
/** sets the provoked state */
def aggravated_=(aggravated: Boolean)
/** sets (or clears, with None) the carried item stack */
def heldItem_=(heldItem: Option[ItemStack])
} | Axiometry/Blocknet | blocknet-api/src/main/scala/me/axiometry/blocknet/entity/Enderman.scala | Scala | bsd-2-clause | 264 |
package gapt.examples.recschem
import gapt.examples.Script
import gapt.expr._
import gapt.expr.formula.fol.FOLConst
import gapt.expr.formula.fol.FOLFunction
import gapt.expr.formula.fol.FOLFunctionConst
import gapt.expr.formula.fol.FOLVar
import gapt.expr.formula.fol.Numeral
import gapt.expr.formula.hol.lcomp
import gapt.grammars._
import gapt.logic.hol.simplifyPropositional
import gapt.logic.hol.toNNF
import gapt.provers.maxsat.bestAvailableMaxSatSolver
import gapt.utils.{ LogHandler, time, verbose }
/**
 * Compares minimal recursion schemes against minimal VTRATGs on the term
 * language { r(i, N-i) | 0 <= i < N }, printing the sizes of the coverage
 * formulas and the MaxSAT-minimized results.
 */
object vtrat_comparison extends Script {
  verbose {
    val N = 11
    // the term set to cover: r(0,N), r(1,N-1), ..., r(N-1,1)
    val terms = ( 0 until N ).map { i => FOLFunction( "r", Numeral( i ), Numeral( N - i ) ) }.toSet
    // nonterminals of the recursion scheme template: axiom A and binary B
    val A = FOLConst( "A" )
    val B = FOLFunctionConst( "B", 2 )
    val Seq( x, y, z ) = Seq( "x", "y", "z" ) map { FOLVar( _ ) }
    val rst = RecSchemTemplate( A, A -> B( x, y ), A -> z, B( x, y ) -> z )
    val targets = terms.map( A -> _ ).toSet[( Expr, Expr )]
    val nfRecSchem = rst.stableRecSchem( targets )
    // size (logical complexity) of the coverage formula for the stable scheme
    println( lcomp( simplifyPropositional( toNNF( ( new RecSchemGenLangFormula( nfRecSchem ) )( targets ) ) ) ) )
    val nfG = stableVTRATG( terms.toSet, Seq( 2 ) )
    // size of the corresponding VTRATG coverage formula, for comparison
    println( lcomp( simplifyPropositional( toNNF( new VectGrammarMinimizationFormula( nfG ).coversLanguage( terms ) ) ) ) )
    // MaxSAT-minimize the scheme and print any terms it fails to cover
    // (expected empty if minimization preserves coverage)
    val minimized = time { minimizeRecursionScheme( nfRecSchem, targets, solver = bestAvailableMaxSatSolver ) }
    println( minimized )
    println( terms.toSet diff minimized.language )
    println( recSchemToVTRATG( minimized ) )
    // MaxSAT-minimize the VTRATG over the same term set
    val minG = time { minimizeVTRATG( nfG, terms.toSet, bestAvailableMaxSatSolver ) }
    println( minG )
  }
}
| gapt/gapt | examples/recschem/vtrat-comparison.scala | Scala | gpl-3.0 | 1,637 |
package scaladot
import scala.util.parsing.combinator._
import scala.util.parsing.combinator.syntactical._
import scala.util.parsing.combinator.lexical._
import scala.util.parsing.input.CharArrayReader.EofCh
/**
* Created on 3/18/16.
*/
/**
 * Lexer for the toy "baz" language: produces string literals, integer
 * literals (with optional leading '-'), keywords/identifiers and the
 * configured delimiters. Created on 3/18/16.
 */
class BazLexer extends StdLexical with ImplicitConversions {
  // token alternatives are tried in order; earlier cases win, so malformed
  // numbers (digits followed by a letter) become ErrorTokens before the
  // plain-number case can match
  override def token: Parser[Token] =
    ( string ^^ StringLit
    | number ~ letter ^^ { case n ~ l => ErrorToken("Invalid number format : " + n + l) }
    | '-' ~ whitespace ~ number ~ letter ^^ { case ws ~ num ~ l => ErrorToken("Invalid number format : -" + num + l) }
    | '-' ~ whitespace ~ number ^^ { case ws ~ num => NumericLit("-" + num) }
    | number ^^ NumericLit
    | EofCh ^^^ EOF
    | delim
    | '\\"' ~> failure("Unterminated string")
    | id ^^ checkKeyword
    | failure("Illegal character")
    )
  // def idcont = letter | digit | underscore
  // identifier body: letters, digits and underscores (note: may be empty or
  // start with a digit — the number alternatives above are tried first)
  def id = rep(letter | digit | elem("underscore", _=='_')) ^^ { _ mkString "" }
  // def underscore: Parser[String] = elem('_')
  /** classifies an identifier string as a reserved keyword or plain identifier */
  def checkKeyword(strRep: String) = {
    if (reserved contains strRep) Keyword(strRep) else Identifier(strRep)
  }
  /** A string is a collection of zero or more Unicode characters, wrapped in
   * double quotes, using backslash escapes (cf. http://www.json.org/).
   */
  def string = '\\"' ~> rep(charSeq | chrExcept('\\"', '\\n', EofCh)) <~ '\\"' ^^ { _ mkString "" }
  override def whitespace = rep(whitespaceChar)
  // an integer literal; the identity mapping keeps the parsed string as-is
  def number = intPart ^^ { case i => i }
  // integer part: a lone zero, or a non-zero digit followed by more digits
  def intPart = zero | intList
  def intList = nonzero ~ rep(digit) ^^ {case x ~ y => (x :: y) mkString ""}
  // renders an optional value prefixed by `pre`, or "" when absent
  private def optString[A](pre: String, a: Option[A]) : String= a match {
    case Some(x) => pre + x.toString
    case None => ""
  }
  def zero: Parser[String] = '0' ^^^ "0"
  def nonzero = elem("nonzero digit", d => d.isDigit && d != '0')
  // exponent/sign parsers are defined but unused by intPart
  // NOTE(review): presumably left over from a float grammar — confirm
  def exponent = elem("exponent character", d => d == 'e' || d == 'E')
  def sign = elem("sign character", d => d == '-' || d == '+')
  // escape sequences permitted inside string literals (JSON-style)
  def charSeq: Parser[String] =
    ('\\\\' ~ '\\"' ^^^ "\\""
    |'\\\\' ~ '\\\\' ^^^ "\\\\"
    |'\\\\' ~ '/' ^^^ "/"
    |'\\\\' ~ 'b' ^^^ "\\b"
    |'\\\\' ~ 'f' ^^^ "\\f"
    |'\\\\' ~ 'n' ^^^ "\\n"
    |'\\\\' ~ 'r' ^^^ "\\r"
    |'\\\\' ~ 't' ^^^ "\\t"
    |'\\\\' ~ 'u' ~> unicodeBlock)
  val hexDigits = Set[Char]() ++ "0123456789abcdefABCDEF".toArray
  def hexDigit = elem("hex digit", hexDigits.contains(_))
  // parses exactly 4 hex digits into the corresponding one-character string
  private def unicodeBlock = hexDigit ~ hexDigit ~ hexDigit ~ hexDigit ^^ {
    case a ~ b ~ c ~ d =>
      new String(Array(Integer.parseInt(List(a, b, c, d) mkString "", 16)), 0, 1)
  }
}
/**
 * Parser for the toy "baz" grammar:
 *   baz(id) => [ foo(id) -> { bar(id), ... }, ... ]
 * built on top of [[BazLexer]] tokens.
 */
class BazParser extends StdTokenParsers with ImplicitConversions {
  type Tokens = BazLexer
  val lexical = new Tokens
  // Configure lexical parsing
  lexical.reserved ++= List("baz", "foo", "bar")
  lexical.delimiters ++= List("{", "}", "[", "]", "(", ")", ";", ",","->","=>", "--","\\"")
  import lexical._
  // baz(id) => [ foo, ... ]
  def baz = "baz" ~> id ~ ("=>" ~> foolist) ^^
    { case id ~ foos => Baz(id, foos:_*) }
  // comma-separated foos in square brackets
  def foolist = "[" ~> (repsep(foo,",")) <~ "]"
  // foo(id) -> { bar, ... }
  def foo = "foo" ~> id ~ ("->" ~> barlist) ^^
    { case id ~ bars => Foo(id, bars:_*) }
  // comma-separated bars in curly braces
  def barlist = "{" ~> (repsep(bar,",")) <~ "}"
  def bar = "bar" ~> id ^^
    { case id => Bar(id) }
  // an id is a parenthesized string literal or identifier token
  def id:Parser[String] = "(" ~> ID <~ ")"
  def ID:Parser[String] = IDs | IDi
  def IDs = accept("string", { case StringLit(n) => n })
  def IDi = accept("identifier", { case Identifier(n) => n})
}
/**
 * Command-line harness for [[BazParser]]: parses a canned sample "baz"
 * expression and prints the outcome of each parse attempt.
 */
object Baz extends BazParser {
  /**
   * Parses a complete `baz` expression from the given input, printing
   * "Success!" or the failure message as a side effect.
   *
   * @return Some(ast) on success, None on any parse failure
   */
  def parse(input: String): Option[Baz] =
    phrase(baz)(new lexical.Scanner(input)) match {
      case Success(result, _) => println("Success!"); Some(result)
      case n @ _ => println(n); None
    }

  // explicit ": Unit =" — procedure syntax (def main(...) { }) is deprecated
  def main(args: Array[String]): Unit = {
    testbaz()
  }

  /** runs the canned sample input through the `baz` rule */
  def testbaz(): Unit = {
    testrule(baz, """
      baz("O") => [
        foo("A") -> { bar("x1"), bar("x2"), bar("x3") },
        foo("B") -> { bar("y1"), bar("y2") },
        foo("C") -> { bar("z1") },
      ]
    """)
  }

  /**
   * Applies the given rule as a whole-input parser to `input`, printing the
   * match diagnostics and the resulting Option.
   */
  def testrule[T](p: Parser[T], input: String): Unit = {
    // the outcome is never rebound: use a val, not a var
    val outcome = phrase(p)(new lexical.Scanner(input)) match {
      case Success(result, _) => println("Success!"); Some(result)
      case n @ _ => println(n); None
    }
    println(outcome)
  }
}
/**
 * Base class of the baz AST. Provides a recursive pretty-printer used by
 * toString; concrete node types are [[Baz]], [[Foo]] and [[Bar]].
 */
abstract class BazComponent {
  /** pretty-prints this node (and its children) via buildString */
  override def toString = {
    val b = new StringBuilder
    buildString(0, b)
    b.toString()
  }
  // appends indentation for the given nesting level
  // (note: the loop runs 0 to level inclusive, so level 0 emits one space)
  private def indent(level: Int, b: StringBuilder) {
    for (i <- 0 to level) b append ' '
  }
  /**
   * Recursively renders this node into `b` at the given nesting level.
   * Children are wrapped in "[ ... ]" and separated by the node-specific
   * separator (" -> " or " => ").
   */
  def buildString(implicit level: Int, b: StringBuilder) {
    // renders `things` separated by `sep`, wrapped in square brackets
    def between(sep: String, things: Seq[BazComponent])(implicit lev: Int) {
      b append "[ "
      var first = true
      for (t <- things) {
        if (first) first = false else b append sep
        t.buildString(lev, b)
      }
      b append "] "
    }
    // like between, but with surrounding text, and skipped entirely when empty
    def betweenList(before: String, sep: String, after: String, things: Seq[BazComponent])(implicit lev: Int) {
      if (!things.isEmpty) {
        b append before
        between(sep, things)(lev)
        b append after
      }
    }
    // dispatch on the concrete node type
    // NOTE(review): betweenList above is defined but never used here — confirm
    this match {
      case Bar(id, foos @ _*) =>
        indent(level,b)
        b append "bar " append id
        between(" -> ", foos)
      case Foo(id, bars @ _*) =>
        indent(level,b)
        b append "Foo " append id
        between(" -> ", bars)
      case Baz(id, foos @ _*) =>
        indent(level,b)
        b append "BAZ " append id
        between(" => ", foos)
    }
  }
}
// marker traits distinguishing where each node kind may appear in the tree
trait BarComponent extends BazComponent
trait FooComponent extends BazComponent
trait RootComponent extends BazComponent
// concrete AST nodes; children are varargs so nodes can be built inline
// NOTE(review): Bar carrying Foo children (and Foo carrying Bars) looks
// inverted relative to the grammar (bar is a leaf there) — confirm intent
case class Bar(id: String, foos: Foo*) extends BazComponent with BarComponent
case class Foo(id: String, bars: Bar*) extends FooComponent with BarComponent
case class Baz(id: String, foos: Foo*) extends BazComponent with RootComponent
| hute37/snoob | src/main/scala/scaladot/BazParser.scala | Scala | gpl-3.0 | 5,742 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.tree.impurity
/**
 * Factory for Impurity instances, resolved by canonical name.
 */
private[mllib] object Impurities {
  /** Lookup table from canonical impurity names to their singleton instances. */
  private val namedImpurities: Map[String, Impurity] =
    Map("gini" -> Gini, "entropy" -> Entropy, "variance" -> Variance)

  /**
   * Resolves an impurity by name ("gini", "entropy" or "variance").
   *
   * @throws IllegalArgumentException if the name is not recognized
   */
  def fromString(name: String): Impurity =
    // getOrElse's default is by-name, so the exception is only raised on a miss
    namedImpurities.getOrElse(
      name,
      throw new IllegalArgumentException(s"Did not recognize Impurity name: $name"))
}
| xieguobin/Spark_2.0.0_cn1 | mllib/tree/impurity/Impurities.scala | Scala | apache-2.0 | 1,168 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.state
import java.io.{File, IOException}
import java.net.URI
import java.util
import java.util.UUID
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.util.Random
import org.apache.commons.io.FileUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.scalatest.{BeforeAndAfter, PrivateMethodTester}
import org.scalatest.concurrent.Eventually._
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.LocalSparkContext._
import org.apache.spark.internal.config.Network.RPC_NUM_RETRIES
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, UnsafeProjection, UnsafeRow}
import org.apache.spark.sql.catalyst.util.quietly
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.functions.count
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.Utils
class StateStoreSuite extends StateStoreSuiteBase[HDFSBackedStateStoreProvider]
with BeforeAndAfter {
import StateStoreTestsHelper._
import StateStoreCoordinatorSuite._
// Each test starts and ends with StateStore fully stopped so that no
// maintenance thread or loaded provider leaks across test cases.
before {
  StateStore.stop()
  require(!StateStore.isMaintenanceRunning)
}
after {
  StateStore.stop()
  require(!StateStore.isMaintenanceRunning)
}
// Verifies the provider's in-memory cache is bounded by
// MAX_BATCHES_TO_RETAIN_IN_MEMORY: with a limit of 2, committing a third
// version must evict the oldest cached version while keeping the two newest.
// NOTE(review): the earliestKey/latestKey arguments appear to name the
// newest/oldest cached entries respectively (e.g. earliestKey = 2,
// latestKey = 1 below) — confirm against checkLoadedVersions.
test("retaining only two latest versions when MAX_BATCHES_TO_RETAIN_IN_MEMORY set to 2") {
  val provider = newStoreProvider(minDeltasForSnapshot = 10, numOfVersToRetainInMemory = 2)
  var currentVersion = 0
  // commit the ver 1 : cache will have one element
  currentVersion = incrementVersion(provider, currentVersion)
  assert(getLatestData(provider) === Set(("a", 0) -> 1))
  var loadedMaps = provider.getLoadedMaps()
  checkLoadedVersions(loadedMaps, count = 1, earliestKey = 1, latestKey = 1)
  checkVersion(loadedMaps, 1, Map(("a", 0) -> 1))
  // commit the ver 2 : cache will have two elements
  currentVersion = incrementVersion(provider, currentVersion)
  assert(getLatestData(provider) === Set(("a", 0) -> 2))
  loadedMaps = provider.getLoadedMaps()
  checkLoadedVersions(loadedMaps, count = 2, earliestKey = 2, latestKey = 1)
  checkVersion(loadedMaps, 2, Map(("a", 0) -> 2))
  checkVersion(loadedMaps, 1, Map(("a", 0) -> 1))
  // commit the ver 3 : cache has already two elements and adding ver 3 incurs exceeding cache,
  // and ver 3 will be added but ver 1 will be evicted
  currentVersion = incrementVersion(provider, currentVersion)
  assert(getLatestData(provider) === Set(("a", 0) -> 3))
  loadedMaps = provider.getLoadedMaps()
  checkLoadedVersions(loadedMaps, count = 2, earliestKey = 3, latestKey = 2)
  checkVersion(loadedMaps, 3, Map(("a", 0) -> 3))
  checkVersion(loadedMaps, 2, Map(("a", 0) -> 2))
}
test("failure after committing with MAX_BATCHES_TO_RETAIN_IN_MEMORY set to 1") {
val provider = newStoreProvider(opId = Random.nextInt, partition = 0,
numOfVersToRetainInMemory = 1)
var currentVersion = 0
// commit the ver 1 : cache will have one element
currentVersion = incrementVersion(provider, currentVersion)
assert(getLatestData(provider) === Set(("a", 0) -> 1))
var loadedMaps = provider.getLoadedMaps()
checkLoadedVersions(loadedMaps, count = 1, earliestKey = 1, latestKey = 1)
checkVersion(loadedMaps, 1, Map(("a", 0) -> 1))
// commit the ver 2 : cache has already one elements and adding ver 2 incurs exceeding cache,
// and ver 2 will be added but ver 1 will be evicted
// this fact ensures cache miss will occur when this partition succeeds commit
// but there's a failure afterwards so have to reprocess previous batch
currentVersion = incrementVersion(provider, currentVersion)
assert(getLatestData(provider) === Set(("a", 0) -> 2))
loadedMaps = provider.getLoadedMaps()
checkLoadedVersions(loadedMaps, count = 1, earliestKey = 2, latestKey = 2)
checkVersion(loadedMaps, 2, Map(("a", 0) -> 2))
// suppose there has been failure after committing, and it decided to reprocess previous batch
currentVersion = 1
// committing to existing version which is committed partially but abandoned globally
val store = provider.getStore(currentVersion)
// negative value to represent reprocessing
put(store, "a", 0, -2)
store.commit()
currentVersion += 1
// make sure newly committed version is reflected to the cache (overwritten)
assert(getLatestData(provider) === Set(("a", 0) -> -2))
loadedMaps = provider.getLoadedMaps()
checkLoadedVersions(loadedMaps, count = 1, earliestKey = 2, latestKey = 2)
checkVersion(loadedMaps, 2, Map(("a", 0) -> -2))
}
// With the in-memory cache disabled (retain 0 versions), committing versions
// must never populate the provider's loaded-maps cache.
test("no cache data with MAX_BATCHES_TO_RETAIN_IN_MEMORY set to 0") {
  val provider = newStoreProvider(opId = Random.nextInt, partition = 0,
    numOfVersToRetainInMemory = 0)
  var currentVersion = 0
  // commit the ver 1 : never cached
  currentVersion = incrementVersion(provider, currentVersion)
  assert(getLatestData(provider) === Set(("a", 0) -> 1))
  var loadedMaps = provider.getLoadedMaps()
  assert(loadedMaps.size() === 0)
  // commit the ver 2 : never cached
  currentVersion = incrementVersion(provider, currentVersion)
  assert(getLatestData(provider) === Set(("a", 0) -> 2))
  loadedMaps = provider.getLoadedMaps()
  assert(loadedMaps.size() === 0)
}
// Maintenance must delete obsolete delta files over time while keeping the
// most recent versions retrievable.
test("cleaning") {
  val provider = newStoreProvider(opId = Random.nextInt, partition = 0, minDeltasForSnapshot = 5)
  // commit 20 versions, running maintenance after each commit
  for (i <- 1 to 20) {
    val store = provider.getStore(i - 1)
    put(store, "a", 0, i)
    store.commit()
    provider.doMaintenance() // do cleanup
  }
  require(
    rowPairsToDataSet(provider.latestIterator()) === Set(("a", 0) -> 20),
    "store not updated correctly")
  assert(!fileExists(provider, version = 1, isSnapshot = false)) // first file should be deleted
  // last couple of versions should be retrievable
  assert(getData(provider, 20) === Set(("a", 0) -> 20))
  assert(getData(provider, 19) === Set(("a", 0) -> 19))
}
// Regression test for SPARK-19677: on HDFS-like filesystems, rename does not
// overwrite an existing destination; committing the same version twice must
// still succeed and must not leave temp files behind.
testQuietly("SPARK-19677: Committing a delta file atop an existing one should not fail on HDFS") {
  val conf = new Configuration()
  // use a fake scheme backed by a filesystem with HDFS rename semantics
  conf.set("fs.fake.impl", classOf[RenameLikeHDFSFileSystem].getName)
  conf.set("fs.defaultFS", "fake:///")
  val provider = newStoreProvider(opId = Random.nextInt, partition = 0, hadoopConf = conf)
  provider.getStore(0).commit()
  provider.getStore(0).commit()
  // Verify we don't leak temp files
  val tempFiles = FileUtils.listFiles(new File(provider.stateStoreId.checkpointRootLocation),
    null, true).asScala.filter(_.getName.startsWith("temp-"))
  assert(tempFiles.isEmpty)
}
test("corrupted file handling") {
val provider = newStoreProvider(opId = Random.nextInt, partition = 0, minDeltasForSnapshot = 5)
for (i <- 1 to 6) {
val store = provider.getStore(i - 1)
put(store, "a", 0, i)
store.commit()
provider.doMaintenance() // do cleanup
}
val snapshotVersion = (0 to 10).find( version =>
fileExists(provider, version, isSnapshot = true)).getOrElse(fail("snapshot file not found"))
// Corrupt snapshot file and verify that it throws error
assert(getData(provider, snapshotVersion) === Set(("a", 0) -> snapshotVersion))
corruptFile(provider, snapshotVersion, isSnapshot = true)
intercept[Exception] {
getData(provider, snapshotVersion)
}
// Corrupt delta file and verify that it throws error
assert(getData(provider, snapshotVersion - 1) === Set(("a", 0) -> (snapshotVersion - 1)))
corruptFile(provider, snapshotVersion - 1, isSnapshot = false)
intercept[Exception] {
getData(provider, snapshotVersion - 1)
}
// Delete delta file and verify that it throws error
deleteFilesEarlierThanVersion(provider, snapshotVersion)
intercept[Exception] {
getData(provider, snapshotVersion - 1)
}
}
// The stateOnCurrentVersionSizeBytes custom metric must grow once data is
// committed, relative to its value for an empty store.
test("reports memory usage on current version") {
  // extracts the stateOnCurrentVersionSizeBytes custom metric value
  def getSizeOfStateForCurrentVersion(metrics: StateStoreMetrics): Long = {
    val metricPair = metrics.customMetrics.find(_._1.name == "stateOnCurrentVersionSizeBytes")
    assert(metricPair.isDefined)
    metricPair.get._2
  }
  val provider = newStoreProvider()
  val store = provider.getStore(0)
  val noDataMemoryUsed = getSizeOfStateForCurrentVersion(store.metrics)
  put(store, "a", 0, 1)
  store.commit()
  assert(getSizeOfStateForCurrentVersion(store.metrics) > noDataMemoryUsed)
}
test("maintenance") {
val conf = new SparkConf()
.setMaster("local")
.setAppName("test")
// Make sure that when SparkContext stops, the StateStore maintenance thread 'quickly'
// fails to talk to the StateStoreCoordinator and unloads all the StateStores
.set(RPC_NUM_RETRIES, 1)
val opId = 0
val dir1 = newDir()
val storeProviderId1 = StateStoreProviderId(StateStoreId(dir1, opId, 0), UUID.randomUUID)
val dir2 = newDir()
val storeProviderId2 = StateStoreProviderId(StateStoreId(dir2, opId, 1), UUID.randomUUID)
val sqlConf = getDefaultSQLConf(SQLConf.STATE_STORE_MIN_DELTAS_FOR_SNAPSHOT.defaultValue.get,
SQLConf.MAX_BATCHES_TO_RETAIN_IN_MEMORY.defaultValue.get)
sqlConf.setConf(SQLConf.MIN_BATCHES_TO_RETAIN, 2)
// Make maintenance thread do snapshots and cleanups very fast
sqlConf.setConf(SQLConf.STREAMING_MAINTENANCE_INTERVAL, 10L)
val storeConf = StateStoreConf(sqlConf)
val hadoopConf = new Configuration()
val provider = newStoreProvider(storeProviderId1.storeId)
var latestStoreVersion = 0
def generateStoreVersions(): Unit = {
for (i <- 1 to 20) {
val store = StateStore.get(storeProviderId1, keySchema, valueSchema, numColsPrefixKey = 0,
latestStoreVersion, storeConf, hadoopConf)
put(store, "a", 0, i)
store.commit()
latestStoreVersion += 1
}
}
val timeoutDuration = 1.minute
quietly {
withSpark(new SparkContext(conf)) { sc =>
withCoordinatorRef(sc) { coordinatorRef =>
require(!StateStore.isMaintenanceRunning, "StateStore is unexpectedly running")
// Generate sufficient versions of store for snapshots
generateStoreVersions()
eventually(timeout(timeoutDuration)) {
// Store should have been reported to the coordinator
assert(coordinatorRef.getLocation(storeProviderId1).nonEmpty,
"active instance was not reported")
// Background maintenance should clean up and generate snapshots
assert(StateStore.isMaintenanceRunning, "Maintenance task is not running")
// Some snapshots should have been generated
val snapshotVersions = (1 to latestStoreVersion).filter { version =>
fileExists(provider, version, isSnapshot = true)
}
assert(snapshotVersions.nonEmpty, "no snapshot file found")
}
// Generate more versions such that there is another snapshot and
// the earliest delta file will be cleaned up
generateStoreVersions()
// Earliest delta file should get cleaned up
eventually(timeout(timeoutDuration)) {
assert(!fileExists(provider, 1, isSnapshot = false), "earliest file not deleted")
}
// If driver decides to deactivate all stores related to a query run,
// then this instance should be unloaded
coordinatorRef.deactivateInstances(storeProviderId1.queryRunId)
eventually(timeout(timeoutDuration)) {
assert(!StateStore.isLoaded(storeProviderId1))
}
// Reload the store and verify
StateStore.get(storeProviderId1, keySchema, valueSchema, numColsPrefixKey = 0,
latestStoreVersion, storeConf, hadoopConf)
assert(StateStore.isLoaded(storeProviderId1))
// If some other executor loads the store, then this instance should be unloaded
coordinatorRef
.reportActiveInstance(storeProviderId1, "other-host", "other-exec", Seq.empty)
eventually(timeout(timeoutDuration)) {
assert(!StateStore.isLoaded(storeProviderId1))
}
// Reload the store and verify
StateStore.get(storeProviderId1, keySchema, valueSchema, numColsPrefixKey = 0,
latestStoreVersion, storeConf, hadoopConf)
assert(StateStore.isLoaded(storeProviderId1))
// If some other executor loads the store, and when this executor loads other store,
// then this executor should unload inactive instances immediately.
coordinatorRef
.reportActiveInstance(storeProviderId1, "other-host", "other-exec", Seq.empty)
StateStore.get(storeProviderId2, keySchema, valueSchema, numColsPrefixKey = 0,
0, storeConf, hadoopConf)
assert(!StateStore.isLoaded(storeProviderId1))
assert(StateStore.isLoaded(storeProviderId2))
}
}
// Verify if instance is unloaded if SparkContext is stopped
eventually(timeout(timeoutDuration)) {
require(SparkEnv.get === null)
assert(!StateStore.isLoaded(storeProviderId1))
assert(!StateStore.isLoaded(storeProviderId2))
assert(!StateStore.isMaintenanceRunning)
}
}
}
test("snapshotting") {
val provider = newStoreProvider(minDeltasForSnapshot = 5, numOfVersToRetainInMemory = 2)
var currentVersion = 0
currentVersion = updateVersionTo(provider, currentVersion, 2)
require(getLatestData(provider) === Set(("a", 0) -> 2))
provider.doMaintenance() // should not generate snapshot files
assert(getLatestData(provider) === Set(("a", 0) -> 2))
for (i <- 1 to currentVersion) {
assert(fileExists(provider, i, isSnapshot = false)) // all delta files present
assert(!fileExists(provider, i, isSnapshot = true)) // no snapshot files present
}
// After version 6, snapshotting should generate one snapshot file
currentVersion = updateVersionTo(provider, currentVersion, 6)
require(getLatestData(provider) === Set(("a", 0) -> 6), "store not updated correctly")
provider.doMaintenance() // should generate snapshot files
val snapshotVersion = (0 to 6).find(version => fileExists(provider, version, isSnapshot = true))
assert(snapshotVersion.nonEmpty, "snapshot file not generated")
deleteFilesEarlierThanVersion(provider, snapshotVersion.get)
assert(
getData(provider, snapshotVersion.get) === Set(("a", 0) -> snapshotVersion.get),
"snapshotting messed up the data of the snapshotted version")
assert(
getLatestData(provider) === Set(("a", 0) -> 6),
"snapshotting messed up the data of the final version")
// After version 20, snapshotting should generate newer snapshot files
currentVersion = updateVersionTo(provider, currentVersion, 20)
require(getLatestData(provider) === Set(("a", 0) -> 20), "store not updated correctly")
provider.doMaintenance() // do snapshot
val latestSnapshotVersion = (0 to 20).filter(version =>
fileExists(provider, version, isSnapshot = true)).lastOption
assert(latestSnapshotVersion.nonEmpty, "no snapshot file found")
assert(latestSnapshotVersion.get > snapshotVersion.get, "newer snapshot not generated")
deleteFilesEarlierThanVersion(provider, latestSnapshotVersion.get)
assert(getLatestData(provider) === Set(("a", 0) -> 20), "snapshotting messed up the data")
}
// Regression test for SPARK-18342: when the filesystem's rename returns
// false, commit must fail loudly (IllegalStateException wrapping the rename
// failure) instead of silently losing the delta file.
testQuietly("SPARK-18342: commit fails when rename fails") {
  import RenameReturnsFalseFileSystem._
  val dir = scheme + "://" + newDir()
  val conf = new Configuration()
  conf.set(s"fs.$scheme.impl", classOf[RenameReturnsFalseFileSystem].getName)
  val provider = newStoreProvider(
    opId = Random.nextInt, partition = 0, dir = dir, hadoopConf = conf)
  val store = provider.getStore(0)
  put(store, "a", 0, 0)
  val e = intercept[IllegalStateException](store.commit())
  assert(e.getCause.getMessage.contains("Failed to rename"))
}
test("SPARK-18416: do not create temp delta file until the store is updated") {
val dir = newDir()
val storeId = StateStoreProviderId(StateStoreId(dir, 0, 0), UUID.randomUUID)
val storeConf = StateStoreConf.empty
val hadoopConf = new Configuration()
val deltaFileDir = new File(s"$dir/0/0/")
def numTempFiles: Int = {
if (deltaFileDir.exists) {
deltaFileDir.listFiles.map(_.getName).count(n => n.endsWith(".tmp"))
} else 0
}
def numDeltaFiles: Int = {
if (deltaFileDir.exists) {
deltaFileDir.listFiles.map(_.getName).count(n => n.contains(".delta") && !n.startsWith("."))
} else 0
}
def shouldNotCreateTempFile[T](body: => T): T = {
val before = numTempFiles
val result = body
assert(numTempFiles === before)
result
}
// Getting the store should not create temp file
val store0 = shouldNotCreateTempFile {
StateStore.get(
storeId, keySchema, valueSchema, numColsPrefixKey = 0,
version = 0, storeConf, hadoopConf)
}
// Put should create a temp file
put(store0, "a", 0, 1)
assert(numTempFiles === 1)
assert(numDeltaFiles === 0)
// Commit should remove temp file and create a delta file
store0.commit()
assert(numTempFiles === 0)
assert(numDeltaFiles === 1)
// Remove should create a temp file
val store1 = shouldNotCreateTempFile {
StateStore.get(
storeId, keySchema, valueSchema, numColsPrefixKey = 0,
version = 1, storeConf, hadoopConf)
}
remove(store1, _._1 == "a")
assert(numTempFiles === 1)
assert(numDeltaFiles === 1)
// Commit should remove temp file and create a delta file
store1.commit()
assert(numTempFiles === 0)
assert(numDeltaFiles === 2)
// Commit without any updates should create a delta file
val store2 = shouldNotCreateTempFile {
StateStore.get(
storeId, keySchema, valueSchema, numColsPrefixKey = 0,
version = 2, storeConf, hadoopConf)
}
store2.commit()
assert(numTempFiles === 0)
assert(numDeltaFiles === 3)
}
test("SPARK-21145: Restarted queries create new provider instances") {
try {
val checkpointLocation = Utils.createTempDir().getAbsoluteFile
val spark = SparkSession.builder().master("local[2]").getOrCreate()
SparkSession.setActiveSession(spark)
implicit val sqlContext = spark.sqlContext
spark.conf.set(SQLConf.SHUFFLE_PARTITIONS.key, "1")
import spark.implicits._
val inputData = MemoryStream[Int]
def runQueryAndGetLoadedProviders(): Seq[StateStoreProvider] = {
val aggregated = inputData.toDF().groupBy("value").agg(count("*"))
// stateful query
val query = aggregated.writeStream
.format("memory")
.outputMode("complete")
.queryName("query")
.option("checkpointLocation", checkpointLocation.toString)
.start()
inputData.addData(1, 2, 3)
query.processAllAvailable()
require(query.lastProgress != null) // at least one batch processed after start
val loadedProvidersMethod =
PrivateMethod[mutable.HashMap[StateStoreProviderId, StateStoreProvider]](
Symbol("loadedProviders"))
val loadedProvidersMap = StateStore invokePrivate loadedProvidersMethod()
val loadedProviders = loadedProvidersMap.synchronized { loadedProvidersMap.values.toSeq }
query.stop()
loadedProviders
}
val loadedProvidersAfterRun1 = runQueryAndGetLoadedProviders()
require(loadedProvidersAfterRun1.length === 1)
val loadedProvidersAfterRun2 = runQueryAndGetLoadedProviders()
assert(loadedProvidersAfterRun2.length === 2) // two providers loaded for 2 runs
// Both providers should have the same StateStoreId, but the should be different objects
assert(loadedProvidersAfterRun2(0).stateStoreId === loadedProvidersAfterRun2(1).stateStoreId)
assert(loadedProvidersAfterRun2(0) ne loadedProvidersAfterRun2(1))
} finally {
SparkSession.getActiveSession.foreach { spark =>
spark.streams.active.foreach(_.stop())
spark.stop()
}
}
}
test("error writing [version].delta cancels the output stream") {
val hadoopConf = new Configuration()
hadoopConf.set(
SQLConf.STREAMING_CHECKPOINT_FILE_MANAGER_CLASS.parent.key,
classOf[CreateAtomicTestManager].getName)
val remoteDir = Utils.createTempDir().getAbsolutePath
val provider = newStoreProvider(
opId = Random.nextInt, partition = 0, dir = remoteDir, hadoopConf = hadoopConf)
// Disable failure of output stream and generate versions
CreateAtomicTestManager.shouldFailInCreateAtomic = false
for (version <- 1 to 10) {
val store = provider.getStore(version - 1)
put(store, version.toString, 0, version) // update "1" -> 1, "2" -> 2, ...
store.commit()
}
val version10Data = (1L to 10).map(_.toString).map(x => x -> x).toSet
CreateAtomicTestManager.cancelCalledInCreateAtomic = false
val store = provider.getStore(10)
// Fail commit for next version and verify that reloading resets the files
CreateAtomicTestManager.shouldFailInCreateAtomic = true
put(store, "11", 0, 11)
val e = intercept[IllegalStateException] { quietly { store.commit() } }
assert(e.getCause.isInstanceOf[IOException])
CreateAtomicTestManager.shouldFailInCreateAtomic = false
// Abort commit for next version and verify that reloading resets the files
CreateAtomicTestManager.cancelCalledInCreateAtomic = false
val store2 = provider.getStore(10)
put(store2, "11", 0, 11)
store2.abort()
assert(CreateAtomicTestManager.cancelCalledInCreateAtomic)
}
test("expose metrics with custom metrics to StateStoreMetrics") {
  // Extracts the value of a named custom metric, failing the test if it is absent.
  def getCustomMetric(metrics: StateStoreMetrics, name: String): Long = {
    val metricPair = metrics.customMetrics.find(_._1.name == name)
    assert(metricPair.isDefined)
    metricPair.get._2
  }

  // The loaded-map size is surfaced through the generic memory-used metric.
  def getLoadedMapSizeMetric(metrics: StateStoreMetrics): Long = {
    metrics.memoryUsedBytes
  }

  // Asserts both the cache-hit and cache-miss counters in a single call.
  def assertCacheHitAndMiss(
      metrics: StateStoreMetrics,
      expectedCacheHitCount: Long,
      expectedCacheMissCount: Long): Unit = {
    val cacheHitCount = getCustomMetric(metrics, "loadedMapCacheHitCount")
    val cacheMissCount = getCustomMetric(metrics, "loadedMapCacheMissCount")
    assert(cacheHitCount === expectedCacheHitCount)
    assert(cacheMissCount === expectedCacheMissCount)
  }

  val provider = newStoreProvider()

  // Verify state before starting a new set of updates
  assert(getLatestData(provider).isEmpty)
  val store = provider.getStore(0)
  assert(!store.hasCommitted)
  assert(store.metrics.numKeys === 0)
  val initialLoadedMapSize = getLoadedMapSizeMetric(store.metrics)
  assert(initialLoadedMapSize >= 0)
  assertCacheHitAndMiss(store.metrics, expectedCacheHitCount = 0, expectedCacheMissCount = 0)

  // numKeys should track puts and removes within the uncommitted store.
  put(store, "a", 0, 1)
  assert(store.metrics.numKeys === 1)
  put(store, "b", 0, 2)
  put(store, "aa", 0, 3)
  assert(store.metrics.numKeys === 3)
  remove(store, _._1.startsWith("a"))
  assert(store.metrics.numKeys === 1)
  assert(store.commit() === 1)

  assert(store.hasCommitted)
  // Committing version 1 should grow the loaded-map memory footprint.
  val loadedMapSizeForVersion1 = getLoadedMapSizeMetric(store.metrics)
  assert(loadedMapSizeForVersion1 > initialLoadedMapSize)
  assertCacheHitAndMiss(store.metrics, expectedCacheHitCount = 0, expectedCacheMissCount = 0)

  // Loading version 1 from the same provider should be a cache hit.
  val storeV2 = provider.getStore(1)
  assert(!storeV2.hasCommitted)
  assert(storeV2.metrics.numKeys === 1)
  put(storeV2, "cc", 0, 4)
  assert(storeV2.metrics.numKeys === 2)
  assert(storeV2.commit() === 2)

  assert(storeV2.hasCommitted)
  val loadedMapSizeForVersion1And2 = getLoadedMapSizeMetric(storeV2.metrics)
  assert(loadedMapSizeForVersion1And2 > loadedMapSizeForVersion1)
  assertCacheHitAndMiss(storeV2.metrics, expectedCacheHitCount = 1, expectedCacheMissCount = 0)

  // A freshly created provider has a cold cache, so loading is a cache miss.
  val reloadedProvider = newStoreProvider(store.id)
  // intended to load version 2 instead of 1
  // version 2 will not be loaded to the cache in provider
  val reloadedStore = reloadedProvider.getStore(1)
  assert(reloadedStore.metrics.numKeys === 1)
  assertCacheHitAndMiss(reloadedStore.metrics, expectedCacheHitCount = 0,
    expectedCacheMissCount = 1)

  // now we are loading version 2
  val reloadedStoreV2 = reloadedProvider.getStore(2)
  assert(reloadedStoreV2.metrics.numKeys === 2)
  assert(getLoadedMapSizeMetric(reloadedStoreV2.metrics) > loadedMapSizeForVersion1)
  assertCacheHitAndMiss(reloadedStoreV2.metrics, expectedCacheHitCount = 0,
    expectedCacheMissCount = 2)
}
/** Creates a provider with a random operator id on partition 0. */
override def newStoreProvider(): HDFSBackedStateStoreProvider =
  newStoreProvider(opId = Random.nextInt(), partition = 0)
/** Creates a provider bound to an existing store id and its checkpoint location. */
override def newStoreProvider(storeId: StateStoreId): HDFSBackedStateStoreProvider =
  newStoreProvider(
    opId = storeId.operatorId,
    partition = storeId.partitionId,
    dir = storeId.checkpointRootLocation)
/** Creates a provider with explicit snapshotting and in-memory retention settings. */
override def newStoreProvider(
    minDeltasForSnapshot: Int,
    numOfVersToRetainInMemory: Int): HDFSBackedStateStoreProvider =
  newStoreProvider(
    opId = Random.nextInt(),
    partition = 0,
    minDeltasForSnapshot = minDeltasForSnapshot,
    numOfVersToRetainInMemory = numOfVersToRetainInMemory)
/** Reads the latest committed data through a freshly built provider (version -1 sentinel). */
override def getLatestData(
    storeProvider: HDFSBackedStateStoreProvider): Set[((String, Int), Int)] =
  getData(storeProvider, -1)
/**
 * Reads data for the requested version through a brand-new provider, so that the
 * result reflects what is actually persisted rather than any in-memory state.
 * A negative version selects the latest committed version.
 */
override def getData(
    provider: HDFSBackedStateStoreProvider,
    version: Int): Set[((String, Int), Int)] = {
  val reloadedProvider = newStoreProvider(provider.stateStoreId)
  val rows =
    if (version < 0) reloadedProvider.latestIterator()
    else reloadedProvider.getStore(version).iterator()
  rows.map(rowPairToDataPair).toSet
}
/** Builds a SQLConf carrying the snapshot/retention settings used by these tests. */
override def getDefaultSQLConf(
    minDeltasForSnapshot: Int,
    numOfVersToRetainInMemory: Int): SQLConf = {
  val conf = new SQLConf()
  conf.setConf(SQLConf.STATE_STORE_MIN_DELTAS_FOR_SNAPSHOT, minDeltasForSnapshot)
  conf.setConf(SQLConf.MAX_BATCHES_TO_RETAIN_IN_MEMORY, numOfVersToRetainInMemory)
  conf.setConf(SQLConf.MIN_BATCHES_TO_RETAIN, 2)
  // Pick up whatever compression codec the current session is configured with.
  conf.setConf(SQLConf.STATE_STORE_COMPRESSION_CODEC, SQLConf.get.stateStoreCompressionCodec)
  conf
}
/**
 * Primary factory: builds and initializes an HDFS-backed provider with the given
 * operator/partition identity, checkpoint directory, and tuning parameters.
 */
def newStoreProvider(
    opId: Long,
    partition: Int,
    numColsPrefixKey: Int = 0,
    dir: String = newDir(),
    minDeltasForSnapshot: Int = SQLConf.STATE_STORE_MIN_DELTAS_FOR_SNAPSHOT.defaultValue.get,
    numOfVersToRetainInMemory: Int = SQLConf.MAX_BATCHES_TO_RETAIN_IN_MEMORY.defaultValue.get,
    hadoopConf: Configuration = new Configuration): HDFSBackedStateStoreProvider = {
  val storeConf =
    new StateStoreConf(getDefaultSQLConf(minDeltasForSnapshot, numOfVersToRetainInMemory))
  val newProvider = new HDFSBackedStateStoreProvider()
  newProvider.init(
    StateStoreId(dir, opId, partition),
    keySchema,
    valueSchema,
    numColsPrefixKey = numColsPrefixKey,
    storeConf,
    hadoopConf)
  newProvider
}
/** Creates a provider configured for prefix-key scans over the given column count. */
override def newStoreProvider(numPrefixCols: Int): HDFSBackedStateStoreProvider =
  newStoreProvider(opId = Random.nextInt(), partition = 0, numColsPrefixKey = numPrefixCols)
/**
 * Asserts the shape of the provider's in-memory version cache: the number of
 * cached maps plus the smallest and largest cached version keys.
 */
def checkLoadedVersions(
    loadedMaps: util.SortedMap[Long, HDFSBackedStateStoreMap],
    count: Int,
    earliestKey: Long,
    latestKey: Long): Unit = {
  assert(loadedMaps.size() === count)
  assert(loadedMaps.firstKey() === earliestKey)
  assert(loadedMaps.lastKey() === latestKey)
}
/**
 * Asserts that the cached map for `version` contains exactly `expectedData`,
 * after converting the UnsafeRow entries back to plain (key, value) data.
 */
def checkVersion(
    loadedMaps: util.SortedMap[Long, HDFSBackedStateStoreMap],
    version: Long,
    expectedData: Map[(String, Int), Int]): Unit = {
  val originValueMap = loadedMaps.get(version).iterator().map { entry =>
    keyRowToData(entry.key) -> valueRowToData(entry.value)
  }.toMap
  assert(originValueMap === expectedData)
}
/**
 * Corrupts a delta or snapshot file for the given version by replacing it with
 * an empty file. Uses reflection to read the provider's private base directory.
 */
def corruptFile(
    provider: HDFSBackedStateStoreProvider,
    version: Long,
    isSnapshot: Boolean): Unit = {
  val baseDirGetter = PrivateMethod[Path](Symbol("baseDir"))
  val baseDir = provider invokePrivate baseDirGetter()
  val suffix = if (isSnapshot) "snapshot" else "delta"
  val target = new File(baseDir.toString, s"$version.$suffix")
  // Delete the real file and leave a zero-length stand-in behind.
  target.delete()
  target.createNewFile()
}
}
/**
 * Test cases shared by every [[StateStoreProvider]] implementation. Concrete
 * suites plug in provider construction through the abstract `newStoreProvider`
 * overloads and data-access hooks declared near the bottom of this class.
 */
abstract class StateStoreSuiteBase[ProviderClass <: StateStoreProvider]
  extends StateStoreCodecsTest with PrivateMethodTester {
  import StateStoreTestsHelper._

  type MapType = mutable.HashMap[UnsafeRow, UnsafeRow]

  protected val keySchema: StructType = StateStoreTestsHelper.keySchema
  protected val valueSchema: StructType = StateStoreTestsHelper.valueSchema

  // End-to-end exercise of the core store operations across all codecs.
  testWithAllCodec("get, put, remove, commit, and all data iterator") {
    val provider = newStoreProvider()

    // Verify state before starting a new set of updates
    assert(getLatestData(provider).isEmpty)

    val store = provider.getStore(0)
    assert(!store.hasCommitted)
    assert(get(store, "a", 0) === None)
    assert(store.iterator().isEmpty)
    assert(store.metrics.numKeys === 0)

    // Verify state after updating
    put(store, "a", 0, 1)
    assert(get(store, "a", 0) === Some(1))
    assert(store.iterator().nonEmpty)
    // Uncommitted updates must not be visible through a reloaded provider.
    assert(getLatestData(provider).isEmpty)

    // Make updates, commit and then verify state
    put(store, "b", 0, 2)
    put(store, "aa", 0, 3)
    remove(store, _._1.startsWith("a"))
    assert(store.commit() === 1)
    assert(store.hasCommitted)
    assert(rowPairsToDataSet(store.iterator()) === Set(("b", 0) -> 2))
    assert(getLatestData(provider) === Set(("b", 0) -> 2))

    // Trying to get newer versions should fail
    intercept[Exception] {
      provider.getStore(2)
    }
    intercept[Exception] {
      getData(provider, 2)
    }

    // New updates to the reloaded store with new version, and does not change old version
    val reloadedProvider = newStoreProvider(store.id)
    val reloadedStore = reloadedProvider.getStore(1)
    put(reloadedStore, "c", 0, 4)
    assert(reloadedStore.commit() === 2)
    assert(rowPairsToDataSet(reloadedStore.iterator()) === Set(("b", 0) -> 2, ("c", 0) -> 4))
    assert(getLatestData(provider) === Set(("b", 0) -> 2, ("c", 0) -> 4))
    assert(getData(provider, version = 1) === Set(("b", 0) -> 2))
  }

  // Verifies prefix scans return exactly the entries sharing the first key column.
  testWithAllCodec("prefix scan") {
    val provider = newStoreProvider(numPrefixCols = 1)

    // Verify state before starting a new set of updates
    assert(getLatestData(provider).isEmpty)

    val store = provider.getStore(0)

    val key1 = Seq("a", "b", "c")
    val key2 = Seq(1, 2, 3)
    val keys = for (k1 <- key1; k2 <- key2) yield (k1, k2)

    // Insert in random order so results cannot depend on insertion order.
    val randomizedKeys = scala.util.Random.shuffle(keys.toList)
    randomizedKeys.foreach { case (key1, key2) =>
      put(store, key1, key2, key2)
    }

    key1.foreach { k1 =>
      // Rows are copied before collecting because the scan may reuse row buffers.
      val keyValueSet = store.prefixScan(dataToPrefixKeyRow(k1)).map { pair =>
        rowPairToDataPair(pair.withRows(pair.key.copy(), pair.value.copy()))
      }.toSet

      assert(keyValueSet === key2.map(k2 => ((k1, k2), k2)).toSet)
    }

    assert(store.prefixScan(dataToPrefixKeyRow("non-exist")).isEmpty)
  }

  // numKeys must reflect both puts and removes across commits and reloads.
  testWithAllCodec("numKeys metrics") {
    val provider = newStoreProvider()

    // Verify state before starting a new set of updates
    assert(getLatestData(provider).isEmpty)

    val store = provider.getStore(0)
    put(store, "a", 0, 1)
    put(store, "b", 0, 2)
    put(store, "c", 0, 3)
    put(store, "d", 0, 4)
    put(store, "e", 0, 5)
    assert(store.commit() === 1)
    assert(store.metrics.numKeys === 5)
    assert(rowPairsToDataSet(store.iterator()) ===
      Set(("a", 0) -> 1, ("b", 0) -> 2, ("c", 0) -> 3, ("d", 0) -> 4, ("e", 0) -> 5))
    val reloadedProvider = newStoreProvider(store.id)
    val reloadedStore = reloadedProvider.getStore(1)
    remove(reloadedStore, _._1 == "b")
    assert(reloadedStore.commit() === 2)
    assert(reloadedStore.metrics.numKeys === 4)
    assert(rowPairsToDataSet(reloadedStore.iterator()) ===
      Set(("a", 0) -> 1, ("c", 0) -> 3, ("d", 0) -> 4, ("e", 0) -> 5))
  }

  // Puts and removes must be safe while an iterator over the store is live.
  testWithAllCodec("removing while iterating") {
    val provider = newStoreProvider()

    // Verify state before starting a new set of updates
    assert(getLatestData(provider).isEmpty)
    val store = provider.getStore(0)
    put(store, "a", 0, 1)
    put(store, "b", 0, 2)

    // Updates should work while iterating of filtered entries
    val filtered = store.iterator.filter { tuple => keyRowToData(tuple.key) == ("a", 0) }
    filtered.foreach { tuple =>
      store.put(tuple.key, dataToValueRow(valueRowToData(tuple.value) + 1))
    }
    assert(get(store, "a", 0) === Some(2))

    // Removes should work while iterating of filtered entries
    val filtered2 = store.iterator.filter { tuple => keyRowToData(tuple.key) == ("b", 0) }
    filtered2.foreach { tuple => store.remove(tuple.key) }
    assert(get(store, "b", 0) === None)
  }

  testWithAllCodec("abort") {
    val provider = newStoreProvider()
    val store = provider.getStore(0)
    put(store, "a", 0, 1)
    store.commit()
    assert(rowPairsToDataSet(store.iterator()) === Set(("a", 0) -> 1))

    // cancelUpdates should not change the data in the files
    val store1 = provider.getStore(1)
    put(store1, "b", 0, 1)
    store1.abort()
  }

  // Negative versions and versions beyond the latest commit must be rejected.
  testWithAllCodec("getStore with invalid versions") {
    val provider = newStoreProvider()

    def checkInvalidVersion(version: Int): Unit = {
      intercept[Exception] {
        provider.getStore(version)
      }
    }

    checkInvalidVersion(-1)
    checkInvalidVersion(1)

    val store = provider.getStore(0)
    put(store, "a", 0, 1)
    assert(store.commit() === 1)
    assert(rowPairsToDataSet(store.iterator()) === Set(("a", 0) -> 1))

    val store1_ = provider.getStore(1)
    assert(rowPairsToDataSet(store1_.iterator()) === Set(("a", 0) -> 1))

    checkInvalidVersion(-1)
    checkInvalidVersion(2)

    // Update store version with some data
    val store1 = provider.getStore(1)
    assert(rowPairsToDataSet(store1.iterator()) === Set(("a", 0) -> 1))
    put(store1, "b", 0, 1)
    assert(store1.commit() === 2)
    assert(rowPairsToDataSet(store1.iterator()) === Set(("a", 0) -> 1, ("b", 0) -> 1))

    checkInvalidVersion(-1)
    checkInvalidVersion(3)
  }

  testWithAllCodec("two concurrent StateStores - one for read-only and one for read-write") {
    // During Streaming Aggregation, we have two StateStores per task, one used as read-only in
    // `StateStoreRestoreExec`, and one read-write used in `StateStoreSaveExec`. `StateStore.abort`
    // will be called for these StateStores if they haven't committed their results. We need to
    // make sure that `abort` in read-only store after a `commit` in the read-write store doesn't
    // accidentally lead to the deletion of state.
    val dir = newDir()
    val storeId = StateStoreId(dir, 0L, 1)
    val provider0 = newStoreProvider(storeId)

    // prime state
    val store = provider0.getStore(0)
    val key1 = "a"
    val key2 = 0
    put(store, key1, key2, 1)
    store.commit()
    assert(rowPairsToDataSet(store.iterator()) === Set((key1, key2) -> 1))

    // two state stores
    val provider1 = newStoreProvider(storeId)
    val restoreStore = provider1.getReadStore(1)
    val saveStore = provider1.getStore(1)

    put(saveStore, key1, key2, get(restoreStore, key1, key2).get + 1)
    saveStore.commit()
    // Aborting the read-only store after the commit must not delete state.
    restoreStore.abort()

    // check that state is correct for next batch
    val provider2 = newStoreProvider(storeId)
    val finalStore = provider2.getStore(2)
    assert(rowPairsToDataSet(finalStore.iterator()) === Set((key1, key2) -> 2))
  }

  // Exercises the coordinator-level StateStore.get entry point: validation,
  // caching (isLoaded), unload, and reload of specific versions.
  test("StateStore.get") {
    quietly {
      val dir = newDir()
      val storeId = StateStoreProviderId(StateStoreId(dir, 0, 0), UUID.randomUUID)
      val storeConf = getDefaultStoreConf
      val hadoopConf = new Configuration()

      // Verify that trying to get incorrect versions throw errors
      intercept[IllegalArgumentException] {
        StateStore.get(
          storeId, keySchema, valueSchema, 0, -1, storeConf, hadoopConf)
      }
      assert(!StateStore.isLoaded(storeId)) // version -1 should not attempt to load the store

      intercept[IllegalStateException] {
        StateStore.get(
          storeId, keySchema, valueSchema, 0, 1, storeConf, hadoopConf)
      }

      // Increase version of the store and try to get again
      val store0 = StateStore.get(
        storeId, keySchema, valueSchema, 0, 0, storeConf, hadoopConf)
      assert(store0.version === 0)
      put(store0, "a", 0, 1)
      store0.commit()

      val store1 = StateStore.get(
        storeId, keySchema, valueSchema, 0, 1, storeConf, hadoopConf)
      assert(StateStore.isLoaded(storeId))
      assert(store1.version === 1)
      assert(rowPairsToDataSet(store1.iterator()) === Set(("a", 0) -> 1))

      // Verify that you can also load older version
      val store0reloaded = StateStore.get(
        storeId, keySchema, valueSchema, 0, 0, storeConf, hadoopConf)
      assert(store0reloaded.version === 0)
      assert(rowPairsToDataSet(store0reloaded.iterator()) === Set.empty)

      // Verify that you can remove the store and still reload and use it
      StateStore.unload(storeId)
      assert(!StateStore.isLoaded(storeId))

      val store1reloaded = StateStore.get(
        storeId, keySchema, valueSchema, 0, 1, storeConf, hadoopConf)
      assert(StateStore.isLoaded(storeId))
      assert(store1reloaded.version === 1)
      put(store1reloaded, "a", 0, 2)
      assert(store1reloaded.commit() === 2)
      assert(rowPairsToDataSet(store1reloaded.iterator()) === Set(("a", 0) -> 2))
    }
  }

  test("reports memory usage") {
    val provider = newStoreProvider()
    val store = provider.getStore(0)
    val noDataMemoryUsed = store.metrics.memoryUsedBytes
    put(store, "a", 0, 1)
    store.commit()
    // Adding data must strictly increase the reported memory usage.
    assert(store.metrics.memoryUsedBytes > noDataMemoryUsed)
  }

  test("SPARK-34270: StateStoreMetrics.combine should not override individual metrics") {
    val customSumMetric = StateStoreCustomSumMetric("metric1", "custom metric 1")
    val customSizeMetric = StateStoreCustomSizeMetric("metric2", "custom metric 2")
    val customTimingMetric = StateStoreCustomTimingMetric("metric3", "custom metric 3")

    val leftCustomMetrics: Map[StateStoreCustomMetric, Long] =
      Map(customSumMetric -> 10L, customSizeMetric -> 5L, customTimingMetric -> 100L)
    val leftMetrics = StateStoreMetrics(1, 10, leftCustomMetrics)

    val rightCustomMetrics: Map[StateStoreCustomMetric, Long] =
      Map(customSumMetric -> 20L, customSizeMetric -> 15L, customTimingMetric -> 300L)
    val rightMetrics = StateStoreMetrics(3, 20, rightCustomMetrics)

    // Combining must sum each metric, not let one side clobber the other.
    val combinedMetrics = StateStoreMetrics.combine(Seq(leftMetrics, rightMetrics))
    assert(combinedMetrics.numKeys == 4)
    assert(combinedMetrics.memoryUsedBytes == 30)
    assert(combinedMetrics.customMetrics.size == 3)
    assert(combinedMetrics.customMetrics(customSumMetric) == 30L)
    assert(combinedMetrics.customMetrics(customSizeMetric) == 20L)
    assert(combinedMetrics.customMetrics(customTimingMetric) == 400L)
  }

  test("SPARK-35659: StateStore.put cannot put null value") {
    val provider = newStoreProvider()

    // Verify state before starting a new set of updates
    assert(getLatestData(provider).isEmpty)
    val store = provider.getStore(0)
    val err = intercept[IllegalArgumentException] {
      store.put(dataToKeyRow("key", 0), null)
    }
    assert(err.getMessage.contains("Cannot put a null value"))
  }

  test("SPARK-35763: StateStoreCustomMetric withNewDesc and createSQLMetric") {
    val metric = StateStoreCustomSizeMetric(name = "m1", desc = "desc1")
    val metricNew = metric.withNewDesc("new desc")
    assert(metricNew.desc === "new desc", "incorrect description in copied instance")
    assert(metricNew.name === "m1", "incorrect name in copied instance")
    val conf = new SparkConf().setMaster("local").setAppName("SPARK-35763").set(RPC_NUM_RETRIES, 1)
    withSpark(new SparkContext(conf)) { sc =>
      val sqlMetric = metric.createSQLMetric(sc)
      assert(sqlMetric != null)
      assert(sqlMetric.name === Some("desc1"))
    }
  }

  /** Return a new provider with a random id */
  def newStoreProvider(): ProviderClass

  /** Return a new provider with the given id */
  def newStoreProvider(storeId: StateStoreId): ProviderClass

  /** Return a new provider with minimum delta and version to retain in memory */
  def newStoreProvider(minDeltasForSnapshot: Int, numOfVersToRetainInMemory: Int): ProviderClass

  /** Return a new provider with setting prefix key */
  def newStoreProvider(numPrefixCols: Int): ProviderClass

  /** Get the latest data referred to by the given provider but not using this provider */
  def getLatestData(storeProvider: ProviderClass): Set[((String, Int), Int)]

  /**
   * Get a specific version of data referred to by the given provider but not using
   * this provider
   */
  def getData(storeProvider: ProviderClass, version: Int): Set[((String, Int), Int)]

  /** Runs `f` as a test body with log noise suppressed. */
  protected def testQuietly(name: String)(f: => Unit): Unit = {
    test(name) {
      quietly {
        f
      }
    }
  }

  /** Get the `SQLConf` by the given minimum delta and version to retain in memory */
  def getDefaultSQLConf(minDeltasForSnapshot: Int, numOfVersToRetainInMemory: Int): SQLConf

  /** Get the `StateStoreConf` used by the tests with default setting */
  def getDefaultStoreConf(): StateStoreConf = StateStoreConf.empty

  /** Checks whether the delta/snapshot file for `version` exists on disk. */
  protected def fileExists(
      provider: ProviderClass,
      version: Long,
      isSnapshot: Boolean): Boolean = {
    // Reads the provider's private base directory via reflection.
    val method = PrivateMethod[Path](Symbol("baseDir"))
    val basePath = provider invokePrivate method()
    val fileName = if (isSnapshot) s"$version.snapshot" else s"$version.delta"
    val filePath = new File(basePath.toString, fileName)
    filePath.exists
  }

  /** Commits one dummy update per version until `targetVersion` is reached. */
  def updateVersionTo(
      provider: StateStoreProvider,
      currentVersion: Int,
      targetVersion: Int): Int = {
    var newCurrentVersion = currentVersion
    for (i <- newCurrentVersion until targetVersion) {
      newCurrentVersion = incrementVersion(provider, i)
    }
    require(newCurrentVersion === targetVersion)
    newCurrentVersion
  }

  /** Writes a single key and commits, producing version `currentVersion + 1`. */
  def incrementVersion(provider: StateStoreProvider, currentVersion: Int): Int = {
    val store = provider.getStore(currentVersion)
    put(store, "a", 0, currentVersion + 1)
    store.commit()
    currentVersion + 1
  }

  /** Deletes all delta and snapshot files strictly below `version`. */
  def deleteFilesEarlierThanVersion(provider: ProviderClass, version: Long): Unit = {
    val method = PrivateMethod[Path](Symbol("baseDir"))
    val basePath = provider invokePrivate method()
    // NOTE(review): the loop variable shadows the `version` parameter; the range
    // bound `version.toInt` still refers to the parameter (evaluated in outer scope).
    for (version <- 0 until version.toInt) {
      for (isSnapshot <- Seq(false, true)) {
        val fileName = if (isSnapshot) s"$version.snapshot" else s"$version.delta"
        val filePath = new File(basePath.toString, fileName)
        if (filePath.exists) filePath.delete()
      }
    }
  }
}
/**
 * Row/data conversion helpers shared by the state store test suites.
 *
 * Test keys are (String, Int) pairs and values are single Ints; the helpers
 * convert between these plain Scala values and the UnsafeRow representation
 * required by the state store API.
 */
object StateStoreTestsHelper {

  val keySchema = StructType(
    Seq(StructField("key1", StringType, true), StructField("key2", IntegerType, true)))
  val valueSchema = StructType(Seq(StructField("value", IntegerType, true)))

  // Shared projections; results are defensively .copy()-ed below, presumably
  // because a projection reuses its internal row buffer — verify before removing.
  val keyProj = UnsafeProjection.create(Array[DataType](StringType, IntegerType))
  val prefixKeyProj = UnsafeProjection.create(Array[DataType](StringType))
  val valueProj = UnsafeProjection.create(Array[DataType](IntegerType))

  /** Builds an UnsafeRow holding only the string prefix column. */
  def dataToPrefixKeyRow(s: String): UnsafeRow = {
    prefixKeyProj.apply(new GenericInternalRow(Array[Any](UTF8String.fromString(s)))).copy()
  }

  /** Builds a two-column key row from a (String, Int) pair. */
  def dataToKeyRow(s: String, i: Int): UnsafeRow = {
    keyProj.apply(new GenericInternalRow(Array[Any](UTF8String.fromString(s), i))).copy()
  }

  /** Builds a single-column value row from an Int. */
  def dataToValueRow(i: Int): UnsafeRow = {
    valueProj.apply(new GenericInternalRow(Array[Any](i))).copy()
  }

  /** Inverse of [[dataToKeyRow]]. */
  def keyRowToData(row: UnsafeRow): (String, Int) = {
    (row.getUTF8String(0).toString, row.getInt(1))
  }

  /** Inverse of [[dataToValueRow]]. */
  def valueRowToData(row: UnsafeRow): Int = {
    row.getInt(0)
  }

  /** Converts a key/value row pair into plain ((String, Int), Int) data. */
  def rowPairToDataPair(row: UnsafeRowPair): ((String, Int), Int) = {
    (keyRowToData(row.key), valueRowToData(row.value))
  }

  /** Materializes an iterator of row pairs into a set of plain data pairs. */
  def rowPairsToDataSet(iterator: Iterator[UnsafeRowPair]): Set[((String, Int), Int)] = {
    iterator.map(rowPairToDataPair).toSet
  }

  /** Removes every entry whose decoded key satisfies `condition`. */
  def remove(store: StateStore, condition: ((String, Int)) => Boolean): Unit = {
    store.iterator().foreach { rowPair =>
      if (condition(keyRowToData(rowPair.key))) store.remove(rowPair.key)
    }
  }

  /** Puts a plain (key1, key2) -> value entry into the store. */
  def put(store: StateStore, key1: String, key2: Int, value: Int): Unit = {
    store.put(dataToKeyRow(key1, key2), dataToValueRow(value))
  }

  /** Gets the decoded value for (key1, key2), or None if absent. */
  def get(store: ReadStateStore, key1: String, key2: Int): Option[Int] = {
    Option(store.get(dataToKeyRow(key1, key2))).map(valueRowToData)
  }

  /** Creates a fresh temporary directory and returns its path. */
  def newDir(): String = Utils.createTempDir().toString
}
/**
* Fake FileSystem that simulates HDFS rename semantic, i.e. renaming a file atop an existing
* one should return false.
* See hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/filesystem/filesystem.html
*/
class RenameLikeHDFSFileSystem extends RawLocalFileSystem {
  /**
   * HDFS-like rename semantics: refuses to rename onto an existing destination
   * (returns false) instead of overwriting it like the local file system does.
   *
   * Rewritten as a single boolean expression; the original used explicit
   * `return` statements, which are discouraged in Scala. Short-circuiting
   * `&&` preserves the behavior: `super.rename` is only invoked when the
   * destination does not already exist.
   */
  override def rename(src: Path, dst: Path): Boolean =
    !exists(dst) && super.rename(src, dst)
}
/**
* Fake FileSystem to test that the StateStore throws an exception while committing the
* delta file, when `fs.rename` returns `false`.
*/
class RenameReturnsFalseFileSystem extends RawLocalFileSystem {
  import RenameReturnsFalseFileSystem._

  /** URI under the suite-unique scheme used to register this fake file system. */
  override def getUri: URI = URI.create(scheme + ":///")

  /** Always reports failure, regardless of the paths involved. */
  override def rename(src: Path, dst: Path): Boolean = false
}
object RenameReturnsFalseFileSystem {
  /** Randomized scheme name so concurrently running suites cannot collide. */
  val scheme: String = "StateStoreSuite" + math.abs(Random.nextInt()) + "fs"
}
| jiangxb1987/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala | Scala | apache-2.0 | 47,073 |
/*
* Copyright (C) 2014 GRNET S.A.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package gr.grnet.egi.vmcatcher.cmdline
import com.beust.jcommander.{ParametersDelegate, Parameters}
import gr.grnet.egi.vmcatcher.cmdline.common.ConfDelegate
/**
 * JCommander descriptor for the `drain-queue` sub-command: removes all events
 * from the queue without processing them. The class body is intentionally
 * empty — the annotation carries all the command metadata.
 */
@Parameters(
  commandNames = Array("drain-queue"),
  commandDescription = "Remove all events from the queue and do nothing with them"
)
class DrainQueue
| grnet/snf-vmcatcher | src/main/scala/gr/grnet/egi/vmcatcher/cmdline/DrainQueue.scala | Scala | gpl-3.0 | 1,015 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.controllers.registration.executor
import iht.config.AppConfig
import iht.connector.CachingConnector
import iht.controllers.registration.{RegistrationControllerTest, routes => registrationRoutes}
import iht.forms.registration.CoExecutorForms
import iht.models.{DeceasedDateOfDeath, RegistrationDetails}
import iht.testhelpers.CommonBuilder
import iht.views.html.registration.executor.others_applying_for_probate
import org.joda.time.LocalDate
import play.api.mvc.MessagesControllerComponents
import play.api.test.Helpers._
import uk.gov.hmrc.play.bootstrap.frontend.controller.FrontendController
import scala.concurrent.Future
/**
 * Controller tests for the "others applying for probate" registration page:
 * authentication redirects, submit handling (Yes/No), co-executor cleanup,
 * validation failures, guard conditions, and storage-failure handling.
 */
class OthersApplyingForProbateControllerTest extends RegistrationControllerTest with CoExecutorForms {

  val appConfig = mockAppConfig

  // Base controller wiring shared by both controller factories below.
  protected abstract class TestController extends FrontendController(mockControllerComponents) with OthersApplyingForProbateController {
    override val cc: MessagesControllerComponents = mockControllerComponents
    override implicit val appConfig: AppConfig = mockAppConfig
    override val othersApplyingForProbateView: others_applying_for_probate = app.injector.instanceOf[others_applying_for_probate]
  }

  // Create controller object and pass in mocks.
  def othersApplyingForProbateController = new TestController {
    override def cachingConnector: CachingConnector = mockCachingConnector
    override val authConnector = mockAuthConnector
    override val cc: MessagesControllerComponents = mockControllerComponents
    override implicit val appConfig: AppConfig = mockAppConfig
  }

  // NOTE(review): wired identically to othersApplyingForProbateController above;
  // typically a "not authorised" variant would use a different auth connector —
  // confirm whether mockAuthConnector's behavior is driven per-test instead.
  def othersApplyingForProbateControllerNotAuthorised = new TestController {
    override def cachingConnector: CachingConnector = mockCachingConnector
    override val authConnector = mockAuthConnector
    override val cc: MessagesControllerComponents = mockControllerComponents
    override implicit val appConfig: AppConfig = mockAppConfig
  }

  "OthersApplyingForProbateController" must {

    "redirect to GG login page on PageLoad if the user is not logged in" in {
      val result = othersApplyingForProbateControllerNotAuthorised.onPageLoad()(createFakeRequest(false))
      status(result) mustBe SEE_OTHER
      redirectLocation(result) mustBe Some(loginUrl)
    }

    "redirect to GG login page on Submit if the user is not logged in" in {
      val result = othersApplyingForProbateControllerNotAuthorised.onSubmit()(createFakeRequest(false))
      status(result) mustBe SEE_OTHER
      redirectLocation(result) mustBe Some(loginUrl)
    }

    "redirect to GG login page on PageLoad in edit mode if the user is not logged in" in {
      val result = othersApplyingForProbateControllerNotAuthorised.onEditPageLoad()(createFakeRequest(false))
      status(result) mustBe SEE_OTHER
      redirectLocation(result) mustBe Some(loginUrl)
    }

    "redirect to GG login page on Submit in edit mode if the user is not logged in" in {
      val result = othersApplyingForProbateControllerNotAuthorised.onEditSubmit()(createFakeRequest(false))
      status(result) mustBe SEE_OTHER
      redirectLocation(result) mustBe Some(loginUrl)
    }

    "redirect to GG login page on PageLoad if the user is not logged in and arrived from overview" in {
      val result = othersApplyingForProbateControllerNotAuthorised.onPageLoadFromOverview()(createFakeRequest(false))
      status(result) mustBe SEE_OTHER
      redirectLocation(result) mustBe Some(loginUrl)
    }

    "redirect to GG login page on Submit if the user is not logged in and arrived from overview" in {
      val result = othersApplyingForProbateControllerNotAuthorised.onSubmitFromOverview()(createFakeRequest(false))
      status(result) mustBe SEE_OTHER
      redirectLocation(result) mustBe Some(loginUrl)
    }

    // A "Yes" answer continues to the co-executor personal details page.
    "respond appropriately to a submit with a value of Yes" in {
      val registrationDetails = RegistrationDetails(None, None, None)
      val probateForm = othersApplyingForProbateForm.fill(Some(true))
      val request = createFakeRequestWithReferrerWithBody(referrerURL=referrerURL,host=host,
        data=probateForm.data.toSeq, authRetrieveNino = false)

      createMockToGetRegDetailsFromCacheNoOption(mockCachingConnector, Future.successful(Some(registrationDetails)))
      createMockToGetRegDetailsFromCache(mockCachingConnector, Some(registrationDetails))
      createMockToStoreRegDetailsInCache(mockCachingConnector, Some(registrationDetails))

      val result = othersApplyingForProbateController.onSubmit()(request)
      status(result) mustBe(SEE_OTHER)
      redirectLocation(result) must be(Some(routes.CoExecutorPersonalDetailsController.onPageLoad(None).url))
    }

    // A "No" answer from the overview goes straight to the registration summary.
    "respond appropriately to a submit with a value of No" in {
      val registrationDetails = RegistrationDetails(None, None, None)
      val probateForm = othersApplyingForProbateForm.fill(Some(false))
      val request = createFakeRequestWithReferrerWithBody(referrerURL=referrerURL,host=host,
        data=probateForm.data.toSeq, authRetrieveNino = false)

      createMockToGetRegDetailsFromCacheNoOption(mockCachingConnector, Future.successful(Some(registrationDetails)))
      createMockToGetRegDetailsFromCache(mockCachingConnector, Some(registrationDetails))
      createMockToStoreRegDetailsInCache(mockCachingConnector, Some(registrationDetails))

      val result = othersApplyingForProbateController.onSubmitFromOverview()(request)
      status(result) mustBe(SEE_OTHER)
      redirectLocation(result) must be(Some(registrationRoutes.RegistrationSummaryController.onPageLoad.url))
    }

    "When submitting a yes - the areOthersApplyingForProbate must be set to false and any coexcutors must be removed from registration details" in {
      val registrationDetails = CommonBuilder.buildRegistrationDetailsWithCoExecutors
      val probateForm = othersApplyingForProbateForm.fill(Some(true))
      val request = createFakeRequestWithReferrerWithBody(referrerURL=referrerURL,host=host, data=probateForm.data.toSeq, authRetrieveNino = false)

      createMockToGetRegDetailsFromCacheNoOption(mockCachingConnector, Future.successful(Some(registrationDetails)))
      createMockToGetRegDetailsFromCache(mockCachingConnector, Some(registrationDetails))
      createMockToStoreRegDetailsInCache(mockCachingConnector, Some(registrationDetails))

      val result = othersApplyingForProbateController.onSubmitFromOverview()(request)
      status(result) mustBe(SEE_OTHER)

      // Inspect exactly what was written back to the cache.
      val capturedValue = verifyAndReturnStoredRegistationDetails(mockCachingConnector)
      capturedValue.coExecutors.length mustBe 1
      capturedValue.areOthersApplyingForProbate mustBe Some(true)
    }

    "When submitting a no - the areOthersApplyingForProbate must be set to false and any coexcutors must be removed from registration details" in {
      val registrationDetails = CommonBuilder.buildRegistrationDetailsWithCoExecutors
      val probateForm = othersApplyingForProbateForm.fill(Some(false))
      val request = createFakeRequestWithReferrerWithBody(referrerURL=referrerURL,host=host, data=probateForm.data.toSeq, authRetrieveNino = false)

      createMockToGetRegDetailsFromCacheNoOption(mockCachingConnector, Future.successful(Some(registrationDetails)))
      createMockToGetRegDetailsFromCache(mockCachingConnector, Some(registrationDetails))
      createMockToStoreRegDetailsInCache(mockCachingConnector, Some(registrationDetails))

      val result = othersApplyingForProbateController.onSubmitFromOverview()(request)
      status(result) mustBe(SEE_OTHER)

      // Answering "No" must clear all co-executors from the stored details.
      val capturedValue = verifyAndReturnStoredRegistationDetails(mockCachingConnector)
      capturedValue.coExecutors.length mustBe 0
      capturedValue.areOthersApplyingForProbate mustBe Some(false)
    }

    "respond appropriately to an invalid submit: Missing mandatory fields" in {
      val registrationDetails = RegistrationDetails(None, None, None)
      // fill(None) omits the mandatory answer, triggering form validation failure.
      val probateForm = othersApplyingForProbateForm.fill(None)
      val request = createFakeRequestWithReferrerWithBody(referrerURL=referrerURL,host=host,
        data=probateForm.data.toSeq, authRetrieveNino = false)

      createMockToGetRegDetailsFromCache(mockCachingConnector, None)
      createMockToStoreRegDetailsInCache(mockCachingConnector, Some(registrationDetails))

      val result = await(othersApplyingForProbateController.onSubmit()(request))
      status(result) mustBe(BAD_REQUEST)
    }

    "return true if the guard conditions are true" in {
      // Guard requires a date of death and applicant details to be present.
      val rd = CommonBuilder.buildRegistrationDetails copy (deceasedDateOfDeath =
        Some(DeceasedDateOfDeath(LocalDate.now)), applicantDetails = Some(CommonBuilder.buildApplicantDetails))
      othersApplyingForProbateController.checkGuardCondition(rd, "") mustBe true
    }

    "raise an error when the submit has a value of Yes but the storage fails" in {
      val registrationDetails = RegistrationDetails(None, None, None)
      val probateForm = othersApplyingForProbateForm.fill(Some(true))
      val request = createFakeRequestWithReferrerWithBody(referrerURL=referrerURL,host=host,
        data=probateForm.data.toSeq, authRetrieveNino = false)

      createMockToGetRegDetailsFromCacheNoOption(mockCachingConnector, Future.successful(Some(registrationDetails)))
      createMockToGetRegDetailsFromCache(mockCachingConnector, Some(registrationDetails))
      // Simulate the cache write failing, which should surface as a 500.
      createMockToStoreRegDetailsInCacheWithFailure(mockCachingConnector, Some(registrationDetails))

      val result = othersApplyingForProbateController.onSubmit()(request)
      status(result) mustBe(INTERNAL_SERVER_ERROR)
    }
  }
}
| hmrc/iht-frontend | test/iht/controllers/registration/executor/OthersApplyingForProbateControllerTest.scala | Scala | apache-2.0 | 10,157 |
package com.wargaming.dwh
import java.io.{ByteArrayOutputStream, File, OutputStream, PrintStream}
/**
* Created by d_balyka on 15.05.14.
*/
object LoggingUtils {

  /**
   * Redirects the JVM's standard error stream.
   *
   * With an empty `logFilename`, stderr is silenced by pointing it at a
   * [[BlackHoleOutputStream]]. Otherwise stderr is redirected into the given
   * file, creating parent directories as needed (`new PrintStream(String)`
   * creates the file itself, so no explicit `createNewFile()` is required).
   * If the file cannot be opened, stderr is silenced as a fallback rather
   * than letting the error propagate.
   *
   * @param logFilename path of the log file, or "" to discard stderr output
   */
  def fixLoggingToErrStream(logFilename: String = ""): Unit = {
    import scala.util.control.NonFatal
    if (logFilename.isEmpty) {
      System.setErr(new PrintStream(new BlackHoleOutputStream()))
    } else {
      try {
        val file = new File(logFilename)
        // Ensure the target directory exists before opening the stream.
        file.getParentFile.mkdirs()
        System.setErr(new PrintStream(logFilename))
      } catch {
        // Swallow only recoverable failures (I/O, security, missing parent);
        // fatal JVM errors such as OutOfMemoryError must propagate.
        case NonFatal(_) =>
          System.setErr(new PrintStream(new BlackHoleOutputStream()))
      }
    }
  }
}
/** An OutputStream that silently discards everything written to it. */
class BlackHoleOutputStream extends OutputStream {
override def write(b: Int): Unit = {
/* Intentionally empty: every byte is dropped (a "null device" sink). */
}
} | wgnet/spark-kafka-streaming | src/main/scala/com/wargaming/dwh/LoggingUtils.scala | Scala | apache-2.0 | 842 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala
import com.esotericsoftware.kryo.Serializer
import org.apache.flink.annotation.{PublicEvolving, Public}
import org.apache.flink.api.common.io.{FileInputFormat, InputFormat}
import org.apache.flink.api.common.restartstrategy.RestartStrategies.RestartStrategyConfiguration
import org.apache.flink.api.common.typeinfo.{BasicTypeInfo, TypeInformation}
import org.apache.flink.api.common.typeutils.CompositeType
import org.apache.flink.api.common.{ExecutionConfig, JobExecutionResult}
import org.apache.flink.api.java.io._
import org.apache.flink.api.java.operators.DataSource
import org.apache.flink.api.java.typeutils.runtime.kryo.KryoSerializer
import org.apache.flink.api.java.typeutils.{PojoTypeInfo, TupleTypeInfoBase, ValueTypeInfo}
import org.apache.flink.api.java.{CollectionEnvironment, ExecutionEnvironment => JavaEnv}
import org.apache.flink.configuration.Configuration
import org.apache.flink.core.fs.Path
import org.apache.flink.types.StringValue
import org.apache.flink.util.{NumberSequenceIterator, Preconditions, SplittableIterator}
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
/**
* The ExecutionEnvironment is the context in which a program is executed. A local environment will
* cause execution in the current JVM, a remote environment will cause execution on a remote
* cluster installation.
*
* The environment provides methods to control the job execution (such as setting the parallelism)
* and to interact with the outside world (data access).
*
* To get an execution environment use the methods on the companion object:
*
* - [[ExecutionEnvironment#getExecutionEnvironment]]
* - [[ExecutionEnvironment#createLocalEnvironment]]
* - [[ExecutionEnvironment#createRemoteEnvironment]]
*
* Use [[ExecutionEnvironment#getExecutionEnvironment]] to get the correct environment depending
* on where the program is executed. If it is run inside an IDE a local environment will be
* created. If the program is submitted to a cluster a remote execution environment will
* be created.
*/
@Public
class ExecutionEnvironment(javaEnv: JavaEnv) {
/**
* @return the Java Execution environment.
*/
def getJavaEnv: JavaEnv = javaEnv
/**
* Gets the config object.
*/
def getConfig: ExecutionConfig = {
javaEnv.getConfig
}
/**
* Sets the parallelism (parallelism) for operations executed through this environment.
* Setting a parallelism of x here will cause all operators (such as join, map, reduce) to run
* with x parallel instances. This value can be overridden by specific operations using
* [[DataSet.setParallelism]].
*/
def setParallelism(parallelism: Int): Unit = {
javaEnv.setParallelism(parallelism)
}
/**
* Returns the default parallelism for this execution environment. Note that this
* value can be overridden by individual operations using [[DataSet.setParallelism]]
*/
def getParallelism = javaEnv.getParallelism
/**
* Sets the restart strategy configuration. The configuration specifies which restart strategy
* will be used for the execution graph in case of a restart.
*
* @param restartStrategyConfiguration Restart strategy configuration to be set
*/
@PublicEvolving
def setRestartStrategy(restartStrategyConfiguration: RestartStrategyConfiguration): Unit = {
javaEnv.setRestartStrategy(restartStrategyConfiguration)
}
/**
* Returns the specified restart strategy configuration.
*
* @return The restart strategy configuration to be used
*/
@PublicEvolving
def getRestartStrategy: RestartStrategyConfiguration = {
javaEnv.getRestartStrategy
}
/**
* Sets the number of times that failed tasks are re-executed. A value of zero
* effectively disables fault tolerance. A value of "-1" indicates that the system
* default value (as defined in the configuration) should be used.
*
* @deprecated This method will be replaced by [[setRestartStrategy()]]. The
* FixedDelayRestartStrategyConfiguration contains the number of execution retries.
*/
@Deprecated
@PublicEvolving
def setNumberOfExecutionRetries(numRetries: Int): Unit = {
javaEnv.setNumberOfExecutionRetries(numRetries)
}
/**
* Gets the number of times the system will try to re-execute failed tasks. A value
* of "-1" indicates that the system default value (as defined in the configuration)
* should be used.
*
* @deprecated This method will be replaced by [[getRestartStrategy]]. The
* FixedDelayRestartStrategyConfiguration contains the number of execution retries.
*/
@Deprecated
@PublicEvolving
def getNumberOfExecutionRetries = javaEnv.getNumberOfExecutionRetries
/**
* Gets the JobExecutionResult of the last executed job.
*/
def getLastJobExecutionResult = javaEnv.getLastJobExecutionResult
/**
* Registers the given type with the serializer at the [[KryoSerializer]].
*
* Note that the serializer instance must be serializable (as defined by java.io.Serializable),
* because it may be distributed to the worker nodes by java serialization.
*/
def registerTypeWithKryoSerializer[T <: Serializer[_] with Serializable](
clazz: Class[_],
serializer: T)
: Unit = {
javaEnv.registerTypeWithKryoSerializer(clazz, serializer)
}
/**
* Registers the given type with the serializer at the [[KryoSerializer]].
*/
def registerTypeWithKryoSerializer(clazz: Class[_], serializer: Class[_ <: Serializer[_]]) {
javaEnv.registerTypeWithKryoSerializer(clazz, serializer)
}
/**
* Registers a default serializer for the given class and its sub-classes at Kryo.
*/
def addDefaultKryoSerializer(clazz: Class[_], serializer: Class[_ <: Serializer[_]]) {
javaEnv.addDefaultKryoSerializer(clazz, serializer)
}
/**
* Registers a default serializer for the given class and its sub-classes at Kryo.
*
* Note that the serializer instance must be serializable (as defined by java.io.Serializable),
* because it may be distributed to the worker nodes by java serialization.
*/
def addDefaultKryoSerializer[T <: Serializer[_] with Serializable](
clazz: Class[_],
serializer: T)
: Unit = {
javaEnv.addDefaultKryoSerializer(clazz, serializer)
}
/**
* Registers the given type with the serialization stack. If the type is eventually
* serialized as a POJO, then the type is registered with the POJO serializer. If the
* type ends up being serialized with Kryo, then it will be registered at Kryo to make
* sure that only tags are written.
*
*/
def registerType(typeClass: Class[_]) {
javaEnv.registerType(typeClass)
}
/**
* Creates a DataSet of Strings produced by reading the given file line wise.
*
* @param filePath The path of the file, as a URI (e.g., "file:///some/local/file" or
* "hdfs://host:port/file/path").
* @param charsetName The name of the character set used to read the file. Default is UTF-8
*/
def readTextFile(filePath: String, charsetName: String = "UTF-8"): DataSet[String] = {
require(filePath != null, "The file path may not be null.")
val format = new TextInputFormat(new Path(filePath))
format.setCharsetName(charsetName)
val source = new DataSource[String](javaEnv, format, BasicTypeInfo.STRING_TYPE_INFO,
getCallLocationName())
wrap(source)
}
/**
* Creates a DataSet of Strings produced by reading the given file line wise.
* This method is similar to [[readTextFile]], but it produces a DataSet with mutable
* [[StringValue]] objects, rather than Java Strings. StringValues can be used to tune
* implementations to be less object and garbage collection heavy.
*
* @param filePath The path of the file, as a URI (e.g., "file:///some/local/file" or
* "hdfs://host:port/file/path").
* @param charsetName The name of the character set used to read the file. Default is UTF-8
*/
def readTextFileWithValue(
filePath: String,
charsetName: String = "UTF-8"): DataSet[StringValue] = {
require(filePath != null, "The file path may not be null.")
val format = new TextValueInputFormat(new Path(filePath))
format.setCharsetName(charsetName)
val source = new DataSource[StringValue](
javaEnv, format, new ValueTypeInfo[StringValue](classOf[StringValue]), getCallLocationName())
wrap(source)
}
/**
* Creates a DataSet by reading the given CSV file. The type parameter must be used to specify
* a Tuple type that has the same number of fields as there are fields in the CSV file. If the
* number of fields in the CSV file is not the same, the `includedFields` parameter can be used
* to only read specific fields.
*
* @param filePath The path of the file, as a URI (e.g., "file:///some/local/file" or
* "hdfs://host:port/file/path").
* @param lineDelimiter The string that separates lines, defaults to newline.
* @param fieldDelimiter The string that separates individual fields, defaults to ",".
* @param quoteCharacter The character to use for quoted String parsing, disabled by default.
* @param ignoreFirstLine Whether the first line in the file should be ignored.
* @param ignoreComments Lines that start with the given String are ignored, disabled by default.
* @param lenient Whether the parser should silently ignore malformed lines.
* @param includedFields The fields in the file that should be read. Per default all fields
* are read.
* @param pojoFields The fields of the POJO which are mapped to CSV fields.
*/
def readCsvFile[T : ClassTag : TypeInformation](
filePath: String,
lineDelimiter: String = "\\n",
fieldDelimiter: String = ",",
quoteCharacter: Character = null,
ignoreFirstLine: Boolean = false,
ignoreComments: String = null,
lenient: Boolean = false,
includedFields: Array[Int] = null,
pojoFields: Array[String] = null): DataSet[T] = {
val typeInfo = implicitly[TypeInformation[T]]
Preconditions.checkArgument(
typeInfo.isInstanceOf[CompositeType[T]],
s"The type $typeInfo has to be a tuple or pojo type.",
null)
var inputFormat: CsvInputFormat[T] = null
typeInfo match {
case info: TupleTypeInfoBase[T] =>
inputFormat = new TupleCsvInputFormat[T](
new Path(filePath),
typeInfo.asInstanceOf[TupleTypeInfoBase[T]],
includedFields)
case info: PojoTypeInfo[T] =>
if (pojoFields == null) {
throw new IllegalArgumentException(
"POJO fields must be specified (not null) if output type is a POJO.")
}
inputFormat = new PojoCsvInputFormat[T](
new Path(filePath),
typeInfo.asInstanceOf[PojoTypeInfo[T]],
pojoFields,
includedFields)
case _ => throw new IllegalArgumentException("Type information is not valid.")
}
if (quoteCharacter != null) {
inputFormat.enableQuotedStringParsing(quoteCharacter)
}
inputFormat.setDelimiter(lineDelimiter)
inputFormat.setFieldDelimiter(fieldDelimiter)
inputFormat.setSkipFirstLineAsHeader(ignoreFirstLine)
inputFormat.setLenient(lenient)
inputFormat.setCommentPrefix(ignoreComments)
wrap(new DataSource[T](javaEnv, inputFormat, typeInfo, getCallLocationName()))
}
/**
* Creates a DataSet that represents the primitive type produced by reading the
* given file in delimited way.This method is similar to [[readCsvFile]] with
* single field, but it produces a DataSet not through Tuple.
* The type parameter must be used to specify the primitive type.
*
* @param filePath The path of the file, as a URI (e.g., "file:///some/local/file" or
* "hdfs://host:port/file/path").
* @param delimiter The string that separates primitives , defaults to newline.
*/
def readFileOfPrimitives[T : ClassTag : TypeInformation](
filePath : String,
delimiter : String = "\\n") : DataSet[T] = {
require(filePath != null, "File path must not be null.")
val typeInfo = implicitly[TypeInformation[T]]
val datasource = new DataSource[T](
javaEnv,
new PrimitiveInputFormat(new Path(filePath), delimiter, typeInfo.getTypeClass),
typeInfo,
getCallLocationName())
wrap(datasource)
}
/**
* Creates a new DataSource by reading the specified file using the custom
* [[org.apache.flink.api.common.io.FileInputFormat]].
*/
def readFile[T : ClassTag : TypeInformation](
inputFormat: FileInputFormat[T],
filePath: String): DataSet[T] = {
require(inputFormat != null, "InputFormat must not be null.")
require(filePath != null, "File path must not be null.")
inputFormat.setFilePath(new Path(filePath))
createInput(inputFormat, explicitFirst(inputFormat, implicitly[TypeInformation[T]]))
}
/**
* Generic method to create an input DataSet with an
* [[org.apache.flink.api.common.io.InputFormat]].
*/
def createInput[T : ClassTag : TypeInformation](inputFormat: InputFormat[T, _]): DataSet[T] = {
if (inputFormat == null) {
throw new IllegalArgumentException("InputFormat must not be null.")
}
createInput(inputFormat, explicitFirst(inputFormat, implicitly[TypeInformation[T]]))
}
/**
* Generic method to create an input DataSet with an
* [[org.apache.flink.api.common.io.InputFormat]].
*/
private def createInput[T: ClassTag](
inputFormat: InputFormat[T, _],
producedType: TypeInformation[T]): DataSet[T] = {
if (inputFormat == null) {
throw new IllegalArgumentException("InputFormat must not be null.")
}
require(producedType != null, "Produced type must not be null")
wrap(new DataSource[T](javaEnv, inputFormat, producedType, getCallLocationName()))
}
/**
* Creates a DataSet from the given non-empty [[Iterable]].
*
* Note that this operation will result in a non-parallel data source, i.e. a data source with
* a parallelism of one.
*/
def fromCollection[T: ClassTag : TypeInformation](
data: Iterable[T]): DataSet[T] = {
require(data != null, "Data must not be null.")
val typeInfo = implicitly[TypeInformation[T]]
CollectionInputFormat.checkCollection(data.asJavaCollection, typeInfo.getTypeClass)
val dataSource = new DataSource[T](
javaEnv,
new CollectionInputFormat[T](data.asJavaCollection, typeInfo.createSerializer(getConfig)),
typeInfo,
getCallLocationName())
wrap(dataSource)
}
/**
* Creates a DataSet from the given [[Iterator]].
*
* Note that this operation will result in a non-parallel data source, i.e. a data source with
* a parallelism of one.
*/
def fromCollection[T: ClassTag : TypeInformation] (
data: Iterator[T]): DataSet[T] = {
require(data != null, "Data must not be null.")
val typeInfo = implicitly[TypeInformation[T]]
val dataSource = new DataSource[T](
javaEnv,
new IteratorInputFormat[T](data.asJava),
typeInfo,
getCallLocationName())
wrap(dataSource)
}
/**
* Creates a new data set that contains the given elements.
*
* * Note that this operation will result in a non-parallel data source, i.e. a data source with
* a parallelism of one.
*/
def fromElements[T: ClassTag : TypeInformation](data: T*): DataSet[T] = {
require(data != null, "Data must not be null.")
val typeInfo = implicitly[TypeInformation[T]]
fromCollection(data)(implicitly[ClassTag[T]], typeInfo)
}
/**
* Creates a new data set that contains elements in the iterator. The iterator is splittable,
* allowing the framework to create a parallel data source that returns the elements in the
* iterator.
*/
def fromParallelCollection[T: ClassTag : TypeInformation](
iterator: SplittableIterator[T]): DataSet[T] = {
val typeInfo = implicitly[TypeInformation[T]]
wrap(new DataSource[T](javaEnv,
new ParallelIteratorInputFormat[T](iterator),
typeInfo,
getCallLocationName()))
}
/**
* Creates a new data set that contains a sequence of numbers. The data set will be created in
* parallel, so there is no guarantee about the order of the elements.
*
* @param from The number to start at (inclusive).
* @param to The number to stop at (inclusive).
*/
def generateSequence(from: Long, to: Long): DataSet[Long] = {
val iterator = new NumberSequenceIterator(from, to)
val source = new DataSource(
javaEnv,
new ParallelIteratorInputFormat[java.lang.Long](iterator),
BasicTypeInfo.LONG_TYPE_INFO,
getCallLocationName())
wrap(source).asInstanceOf[DataSet[Long]]
}
/** Folds a non-empty sequence of data sets into a single unioned data set. */
def union[T](sets: Seq[DataSet[T]]): DataSet[T] = {
sets.reduce(_ union _)
}
/**
* Registers a file at the distributed cache under the given name. The file will be accessible
* from any user-defined function in the (distributed) runtime under a local path. Files
* may be local files (which will be distributed via BlobServer), or files in a distributed file
* system. The runtime will copy the files temporarily to a local cache, if needed.
*
* The [[org.apache.flink.api.common.functions.RuntimeContext]] can be obtained inside UDFs
* via
* [[org.apache.flink.api.common.functions.RichFunction#getRuntimeContext]] and provides
* access via
* [[org.apache.flink.api.common.functions.RuntimeContext#getDistributedCache]]
*
* @param filePath The path of the file, as a URI (e.g. "file:///some/path" or
* "hdfs://host:port/and/path")
* @param name The name under which the file is registered.
* @param executable Flag indicating whether the file should be executable
*/
def registerCachedFile(filePath: String, name: String, executable: Boolean = false): Unit = {
javaEnv.registerCachedFile(filePath, name, executable)
}
/**
* Triggers the program execution. The environment will execute all parts of the program that have
* resulted in a "sink" operation. Sink operations are for example printing results
* [[DataSet.print]], writing results (e.g. [[DataSet.writeAsText]], [[DataSet.write]], or other
* generic data sinks created with [[DataSet.output]].
*
* The program execution will be logged and displayed with a generated default name.
*
* @return The result of the job execution, containing elapsed time and accumulators.
*/
def execute(): JobExecutionResult = {
javaEnv.execute()
}
/**
* Triggers the program execution. The environment will execute all parts of the program that have
* resulted in a "sink" operation. Sink operations are for example printing results
* [[DataSet.print]], writing results (e.g. [[DataSet.writeAsText]], [[DataSet.write]], or other
* generic data sinks created with [[DataSet.output]].
*
* The program execution will be logged and displayed with the given name.
*
* @return The result of the job execution, containing elapsed time and accumulators.
*/
def execute(jobName: String): JobExecutionResult = {
javaEnv.execute(jobName)
}
/**
* Creates the plan with which the system will execute the program, and returns it as a String
* using a JSON representation of the execution data flow graph.
*/
def getExecutionPlan() = {
javaEnv.getExecutionPlan
}
/**
* Creates the program's [[org.apache.flink.api.common.Plan]].
* The plan is a description of all data sources, data sinks,
* and operations and how they interact, as an isolated unit that can be executed with a
* [[org.apache.flink.api.common.PlanExecutor]]. Obtaining a plan and starting it with an
* executor is an alternative way to run a program and is only possible if the program only
* consists of distributed operations.
*/
def createProgramPlan(jobName: String = "") = {
if (jobName.isEmpty) {
javaEnv.createProgramPlan()
} else {
javaEnv.createProgramPlan(jobName)
}
}
}
@Public
object ExecutionEnvironment {
/**
* Sets the default parallelism that will be used for the local execution
* environment created by [[createLocalEnvironment()]].
*
* @param parallelism The default parallelism to use for local execution.
*/
@PublicEvolving
def setDefaultLocalParallelism(parallelism: Int) : Unit =
JavaEnv.setDefaultLocalParallelism(parallelism)
/**
* Gets the default parallelism that will be used for the local execution environment created by
* [[createLocalEnvironment()]].
*/
@PublicEvolving
def getDefaultLocalParallelism: Int = JavaEnv.getDefaultLocalParallelism
// --------------------------------------------------------------------------
// context environment
// --------------------------------------------------------------------------
/**
* Creates an execution environment that represents the context in which the program is
* currently executed. If the program is invoked standalone, this method returns a local
* execution environment. If the program is invoked from within the command line client
* to be submitted to a cluster, this method returns the execution environment of this cluster.
*/
def getExecutionEnvironment: ExecutionEnvironment = {
new ExecutionEnvironment(JavaEnv.getExecutionEnvironment)
}
// --------------------------------------------------------------------------
// local environment
// --------------------------------------------------------------------------
/**
* Creates a local execution environment. The local execution environment will run the
* program in a multi-threaded fashion in the same JVM as the environment was created in.
*
* This method sets the environment's default parallelism to given parameter, which
* defaults to the value set via [[setDefaultLocalParallelism(Int)]].
*/
def createLocalEnvironment(parallelism: Int = JavaEnv.getDefaultLocalParallelism):
ExecutionEnvironment = {
new ExecutionEnvironment(JavaEnv.createLocalEnvironment(parallelism))
}
/**
* Creates a local execution environment. The local execution environment will run the program in
* a multi-threaded fashion in the same JVM as the environment was created in.
* This method allows to pass a custom Configuration to the local environment.
*/
def createLocalEnvironment(customConfiguration: Configuration): ExecutionEnvironment = {
val javaEnv = JavaEnv.createLocalEnvironment(customConfiguration)
new ExecutionEnvironment(javaEnv)
}
/**
* Creates a [[ExecutionEnvironment]] for local program execution that also starts the
* web monitoring UI.
*
* The local execution environment will run the program in a multi-threaded fashion in
* the same JVM as the environment was created in. It will use the parallelism specified in the
* parameter.
*
* If the configuration key 'rest.port' was set in the configuration, that particular
* port will be used for the web UI. Otherwise, the default port (8081) will be used.
*
* @param config optional config for the local execution
* @return The created StreamExecutionEnvironment
*/
@PublicEvolving
def createLocalEnvironmentWithWebUI(config: Configuration = null): ExecutionEnvironment = {
val conf: Configuration = if (config == null) new Configuration() else config
new ExecutionEnvironment(JavaEnv.createLocalEnvironmentWithWebUI(conf))
}
/**
* Creates an execution environment that uses Java Collections underneath. This will execute in a
* single thread in the current JVM. It is very fast but will fail if the data does not fit into
* memory. This is useful during implementation and for debugging.
*
* @return
*/
@PublicEvolving
def createCollectionsEnvironment: ExecutionEnvironment = {
new ExecutionEnvironment(new CollectionEnvironment)
}
// --------------------------------------------------------------------------
// remote environment
// --------------------------------------------------------------------------
/**
* Creates a remote execution environment. The remote environment sends (parts of) the program to
* a cluster for execution. Note that all file paths used in the program must be accessible from
* the cluster. The execution will use the cluster's default parallelism, unless the
* parallelism is set explicitly via [[ExecutionEnvironment.setParallelism()]].
*
* @param host The host name or address of the master (JobManager),
* where the program should be executed.
* @param port The port of the master (JobManager), where the program should be executed.
* @param jarFiles The JAR files with code that needs to be shipped to the cluster. If the
* program uses
* user-defined functions, user-defined input formats, or any libraries,
* those must be
* provided in the JAR files.
*/
def createRemoteEnvironment(host: String, port: Int, jarFiles: String*): ExecutionEnvironment = {
new ExecutionEnvironment(JavaEnv.createRemoteEnvironment(host, port, jarFiles: _*))
}
/**
* Creates a remote execution environment. The remote environment sends (parts of) the program
* to a cluster for execution. Note that all file paths used in the program must be accessible
* from the cluster. The execution will use the specified parallelism.
*
* @param host The host name or address of the master (JobManager),
* where the program should be executed.
* @param port The port of the master (JobManager), where the program should be executed.
* @param parallelism The parallelism to use during the execution.
* @param jarFiles The JAR files with code that needs to be shipped to the cluster. If the
* program uses
* user-defined functions, user-defined input formats, or any libraries,
* those must be
* provided in the JAR files.
*/
def createRemoteEnvironment(
host: String,
port: Int,
parallelism: Int,
jarFiles: String*): ExecutionEnvironment = {
val javaEnv = JavaEnv.createRemoteEnvironment(host, port, jarFiles: _*)
javaEnv.setParallelism(parallelism)
new ExecutionEnvironment(javaEnv)
}
/**
* Creates a remote execution environment. The remote environment sends (parts of) the program
* to a cluster for execution. Note that all file paths used in the program must be accessible
* from the cluster. The custom configuration file is used to configure Akka specific
* configuration parameters for the Client only; Program parallelism can be set via
* [[ExecutionEnvironment.setParallelism]].
*
* ClusterClient configuration has to be done in the remotely running Flink instance.
*
* @param host The host name or address of the master (JobManager), where the program should be
* executed.
* @param port The port of the master (JobManager), where the program should be executed.
* @param clientConfiguration Pass a custom configuration to the Client.
* @param jarFiles The JAR files with code that needs to be shipped to the cluster. If the
* program uses user-defined functions, user-defined input formats, or any
* libraries, those must be provided in the JAR files.
* @return A remote environment that executes the program on a cluster.
*/
def createRemoteEnvironment(
host: String,
port: Int,
clientConfiguration: Configuration,
jarFiles: String*): ExecutionEnvironment = {
val javaEnv = JavaEnv.createRemoteEnvironment(host, port, clientConfiguration, jarFiles: _*)
new ExecutionEnvironment(javaEnv)
}
}
| fhueske/flink | flink-scala/src/main/scala/org/apache/flink/api/scala/ExecutionEnvironment.scala | Scala | apache-2.0 | 28,758 |
package models
import javax.inject.{Inject, Singleton}
import anorm.SqlParser._
import anorm._
import play.api.db.Database
/**
 * A named "method" record belonging to a conference.
 *
 * @param id           database primary key; None before the row is inserted
 * @param conferenceId owning conference
 * @param name         display name of the method
 * @param delta        numeric attribute stored per method — semantics not
 *                     evident from this file (TODO confirm)
 * @param synonyms     alternative names, stored as a single string
 */
case class Method(id: Option[Int], conferenceId: Int, name: String, delta: Int, synonyms: String) extends Serializable
/**
 * Data-access layer for rows of the `methods` table, implemented with Anorm.
 * Every query is additionally keyed by `conference_id`, so callers can only
 * read or modify methods belonging to the given conference.
 */
class MethodService @Inject()(db:Database) {
// Maps one `methods` row onto a Method instance.
private val answerParser: RowParser[Method] =
get[Option[Int]]("id") ~
get[Int]("conference_id") ~
get[String]("name") ~
get[Int]("delta") ~
get[String]("synonyms") map {
case id ~ conference_id ~ name ~ delta ~ synonyms =>
Method(id, conference_id, name, delta, synonyms)
}
// Looks up a method by primary key within the given conference;
// returns None when no matching row exists.
def findById(id: Int, conferenceId: Int): Option[Method] =
db.withConnection { implicit c =>
SQL("SELECT * FROM methods WHERE id = {id} AND conference_id = {conference_id}").on(
'id -> id,
'conference_id -> conferenceId
).as(answerParser.singleOpt)
}
// Looks up a method by its name within the given conference.
def findByName(conferenceId: Int, name: String): Option[Method] =
db.withConnection { implicit c =>
SQL("SELECT * FROM methods WHERE name = {name} AND conference_id = {conference_id}").on(
'name -> name,
'conference_id -> conferenceId
).as(answerParser.singleOpt)
}
// Returns all methods of a conference, sorted alphabetically by name.
def findAll(conferenceId: Int): List[Method] = {
db.withConnection { implicit c =>
SQL("SELECT * FROM methods " +
"WHERE conference_id = {conference_id} " +
"ORDER BY name ASC").on(
'conference_id -> conferenceId
).as(answerParser *)
}
}
// Inserts a new method row; returns the generated primary key (if any).
// NOTE(review): semantics of `delta` are not evident from this file — confirm.
def create(conferenceId: Int, name: String, delta: Int = 0, synonyms: String = "") =
db.withConnection { implicit c =>
SQL("INSERT INTO methods(conference_id, name, delta, synonyms) VALUES ({conference_id}, {name}, {delta}, {synonyms})").on(
'conference_id -> conferenceId,
'name -> name,
'delta -> delta,
'synonyms -> synonyms
).executeInsert()
}
// Updates name/delta/synonyms of an existing row; returns the number of rows
// affected (0 when the id/conference pair does not match).
def update(id: Int, conferenceId: Int, name: String, delta: Int, synonyms: String) =
db.withConnection { implicit c =>
SQL("UPDATE methods SET name={name}, delta={delta}, synonyms={synonyms} " +
"WHERE id={id} AND conference_id={conference_id}").on(
'id -> id,
'conference_id -> conferenceId,
'name -> name,
'delta -> delta,
'synonyms -> synonyms
).executeUpdate()
}
// Deletes the row with the given id within the conference; returns the
// number of rows affected.
def delete(id: Int, conferenceId: Int) =
db.withConnection { implicit c =>
SQL("DELETE FROM methods " +
"WHERE id={id} AND conference_id={conference_id}").on(
'id -> id,
'conference_id -> conferenceId
).executeUpdate()
} | manuelroesch/PaperValidator | app/models/Method.scala | Scala | mit | 2,486 |
package akashic.storage.backend
import com.typesafe.config.Config
import org.scalatest.{BeforeAndAfterEach, FunSuite}
// Shared test behaviour for backend abstraction layer (BAL) implementations;
// concrete suites supply the Config that selects the backend under test.
trait BALTraitTest extends FunSuite with BeforeAndAfterEach {
// Backend-specific configuration, provided by the concrete suite.
val config: Config
// Fresh BAL instance, rebuilt before every test for isolation.
var bal: BAL = _
override def beforeEach(): Unit = {
super.beforeEach()
bal = new BALFactory(config).build
}
test("add directory") {
// Create a directory under the root, then a nested directory inside it.
bal.makeDirectory(bal.getRoot, "aaa")
val dir = bal.lookup(bal.getRoot, "aaa").get
bal.makeDirectory(dir, "bbb")
}
}
// Convenience base class: mixes in the shared BAL tests with a fixed config.
abstract class BALTraitTestTemplate(val config: Config) extends BALTraitTest
| akiradeveloper/fss3 | src/test/scala/akashic/storage/backend/BALTraitTest.scala | Scala | apache-2.0 | 561 |
package com.stripe
import org.scalatest.FunSuite
import org.scalatest.matchers.ShouldMatchers
import java.util.UUID
// Shared fixtures and helpers for the Stripe integration suites below.
trait StripeSuite extends ShouldMatchers {
//set the stripe API key
// NOTE(review): key is hard-coded in source — presumably Stripe's shared
// test-mode key; confirm it is not a live secret.
apiKey = "tGN0bIwXnHdwOa85VABjPdSn8nWY7G7I"
// Stripe's standard "4242..." test card with full billing details.
val DefaultCardMap = Map(
"name" -> "Scala User",
"cvc" -> "100",
"address_line1" -> "12 Main Street",
"address_line2" -> "Palo Alto",
"address_zip" -> "94105",
"address_country" -> "USA",
"number" -> "4242424242424242",
"exp_month" -> 3,
"exp_year" -> 2015)
// Minimal parameter maps for charges, customers and plans.
val DefaultChargeMap = Map("amount" -> 100, "currency" -> "usd", "card" -> DefaultCardMap)
val DefaultCustomerMap = Map("description" -> "Scala Customer", "card" -> DefaultCardMap)
val DefaultPlanMap = Map("amount" -> 100, "currency" -> "usd", "interval" -> "month", "name" -> "Scala Plan")
// Plans and coupons need globally unique ids, so generate one per call.
def getUniquePlanId(): String = return "PLAN-%s".format(UUID.randomUUID())
def getUniquePlanMap(): Map[String,_] = return DefaultPlanMap + ("id" -> getUniquePlanId())
val DefaultInvoiceItemMap = Map("amount" -> 100, "currency" -> "usd")
// A once-off 10%-off coupon with a fresh unique id.
def getUniqueCouponMap(): Map[String,_] = Map("id" -> "COUPON-%s".format(UUID.randomUUID()),
"duration" -> "once",
"percent_off" -> 10
)
}
/** Integration tests for [[Charge]] create/retrieve/refund/list and error cases.
  * NOTE(review): these hit the live Stripe test-mode API over the network.
  */
class ChargeSuite extends FunSuite with StripeSuite {
  test("Charges can be created") {
    val charge = Charge.create(Map("amount" -> 100, "currency" -> "usd", "card" -> DefaultCardMap))
    charge.refunded should be (false)
  }
  test("Charges can be retrieved individually") {
    val createdCharge = Charge.create(DefaultChargeMap)
    val retrievedCharge = Charge.retrieve(createdCharge.id)
    createdCharge.created should equal (retrievedCharge.created)
  }
  test("Charges can be refunded") {
    val charge = Charge.create(DefaultChargeMap)
    val refundedCharge = charge.refund()
    refundedCharge.refunded should equal (true)
  }
  test("Charges can be listed") {
    val charge = Charge.create(DefaultChargeMap)
    val charges = Charge.all().data
    charges.head.isInstanceOf[Charge] should be (true)
  }
  test("Invalid card raises CardException") {
    // 4242424242424241 fails the Luhn check; the API must reject the "number" param.
    val e = intercept[CardException] {
      Charge.create(Map(
        "amount" -> 100,
        "currency" -> "usd",
        "card" -> Map("number" -> "4242424242424241", "exp_month" -> 3, "exp_year" -> 2015)
      ))
    }
    e.param.get should equal ("number")
  }
  test("CVC, address and zip checks should pass in testmode") {
    val charge = Charge.create(DefaultChargeMap)
    charge.card.cvcCheck.get should equal ("pass")
    charge.card.addressLine1Check.get should equal ("pass")
    charge.card.addressZipCheck.get should equal ("pass")
  }
}
/** Integration tests for [[Customer]] CRUD operations against the Stripe test API. */
class CustomerSuite extends FunSuite with StripeSuite {
  test("Customers can be created") {
    val customer = Customer.create(DefaultCustomerMap + ("description" -> "Test Description"))
    customer.description.get should be ("Test Description")
    customer.defaultCard.isEmpty should be (false)
  }
  test("Customers can be retrieved individually") {
    val createdCustomer = Customer.create(DefaultCustomerMap)
    val retrievedCustomer = Customer.retrieve(createdCustomer.id)
    createdCustomer.created should equal (retrievedCustomer.created)
  }
  test("Customers can be updated") {
    val customer = Customer.create(DefaultCustomerMap)
    val updatedCustomer = customer.update(Map("description" -> "Updated Scala Customer"))
    updatedCustomer.description.get should equal ("Updated Scala Customer")
  }
  test("Customers can be deleted") {
    val customer = Customer.create(DefaultCustomerMap)
    val deletedCustomer = customer.delete()
    deletedCustomer.deleted should be (true)
    deletedCustomer.id should equal (customer.id)
  }
  test("Customers can be listed") {
    val customer = Customer.create(DefaultCustomerMap)
    val customers = Customer.all().data
    customers.head.isInstanceOf[Customer] should be (true)
  }
}
/** Integration tests for [[Plan]] CRUD and customer subscription lifecycle.
  * Each test creates a uniquely-named plan (see getUniquePlanMap) so reruns don't collide.
  */
class PlanSuite extends FunSuite with StripeSuite {
  test("Plans can be created") {
    val plan = Plan.create(getUniquePlanMap + ("interval" -> "year"))
    plan.interval should equal ("year")
  }
  test("Plans can be retrieved individually") {
    val createdPlan = Plan.create(getUniquePlanMap)
    val retrievedPlan = Plan.retrieve(createdPlan.id)
    createdPlan should equal (retrievedPlan)
  }
  test("Plans can be deleted") {
    val plan = Plan.create(getUniquePlanMap)
    val deletedPlan = plan.delete()
    deletedPlan.deleted should be (true)
    deletedPlan.id should equal (plan.id)
  }
  test("Plans can be listed") {
    val plan = Plan.create(getUniquePlanMap)
    val plans = Plan.all().data
    plans.head.isInstanceOf[Plan] should be (true)
  }
  test("Customers can be created with a plan") {
    val plan = Plan.create(getUniquePlanMap)
    val customer = Customer.create(DefaultCustomerMap + ("plan" -> plan.id))
    customer.subscription.get.plan.id should equal (plan.id)
  }
  test("A plan can be added to a customer without a plan") {
    val customer = Customer.create(DefaultCustomerMap)
    val plan = Plan.create(getUniquePlanMap)
    val subscription = customer.updateSubscription(Map("plan" -> plan.id))
    subscription.customer should equal (customer.id)
    subscription.plan.id should equal (plan.id)
  }
  test("A customer's existing plan can be replaced") {
    val origPlan = Plan.create(getUniquePlanMap)
    val customer = Customer.create(DefaultCustomerMap + ("plan" -> origPlan.id))
    customer.subscription.get.plan.id should equal (origPlan.id)
    val newPlan = Plan.create(getUniquePlanMap)
    val subscription = customer.updateSubscription(Map("plan" -> newPlan.id))
    // Re-fetch to confirm the change is visible server-side, not just locally.
    val updatedCustomer = Customer.retrieve(customer.id)
    updatedCustomer.subscription.get.plan.id should equal (newPlan.id)
  }
  test("Customer subscriptions can be canceled") {
    val plan = Plan.create(getUniquePlanMap)
    val customer = Customer.create(DefaultCustomerMap + ("plan" -> plan.id))
    customer.subscription.get.status should equal ("active")
    val canceledSubscription = customer.cancelSubscription()
    canceledSubscription.status should be ("canceled")
  }
}
/** Integration tests for [[InvoiceItem]] CRUD against the Stripe test API. */
class InvoiceItemSuite extends FunSuite with StripeSuite {
  // Helper: invoice items require an owning customer, so create one first.
  def createDefaultInvoiceItem(): InvoiceItem = {
    val customer = Customer.create(DefaultCustomerMap)
    return InvoiceItem.create(DefaultInvoiceItemMap + ("customer" -> customer.id))
  }
  test("InvoiceItems can be created") {
    val invoiceItem = createDefaultInvoiceItem()
    invoiceItem.date should be > (0L)
  }
  test("InvoiceItems can be retrieved individually") {
    val createdInvoiceItem = createDefaultInvoiceItem()
    val retrievedInvoiceItem = InvoiceItem.retrieve(createdInvoiceItem.id)
    createdInvoiceItem.date should equal (retrievedInvoiceItem.date)
  }
  test("InvoiceItems can be updated") {
    val invoiceItem = createDefaultInvoiceItem()
    val updatedInvoiceItem = invoiceItem.update(Map(
      "amount" -> 200, "description" -> "Updated Scala InvoiceItem"
    ))
    updatedInvoiceItem.amount should equal (200)
    updatedInvoiceItem.description.get should equal ("Updated Scala InvoiceItem")
  }
  test("InvoiceItems can be deleted") {
    val invoiceItem = createDefaultInvoiceItem()
    val deletedInvoiceItem = invoiceItem.delete()
    deletedInvoiceItem.deleted should be (true)
    deletedInvoiceItem.id should equal (invoiceItem.id)
  }
  test("InvoiceItems can be listed") {
    val invoiceItem = createDefaultInvoiceItem()
    val invoiceItems = InvoiceItem.all().data
    invoiceItems.head.isInstanceOf[InvoiceItem] should be (true)
  }
}
/** Integration tests for [[Invoice]] retrieval and listing.
  * Invoices are generated by Stripe when a customer subscribes to a plan.
  */
class InvoiceSuite extends FunSuite with StripeSuite {
  test("Invoices can be retrieved individually") {
    val plan = Plan.create(getUniquePlanMap)
    val customer = Customer.create(DefaultCustomerMap + ("plan" -> plan.id))
    val invoices = Invoice.all(Map("customer" -> customer.id)).data
    val createdInvoice = invoices.head
    val retrievedInvoice = Invoice.retrieve(createdInvoice.id.get)
    retrievedInvoice.id should equal (createdInvoice.id)
  }
  test("Invoices can be listed") {
    val plan = Plan.create(getUniquePlanMap)
    val customer = Customer.create(DefaultCustomerMap + ("plan" -> plan.id))
    val invoices = Invoice.all().data
    invoices.head.isInstanceOf[Invoice] should be (true)
  }
  test("Invoices can be retrieved for a customer") {
    val plan = Plan.create(getUniquePlanMap)
    val customer = Customer.create(DefaultCustomerMap + ("plan" -> plan.id))
    val invoices = Invoice.all(Map("customer" -> customer.id)).data
    val invoice = invoices.head
    invoice.customer should equal (customer.id)
    val invoiceLineSubscription = invoice.lines.subscriptions.head
    invoiceLineSubscription.plan.id should equal (plan.id)
  }
  test("Upcoming Invoices can be retrieved") {
    val customer = Customer.create(DefaultCustomerMap)
    val customerId = customer.id
    val invoiceItem = InvoiceItem.create(DefaultInvoiceItemMap + ("customer" -> customerId))
    val upcomingInvoice = Invoice.upcoming(Map("customer" -> customerId))
    // NOTE(review): assertion below was disabled by the original authors,
    // presumably because the API value was unstable — confirm before re-enabling.
    // upcomingInvoice.attempted should be (false)
  }
}
/** Integration tests for single-use card [[Token]]s. */
class TokenSuite extends FunSuite with StripeSuite {
  test("Tokens can be created") {
    val token = Token.create(Map("card" -> DefaultCardMap))
    token.used should be (false)
  }
  test("Tokens can be retrieved") {
    val createdToken = Token.create(Map("card" -> DefaultCardMap))
    val retrievedToken = Token.retrieve(createdToken.id)
    createdToken.created should equal (retrievedToken.created)
  }
  test("Tokens can be used") {
    val createdToken = Token.create(Map("card" -> DefaultCardMap))
    createdToken.used should be (false)
    // Charging with the token id consumes it; a re-fetch must show used=true.
    val charge = Charge.create(Map("amount" -> 100, "currency" -> "usd", "card" -> createdToken.id))
    val retrievedToken = Token.retrieve(createdToken.id)
    retrievedToken.used should equal (true)
  }
}
/** Integration tests for [[Coupon]] CRUD; each test uses a unique coupon id. */
class CouponSuite extends FunSuite with StripeSuite {
  test("Coupons can be created") {
    val coupon = Coupon.create(getUniqueCouponMap)
    coupon.percentOff should equal (10)
  }
  test("Coupons can be retrieved individually") {
    val createdCoupon = Coupon.create(getUniqueCouponMap)
    val retrievedCoupon = Coupon.retrieve(createdCoupon.id)
    createdCoupon should equal (retrievedCoupon)
  }
  test("Coupons can be deleted") {
    val coupon = Coupon.create(getUniqueCouponMap)
    val deletedCoupon = coupon.delete()
    deletedCoupon.deleted should be (true)
    deletedCoupon.id should equal (coupon.id)
  }
  test("Coupons can be listed") {
    val coupon = Coupon.create(getUniqueCouponMap)
    val coupons = Coupon.all().data
    coupons.head.isInstanceOf[Coupon] should be (true)
  }
}
/** Integration test for [[Account]] retrieval.
  * Expected values are hard-coded properties of the shared test account
  * (see the apiKey in StripeSuite); they will fail under a different key.
  */
class AccountSuite extends FunSuite with StripeSuite {
  test("Account can be retrieved") {
    val account = Account.retrieve
    account.email should equal (Some("test+bindings@stripe.com"))
    account.chargeEnabled should equal (false)
    account.detailsSubmitted should be (false)
    account.statementDescriptor should be (None)
    account.currenciesSupported.length should be (1)
    account.currenciesSupported.head should be ("USD")
  }
}
| GalacticFog/stripe-scala | src/test/scala/com/stripe/StripeSuite.scala | Scala | mit | 11,137 |
package ionroller
import scalaz.Equal
/** The desired state of the whole system: one desired state per timeline, keyed by name. */
final case class DesiredSystemState(timelines: Map[TimelineName, DesiredTimelineState])
object DesiredSystemState {
  // scalaz Equal instance based on universal (==) equality of the case class.
  implicit lazy val desiredSystemStateEquality = Equal.equalA[DesiredSystemState]
}
| browngeek666/ionroller | core/src/main/scala/ionroller/DesiredSystemState.scala | Scala | mit | 241 |
package com.twitter.finagle.http
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
/** Unit tests for the always-empty [[EmptyParamMap]]. */
@RunWith(classOf[JUnitRunner])
class EmptyParamMapTest extends FunSuite {
  test("isValid") {
    // Prefer assert(cond) over assert(cond == true).
    assert(EmptyParamMap.isValid)
  }
  test("get") {
    // No key is ever present in the empty map.
    assert(EmptyParamMap.get("key").isEmpty)
  }
  test("getAll") {
    assert(EmptyParamMap.getAll("key").isEmpty)
  }
  test("+") {
    // Adding a binding yields a map that contains exactly that binding.
    val map = EmptyParamMap + ("key" -> "value")
    assert(map.get("key").contains("value"))
  }
  test("-") {
    // Removing from the empty map is a no-op.
    val map = EmptyParamMap - "key"
    assert(map.get("key").isEmpty)
  }
}
| koshelev/finagle | finagle-http/src/test/scala/com/twitter/finagle/http/EmptyParamMapTest.scala | Scala | apache-2.0 | 627 |
package com.sksamuel.scrimage.filter
import com.sksamuel.scrimage.ImmutableImage
import org.scalatest.{BeforeAndAfter, FunSuite, OneInstancePerTest}
/** Checks that applying [[DiffuseFilter]] actually changes the source image. */
class DiffuseFilterTest extends FunSuite with BeforeAndAfter with OneInstancePerTest {
  // Small reference image loaded from the test resources on the classpath.
  val original = ImmutableImage.fromStream(getClass.getResourceAsStream("/bird_small.png"))
  test("filter output matches expected") {
    // The filtered image must differ from the unfiltered input.
    assert(original.filter(new DiffuseFilter()) != original)
  }
}
| sksamuel/scrimage | scrimage-filters/src/test/scala/com/sksamuel/scrimage/filter/DiffuseFilterTest.scala | Scala | apache-2.0 | 442 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.tools
import java.net.URI
import java.text.SimpleDateFormat
import kafka.api.{PartitionOffsetRequestInfo, FetchRequestBuilder, OffsetRequest}
import kafka.consumer.SimpleConsumer
import kafka.utils._
import org.apache.log4j.Logger
import kafka.common.TopicAndPartition
/**
* Performance test for the simple consumer
*/
/**
 * Performance test for the simple consumer.
 *
 * Fetches repeatedly from a single topic/partition and reports throughput
 * (MB/sec and messages/sec) either as one summary line or as periodic
 * detailed stats, depending on the command-line flags.
 */
object SimpleConsumerPerformance {
  def main(args: Array[String]): Unit = {
    val config = new ConsumerPerfConfig(args)

    // Print the CSV header describing the stats columns, unless suppressed.
    if(!config.hideHeader) {
      if(!config.showDetailedStats)
        println("start.time, end.time, fetch.size, data.consumed.in.MB, MB.sec, data.consumed.in.nMsg, nMsg.sec")
      else
        println("time, fetch.size, data.consumed.in.MB, MB.sec, data.consumed.in.nMsg, nMsg.sec")
    }

    val consumer = new SimpleConsumer(config.url.getHost, config.url.getPort, 30*1000, 2*config.fetchSize, config.clientId)

    // Resolve the starting offset: latest or earliest, per --from-latest.
    val topicAndPartition = TopicAndPartition(config.topic, config.partition)
    val request = OffsetRequest(Map(
      topicAndPartition -> PartitionOffsetRequestInfo(if (config.fromLatest) OffsetRequest.LatestTime else OffsetRequest.EarliestTime, 1)
    ))
    var offset: Long = consumer.getOffsetsBefore(request).partitionErrorAndOffsets(topicAndPartition).offsets.head

    val startMs = System.currentTimeMillis
    var done = false
    var totalBytesRead = 0L
    var totalMessagesRead = 0L
    var consumedInterval = 0
    var lastReportTime: Long = startMs
    var lastBytesRead = 0L
    var lastMessagesRead = 0L
    while(!done) {
      // TODO: add in the maxWait and minBytes for performance
      // Renamed from `request` so it no longer shadows the offset request above.
      val fetchRequest = new FetchRequestBuilder()
        .clientId(config.clientId)
        .addFetch(config.topic, config.partition, offset, config.fetchSize)
        .build()
      val fetchResponse = consumer.fetch(fetchRequest)

      var messagesRead = 0
      var bytesRead = 0
      val messageSet = fetchResponse.messageSet(config.topic, config.partition)
      for (message <- messageSet) {
        messagesRead += 1
        bytesRead += message.message.payloadSize
      }

      // Stop when a fetch returns nothing (caught up) or enough messages were read.
      if(messagesRead == 0 || totalMessagesRead > config.numMessages)
        done = true
      else
        // we only did one fetch so we find the offset for the first (head) messageset
        offset += messageSet.validBytes
      totalBytesRead += bytesRead
      totalMessagesRead += messagesRead
      consumedInterval += messagesRead

      if(consumedInterval > config.reportingInterval) {
        if(config.showDetailedStats) {
          val reportTime = System.currentTimeMillis
          val elapsed = (reportTime - lastReportTime)/1000.0
          val totalMBRead = ((totalBytesRead-lastBytesRead)*1.0)/(1024*1024)
          println(("%s, %d, %.4f, %.4f, %d, %.4f").format(config.dateFormat.format(reportTime), config.fetchSize,
            (totalBytesRead*1.0)/(1024*1024), totalMBRead/elapsed,
            totalMessagesRead, (totalMessagesRead-lastMessagesRead)/elapsed))
        }
        // Consistently use System.currentTimeMillis (was SystemTime.milliseconds).
        lastReportTime = System.currentTimeMillis
        lastBytesRead = totalBytesRead
        lastMessagesRead = totalMessagesRead
        consumedInterval = 0
      }
    }
    val reportTime = System.currentTimeMillis
    val elapsed = (reportTime - startMs) / 1000.0
    if(!config.showDetailedStats) {
      val totalMBRead = (totalBytesRead*1.0)/(1024*1024)
      println(("%s, %s, %d, %.4f, %.4f, %d, %.4f").format(config.dateFormat.format(startMs),
        config.dateFormat.format(reportTime), config.fetchSize, totalMBRead, totalMBRead/elapsed,
        totalMessagesRead, totalMessagesRead/elapsed))
    }
    System.exit(0)
  }

  /** Command-line options for the simple-consumer performance test. */
  class ConsumerPerfConfig(args: Array[String]) extends PerfConfig(args) {
    val urlOpt = parser.accepts("server", "REQUIRED: The hostname of the server to connect to.")
                           .withRequiredArg
                           .describedAs("kafka://hostname:port")
                           .ofType(classOf[String])
    val topicOpt = parser.accepts("topic", "REQUIRED: The topic to consume from.")
      .withRequiredArg
      .describedAs("topic")
      .ofType(classOf[String])
    val resetBeginningOffsetOpt = parser.accepts("from-latest", "If the consumer does not already have an established " +
      "offset to consume from, start with the latest message present in the log rather than the earliest message.")
    val partitionOpt = parser.accepts("partition", "The topic partition to consume from.")
      .withRequiredArg
      .describedAs("partition")
      .ofType(classOf[java.lang.Integer])
      .defaultsTo(0)
    val fetchSizeOpt = parser.accepts("fetch-size", "REQUIRED: The fetch size to use for consumption.")
      .withRequiredArg
      .describedAs("bytes")
      .ofType(classOf[java.lang.Integer])
      .defaultsTo(1024*1024)
    val clientIdOpt = parser.accepts("clientId", "The ID of this client.")
      .withRequiredArg
      .describedAs("clientId")
      .ofType(classOf[String])
      .defaultsTo("SimpleConsumerPerformanceClient")

    val options = parser.parse(args : _*)

    // Fail fast if any required option is missing.
    for(arg <- List(topicOpt, urlOpt)) {
      if(!options.has(arg)) {
        System.err.println("Missing required argument \"" + arg + "\"")
        parser.printHelpOn(System.err)
        System.exit(1)
      }
    }
    val url = new URI(options.valueOf(urlOpt))
    val fetchSize = options.valueOf(fetchSizeOpt).intValue
    val fromLatest = options.has(resetBeginningOffsetOpt)
    val partition = options.valueOf(partitionOpt).intValue
    val topic = options.valueOf(topicOpt)
    val numMessages = options.valueOf(numMessagesOpt).longValue
    val reportingInterval = options.valueOf(reportingIntervalOpt).intValue
    val showDetailedStats = options.has(showDetailedStatsOpt)
    val dateFormat = new SimpleDateFormat(options.valueOf(dateFormatOpt))
    val hideHeader = options.has(hideHeaderOpt)
    val clientId = options.valueOf(clientIdOpt).toString
  }
}
| stealthly/kafka | core/src/main/scala/kafka/tools/SimpleConsumerPerformance.scala | Scala | apache-2.0 | 7,037 |
package org.http4s
/** Mixes in [[ArgonautInstances]] so `import org.http4s.argonaut._` brings the instances into scope. */
package object argonaut extends ArgonautInstances
| hvesalai/http4s | argonaut/src/main/scala/org/http4s/argonaut/package.scala | Scala | apache-2.0 | 70 |
package sgl
package html5
import scala.scalajs.js
import org.scalajs.dom
import dom.html
import sgl.util._
/** AudioProvider implementation using Cordova.
*
* This requires the plugin cordova-plugin-media:
* cordova plugin add cordova-plugin-media
* You will also need to setup your own corodva project
* and import cordova.js in your index.html file:
* <script type="text/javascript" src="cordova.js"></script>
*
* This provider assumes the above is setup, and will use
* the Cordova Media API in order to implement the AudioProvider.
*
* This implementation has some limitations:
* - You cannot start looping a Music once it's already playing.
* - You cannot end a loop in a played sound.
* - Most looping features are only working on iOS
*
* Besides these, it seems the plugin have some bugs on iOS, because I have
* observed some basic sounds that have been randomly looping, and
* kept looping forever. That makes it pretty much unusable unless
* this can be fixed. I suspect the problem might be related to using
* a high number of sounds, and thus instantiating a high number of
* Media (which are AVAudioPlayer in iOS, which are relatively heavy.
* Or it might be a weird bug in the plugin implementation (like
* sharing a player or reusing one that is set to loop?). Either way,
* it's not clear what to do to address this.
*
* My understanding is that this plugin is not really meant for the use
* case of playing many small sound effects, but rather is meant for
* long-running music, and it works well for the Music class, but not
* the Sound class.
*/
trait CordovaMediaAudioProvider extends AudioProvider {
  this: Html5SystemProvider with Html5InputProvider =>

  /** The set of supported audio file extensions.
    *
    * The load methods pick, among the provided resources, the first one
    * whose extension is in this set.
    *
    * The default value is the list of formats supported on iOS, as iOS is
    * the main target of this Cordova plugin.
    */
  val SupportedAudioFormats: Set[String] = Set("ogg", "wav", "m4a", "mp3")

  // Media status codes reported by cordova-plugin-media's status callback.
  private val MediaNone = 0
  private val MediaStarting = 1
  private val MediaRunning = 2
  private val MediaPaused = 3
  private val MediaStopped = 4

  object CordovaMediaAudio extends Audio {

    // NOTE(review): pooling code kept for reference by the original authors;
    // it would allow playing the same sound concurrently via multiple tags.
    //class SoundTagInstance(val loader: Loader[HTMLAudioElement], var inUse: Boolean, var loop: Int)
    //private class SoundTagPool(pathes: Seq[ResourcePath], initialTag: HTMLAudioElement) {
    //  private var audioTags: Vector[SoundTagInstance] = Vector(
    //    new SoundTagInstance(Loader.successful(initialTag), false, 0)
    //  )
    //  def getReadyTag(): SoundTagInstance = {
    //    for(i <- 0 until audioTags.length) {
    //      if(!audioTags(i).inUse) {
    //        audioTags(i).inUse = true
    //        return audioTags(i)
    //      }
    //    }
    //    // None are free, we need to instantiate a new one.
    //    val t = new SoundTagInstance(loadAudioTag(pathes), true, 0)
    //    audioTags = audioTags :+ t
    //    t
    //  }
    //  def returnTag(soundTag: SoundTagInstance): Unit = {
    //    soundTag.inUse = false
    //  }
    //}

    /** A Sound backed by a single cordova-plugin-media Media object. */
    class CordovaMediaSound(path: ResourcePath, loop: Int = 0, rate: Float = 1f) extends AbstractSound {

      type PlayedSound = js.Dynamic.global.Media

      // The underlying Media instance; callbacks are wired at construction.
      var media: js.Dynamic = js.Dynamic.newInstance(js.Dynamic.global.Media)(
        path.path,
        () => {
          // println("success callback")
        },
        (code: Int) => {
          // println("failure: " + code)
        },
        onStatusChange _)

      def onStatusChange(code: Int): Unit = {
        if(code == MediaStopped) {
          // reset the media for next play call.
          media.seekTo(0)
        }
      }

      override def play(volume: Float): Option[PlayedSound] = {
        // TODO: multi play should spawn multi media.
        media.setVolume(volume)
        media.setRate(rate)
        // TODO: numberOfLoops is only supported on iOS.
        media.play(js.Dynamic.literal(numberOfLoops = loop))
        Some(media)
      }
      override def withConfig(loop: Int, rate: Float): Sound = {
        // Builds a fresh Sound for the same resource with new loop/rate settings.
        new CordovaMediaSound(path, loop, rate)
      }
      override def dispose(): Unit = {
        // Releases the native audio resources held by the Media object.
        media.release()
      }

      override def stop(id: PlayedSound): Unit = {
        id.stop()
      }

      override def pause(id: PlayedSound): Unit = {
        id.pause()
      }

      override def resume(id: PlayedSound): Unit = {
        id.play()
      }

      override def endLoop(id: PlayedSound): Unit = {
        // TODO: not supported.
      }

    }
    type Sound = CordovaMediaSound

    override def loadSound(path: ResourcePath, extras: ResourcePath*): Loader[Sound] = {
      // Pick the first resource with a supported extension, defaulting to `path`.
      val bestPath = (path +: extras).find(p => SupportedAudioFormats.contains(p.extension.getOrElse(""))).getOrElse(path)
      Loader.successful(new CordovaMediaSound(bestPath))
    }

    /** A Music backed by a cordova-plugin-media Media object. */
    class CordovaMediaMusic(media: js.Dynamic) extends AbstractMusic {

      private var isLooping = false
      private var volume = 1f

      override def play(): Unit = {
        // TODO: {numberOfLoops = -1} is an iOS only option, so it won't work on
        // other platforms, but we only use cordova for iOS so that's probably fine?
        if(isLooping)
          media.play(js.Dynamic.literal(numberOfLoops = -1))
        else
          media.play()
        media.setVolume(volume)
      }
      override def pause(): Unit = {
        media.pause()
      }
      override def stop(): Unit = {
        media.stop()
      }
      override def setVolume(volume: Float): Unit = {
        media.setVolume(volume)
      }
      override def setLooping(isLooping: Boolean): Unit = {
        // TODO: support on already playing music.
        this.isLooping = isLooping
      }

      override def dispose(): Unit = {}
    }
    type Music = CordovaMediaMusic

    override def loadMusic(path: ResourcePath, extras: ResourcePath*): Loader[Music] = {
      // Pick the first resource with a supported extension, defaulting to `path`.
      val bestPath = (path +: extras).find(p => SupportedAudioFormats.contains(p.extension.getOrElse(""))).getOrElse(path)
      val media = js.Dynamic.newInstance(js.Dynamic.global.Media)(bestPath.path,
        () => { println("success callback") },
        (code: Int) => { println("failure: " + code) },
        (code: Int) => { println("status: " + code) },
      )
      Loader.successful(new CordovaMediaMusic(media))
    }
  }
  override val Audio: Audio = CordovaMediaAudio

}
| regb/scala-game-library | html5/cordova/src/main/scala/sgl/html5/CordovaMediaAudioProvider.scala | Scala | mit | 6,468 |
// scalac: -Xasync
import scala.concurrent._
import ExecutionContext.Implicits.global
import scala.tools.testkit.async.Async._
import scala.concurrent.duration.Duration
// Regression test: `await` inside one branch of a pattern with an alternative
// (`null | None`) must be handled correctly by the async transform.
object Test extends App { test
  def test: Any = Await.result({
    def one = async { 1 }
    def test = async {
      Option(true) match {
        // Alternative pattern with no await in this branch.
        case null | None => false
        // Branch containing an await before yielding the matched value.
        case Some(v) => await(one); v
      }
    }
    test
  }, Duration.Inf)
}
| scala/scala | test/async/jvm/concurrent_patternAlternative.scala | Scala | apache-2.0 | 433 |
//package org.cloudio.morpheus.samples
//
///**
// * Created by zslajchrt on 13/01/15.
// *
// */
//class Hierarchy {
//
//}
//
//
//class BaseClass1 extends Sample1 {
//
// def getX = "X"
//
// def printText(s: String): Unit = {
// println(s)
// }
//}
//
//class BaseClass2 extends BaseClass1 {
//
// def getY = "Y"
//
// override def printText(s: String): Unit = {
// super.printText(s + "1")
// }
//}
//
//trait Trait1 extends BaseClass2 {
//
// def ww: Int = 1
//
// override def printText(s: String): Unit = {
// super.printText(s + "2")
// }
//}
//
//trait TraitY {
// def doSomething(): String
//}
//
//trait TraitX extends BaseClass2 with TraitY {
//
// def doSomething(): String = {
// "X"
// }
//
//}
//
//trait Trait2 extends TraitX {
// def getZ = "Z"
// override def printText(s: String): Unit = {
// super.printText(s + "3" + uu(2) + doSomething())
// }
//
// override def doSomething() = "Y" + super.doSomething()
//
// def uu(i: Int): Int
//}
//abstract class Trait2fragment extends Trait2
//
//trait Trait3 extends Trait2 {
//
// this: Trait1 =>
//
// private[this] var k: Int = 0
// private[this] lazy val tt: Int = 0
// //override def getZ: String = super.getZ
//
// override def printText(s: String): Unit = {
// k += 1
// val s2 = s + getX + getY + getZ + u + tt + ww
// super.printText(s + "4")
// }
//}
//abstract class Trait3fragment extends Trait3 {
// this: Trait1 =>
//}
//
//trait BaseClass2Interceptor1 extends BaseClass2 {
// override def getX: String = super.getX + "!"
//}
//
//trait TraitYInterceptor1 extends TraitY {
// abstract override def doSomething(): String = super.doSomething()
//}
//
//class BBB extends TraitX with TraitYInterceptor1 {
//}
//
//
//abstract class DerivedClass extends BaseClass2 with Trait1 with Trait3 with BaseClass2Interceptor1 {
// override def printText(s: String): Unit = super.printText(s + "5")
//} | zslajchrt/morpheus-tests | src/main/scala/org/cloudio/morpheus/samples/Hierarchy.scala | Scala | apache-2.0 | 1,918 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.geode.spark.connector.internal.geodefunctions
import org.apache.geode.DataSerializer
import org.apache.geode.cache.execute.{ResultCollector, ResultSender}
import org.apache.geode.cache.query.internal.types.{ObjectTypeImpl, StructTypeImpl}
import org.apache.geode.cache.query.types.ObjectType
import org.apache.geode.internal.{Version, ByteArrayDataInput, HeapDataOutputStream}
import org.apache.geode.internal.cache.{CachedDeserializable, CachedDeserializableFactory}
import org.scalatest.{BeforeAndAfter, FunSuite}
import scala.collection.JavaConversions._
import scala.concurrent.{Await, ExecutionContext, Future}
import ExecutionContext.Implicits.global
import scala.concurrent.duration._
class StructStreamingResultSenderAndCollectorTest extends FunSuite with BeforeAndAfter {
/**
* A test ResultSender that connects struct ResultSender and ResultCollector
* Note: this test ResultSender has to copy the data (byte array) since the
* StructStreamingResultSender will reuse the byte array.
*/
class LocalResultSender(collector: ResultCollector[Array[Byte], _], num: Int = 1) extends ResultSender[Object] {
var finishedNum = 0
override def sendResult(result: Object): Unit =
collector.addResult(null, result.asInstanceOf[Array[Byte]].clone())
/** exception should be sent via lastResult() */
override def sendException(throwable: Throwable): Unit =
throw new UnsupportedOperationException("sendException is not supported.")
override def lastResult(result: Object): Unit = {
collector.addResult(null, result.asInstanceOf[Array[Byte]].clone())
this.synchronized {
finishedNum += 1
if (finishedNum == num)
collector.endResults()
}
}
}
/** common variables */
var collector: StructStreamingResultCollector = _
var baseSender: LocalResultSender = _
/** common types */
val objType = new ObjectTypeImpl("java.lang.Object").asInstanceOf[ObjectType]
val TwoColType = new StructTypeImpl(Array("key", "value"), Array(objType, objType))
val OneColType = new StructTypeImpl(Array("value"), Array(objType))
before {
collector = new StructStreamingResultCollector
baseSender = new LocalResultSender(collector, 1)
}
test("transfer simple data") {
verifySimpleTransfer(sendDataType = true)
}
test("transfer simple data with no type info") {
verifySimpleTransfer(sendDataType = false)
}
def verifySimpleTransfer(sendDataType: Boolean): Unit = {
val iter = (0 to 9).map(i => Array(i.asInstanceOf[Object], (i.toString * 5).asInstanceOf[Object])).toIterator
val dataType = if (sendDataType) TwoColType else null
new StructStreamingResultSender(baseSender, dataType , iter).send()
// println("type: " + collector.getResultType.toString)
assert(TwoColType.equals(collector.getResultType))
val iter2 = collector.getResult
(0 to 9).foreach { i =>
assert(iter2.hasNext)
val o = iter2.next()
assert(o.size == 2)
assert(o(0).asInstanceOf[Int] == i)
assert(o(1).asInstanceOf[String] == i.toString * 5)
}
assert(! iter2.hasNext)
}
/**
* A test iterator that generate integer data
* @param start the 1st value
* @param n number of integers generated
* @param genExcp generate Exception if true. This is used to test exception handling.
*/
def intIterator(start: Int, n: Int, genExcp: Boolean): Iterator[Array[Object]] = {
new Iterator[Array[Object]] {
val max = if (genExcp) start + n else start + n - 1
var index: Int = start - 1
override def hasNext: Boolean = if (index < max) true else false
override def next(): Array[Object] =
if (index < (start + n - 1)) {
index += 1
Array(index.asInstanceOf[Object])
} else throw new RuntimeException("simulated error")
}
}
test("transfer data with 0 row") {
new StructStreamingResultSender(baseSender, OneColType, intIterator(1, 0, genExcp = false)).send()
// println("type: " + collector.getResultType.toString)
assert(collector.getResultType == null)
val iter = collector.getResult
assert(! iter.hasNext)
}
test("transfer data with 10K rows") {
new StructStreamingResultSender(baseSender, OneColType, intIterator(1, 10000, genExcp = false)).send()
// println("type: " + collector.getResultType.toString)
assert(OneColType.equals(collector.getResultType))
val iter = collector.getResult
// println(iter.toList.map(list => list.mkString(",")).mkString("; "))
(1 to 10000).foreach { i =>
assert(iter.hasNext)
val o = iter.next()
assert(o.size == 1)
assert(o(0).asInstanceOf[Int] == i)
}
assert(! iter.hasNext)
}
test("transfer data with 10K rows with 2 sender") {
baseSender = new LocalResultSender(collector, 2)
val total = 300
val sender1 = Future { new StructStreamingResultSender(baseSender, OneColType, intIterator(1, total/2, genExcp = false), "sender1").send()}
val sender2 = Future { new StructStreamingResultSender(baseSender, OneColType, intIterator(total/2+1, total/2, genExcp = false), "sender2").send()}
Await.result(sender1, 1.seconds)
Await.result(sender2, 1.seconds)
// println("type: " + collector.getResultType.toString)
assert(OneColType.equals(collector.getResultType))
val iter = collector.getResult
// println(iter.toList.map(list => list.mkString(",")).mkString("; "))
val set = scala.collection.mutable.Set[Int]()
(1 to total).foreach { i =>
assert(iter.hasNext)
val o = iter.next()
assert(o.size == 1)
assert(! set.contains(o(0).asInstanceOf[Int]))
set.add(o(0).asInstanceOf[Int])
}
assert(! iter.hasNext)
}
// Two concurrent senders where the second one raises mid-stream: the
// collector's iterator must surface the failure as a RuntimeException while
// still delivering unique rows up to that point.
test("transfer data with 10K rows with 2 sender with error") {
  baseSender = new LocalResultSender(collector, 2)
  val total = 1000
  val sender1 = Future { new StructStreamingResultSender(baseSender, OneColType, intIterator(1, total/2, genExcp = false), "sender1").send()}
  val sender2 = Future { new StructStreamingResultSender(baseSender, OneColType, intIterator(total/2+1, total/2, genExcp = true), "sender2").send()}
  // FIX: use `1.seconds` (DurationInt dot syntax) instead of the deprecated
  // postfix form `1 seconds`, which requires `scala.language.postfixOps`;
  // this also matches the sibling test above.
  Await.result(sender1, 1.seconds)
  Await.result(sender2, 1.seconds)
  // println("type: " + collector.getResultType.toString)
  assert(OneColType.equals(collector.getResultType))
  val iter = collector.getResult
  // println(iter.toList.map(list => list.mkString(",")).mkString("; "))
  val set = scala.collection.mutable.Set[Int]()
  // The iterator is expected to throw before all `total` rows are consumed.
  intercept[RuntimeException] {
    (1 to total).foreach { i =>
      assert(iter.hasNext)
      val o = iter.next()
      assert(o.size == 1)
      assert(! set.contains(o(0).asInstanceOf[Int]))
      set.add(o(0).asInstanceOf[Int])
    }
  }
  // println(s"rows received: ${set.size}")
}
// A sender that raises while producing rows must propagate the failure as a
// RuntimeException to the consumer of the collector's iterator.
test("transfer data with Exception") {
  val failingSender =
    new StructStreamingResultSender(baseSender, OneColType, intIterator(1, 200, genExcp = true))
  failingSender.send()
  // println("type: " + collector.getResultType.toString)
  val iter = collector.getResult
  intercept[RuntimeException] {
    iter.foreach(_.mkString(","))
  }
}
/** Produces n two-column (key-i, value-i) string rows backed by `intIterator`. */
def stringPairIterator(n: Int, genExcp: Boolean): Iterator[Array[Object]] =
  intIterator(1, n, genExcp).map { row =>
    val id = row(0)
    Array[Object](s"key-$id", s"value-$id")
  }
// Verifies ordered transfer of two-column (key, value) string rows.
// NOTE(review): the test name says "200 rows" but 1000 rows are sent and
// checked — the name looks stale; confirm the intended row count.
test("transfer string pair data with 200 rows") {
  new StructStreamingResultSender(baseSender, TwoColType, stringPairIterator(1000, genExcp = false)).send()
  // println("type: " + collector.getResultType.toString)
  assert(TwoColType.equals(collector.getResultType))
  val iter = collector.getResult
  // println(iter.toList.map(list => list.mkString(",")).mkString("; "))
  // Single sender, so rows must arrive in generation order.
  (1 to 1000).foreach { i =>
    assert(iter.hasNext)
    val o = iter.next()
    assert(o.size == 2)
    assert(o(0) == s"key-$i")
    assert(o(1) == s"value-$i")
  }
  assert(! iter.hasNext)
}
/**
* Usage notes: There are 3 kinds of data to transfer:
* (1) object, (2) byte array of serialized object, and (3) byte array
* this test shows how to handle all of them.
*/
// Demonstrates round-tripping the three payload kinds through Geode's
// DataSerializer: (1) a plain object, (2) the byte array of an
// already-serialized object, and (3) a raw byte array.
test("DataSerializer usage") {
  val outBuf = new HeapDataOutputStream(1024, null)
  val inBuf = new ByteArrayDataInput()
  // 1. a regular object
  val hello = "Hello World!" * 30
  // serialize the data
  DataSerializer.writeObject(hello, outBuf)
  // clone() because outBuf's backing buffer is reused after reset() below
  val bytesHello = outBuf.toByteArray.clone()
  // de-serialize the data
  inBuf.initialize(bytesHello, Version.CURRENT)
  val hello2 = DataSerializer.readObject(inBuf).asInstanceOf[Object]
  assert(hello == hello2)
  // 2. byte array of serialized object
  // serialize: byte array from `CachedDeserializable`
  val cd: CachedDeserializable = CachedDeserializableFactory.create(bytesHello)
  outBuf.reset()
  DataSerializer.writeByteArray(cd.getSerializedValue, outBuf)
  // de-serialize the data in 2 steps: first unwrap the byte array, then
  // deserialize the object it contains
  inBuf.initialize(outBuf.toByteArray.clone(), Version.CURRENT)
  val bytesHello2: Array[Byte] = DataSerializer.readByteArray(inBuf)
  inBuf.initialize(bytesHello2, Version.CURRENT)
  val hello3 = DataSerializer.readObject(inBuf).asInstanceOf[Object]
  assert(hello == hello3)
  // 3. byte array
  outBuf.reset()
  DataSerializer.writeByteArray(bytesHello, outBuf)
  inBuf.initialize(outBuf.toByteArray.clone(), Version.CURRENT)
  val bytesHello3: Array[Byte] = DataSerializer.readByteArray(inBuf)
  assert(bytesHello sameElements bytesHello3)
}
}
| prasi-in/geode | geode-spark-connector/geode-spark-connector/src/test/scala/org/apache/geode/spark/connector/internal/gemfirefunctions/StructStreamingResultSenderAndCollectorTest.scala | Scala | apache-2.0 | 10,317 |
package com.mesosphere
import com.mesosphere.http.MediaType
import io.lemonlabs.uri.Uri
import io.circe.testing.instances._
import java.nio.ByteBuffer
import java.util.UUID
import org.scalacheck.Arbitrary
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.Gen
import org.scalacheck.Gen.Choose
import org.scalacheck.ScalacheckShapeless._
import org.scalacheck.derive.MkArbitrary
import scala.util.Success
import scala.util.Try
object Generators {
import com.mesosphere.Generators.Implicits._
/** Generates package names: up to 64 characters drawn from digits and lowercase letters. */
val genPackageName: Gen[String] =
  maxSizedString(64, Gen.oneOf(Gen.numChar, Gen.alphaLowerChar))
/** Generates a non-negative number of type A, bounded above by ScalaCheck's size parameter. */
def nonNegNum[A](implicit C: Choose[A], N: Numeric[A]): Gen[A] =
  Gen.sized { bound =>
    Gen.chooseNum(N.zero, N.fromInt(bound))
  }
// Generates arbitrary semantic versions: numeric major/minor/patch, 0-3
// pre-release identifiers (each either a number or a lowercase word), and an
// optional build string.
private val genSemVer: Gen[universe.v3.model.SemVer] = {
  val maxStringLength = 10
  val genNumbers = nonNegNum[Long]
  val genPreReleases = for {
    seqSize <- Gen.chooseNum(0, 3)
    preReleases <- Gen.containerOfN[Seq, Either[String, Long]](
      seqSize,
      Gen.oneOf(
        genNumbers.map(Right(_)),
        Generators.nonEmptyMaxSizedString(
          maxStringLength,
          Gen.alphaLowerChar
        ).map(Left(_))
      )
    )
  } yield preReleases
  // An empty generated string is mapped to None, i.e. "no build metadata".
  val genBuild = Generators.maxSizedString(maxStringLength, Gen.alphaLowerChar).map {
    string => if (string.isEmpty) None else Some(string)
  }
  for {
    major <- genNumbers
    minor <- genNumbers
    patch <- genNumbers
    preReleases <- genPreReleases
    build <- genBuild
  } yield universe.v3.model.SemVer(major, minor, patch, preReleases, build)
}
/** Generates package versions by rendering a random SemVer as its string form. */
val genVersion: Gen[universe.v3.model.Version] =
  genSemVer.map(semVer => universe.v3.model.Version(semVer.toString))
// Generates version specifications: exact versions 20x more often than the
// AnyVersion wildcard.
// NOTE(review): `AnyVersion` (a plain value) is passed where a Gen is
// expected — presumably an implicit lifting is in scope; confirm before
// refactoring this expression.
val genVersionSpecification: Gen[universe.v3.model.VersionSpecification] = {
  val genExact = genVersion.map(universe.v3.model.ExactVersion)
  Gen.frequency((1, universe.v3.model.AnyVersion), (20, genExact))
}
/** Generates release versions from strictly positive longs. */
private val genReleaseVersion: Gen[universe.v3.model.ReleaseVersion] =
  Gen.posNum[Long].map(num => universe.v3.model.ReleaseVersion(num))
// Generates an `upgradesFrom` list. When `requiredVersion` is given, the
// result always contains an ExactVersion for it, spliced between two random
// prefixes/suffixes. Without a required version, an empty list is sometimes
// encoded as None to exercise both representations of "no upgrade paths".
def genUpgradesFrom(
  requiredVersion: Option[universe.v3.model.Version]
): Gen[Option[List[universe.v3.model.VersionSpecification]]] = {
  requiredVersion match {
    case Some(required) =>
      for {
        leftVersions <- Gen.listOf(genVersionSpecification)
        rightVersions <- Gen.listOf(genVersionSpecification)
      } yield Some(leftVersions ++ (universe.v3.model.ExactVersion(required) :: rightVersions))
    case _ =>
      Gen.listOf(genVersionSpecification).flatMap {
        case vs if vs.isEmpty => Gen.oneOf(None, Some(Nil))
        case vs => Gen.const(Some(vs))
      }
  }
}
/**
 * Generates a minimal V3 package. The package name comes from `genName` so
 * callers can constrain the names produced.
 */
def genV3Package(genName: Gen[String] = genPackageName): Gen[universe.v3.model.V3Package] =
  for {
    name <- genName
    version <- genVersion
    releaseVersion <- genReleaseVersion
    maintainer <- Gen.alphaStr
    description <- Gen.alphaStr
  } yield universe.v3.model.V3Package(
    name = name,
    version = version,
    releaseVersion = releaseVersion,
    maintainer = maintainer,
    description = description
  )
// Generates a V2 package by reusing the V3 generator's common fields and
// adding the Marathon template, which is mandatory in the v2 model.
def genV2Package(genName: Gen[String] = genPackageName): Gen[universe.v3.model.V2Package] = {
  for {
    v3 <- genV3Package(genName)
    marathonTemplate <- genByteBuffer
  } yield {
    universe.v3.model.V2Package(
      name = v3.name,
      version = v3.version,
      releaseVersion = v3.releaseVersion,
      maintainer = v3.maintainer,
      description = v3.description,
      marathon = universe.v3.model.Marathon(marathonTemplate)
    )
  }
}
// Generates a V4 package: the V3 common fields plus the v4-only
// upgradesFrom / downgradesTo version-specification lists.
def genV4Package(
  genName: Gen[String] = genPackageName,
  genUpgrades: Gen[Option[List[universe.v3.model.VersionSpecification]]] = genUpgradesFrom(requiredVersion = None)
): Gen[universe.v4.model.V4Package] = {
  for {
    upgradesFrom <- genUpgrades
    downgradesTo <- Gen.option(Gen.listOf(genVersionSpecification))
    v3 <- genV3Package(genName)
  } yield {
    universe.v4.model.V4Package(
      name = v3.name,
      version = v3.version,
      releaseVersion = v3.releaseVersion,
      maintainer = v3.maintainer,
      description = v3.description,
      upgradesFrom = upgradesFrom,
      downgradesTo = downgradesTo
    )
  }
}
// Builds (expected URI set, assets, images, cli) test data for v3 resources:
// extends the v2 data with three CLI binary URIs (windows/linux/darwin).
def genV3ResourceTestData() : Gen[(collection.immutable.Set[String],
  universe.v3.model.Assets,
  universe.v3.model.Images,
  universe.v3.model.Cli)] = {
  for {
    v2 <- genV2ResourceTestData()
    windowsCli <- arbitrary[Uri].map(_.toString)
    linuxCli <- arbitrary[Uri].map(_.toString)
    darwinCli <- arbitrary[Uri].map(_.toString)
  } yield {
    val cli = universe.v3.model.Cli(Some(universe.v3.model.Platforms(
      Some(universe.v3.model.Architectures(universe.v3.model.Binary("p", windowsCli, List.empty))),
      Some(universe.v3.model.Architectures(universe.v3.model.Binary("q", linuxCli, List.empty))),
      Some(universe.v3.model.Architectures(universe.v3.model.Binary("r", darwinCli, List.empty)))
    )))
    // Every CLI URI must also appear in the expected set of referenced URIs.
    val expectedSet = v2._1 + windowsCli + linuxCli + darwinCli
    (expectedSet, v2._2, v2._3, cli)
  }
}
// scalastyle:off magic.number
/**
 * Builds (expected URI set, assets, images) test data for v2 resources.
 *
 * FIX: `listOfUrls` and `screenshots` previously passed
 * `arbitrary[Uri].toString` — the `toString` of the *generator object*
 * itself (a constant debug string) — where a `Gen[String]` of URI strings
 * was intended. Each generated Uri is now mapped to its string form,
 * matching the icon fields below.
 */
def genV2ResourceTestData() : Gen[(collection.immutable.Set[String],
  universe.v3.model.Assets,
  universe.v3.model.Images
  )] = {
  for {
    numberOfUrls <- Gen.chooseNum(0, 10)
    listOfUrls <- Gen.containerOfN[List, String](numberOfUrls, arbitrary[Uri].map(_.toString))
    iconSmall <- arbitrary[Uri].map(_.toString)
    iconMedium <- arbitrary[Uri].map(_.toString)
    iconLarge <- arbitrary[Uri].map(_.toString)
    screenshots <- Gen.containerOfN[List, String](numberOfUrls, arbitrary[Uri].map(_.toString))
  } yield {
    // Asset uris are keyed by list index (rendered as a string) -> URI string.
    val assets = universe.v3.model.Assets(uris = Some(
      listOfUrls.zipWithIndex.map { case ((k, v)) =>
        (v.toString, k)
      }.toMap
    ),
    None
    )
    // Every URI referenced by assets + images belongs in the expected set.
    val expectedSet = iconSmall ::
      iconMedium ::
      iconLarge ::
      (listOfUrls ++ screenshots)
    val images = universe.v3.model.Images(Some(iconSmall), Some(iconMedium), Some(iconLarge), Some(screenshots))
    (expectedSet.toSet, assets, images)
  }
}
// scalastyle:on magic.number
/* This is just here to tell you that you need to update the generator below, when you
* add a new packaging version. This is a little hacky but worth the error
*/
// Intentionally unimplemented (???): matching over the sealed
// PackageDefinition hierarchy makes the compiler flag this method whenever a
// new packaging version is added, reminding us to extend
// genPackageDefinition below. Never call this at runtime.
def checkPackageDefinitionExhaustiveness(
  pkgDef: universe.v4.model.PackageDefinition
): Gen[universe.v4.model.PackageDefinition] = {
  pkgDef match {
    case _: universe.v3.model.V2Package => ???
    case _: universe.v3.model.V3Package => ???
    case _: universe.v4.model.V4Package => ???
    case _: universe.v5.model.V5Package => ???
  }
}
/** Generates any PackageDefinition flavor: v2, v3, or v4, chosen uniformly. */
def genPackageDefinition(
  genName: Gen[String] = genPackageName,
  genUpgrades: Gen[Option[List[universe.v3.model.VersionSpecification]]] = genUpgradesFrom(requiredVersion = None)
): Gen[universe.v4.model.PackageDefinition] =
  Gen.oneOf(
    genV2Package(genName),
    genV3Package(genName),
    genV4Package(genName, genUpgrades)
  )
/* This is just here to tell you that you need to update the generator below,
* when you add a new packaging version. This is a little hacky but worth the error
*/
// Intentionally unimplemented (???): the sealed match fails to compile when a
// new SupportedPackageDefinition subtype is added, forcing an update of
// genSupportedPackageDefinition below. Never call this at runtime.
def checkExhaustiveness(
  supportedPackage: universe.v4.model.SupportedPackageDefinition
): Gen[universe.v4.model.SupportedPackageDefinition] = {
  supportedPackage match {
    case _: universe.v3.model.V3Package => ???
    case _: universe.v4.model.V4Package => ???
    case _: universe.v5.model.V5Package => ???
  }
}
// Generates supported package definitions (v3 or v4).
// NOTE(review): checkExhaustiveness above lists V5Package, but no V5
// generator is included here — confirm whether that omission is intentional.
val genSupportedPackageDefinition: Gen[universe.v4.model.SupportedPackageDefinition] = {
  Gen.oneOf(genV4Package(), genV3Package())
}
private val genByteBuffer: Gen[ByteBuffer] = arbitrary[Array[Byte]].map(ByteBuffer.wrap)
/** Generates a string of length in [0, maxSize] built from `genChar`. */
private def maxSizedString(maxSize: Int, genChar: Gen[Char]): Gen[String] =
  Gen.chooseNum(0, maxSize).flatMap { size =>
    Gen.containerOfN[Array, Char](size, genChar).map(chars => new String(chars))
  }
/** Generates a string of length in [1, maxSize] built from `genChar`. */
private def nonEmptyMaxSizedString(maxSize: Int, genChar: Gen[Char]): Gen[String] =
  Gen.chooseNum(1, maxSize).flatMap { size =>
    Gen.containerOfN[Array, Char](size, genChar).map(chars => new String(chars))
  }
/** Generates a non-empty string of unbounded (size-parameter-limited) length. */
private def genNonEmptyString(genChar: Gen[Char]): Gen[String] =
  Gen.nonEmptyContainerOf[Array, Char](genChar).map(chars => new String(chars))
// Generates tags: non-empty strings of arbitrary non-whitespace characters.
private val genTag: Gen[universe.v3.model.Tag] = {
  val genTagChar = arbitrary[Char].suchThat(!_.isWhitespace)
  val genTagString = genNonEmptyString(genTagChar)
  genTagString.map(universe.v3.model.Tag(_))
}
// Generates Uris by parsing arbitrary strings and discarding parse failures.
// NOTE(review): the Success branch returns a bare Uri where flatMap expects a
// Gen — presumably an implicit lifting is in scope; confirm before
// refactoring this expression.
private val genUri: Gen[Uri] = {
  arbitrary[String]
  .map(s => Try(Uri.parse(s)))
  .flatMap {
    case Success(uri) => uri
    case _ => Gen.fail // URI parsing almost always succeeds, so this should be fine
  }
}
// Generates non-negative numeric components of a DC/OS release version.
private val genDcosReleaseVersionVersion: Gen[universe.v3.model.DcosReleaseVersion.Version] = {
  nonNegNum[Int].map(universe.v3.model.DcosReleaseVersion.Version(_))
}
// Generates non-empty alphanumeric suffixes for DC/OS release versions.
private val genDcosReleaseVersionSuffix: Gen[universe.v3.model.DcosReleaseVersion.Suffix] = {
  genNonEmptyString(Gen.alphaNumChar).map(universe.v3.model.DcosReleaseVersion.Suffix(_))
}
// TODO package-add: Make this more general
// Generates simple "type/subtype" media types from lowercase alpha strings,
// discarding combinations that MediaType's constructor rejects.
// NOTE(review): the Success branch returns a bare MediaType where flatMap
// expects a Gen — presumably an implicit lifting is in scope; confirm before
// refactoring this expression.
private val genMediaType: Gen[MediaType] = {
  val genTry = for {
    typePart <- Gen.alphaStr.map(_.toLowerCase)
    subTypePart <- Gen.alphaStr.map(_.toLowerCase)
  } yield Try(MediaType(typePart, subTypePart))
  genTry.flatMap {
    case Success(mediaType) => mediaType
    case _ => Gen.fail
  }
}
// Arbitrary instances wrapping the generators above, so call sites can use
// `arbitrary[T]` / ScalaCheck forAll without naming individual generators.
object Implicits {
  implicit val arbTag: Arbitrary[universe.v3.model.Tag] = Arbitrary(genTag)
  implicit val arbUri: Arbitrary[Uri] = Arbitrary(genUri)
  implicit val arbDcosReleaseVersionVersion:
    Arbitrary[universe.v3.model.DcosReleaseVersion.Version] = {
    Arbitrary(genDcosReleaseVersionVersion)
  }
  implicit val arbDcosReleaseVersionSuffix:
    Arbitrary[universe.v3.model.DcosReleaseVersion.Suffix] = {
    Arbitrary(genDcosReleaseVersionSuffix)
  }
  implicit val arbByteBuffer: Arbitrary[ByteBuffer] = Arbitrary(genByteBuffer)
  implicit val arbV3Package: Arbitrary[universe.v3.model.V3Package] = Arbitrary(genV3Package())
  implicit val arbPackageDefinition: Arbitrary[universe.v4.model.PackageDefinition] = {
    Arbitrary(genPackageDefinition())
  }
  implicit val arbSupportedPackageDefinition: Arbitrary[universe.v4.model.SupportedPackageDefinition] = {
    Arbitrary(genSupportedPackageDefinition)
  }
  implicit val arbUuid: Arbitrary[UUID] = Arbitrary(Gen.uuid)
  implicit val arbSemVer: Arbitrary[universe.v3.model.SemVer] = Arbitrary(genSemVer)
  // Metadata has no hand-written generator; its Arbitrary is derived
  // structurally via scalacheck-shapeless.
  implicit val arbMetadata: Arbitrary[universe.v4.model.Metadata] = derived
  implicit val arbVersion: Arbitrary[universe.v3.model.Version] = Arbitrary(genVersion)
  implicit val arbMediaType: Arbitrary[MediaType] = Arbitrary(genMediaType)
  // Summons a shapeless-derived Arbitrary for any product/coproduct type A.
  def derived[A: MkArbitrary]: Arbitrary[A] = implicitly[MkArbitrary[A]].arbitrary
}
}
| dcos/cosmos | cosmos-test-common/src/main/scala/com/mesosphere/Generators.scala | Scala | apache-2.0 | 11,308 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.arrow
import org.apache.arrow.vector.IntervalDayVector
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.types._
import org.apache.spark.sql.vectorized._
import org.apache.spark.unsafe.types.UTF8String
class ArrowWriterSuite extends SparkFunSuite {
test("simple") {
def check(dt: DataType, data: Seq[Any], timeZoneId: String = null): Unit = {
val avroDatatype = dt match {
case _: DayTimeIntervalType => DayTimeIntervalType()
case _: YearMonthIntervalType => YearMonthIntervalType()
case tpe => tpe
}
val schema = new StructType().add("value", avroDatatype, nullable = true)
val writer = ArrowWriter.create(schema, timeZoneId)
assert(writer.schema === schema)
data.foreach { datum =>
writer.write(InternalRow(datum))
}
writer.finish()
val reader = new ArrowColumnVector(writer.root.getFieldVectors().get(0))
data.zipWithIndex.foreach {
case (null, rowId) => assert(reader.isNullAt(rowId))
case (datum, rowId) =>
val value = dt match {
case BooleanType => reader.getBoolean(rowId)
case ByteType => reader.getByte(rowId)
case ShortType => reader.getShort(rowId)
case IntegerType => reader.getInt(rowId)
case LongType => reader.getLong(rowId)
case FloatType => reader.getFloat(rowId)
case DoubleType => reader.getDouble(rowId)
case DecimalType.Fixed(precision, scale) => reader.getDecimal(rowId, precision, scale)
case StringType => reader.getUTF8String(rowId)
case BinaryType => reader.getBinary(rowId)
case DateType => reader.getInt(rowId)
case TimestampType => reader.getLong(rowId)
case _: YearMonthIntervalType => reader.getInt(rowId)
case _: DayTimeIntervalType => reader.getLong(rowId)
}
assert(value === datum)
}
writer.root.close()
}
check(BooleanType, Seq(true, null, false))
check(ByteType, Seq(1.toByte, 2.toByte, null, 4.toByte))
check(ShortType, Seq(1.toShort, 2.toShort, null, 4.toShort))
check(IntegerType, Seq(1, 2, null, 4))
check(LongType, Seq(1L, 2L, null, 4L))
check(FloatType, Seq(1.0f, 2.0f, null, 4.0f))
check(DoubleType, Seq(1.0d, 2.0d, null, 4.0d))
check(DecimalType.SYSTEM_DEFAULT, Seq(Decimal(1), Decimal(2), null, Decimal(4)))
check(StringType, Seq("a", "b", null, "d").map(UTF8String.fromString))
check(BinaryType, Seq("a".getBytes(), "b".getBytes(), null, "d".getBytes()))
check(DateType, Seq(0, 1, 2, null, 4))
check(TimestampType, Seq(0L, 3.6e9.toLong, null, 8.64e10.toLong), "America/Los_Angeles")
check(NullType, Seq(null, null, null))
DataTypeTestUtils.yearMonthIntervalTypes
.foreach(check(_, Seq(null, 0, 1, -1, Int.MaxValue, Int.MinValue)))
DataTypeTestUtils.dayTimeIntervalTypes.foreach(check(_,
Seq(null, 0L, 1000L, -1000L, (Long.MaxValue - 807L), (Long.MinValue + 808L))))
}
test("long overflow for DayTimeIntervalType") {
  // Row 0 overflows in Math.multiplyExact (day count too large); row 1
  // overflows in Math.addExact (max days plus Int.MaxValue milliseconds).
  val schema = new StructType().add("value", DayTimeIntervalType(), nullable = true)
  val writer = ArrowWriter.create(schema, null)
  val vector = writer.root.getFieldVectors().get(0)
  val reader = new ArrowColumnVector(vector)
  val intervalVector = vector.asInstanceOf[IntervalDayVector]
  intervalVector.set(0, 106751992, 0)
  intervalVector.set(1, 106751991, Int.MaxValue)
  Seq(0, 1).foreach { rowId =>
    val error = intercept[java.lang.ArithmeticException] {
      reader.getLong(rowId)
    }
    assert(error.getMessage.equals("long overflow"))
  }
  writer.root.close()
}
test("get multiple") {
def check(dt: DataType, data: Seq[Any], timeZoneId: String = null): Unit = {
val avroDatatype = dt match {
case _: DayTimeIntervalType => DayTimeIntervalType()
case _: YearMonthIntervalType => YearMonthIntervalType()
case tpe => tpe
}
val schema = new StructType().add("value", avroDatatype, nullable = false)
val writer = ArrowWriter.create(schema, timeZoneId)
assert(writer.schema === schema)
data.foreach { datum =>
writer.write(InternalRow(datum))
}
writer.finish()
val reader = new ArrowColumnVector(writer.root.getFieldVectors().get(0))
val values = dt match {
case BooleanType => reader.getBooleans(0, data.size)
case ByteType => reader.getBytes(0, data.size)
case ShortType => reader.getShorts(0, data.size)
case IntegerType => reader.getInts(0, data.size)
case LongType => reader.getLongs(0, data.size)
case FloatType => reader.getFloats(0, data.size)
case DoubleType => reader.getDoubles(0, data.size)
case DateType => reader.getInts(0, data.size)
case TimestampType => reader.getLongs(0, data.size)
case _: YearMonthIntervalType => reader.getInts(0, data.size)
case _: DayTimeIntervalType => reader.getLongs(0, data.size)
}
assert(values === data)
writer.root.close()
}
check(BooleanType, Seq(true, false))
check(ByteType, (0 until 10).map(_.toByte))
check(ShortType, (0 until 10).map(_.toShort))
check(IntegerType, (0 until 10))
check(LongType, (0 until 10).map(_.toLong))
check(FloatType, (0 until 10).map(_.toFloat))
check(DoubleType, (0 until 10).map(_.toDouble))
check(DateType, (0 until 10))
check(TimestampType, (0 until 10).map(_ * 4.32e10.toLong), "America/Los_Angeles")
DataTypeTestUtils.yearMonthIntervalTypes.foreach(check(_, (0 until 14)))
DataTypeTestUtils.dayTimeIntervalTypes.foreach(check(_, (-10 until 10).map(_ * 1000.toLong)))
}
test("array") {
val schema = new StructType()
.add("arr", ArrayType(IntegerType, containsNull = true), nullable = true)
val writer = ArrowWriter.create(schema, null)
assert(writer.schema === schema)
writer.write(InternalRow(ArrayData.toArrayData(Array(1, 2, 3))))
writer.write(InternalRow(ArrayData.toArrayData(Array(4, 5))))
writer.write(InternalRow(null))
writer.write(InternalRow(ArrayData.toArrayData(Array.empty[Int])))
writer.write(InternalRow(ArrayData.toArrayData(Array(6, null, 8))))
writer.finish()
val reader = new ArrowColumnVector(writer.root.getFieldVectors().get(0))
val array0 = reader.getArray(0)
assert(array0.numElements() === 3)
assert(array0.getInt(0) === 1)
assert(array0.getInt(1) === 2)
assert(array0.getInt(2) === 3)
val array1 = reader.getArray(1)
assert(array1.numElements() === 2)
assert(array1.getInt(0) === 4)
assert(array1.getInt(1) === 5)
assert(reader.isNullAt(2))
val array3 = reader.getArray(3)
assert(array3.numElements() === 0)
val array4 = reader.getArray(4)
assert(array4.numElements() === 3)
assert(array4.getInt(0) === 6)
assert(array4.isNullAt(1))
assert(array4.getInt(2) === 8)
writer.root.close()
}
test("nested array") {
val schema = new StructType().add("nested", ArrayType(ArrayType(IntegerType)))
val writer = ArrowWriter.create(schema, null)
assert(writer.schema === schema)
writer.write(InternalRow(ArrayData.toArrayData(Array(
ArrayData.toArrayData(Array(1, 2, 3)),
ArrayData.toArrayData(Array(4, 5)),
null,
ArrayData.toArrayData(Array.empty[Int]),
ArrayData.toArrayData(Array(6, null, 8))))))
writer.write(InternalRow(null))
writer.write(InternalRow(ArrayData.toArrayData(Array.empty)))
writer.finish()
val reader = new ArrowColumnVector(writer.root.getFieldVectors().get(0))
val array0 = reader.getArray(0)
assert(array0.numElements() === 5)
val array00 = array0.getArray(0)
assert(array00.numElements() === 3)
assert(array00.getInt(0) === 1)
assert(array00.getInt(1) === 2)
assert(array00.getInt(2) === 3)
val array01 = array0.getArray(1)
assert(array01.numElements() === 2)
assert(array01.getInt(0) === 4)
assert(array01.getInt(1) === 5)
assert(array0.isNullAt(2))
val array03 = array0.getArray(3)
assert(array03.numElements() === 0)
val array04 = array0.getArray(4)
assert(array04.numElements() === 3)
assert(array04.getInt(0) === 6)
assert(array04.isNullAt(1))
assert(array04.getInt(2) === 8)
assert(reader.isNullAt(1))
val array2 = reader.getArray(2)
assert(array2.numElements() === 0)
writer.root.close()
}
test("null array") {
val schema = new StructType()
.add("arr", ArrayType(NullType, containsNull = true), nullable = true)
val writer = ArrowWriter.create(schema, null)
assert(writer.schema === schema)
writer.write(InternalRow(ArrayData.toArrayData(Array(null, null, null))))
writer.write(InternalRow(ArrayData.toArrayData(Array(null, null))))
writer.write(InternalRow(null))
writer.write(InternalRow(ArrayData.toArrayData(Array.empty[Int])))
writer.write(InternalRow(ArrayData.toArrayData(Array(null, null, null))))
writer.finish()
val reader = new ArrowColumnVector(writer.root.getFieldVectors().get(0))
val array0 = reader.getArray(0)
assert(array0.numElements() === 3)
assert(array0.isNullAt(0))
assert(array0.isNullAt(1))
assert(array0.isNullAt(2))
val array1 = reader.getArray(1)
assert(array1.numElements() === 2)
assert(array1.isNullAt(0))
assert(array1.isNullAt(1))
assert(reader.isNullAt(2))
val array3 = reader.getArray(3)
assert(array3.numElements() === 0)
val array4 = reader.getArray(4)
assert(array4.numElements() === 3)
assert(array4.isNullAt(0))
assert(array4.isNullAt(1))
assert(array4.isNullAt(2))
writer.root.close()
}
test("struct") {
val schema = new StructType()
.add("struct", new StructType().add("i", IntegerType).add("str", StringType))
val writer = ArrowWriter.create(schema, null)
assert(writer.schema === schema)
writer.write(InternalRow(InternalRow(1, UTF8String.fromString("str1"))))
writer.write(InternalRow(InternalRow(null, null)))
writer.write(InternalRow(null))
writer.write(InternalRow(InternalRow(4, null)))
writer.write(InternalRow(InternalRow(null, UTF8String.fromString("str5"))))
writer.finish()
val reader = new ArrowColumnVector(writer.root.getFieldVectors().get(0))
val struct0 = reader.getStruct(0)
assert(struct0.getInt(0) === 1)
assert(struct0.getUTF8String(1) === UTF8String.fromString("str1"))
val struct1 = reader.getStruct(1)
assert(struct1.isNullAt(0))
assert(struct1.isNullAt(1))
assert(reader.isNullAt(2))
val struct3 = reader.getStruct(3)
assert(struct3.getInt(0) === 4)
assert(struct3.isNullAt(1))
val struct4 = reader.getStruct(4)
assert(struct4.isNullAt(0))
assert(struct4.getUTF8String(1) === UTF8String.fromString("str5"))
writer.root.close()
}
test("nested struct") {
val schema = new StructType().add("struct",
new StructType().add("nested", new StructType().add("i", IntegerType).add("str", StringType)))
val writer = ArrowWriter.create(schema, null)
assert(writer.schema === schema)
writer.write(InternalRow(InternalRow(InternalRow(1, UTF8String.fromString("str1")))))
writer.write(InternalRow(InternalRow(InternalRow(null, null))))
writer.write(InternalRow(InternalRow(null)))
writer.write(InternalRow(null))
writer.finish()
val reader = new ArrowColumnVector(writer.root.getFieldVectors().get(0))
val struct00 = reader.getStruct(0).getStruct(0, 2)
assert(struct00.getInt(0) === 1)
assert(struct00.getUTF8String(1) === UTF8String.fromString("str1"))
val struct10 = reader.getStruct(1).getStruct(0, 2)
assert(struct10.isNullAt(0))
assert(struct10.isNullAt(1))
val struct2 = reader.getStruct(2)
assert(struct2.isNullAt(0))
assert(reader.isNullAt(3))
writer.root.close()
}
test("null struct") {
val schema = new StructType()
.add("struct", new StructType().add("n1", NullType).add("n2", NullType))
val writer = ArrowWriter.create(schema, null)
assert(writer.schema === schema)
writer.write(InternalRow(InternalRow(null, null)))
writer.write(InternalRow(null))
writer.write(InternalRow(InternalRow(null, null)))
writer.finish()
val reader = new ArrowColumnVector(writer.root.getFieldVectors().get(0))
val struct0 = reader.getStruct(0)
assert(struct0.isNullAt(0))
assert(struct0.isNullAt(1))
assert(reader.isNullAt(1))
val struct2 = reader.getStruct(2)
assert(struct2.isNullAt(0))
assert(struct2.isNullAt(1))
writer.root.close()
}
test("map") {
val schema = new StructType()
.add("map", MapType(IntegerType, StringType), nullable = true)
val writer = ArrowWriter.create(schema, null)
assert(writer.schema == schema)
writer.write(InternalRow(ArrayBasedMapData(
keys = Array(1, 2, 3),
values = Array(
UTF8String.fromString("v2"),
UTF8String.fromString("v3"),
UTF8String.fromString("v4")
)
)))
writer.write(InternalRow(ArrayBasedMapData(Array(43),
Array(UTF8String.fromString("v5"))
)))
writer.write(InternalRow(ArrayBasedMapData(Array(43), Array(null))))
writer.write(InternalRow(null))
writer.finish()
val reader = new ArrowColumnVector(writer.root.getFieldVectors.get(0))
val map0 = reader.getMap(0)
assert(map0.numElements() == 3)
assert(map0.keyArray().array().mkString(",") == Array(1, 2, 3).mkString(","))
assert(map0.valueArray().array().mkString(",") == Array("v2", "v3", "v4").mkString(","))
val map1 = reader.getMap(1)
assert(map1.numElements() == 1)
assert(map1.keyArray().array().mkString(",") == Array(43).mkString(","))
assert(map1.valueArray().array().mkString(",") == Array("v5").mkString(","))
val map2 = reader.getMap(2)
assert(map2.numElements() == 1)
assert(map2.keyArray().array().mkString(",") == Array(43).mkString(","))
assert(map2.valueArray().array().mkString(",") == Array(null).mkString(","))
val map3 = reader.getMap(3)
assert(map3 == null)
writer.root.close()
}
test("empty map") {
  // A single row holding an empty map must round-trip with zero entries.
  val schema = new StructType()
    .add("map", MapType(IntegerType, StringType), nullable = true)
  val writer = ArrowWriter.create(schema, null)
  assert(writer.schema == schema)
  writer.write(InternalRow(ArrayBasedMapData(Array(), Array())))
  writer.finish()
  val firstColumn = new ArrowColumnVector(writer.root.getFieldVectors.get(0))
  val emptyMap = firstColumn.getMap(0)
  assert(emptyMap.numElements() == 0)
  writer.root.close()
}
// Maps whose values are all null must round-trip, and a null *row* must read
// back as a null map.
test("null value map") {
  val schema = new StructType()
    .add("map", MapType(IntegerType, NullType), nullable = true)
  val writer = ArrowWriter.create(schema, null)
  assert(writer.schema == schema)
  writer.write(InternalRow(ArrayBasedMapData(
    keys = Array(1, 2, 3),
    values = Array(null, null, null)
  )))
  writer.write(InternalRow(ArrayBasedMapData(Array(43), Array(null))))
  writer.write(InternalRow(null))
  writer.finish()
  val reader = new ArrowColumnVector(writer.root.getFieldVectors.get(0))
  val map0 = reader.getMap(0)
  assert(map0.numElements() == 3)
  assert(map0.keyArray().array().mkString(",") == Array(1, 2, 3).mkString(","))
  assert(map0.valueArray().array().mkString(",") == Array(null, null, null).mkString(","))
  val map1 = reader.getMap(1)
  assert(map1.numElements() == 1)
  assert(map1.keyArray().array().mkString(",") == Array(43).mkString(","))
  assert(map1.valueArray().array().mkString(",") == Array(null).mkString(","))
  // FIX: only rows 0-2 were written and the null row is row 2; the original
  // read getMap(3) — an index past the last written row — so the null-row
  // assertion never exercised the row it intended to check.
  val map2 = reader.getMap(2)
  assert(map2 == null)
  writer.root.close()
}
test("nested map") {
val valueSchema = new StructType()
.add("name", StringType)
.add("age", IntegerType)
val schema = new StructType()
.add("map",
MapType(
keyType = IntegerType,
valueType = valueSchema
),
nullable = true)
val writer = ArrowWriter.create(schema, null)
assert(writer.schema == schema)
writer.write(InternalRow(
ArrayBasedMapData(
keys = Array(1),
values = Array(InternalRow(UTF8String.fromString("jon"), 20))
)))
writer.write(InternalRow(
ArrayBasedMapData(
keys = Array(1),
values = Array(InternalRow(UTF8String.fromString("alice"), 30))
)))
writer.write(InternalRow(
ArrayBasedMapData(
keys = Array(1),
values = Array(InternalRow(UTF8String.fromString("bob"), 40))
)))
writer.finish()
val reader = new ArrowColumnVector(writer.root.getFieldVectors.get(0))
def stringRepr(map: ColumnarMap): String = {
map.valueArray().getStruct(0, 2).toSeq(valueSchema).mkString(",")
}
val map0 = reader.getMap(0)
assert(map0.numElements() == 1)
assert(map0.keyArray().array().mkString(",") == Array(1).mkString(","))
assert(stringRepr(map0) == Array("jon", "20").mkString(","))
val map1 = reader.getMap(1)
assert(map1.numElements() == 1)
assert(map1.keyArray().array().mkString(",") == Array(1).mkString(","))
assert(stringRepr(map1) == Array("alice", "30").mkString(","))
val map2 = reader.getMap(2)
assert(map2.numElements() == 1)
assert(map2.keyArray().array().mkString(",") == Array(1).mkString(","))
assert(stringRepr(map2) == Array("bob", "40").mkString(","))
}
}
| wangmiao1981/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/arrow/ArrowWriterSuite.scala | Scala | apache-2.0 | 18,691 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.{OutputCommitter, TaskAttemptContext}
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
import org.apache.spark.internal.Logging
import org.apache.spark.internal.io.HadoopMapReduceCommitProtocol
import org.apache.spark.sql.internal.SQLConf
/**
* A variant of [[HadoopMapReduceCommitProtocol]] that allows specifying the actual
* Hadoop output committer using an option specified in SQLConf.
*/
class SQLHadoopMapReduceCommitProtocol(jobId: String, path: String, isAppend: Boolean)
  extends HadoopMapReduceCommitProtocol(jobId, path) with Serializable with Logging {

  /**
   * Instantiates the output committer for this task attempt. Starts from the
   * committer supplied by the configured output format and, for non-append
   * jobs only, replaces it with the user-specified committer class from
   * SQLConf.OUTPUT_COMMITTER_CLASS when one is set.
   */
  override protected def setupCommitter(context: TaskAttemptContext): OutputCommitter = {
    var committer = context.getOutputFormatClass.newInstance().getOutputCommitter(context)

    if (!isAppend) {
      // If we are appending data to an existing dir, we will only use the output committer
      // associated with the file output format since it is not safe to use a custom
      // committer for appending. For example, in S3, direct parquet output committer may
      // leave partial data in the destination dir when the appending job fails.
      // See SPARK-8578 for more details.
      val configuration = context.getConfiguration
      val clazz =
        configuration.getClass(SQLConf.OUTPUT_COMMITTER_CLASS.key, null, classOf[OutputCommitter])

      if (clazz != null) {
        logInfo(s"Using user defined output committer class ${clazz.getCanonicalName}")

        // Every output format based on org.apache.hadoop.mapreduce.lib.output.OutputFormat
        // has an associated output committer. To override this output committer,
        // we will first try to use the output committer set in SQLConf.OUTPUT_COMMITTER_CLASS.
        // If a data source needs to override the output committer, it needs to set the
        // output committer in prepareForWrite method.
        if (classOf[FileOutputCommitter].isAssignableFrom(clazz)) {
          // The specified output committer is a FileOutputCommitter.
          // So, we will use the FileOutputCommitter-specified constructor.
          val ctor = clazz.getDeclaredConstructor(classOf[Path], classOf[TaskAttemptContext])
          committer = ctor.newInstance(new Path(path), context)
        } else {
          // The specified output committer is just an OutputCommitter.
          // So, we will use the no-argument constructor.
          val ctor = clazz.getDeclaredConstructor()
          committer = ctor.newInstance()
        }
      }
    }
    logInfo(s"Using output committer class ${committer.getClass.getCanonicalName}")
    committer
  }
}
| ZxlAaron/mypros | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SQLHadoopMapReduceCommitProtocol.scala | Scala | apache-2.0 | 3,542 |
package io.jacobappleton.compilers.server
import akka.actor._
import akka.pattern.ask
import akka.util.Timeout
import io.jacobappleton.compilers.server.workers.RegexWorkerActor
import io.jacobappleton.compilers.server.workers.RegexWorkerActor.RegexResponse
import io.jacobappleton.compilers.server.workers.RegexWorkerJsonProtocol._
import spray.http.StatusCodes
import spray.http.StatusCodes._
import spray.httpx.SprayJsonSupport._
import spray.routing.{Route, ExceptionHandler, HttpService}
import scala.concurrent.duration._
trait RoutingService extends HttpService {
  // Route combinators run their Futures on the actor system's dispatcher.
  implicit def executionContext = actorRefFactory.dispatcher
  // Upper bound for the ask (`?`) to the regex worker below.
  implicit val timeout = Timeout(1.second)
  // Single worker actor that evaluates regex requests; created once per service.
  val regexWorker = actorRefFactory.actorOf(Props[RegexWorkerActor], "regexWorker")
  // Any exception thrown while handling a request is reported as a 400.
  implicit def exceptionHandler = ExceptionHandler {
    case _: Exception => complete(BadRequest, "Error parsing Regex pattern")
  }
  // Routes:
  //   GET  /            -> permanent redirect to the project homepage
  //   GET  /app/assets  -> static assets from the classpath
  //   GET  /app/...     -> single-page app shell (index.html)
  //   POST /api/regex   -> asks the worker to evaluate the posted pattern
  val compilerRoute : Route =
    path("") {
      redirect("http://www.jacobappleton.io", StatusCodes.PermanentRedirect)
    } ~
    pathPrefix("app") {
      pathPrefix("assets") {
        getFromResourceDirectory("web/assets/")
      } ~ {
        getFromResource("web/index.html")
      }
    } ~
    pathPrefix("api") {
      path("regex") {
        post {
          entity(as[String]) { pattern =>
            complete {
              // Worker replies with a RegexResponse, serialized via
              // RegexWorkerJsonProtocol.
              (regexWorker ? pattern).mapTo[RegexResponse]
            }
          }
        }
      }
    }
  override def timeoutRoute: Route = {
    // NOTE(review): killing the worker here forces its supervisor to restart
    // it after a timed-out request — presumably to abort a runaway regex
    // evaluation. Confirm this is intended: any other in-flight asks to the
    // same worker will fail when it is killed.
    regexWorker ! Kill
    complete(
      InternalServerError,
      "The server was not able to produce a timely response to your request.")
  }
}
package glint.messages.server.request
/**
 * A push request for vectors containing longs
 *
 * @param id The identifier of the target vector/model (inferred from sibling
 *           request types — confirm against the server-side handler)
 * @param keys The indices
 * @param values The values to add
 */
private[glint] case class PushVectorLong(id: Int, keys: Array[Long], values: Array[Long]) extends Request
| rjagerman/glint | src/main/scala/glint/messages/server/request/PushVectorLong.scala | Scala | mit | 270 |
package lists
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{FlatSpecLike, Matchers}
@RunWith(classOf[JUnitRunner])
class ScalaListsTest extends FlatSpecLike with Matchers {

  behavior of "ScalaLists"

  // ScalaLists is stateless, so a single shared instance serves all tests.
  val lists = new ScalaLists

  it should "sum" in {
    lists.sum(List(1, 2, 3, 4)) shouldBe 10
  }

  it should "find the max" in {
    lists.max(List(1, 2, 3)) shouldBe 3
  }

  it should "exists_True" in {
    lists.exists(List("a", "b"), "a") shouldBe true
  }

  it should "exists_False" in {
    lists.exists(List("a", "b"), "c") shouldBe false
  }

  it should "indexOf_Found" in {
    lists.indexOf(List("a", "b"), "b") shouldBe 1
  }

  it should "indexOf_NotFound" in {
    lists.indexOf(List(1, 2, 3), 4) shouldBe -1
  }

  it should "count_Found" in {
    lists.count(List("a", "a", "b"), "a") shouldBe 2
  }

  it should "count_NotFound" in {
    lists.count(List(1, 2, 3), 4) shouldBe 0
  }

  it should "order the elements" in {
    // Ordering with a descending comparator must reverse an ascending list.
    val descending = (left: Int, right: Int) => left > right
    lists.order(List(1, 2, 3), descending) should contain theSameElementsInOrderAs List(3, 2, 1)
  }

  it should "zip" in {
    // Elements of both lists are interleaved pairwise.
    val interleaved = lists.zip(List("a", "b", "c"), List(1, 2, 3))
    interleaved should contain theSameElementsInOrderAs List("a", 1, "b", 2, "c", 3)
  }
}
| DWiechert/rosetta-jvm | src/test/scala/lists/ScalaListsTest.scala | Scala | apache-2.0 | 1,676 |
package workers
import akka.actor.{ActorLogging, Actor}
import model.ItemOrder
import service.ItemService
import scala.concurrent.blocking
/**
 * Actor front-end for [[ItemService]].
 *
 * Accepts two message types:
 *  - [[ItemOrder]]: creates the item via `ItemService.create` and replies with the result.
 *  - `Int` (an item reference): replies with `ItemService.get(itemRef)`.
 *
 * Service calls are wrapped in `blocking` to hint the dispatcher that they may block.
 */
class ItemManager extends Actor with ActorLogging {

  // Fix: `def receive = _ match {...}` yields a Function1, not the
  // PartialFunction[Any, Unit] required by Actor.receive. Use the idiomatic
  // partial-function literal instead; behavior is otherwise unchanged.
  override def receive: Receive = {
    case order: ItemOrder => blocking {
      log.info(s"Got order => ${order.description}")
      sender ! ItemService.create(order)
    }
    case itemRef: Int => blocking {
      sender ! ItemService.get(itemRef)
    }
  }
}
| lukaszbudnik/spray-services-kamon | src/main/scala/workers/ItemManager.scala | Scala | apache-2.0 | 452 |
/*
* Copyright 2013 - 2015, Daniel Krzywicki <daniel.krzywicki@agh.edu.pl>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package pl.edu.agh.scalamas.mas.logic
import pl.edu.agh.scalamas.mas.LogicTypes._
/**
* Created by Daniel on 2015-01-14.
*/
/**
 * Mixin contract for components that supply the initial agent population.
 * Implementors provide a [[PopulationProvider]] via `populationStrategy`.
 */
trait PopulationStrategy {
  // The concrete strategy used to build the starting population.
  def populationStrategy: PopulationProvider
  trait PopulationProvider {
    // Population to seed the simulation with (type from LogicTypes).
    def initialPopulation: Population
  }
}
package services.arrivals
import drt.shared.api.FlightCodeSuffix
import drt.shared.{CarrierCode, FlightCode, PcpUtils, VoyageNumber}
import services.crunch.CrunchTestLike
class FlightCodeSpec extends CrunchTestLike {
  // Fix: inside a specs2 `>>` block only the LAST expression becomes the
  // test result, so the earlier `===` checks were silently discarded.
  // Chaining every check with `and` makes all of them count.
  "Can parse an IATA to carrier code and voyage number" >> {
    (FlightCode("FR8364", "") === FlightCode(CarrierCode("FR"), VoyageNumber("8364"), None)) and
      (FlightCode("FR836", "") === FlightCode(CarrierCode("FR"), VoyageNumber("836"), None)) and
      (FlightCode("FR836F", "") === FlightCode(CarrierCode("FR"), VoyageNumber("836"), Option(FlightCodeSuffix("F")))) and
      (FlightCode("U2836F", "") === FlightCode(CarrierCode("U2"), VoyageNumber("836"), Option(FlightCodeSuffix("F")))) and
      (FlightCode("0B836F", "") === FlightCode(CarrierCode("0B"), VoyageNumber("836"), Option(FlightCodeSuffix("F"))))
  }
  "Can parse an ICAO to carrier code and voyage number" >> {
    (FlightCode("RYR8364", "") === FlightCode(CarrierCode("RYR"), VoyageNumber("8364"), None)) and
      (FlightCode("RYR836", "") === FlightCode(CarrierCode("RYR"), VoyageNumber("836"), None)) and
      (FlightCode("RYR836F", "") === FlightCode(CarrierCode("RYR"), VoyageNumber("836"), Option(FlightCodeSuffix("F"))))
  }
}
| UKHomeOffice/drt-scalajs-spa-exploration | server/src/test/scala/services/arrivals/FlightCodeSpec.scala | Scala | apache-2.0 | 1,179 |
/*
* Copyright 2011-2019 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.compiler
package graph
import org.objectweb.asm.Type
import com.asakusafw.lang.compiler.planning.SubPlan
import com.asakusafw.spark.tools.asm._
import com.asakusafw.spark.tools.asm.MethodBuilder._
object InputInstantiator extends Instantiator {

  /**
   * Emits bytecode equivalent to `new <nodeType>(broadcasts, jobContext)` and
   * stores the resulting instance into a fresh local variable, which is returned.
   * `subplan` and `subplanToIdx` are unused for input nodes.
   */
  override def newInstance(
    nodeType: Type,
    subplan: SubPlan,
    subplanToIdx: Map[SubPlan, Int])(
    vars: Instantiator.Vars)(
    implicit mb: MethodBuilder,
    context: Instantiator.Context): Var = {
    // NEW + DUP so one reference is consumed by the constructor call and
    // the other remains on the stack for `store()`.
    val input = pushNew(nodeType)
    input.dup().invokeInit(
      vars.broadcasts.push(),
      vars.jobContext.push())
    input.store()
  }
}
| ashigeru/asakusafw-spark | compiler/src/main/scala/com/asakusafw/spark/compiler/graph/InputInstantiator.scala | Scala | apache-2.0 | 1,260 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.consumers
import monix.execution.Callback
import monix.execution.Ack.{Continue, Stop}
import monix.execution.{Ack, Cancelable, Scheduler}
import monix.execution.atomic.{Atomic, PaddingStrategy}
import monix.execution.cancelables.{AssignableCancelable, SingleAssignCancelable}
import scala.util.control.NonFatal
import monix.reactive.Consumer
import monix.reactive.internal.consumers.LoadBalanceConsumer.IndexedSubscriber
import monix.reactive.observers.Subscriber
import scala.annotation.tailrec
import scala.collection.immutable.{BitSet, Queue}
import scala.collection.mutable.ListBuffer
import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success}
/** Implementation for [[monix.reactive.Consumer.loadBalance]]. */
private[reactive]
final class LoadBalanceConsumer[-In, R]
  (parallelism: Int, consumers: Array[Consumer[In, R]])
  extends Consumer[In, List[R]] {

  require(parallelism > 0, s"parallelism = $parallelism, should be > 0")
  require(consumers.length > 0, "consumers list must not be empty")

  // NOTE: onFinish MUST BE synchronized by `self` and
  // double-checked by means of `isDone`
  def createSubscriber(onFinish: Callback[Throwable, List[R]], s: Scheduler): (Subscriber[In], AssignableCancelable) = {
    // Assignable cancelable returned, can be used to cancel everything
    // since it will be assigned the stream subscription
    val mainCancelable = SingleAssignCancelable()

    val balanced = new Subscriber[In] { self =>
      implicit val scheduler = s

      // Trying to prevent contract violations, once this turns
      // true, then no final events are allowed to happen.
      // MUST BE synchronized by `self`.
      private[this] var isUpstreamComplete = false

      // Trying to prevent contract violations. Turns true in case
      // we already signaled a result upstream.
      // MUST BE synchronized by `self`.
      private[this] var isDownstreamDone = false

      // Stores the error that was reported upstream - basically
      // multiple subscribers can report multiple errors, but we
      // emit the first one, so in case multiple errors happen we
      // want to log them, but only if they aren't the same reference
      // MUST BE synchronized by `self`
      private[this] var reportedError: Throwable = _

      // Results accumulator - when length == parallelism,
      // that's when we need to trigger `onFinish.onSuccess`.
      // MUST BE synchronized by `self`
      private[this] val accumulator = ListBuffer.empty[R]

      /** Builds cancelables for subscribers. */
      private def newCancelableFor(out: IndexedSubscriber[In]): Cancelable =
        new Cancelable {
          private[this] var isCanceled = false
          // Forcing an asynchronous boundary, to avoid any possible
          // initialization issues (in building subscribersQueue) or
          // stack overflows and other problems
          def cancel(): Unit = scheduler.executeAsync { () =>
            // We are required to synchronize, because we need to
            // make sure that subscribersQueue is fully created before
            // triggering any cancellation!
            self.synchronized {
              // Guards the idempotency contract of cancel(); not really
              // required, because `deactivate()` should be idempotent, but
              // since we are doing an expensive synchronize, we might as well
              if (!isCanceled) {
                isCanceled = true
                interruptOne(out, null)
              }
            }
          }
        }

      // Asynchronous queue that serves idle subscribers waiting
      // for something to process, or that puts the stream on wait
      // until there are subscribers available
      private[this] val subscribersQueue = self.synchronized {
        var initial = Queue.empty[IndexedSubscriber[In]]
        // When the callback gets called by each subscriber, on success we
        // do nothing because for normal completion we are listing on
        // `Stop` events from onNext, but on failure we deactivate all.
        val callback = new Callback[Throwable, R] {
          def onSuccess(value: R): Unit =
            accumulate(value)
          def onError(ex: Throwable): Unit =
            interruptAll(ex)
        }
        // The `parallelism` subscribers are built by cycling through the
        // given consumers array (round-robin when parallelism > length).
        val arrLen = consumers.length
        var i = 0
        while (i < parallelism) {
          val (out, c) = consumers(i % arrLen).createSubscriber(callback, s)
          val indexed = IndexedSubscriber(i, out)
          // Every created subscriber has the opportunity to cancel the
          // main subscription if needed, cancellation thus happening globally
          c := newCancelableFor(indexed)
          initial = initial.enqueue(indexed)
          i += 1
        }
        new LoadBalanceConsumer.AsyncQueue(initial, parallelism)
      }

      // Pulls an idle subscriber from the queue and forwards `elem` to it;
      // a `null` subscriber is the protocol's signal that everything has
      // been deactivated, in which case the stream is stopped.
      def onNext(elem: In): Future[Ack] = {
        // Declares a stop event, completing the callback
        def stop(): Ack = self.synchronized {
          // Protecting against contract violations
          isUpstreamComplete = true
          Stop
        }

        // Are there subscribers available?
        val sf = subscribersQueue.poll()
        // Doing a little optimization to prevent one async boundary
        sf.value match {
          case Some(Success(subscriber)) =>
            // As a matter of protocol, if null values happen, then
            // this means that all subscribers have been deactivated and so
            // we should cancel the streaming.
            if (subscriber == null) stop() else {
              signalNext(subscriber, elem)
              Continue
            }
          case _ => sf.map {
            case null => stop()
            case subscriber =>
              signalNext(subscriber, elem)
              Continue
          }
        }
      }

      /** Triggered whenever the subscribers are finishing with onSuccess */
      private def accumulate(value: R): Unit = self.synchronized {
        if (!isDownstreamDone) {
          accumulator += value
          if (accumulator.length == parallelism) {
            isDownstreamDone = true
            onFinish.onSuccess(accumulator.toList)
            // GC relief
            accumulator.clear()
          }
        }
      }

      /** Triggered whenever we need to signal an `onError` upstream */
      private def reportErrorUpstream(ex: Throwable) = self.synchronized {
        if (isDownstreamDone) {
          // We only report errors that we haven't
          // reported to upstream by means of `onError`!
          if (reportedError != ex)
            scheduler.reportFailure(ex)
        } else {
          isDownstreamDone = true
          reportedError = ex
          onFinish.onError(ex)
          // GC relief
          accumulator.clear()
        }
      }

      /** Called whenever a subscriber stops its subscription, or
        * when an error gets thrown.
        */
      private def interruptOne(out: IndexedSubscriber[In], ex: Throwable): Unit = {
        // Deactivating the subscriber. In case all subscribers
        // have been deactivated, then we are done
        if (subscribersQueue.deactivate(out))
          interruptAll(ex)
      }

      /** When Stop or error is received, this makes sure the
        * streaming gets interrupted!
        */
      private def interruptAll(ex: Throwable): Unit = self.synchronized {
        // All the following operations are idempotent!
        isUpstreamComplete = true
        mainCancelable.cancel()
        subscribersQueue.deactivateAll()
        // Is this an error to signal?
        if (ex != null) reportErrorUpstream(ex)
      }

      /** Given a subscriber, signals the given element, then return
        * the subscriber to the queue if possible.
        */
      private def signalNext(out: IndexedSubscriber[In], elem: In): Unit = {
        // We are forcing an asynchronous boundary here, since we
        // don't want to block the main thread!
        scheduler.executeAsync { () =>
          try out.out.onNext(elem).syncOnComplete {
            case Success(ack) =>
              ack match {
                case Continue =>
                  // We have permission to continue from this subscriber
                  // so returning it to the queue, to be reused
                  subscribersQueue.offer(out)
                case Stop =>
                  interruptOne(out, null)
              }
            case Failure(ex) =>
              interruptAll(ex)
          } catch {
            case ex if NonFatal(ex) =>
              interruptAll(ex)
          }
        }
      }

      def onComplete(): Unit =
        signalComplete(null)

      def onError(ex: Throwable): Unit =
        signalComplete(ex)

      // Delivers the final event (onComplete when ex == null, onError
      // otherwise) to every still-active subscriber, one at a time, then
      // reports any error upstream exactly once.
      private def signalComplete(ex: Throwable): Unit = {
        def loop(activeCount: Int): Future[Unit] = {
          // If we no longer have active subscribers to
          // push events into, then the loop is finished
          if (activeCount <= 0)
            Future.successful(())
          else subscribersQueue.poll().flatMap {
            // By protocol, if a null happens, then there are
            // no more active subscribers available
            case null =>
              Future.successful(())
            case subscriber =>
              try {
                if (ex == null) subscriber.out.onComplete()
                else subscriber.out.onError(ex)
              } catch {
                case err if NonFatal(err) => s.reportFailure(err)
              }
              if (activeCount > 0) loop(activeCount-1)
              else Future.successful(())
          }
        }

        self.synchronized {
          // Protecting against contract violations.
          if (!isUpstreamComplete) {
            isUpstreamComplete = true
            // Starting the loop
            loop(subscribersQueue.activeCount).onComplete {
              case Success(()) =>
                if (ex != null) reportErrorUpstream(ex)
              case Failure(err) =>
                reportErrorUpstream(err)
            }
          } else if (ex != null) {
            reportErrorUpstream(ex)
          }
        }
      }
    }

    (balanced, mainCancelable)
  }
}
private[reactive] object LoadBalanceConsumer {
  /** Wraps a subscriber implementation into one
    * that exposes an ID.
    */
  private[reactive] final
  case class IndexedSubscriber[-In](id: Int, out: Subscriber[In])

  /** Lock-free (CAS-based) queue of idle subscribers. `poll()` either hands
    * out an idle subscriber or parks the caller in a `Waiting` promise until
    * `offer()` supplies one; a fulfilled `null` means all subscribers are
    * deactivated.
    */
  private final class AsyncQueue[In](
    initialQueue: Queue[IndexedSubscriber[In]], parallelism: Int) {

    private[this] val stateRef = {
      val initial: State[In] = Available(initialQueue, BitSet.empty, parallelism)
      Atomic.withPadding(initial, PaddingStrategy.LeftRight256)
    }

    // Number of subscribers that haven't been deactivated yet.
    def activeCount: Int =
      stateRef.get.activeCount

    // Returns a subscriber to the idle pool; if a consumer is parked in
    // `Waiting`, hands the subscriber straight to it. Canceled IDs are
    // silently dropped.
    @tailrec
    def offer(value: IndexedSubscriber[In]): Unit =
      stateRef.get match {
        case current @ Available(queue, canceledIDs, ac) =>
          if (ac > 0 && !canceledIDs(value.id)) {
            val update = Available(queue.enqueue(value), canceledIDs, ac)
            if (!stateRef.compareAndSet(current, update))
              offer(value)
          }
        case current @ Waiting(promise, canceledIDs, ac) =>
          if (!canceledIDs(value.id)) {
            val update = Available[In](Queue.empty, canceledIDs, ac)
            if (!stateRef.compareAndSet(current, update))
              offer(value)
            else
              promise.success(value)
          }
      }

    // Takes the next idle subscriber, or returns a Future that completes
    // when one is offered. Completes with `null` once nothing is active.
    // NOTE: only a single concurrent poll() is supported (see Waiting case).
    @tailrec
    def poll(): Future[IndexedSubscriber[In]] =
      stateRef.get match {
        case current @ Available(queue, canceledIDs, ac) =>
          if (ac <= 0)
            Future.successful(null)
          else if (queue.isEmpty) {
            val p = Promise[IndexedSubscriber[In]]()
            val update = Waiting(p, canceledIDs, ac)
            if (!stateRef.compareAndSet(current, update))
              poll()
            else
              p.future
          }
          else {
            val (ref, newQueue) = queue.dequeue
            val update = Available(newQueue, canceledIDs, ac)
            if (!stateRef.compareAndSet(current, update))
              poll()
            else
              Future.successful(ref)
          }
        case Waiting(_,_,_) =>
          Future.failed(new IllegalStateException("waiting in poll()"))
      }

    // Marks everything as inactive; a parked poll() is released with `null`.
    @tailrec
    def deactivateAll(): Unit =
      stateRef.get match {
        case current @ Available(_, canceledIDs, _) =>
          val update: State[In] = Available(Queue.empty, canceledIDs, 0)
          if (!stateRef.compareAndSet(current, update))
            deactivateAll()
        case current @ Waiting(promise, canceledIDs, _) =>
          val update: State[In] = Available(Queue.empty, canceledIDs, 0)
          if (!stateRef.compareAndSet(current, update))
            deactivateAll()
          else
            promise.success(null)
      }

    // Removes one subscriber from the active set; returns `true` exactly
    // when this call took the active count down to zero.
    @tailrec
    def deactivate(ref: IndexedSubscriber[In]): Boolean =
      stateRef.get match {
        case current @ Available(queue, canceledIDs, count) =>
          if (count <= 0) true else {
            val update = if (canceledIDs(ref.id)) current else {
              val newQueue = queue.filterNot(_.id == ref.id)
              Available(newQueue, canceledIDs+ref.id, count-1)
            }

            if (update.activeCount == current.activeCount)
              false // nothing to update
            else if (!stateRef.compareAndSet(current, update))
              deactivate(ref) // retry
            else
              update.activeCount == 0
          }

        case current @ Waiting(promise, canceledIDs, count) =>
          if (canceledIDs(ref.id)) count <= 0 else {
            val update =
              if (count - 1 > 0) Waiting(promise, canceledIDs+ref.id, count-1)
              else Available[In](Queue.empty, canceledIDs+ref.id, 0)

            if (!stateRef.compareAndSet(current, update))
              deactivate(ref) // retry
            else if (update.activeCount <= 0) {
              promise.success(null)
              true
            }
            else
              false
          }
      }
  }

  // State machine for AsyncQueue: either idle subscribers are Available,
  // or a single poll() is Waiting for one to be offered.
  private[reactive] sealed trait State[In] {
    def activeCount: Int
    def canceledIDs: Set[Int]
  }

  // Idle subscribers ready to be polled (possibly an empty queue).
  private[reactive] final case class Available[In](
    available: Queue[IndexedSubscriber[In]],
    canceledIDs: BitSet,
    activeCount: Int)
    extends State[In]

  // A parked poll() waiting for the next offered subscriber.
  private[reactive] final case class Waiting[In](
    promise: Promise[IndexedSubscriber[In]],
    canceledIDs: BitSet,
    activeCount: Int)
    extends State[In]
}
| Wogan/monix | monix-reactive/shared/src/main/scala/monix/reactive/internal/consumers/LoadBalanceConsumer.scala | Scala | apache-2.0 | 15,301 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util.collection.unsafe.sort
import com.google.common.primitives.UnsignedBytes
import org.scalatest.prop.PropertyChecks
import org.apache.spark.SparkFunSuite
import org.apache.spark.unsafe.types.UTF8String
class PrefixComparatorsSuite extends SparkFunSuite with PropertyChecks {

  test("String prefix comparator") {

    // The 8-byte prefix comparison must be consistent with full string
    // comparison: equal prefixes are inconclusive, but a strict prefix
    // ordering must match the strings' natural ordering.
    def testPrefixComparison(s1: String, s2: String): Unit = {
      val utf8string1 = UTF8String.fromString(s1)
      val utf8string2 = UTF8String.fromString(s2)
      val s1Prefix = PrefixComparators.StringPrefixComparator.computePrefix(utf8string1)
      val s2Prefix = PrefixComparators.StringPrefixComparator.computePrefix(utf8string2)
      val prefixComparisonResult = PrefixComparators.STRING.compare(s1Prefix, s2Prefix)

      val cmp = UnsignedBytes.lexicographicalComparator().compare(
        utf8string1.getBytes.take(8), utf8string2.getBytes.take(8))

      assert(
        (prefixComparisonResult == 0 && cmp == 0) ||
        (prefixComparisonResult < 0 && s1.compareTo(s2) < 0) ||
        (prefixComparisonResult > 0 && s1.compareTo(s2) > 0))
    }

    // scalastyle:off
    // Known past failures (multi-byte UTF-8 input), kept as regression cases.
    val regressionTests = Table(
      ("s1", "s2"),
      ("abc", "世界"),
      ("你好", "世界"),
      ("你好123", "你好122")
    )
    // scalastyle:on

    forAll (regressionTests) { (s1: String, s2: String) => testPrefixComparison(s1, s2) }
    forAll { (s1: String, s2: String) => testPrefixComparison(s1, s2) }
  }

  test("Binary prefix comparator") {

    // Reference implementation: lexicographic signed-byte comparison,
    // shorter array first on a tie.
    def compareBinary(x: Array[Byte], y: Array[Byte]): Int = {
      for (i <- 0 until x.length; if i < y.length) {
        val res = x(i).compare(y(i))
        if (res != 0) return res
      }
      x.length - y.length
    }

    // Prefix ordering may be inconclusive (== 0) but must never contradict
    // the reference comparison.
    def testPrefixComparison(x: Array[Byte], y: Array[Byte]): Unit = {
      val s1Prefix = PrefixComparators.BinaryPrefixComparator.computePrefix(x)
      val s2Prefix = PrefixComparators.BinaryPrefixComparator.computePrefix(y)
      val prefixComparisonResult =
        PrefixComparators.BINARY.compare(s1Prefix, s2Prefix)
      assert(
        (prefixComparisonResult == 0) ||
        (prefixComparisonResult < 0 && compareBinary(x, y) < 0) ||
        (prefixComparisonResult > 0 && compareBinary(x, y) > 0))
    }

    // scalastyle:off
    // Same regression table as the string test, exercised on UTF-8 bytes.
    val regressionTests = Table(
      ("s1", "s2"),
      ("abc", "世界"),
      ("你好", "世界"),
      ("你好123", "你好122")
    )
    // scalastyle:on

    forAll (regressionTests) { (s1: String, s2: String) =>
      testPrefixComparison(s1.getBytes("UTF-8"), s2.getBytes("UTF-8"))
    }
    forAll { (s1: String, s2: String) =>
      testPrefixComparison(s1.getBytes("UTF-8"), s2.getBytes("UTF-8"))
    }
  }

  test("double prefix comparator handles NaNs properly") {
    // Two distinct NaN bit patterns must normalize to the same prefix,
    // and that prefix must sort above Double.MaxValue.
    val nan1: Double = java.lang.Double.longBitsToDouble(0x7ff0000000000001L)
    val nan2: Double = java.lang.Double.longBitsToDouble(0x7fffffffffffffffL)
    assert(nan1.isNaN)
    assert(nan2.isNaN)
    val nan1Prefix = PrefixComparators.DoublePrefixComparator.computePrefix(nan1)
    val nan2Prefix = PrefixComparators.DoublePrefixComparator.computePrefix(nan2)
    assert(nan1Prefix === nan2Prefix)
    val doubleMaxPrefix = PrefixComparators.DoublePrefixComparator.computePrefix(Double.MaxValue)
    assert(PrefixComparators.DOUBLE.compare(nan1Prefix, doubleMaxPrefix) === 1)
  }
}
| ArvinDevel/onlineAggregationOnSparkV2 | core/src/test/scala/org/apache/spark/util/collection/unsafe/sort/PrefixComparatorsSuite.scala | Scala | apache-2.0 | 4,149 |
package plp.expressions1.util
import plp.expressions1.expression.{ExpAnd, ExpConcat, ExpEquals, ExpLength, ExpMenos, ExpNot, ExpOr, ExpSoma, ExpSub, Expressao, Valor, ValorBooleano, ValorConcreto, ValorInteiro, ValorString}
/**
 * Evaluation visitor: reduces each expression node to a concrete [[Valor]]
 * (string, integer or boolean). Method names follow the project's
 * Portuguese vocabulary (esq/dir = left/right operand).
 */
class VisitorAvaliar extends Visitor[Valor] {

  // Evaluates `e` (via the inherited `v` — presumably the visitor dispatch;
  // confirm in Visitor) and extracts the underlying raw value. Unchecked
  // cast: assumes evaluation always yields a ValorConcreto[T].
  private def valor[T](e: Expressao) = v(e).asInstanceOf[ValorConcreto[T]].valor

  // String concatenation.
  def visit(expr: ExpConcat) = {
    val esq: String = valor(expr.esq)
    val dir: String = valor(expr.dir)
    ValorString(esq + dir)
  }

  // Structural equality of the two evaluated operands.
  def visit(expr: ExpEquals) = {
    val esq: Any = valor(expr.esq)
    val dir: Any = valor(expr.dir)
    ValorBooleano(esq == dir)
  }

  // String length.
  def visit(expr: ExpLength) = {
    val exp: String = valor(expr.exp)
    ValorInteiro(exp.length)
  }

  // Unary minus.
  def visit(expr: ExpMenos) = {
    val exp: Int = valor(expr.exp)
    ValorInteiro(-exp)
  }

  // Logical negation.
  def visit(expr: ExpNot) = {
    val exp: Boolean = valor(expr.exp)
    ValorBooleano(!exp)
  }

  // Logical AND (both operands are evaluated first — no short-circuit).
  def visit(expr: ExpAnd) = {
    val esq: Boolean = valor(expr.esq)
    val dir: Boolean = valor(expr.dir)
    ValorBooleano(esq && dir)
  }

  // Logical OR (both operands are evaluated first — no short-circuit).
  def visit(expr: ExpOr) = {
    val esq: Boolean = valor(expr.esq)
    val dir: Boolean = valor(expr.dir)
    ValorBooleano(esq || dir)
  }

  // Integer addition.
  def visit(expr: ExpSoma) = {
    val esq: Int = valor(expr.esq)
    val dir: Int = valor(expr.dir)
    ValorInteiro(esq + dir)
  }

  // Integer subtraction.
  def visit(expr: ExpSub) = {
    val esq: Int = valor(expr.esq)
    val dir: Int = valor(expr.dir)
    ValorInteiro(esq - dir)
  }

  // Literal values evaluate to themselves.
  def visit(valorBooleano: ValorBooleano) = valorBooleano
  def visit(valorInteiro: ValorInteiro) = valorInteiro
  def visit(valorString: ValorString) = valorString
}
| lrlucena/PLP-Scala | src/plp/expressions1/util/VisitorAvaliar.scala | Scala | gpl-3.0 | 1,651 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.integrationtest
import java.nio.file.{Path, Paths}
import java.util.UUID
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import io.fabric8.kubernetes.client.DefaultKubernetesClient
import org.scalatest.concurrent.Eventually
import org.apache.spark.SparkConf
import org.apache.spark.deploy.k8s.integrationtest.TestConstants._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.JARS
import org.apache.spark.internal.config.Tests.IS_TESTING
import org.apache.spark.internal.config.UI.UI_ENABLED
/**
 * Holds the Kubernetes namespace, client and base Spark configuration shared
 * by the integration tests. A random namespace is generated unless one is
 * supplied via the `CONFIG_KEY_KUBE_NAMESPACE` system property.
 */
private[spark] class KubernetesTestComponents(defaultClient: DefaultKubernetesClient) {

  val namespaceOption = Option(System.getProperty(CONFIG_KEY_KUBE_NAMESPACE))
  val hasUserSpecifiedNamespace = namespaceOption.isDefined
  // Random namespaces have the dashes stripped from the UUID.
  val namespace = namespaceOption.getOrElse(UUID.randomUUID().toString.replaceAll("-", ""))
  val serviceAccountName =
    Option(System.getProperty(CONFIG_KEY_KUBE_SVC_ACCOUNT))
      .getOrElse("default")
  val kubernetesClient = defaultClient.inNamespace(namespace)
  val clientConfig = kubernetesClient.getConfiguration

  // Creates the test namespace in the cluster.
  def createNamespace(): Unit = {
    defaultClient.namespaces.createNew()
      .withNewMetadata()
      .withName(namespace)
      .endMetadata()
      .done()
  }

  // Deletes the namespace and blocks (via Eventually) until the cluster no
  // longer lists it, so subsequent tests start from a clean slate.
  def deleteNamespace(): Unit = {
    defaultClient.namespaces.withName(namespace).delete()
    Eventually.eventually(KubernetesSuite.TIMEOUT, KubernetesSuite.INTERVAL) {
      val namespaceList = defaultClient
        .namespaces()
        .list()
        .getItems
        .asScala
      require(!namespaceList.exists(_.getMetadata.getName == namespace))
    }
  }

  // Baseline configuration for a single-executor test app in this namespace.
  def newSparkAppConf(): SparkAppConf = {
    new SparkAppConf()
      .set("spark.master", s"k8s://${kubernetesClient.getMasterUrl}")
      .set("spark.kubernetes.namespace", namespace)
      .set("spark.executor.cores", "1")
      .set("spark.executor.instances", "1")
      .set("spark.app.name", "spark-test-app")
      .set(IS_TESTING.key, "false")
      .set(UI_ENABLED.key, "true")
      .set("spark.kubernetes.submission.waitAppCompletion", "false")
      .set("spark.kubernetes.authenticate.driver.serviceAccountName", serviceAccountName)
  }
}
/**
 * Minimal mutable builder for Spark configuration entries used by the
 * integration tests. Chainable via `set`, convertible to spark-submit
 * `--conf` arguments or a [[SparkConf]].
 */
private[spark] class SparkAppConf {

  // Backing store for the configuration entries.
  private val settings = mutable.Map.empty[String, String]

  /** Sets (or overwrites) an entry and returns `this` for chaining. */
  def set(key: String, value: String): SparkAppConf = {
    settings(key) = value
    this
  }

  /** Looks up an entry, returning the empty string when absent. */
  def get(key: String): String = settings.getOrElse(key, "")

  /** Stores the comma-joined jar list under the standard `spark.jars` key. */
  def setJars(jars: Seq[String]): Unit = set(JARS.key, jars.mkString(","))

  override def toString: String = settings.toString

  /** Renders every entry as a `--conf key=value` argument pair. */
  def toStringArray: Iterable[String] =
    settings.toList.flatMap { case (key, value) => List("--conf", s"$key=$value") }

  /** Copies all entries into a fresh [[SparkConf]]. */
  def toSparkConf: SparkConf = new SparkConf().setAll(settings)
}
// Arguments describing what to submit: the application resource (jar or
// python file), the main class (ignored for non-JVM apps) and its args.
private[spark] case class SparkAppArguments(
  mainAppResource: String,
  mainClass: String,
  appArgs: Array[String])
private[spark] object SparkAppLauncher extends Logging {

  /**
   * Launches an application in cluster deploy mode by shelling out to
   * `$sparkHomeDir/bin/spark-submit`, blocking up to `timeoutSecs`.
   *
   * @param appArguments main resource, main class and app args to submit
   * @param appConf      configuration rendered as `--conf` pairs
   * @param timeoutSecs  how long ProcessUtils waits for the process
   * @param sparkHomeDir Spark distribution containing bin/spark-submit
   * @param isJVM        when true, `--class` is passed; Python apps omit it
   * @param pyFiles      optional value for spark-submit's `--py-files`
   */
  def launch(
      appArguments: SparkAppArguments,
      appConf: SparkAppConf,
      timeoutSecs: Int,
      sparkHomeDir: Path,
      isJVM: Boolean,
      pyFiles: Option[String] = None): Unit = {
    val sparkSubmitExecutable = sparkHomeDir.resolve(Paths.get("bin", "spark-submit"))
    logInfo(s"Launching a spark app with arguments $appArguments and conf $appConf")
    // JVM apps need `--class`; Python/R apps are identified by the resource alone.
    val preCommandLine = if (isJVM) {
      mutable.ArrayBuffer(sparkSubmitExecutable.toFile.getAbsolutePath,
        "--deploy-mode", "cluster",
        "--class", appArguments.mainClass,
        "--master", appConf.get("spark.master"))
    } else {
      mutable.ArrayBuffer(sparkSubmitExecutable.toFile.getAbsolutePath,
        "--deploy-mode", "cluster",
        "--master", appConf.get("spark.master"))
    }
    // Order matters for spark-submit: options first, then the app resource,
    // then the application's own arguments.
    val commandLine =
      pyFiles.map(s => preCommandLine ++ Array("--py-files", s)).getOrElse(preCommandLine) ++
      appConf.toStringArray :+ appArguments.mainAppResource

    if (appArguments.appArgs.nonEmpty) {
      commandLine ++= appArguments.appArgs
    }
    logInfo(s"Launching a spark app with command line: ${commandLine.mkString(" ")}")
    ProcessUtils.executeProcess(commandLine.toArray, timeoutSecs)
  }
}
| dbtsai/spark | resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/KubernetesTestComponents.scala | Scala | apache-2.0 | 4,993 |
package controllers
import models.DAO
import models.DTO.User
import play.api.mvc._
import scala.concurrent.Future
import play.api.libs.concurrent.Execution.Implicits.defaultContext
/**
* Created by pnagarjuna on 23/05/15.
*/
/**
 * Mixin for Play controllers that guards actions behind a session-based login.
 * The authenticated principal is identified by the "email" key stored in the
 * Play session cookie.
 */
trait Secured {
  // Reads the logged-in user's email from the session, if any.
  private def email(request: RequestHeader) = request.session.get("email")
  // Sent when no valid session exists: redirect to the login page and drop the session.
  private def onUnauthorized(request: RequestHeader) = Results.Redirect(routes.Auth.login()).withNewSession

  /** Wraps a synchronous action body; the body runs on the default execution context. */
  def withAuth(f: String => Request[AnyContent] => Result) = Security.Authenticated(email, onUnauthorized) { email =>
    Action.async(request => Future(f(email)(request)))
  }

  /** Variant of withAuth for action bodies that already produce a Future[Result]. */
  def withFAuth(f: String => Request[AnyContent] => Future[Result]) = Security.Authenticated(email, onUnauthorized) { email =>
    Action.async(request => f(email)(request))
  }

  /** Variant of withFAuth that additionally accepts a custom body parser. */
  def withAsyncAuth[A](p: BodyParser[A])(f: String => Request[A] => Future[Result]) = Security.Authenticated(email, onUnauthorized) { email =>
    Action.async(p)(request => f(email)(request))
  }

  /**
   * Resolves the session email to a full [[User]] row before invoking the body;
   * responds as unauthorized when the user no longer exists.
   * `blocking` marks the synchronous DAO call so a blocking-aware execution
   * context can compensate for the occupied thread.
   */
  def withUser(f: User => Request[AnyContent] => Result) = withAuth { email => request =>
    scala.concurrent.blocking {
      DAO.getUser(email).map(user => f(user)(request)).getOrElse(onUnauthorized(request))
    }
  }

  /** Future-returning counterpart of withUser. */
  def withFUser(f: User => Request[AnyContent] => Future[Result]) = withFAuth { email => request =>
    scala.concurrent.blocking {
      DAO.getUser(email).map(user => f(user)(request)).getOrElse(Future(onUnauthorized(request)))
    }
  }

  /** Counterpart of withFUser with a custom body parser. */
  def withAsyncUser[A](p: BodyParser[A])(f: User => Request[A] => Future[Result]) = withAsyncAuth(p) { email => request =>
    scala.concurrent.blocking {
      DAO.getUser(email).map(user => f(user)(request)).getOrElse(Future(onUnauthorized(request)))
    }
  }
}
| pamu/ticketing-system | app/controllers/Secured.scala | Scala | apache-2.0 | 1,709 |
package models
import java.util.UUID
import akka.actor._
import com.redis.RedisClient
import scala.concurrent.duration._
import scala.language.postfixOps
import play.api.libs.json.{JsObject, JsString, JsValue, Json}
import play.api.libs.iteratee._
import play.api.libs.concurrent._
import akka.util.Timeout
import play.api.Play.current
import scala.concurrent.ExecutionContext.Implicits.global
import GameResult._
import shared.Board
import shared.Board.BoardInfo
import shared.GameElements.GameElement
import shared.MessageProtocol._
import scala.util.Random
object Game {
  // Timeout used for ask-pattern interactions with the game actor.
  implicit val timeout = Timeout(1 second)
  // Single shared actor instance that holds the state of all running games.
  lazy val default = Akka.system.actorOf(Props[Game])
}
/**
 * Actor coordinating multiplayer game sessions: creates game ids, pairs players,
 * persists per-round results in Redis and drives round transitions.
 */
class Game extends Actor with ActorLogging {
  // NOTE(review): appears unused within this actor — candidate for removal (confirm no external access).
  var members = Set.empty[String]
  // NOTE(review): Redis connection details are hard-coded; consider reading host/port from configuration.
  val redis = new RedisClient("localhost", 6379)
  // Maps a game id to the ActorRefs of the players participating in that game.
  // NOTE(review): entries are never removed, so finished games accumulate — confirm intended lifecycle.
  val players = scala.collection.mutable.Map[String, List[ActorRef]]()

  def receive = {
    case Join(None) =>
      // First player: create a new game id and an empty result list in Redis.
      val id = randomAlpha(5)
      val results = List()
      redis.set(id, Json.stringify(Json.toJson(results)))
      players += (id.toString -> List(sender()))
      sender() ! GameCreated(id.toString)
    case Join(Some(id)) =>
      // Second player joining an existing game: notify everyone that the game can start.
      if (players.get(id).isDefined) {
        val board = generateBoard()
        players += (id -> (players(id) :+ sender()))
        for (actorRef <- players(id)) {
          actorRef ! GameReady(board)
        }
      } else {
        sender() ! GameNotFound
      }
    case FinishedRound(id, time, correct, round) =>
      if (players.get(id).isDefined) {
        val resultsString = redis.get[String](id)
        if (resultsString.isDefined) {
          log.warning(s"resultsString = ${resultsString.get}")
          // Append this player's round result to the persisted list.
          val results: List[GameResult] =
            Json.parse(resultsString.get).as[List[GameResult]] :+ GameResult(sender().path.toString, time, correct, round)
          redis.set(id, Json.stringify(Json.toJson(results)))
          if (round > 5) {
            // Round limit reached: end the game for all players.
            for (actorRef <- players(id)) {
              actorRef ! EndOfGame
            }
          } else if (results.count(_.round == round) < 2) {
            // Only one result stored for this round yet: wait for the other player.
          } else {
            // Both players finished this round: deal new cards and start the next round.
            val card = generateCards(Board.board)
            for (actorRef <- players(id)) {
              actorRef ! NextRound(card)
            }
          }
        } else {
          // Game id known to the actor but missing from Redis.
          sender() ! GameNotFound
        }
      } else {
        sender() ! GameNotFound // id not found
      }
  }

  // Builds the initial board info: the static board plus a first set of cards.
  def generateBoard() =
    BoardInfo(Board.board, generateCards(Board.board))

  // Picks five distinct shuffled board elements and recombines their shapes/colors
  // into one target element and a pair of candidate cards.
  def generateCards(board: Seq[GameElement]): (GameElement, (GameElement, GameElement)) = {
    val shuffled = Random.shuffle(board)
    (shuffled(0), (GameElement(shuffled(1).shape, shuffled(2).color), GameElement(shuffled(3).shape, shuffled(4).color)))
  }

  // Random game id over an unambiguous alphabet (digits 2-9 and A-Z; avoids 0/1).
  def randomAlpha(length: Int): String = {
    val chars = ('2' to '9') ++ ('A' to 'Z')
    randomStringFromCharList(length, chars)
  }

  // Builds a random string of the given length from the supplied character pool.
  def randomStringFromCharList(length: Int, chars: Seq[Char]): String = {
    val sb = new StringBuilder
    for (i <- 1 to length) {
      val randomNum = util.Random.nextInt(chars.length)
      sb.append(chars(randomNum))
    }
    sb.toString
  }
}
case class Join(id: Option[String])
| lutzh/schnappen | server/app/models/Game.scala | Scala | apache-2.0 | 3,399 |
package org.hammerlab.bam.spark.load
import org.hammerlab.bam.test.resources.TestBams
import org.hammerlab.bgzf.block.HeaderParseException
import org.hammerlab.spark.test.suite.SparkSuite
import spark_bam._
/**
 * Verifies that loading a SAM (plain-text) file through the BAM loader fails with a
 * BGZF [[HeaderParseException]] carrying the expected byte-mismatch message.
 */
class LoadSamAsBamFails
  extends SparkSuite
    with TestBams {

  test("load") {
    val thrown = intercept[HeaderParseException] {
      sc.loadBam(sam2)
    }
    thrown.getMessage should be("Position 0: 64 != 31")
  }
}
| ryan-williams/spark-bam | load/src/test/scala/org/hammerlab/bam/spark/load/LoadSamAsBamFails.scala | Scala | apache-2.0 | 427 |
/*
* Copyright (c) 2012, Johannes Rudolph
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.virtualvoid.codec
/**
* A collection of codecs concerning Scala data structures.
*/
/**
 * A collection of codecs concerning Scala data structures.
 */
trait ScalaCodecs {

  /**
   * Apply a codec only on the first element of a `Tuple2`, leaving the second untouched.
   *
   * @param codec The codec to apply to the first element of the tuple.
   */
  case class OnFirst[I1, I2, O1](codec: Codec[I1, O1]) extends Codec[(I1, I2), (O1, I2)] {
    def name = "On first item of tuple do '%s'" format codec.name

    def encode(tuple: (I1, I2)) =
      codec.encode(tuple._1).right.map(encoded => (encoded, tuple._2))

    def decode(tuple: (O1, I2)) =
      codec.decode(tuple._1).right.map(decoded => (decoded, tuple._2))
  }

  /**
   * Apply a codec only on the second element of a `Tuple2`, leaving the first untouched.
   *
   * @param codec The codec to apply to the second element of the tuple.
   */
  case class OnSecond[I1, I2, O2](codec: Codec[I2, O2]) extends Codec[(I1, I2), (I1, O2)] {
    def name = "On second item of tuple do '%s'" format codec.name

    def encode(tuple: (I1, I2)) =
      codec.encode(tuple._2).right.map(encoded => (tuple._1, encoded))

    def decode(tuple: (I1, O2)) =
      codec.decode(tuple._2).right.map(decoded => (tuple._1, decoded))
  }

  /**
   * A codec which swaps the elements of a `Tuple2`. Swapping is its own inverse,
   * so encode and decode are symmetric.
   */
  case class ReverseTuple[T1, T2]() extends ReversibleCodecBase[(T1, T2), (T2, T1)] {
    def name = "Reverse tuple"

    def doEncode(i: (T1, T2)) = {
      val (first, second) = i
      (second, first)
    }

    def doDecode(o: (T2, T1)) = {
      val (first, second) = o
      (second, first)
    }
  }

  /**
   * A codec which joins two arrays of elements of the same type by concatenation. To be able
   * to decode the resulting concatenated array the first element must have a constant size in
   * all cases.
   *
   * @param firstBlockSize The size of the first data array
   */
  case class Join[T: ClassManifest](firstBlockSize: Int) extends ReversibleCodecBase[(Array[T], Array[T]), Array[T]] {
    def name = "Join two arrays"

    def doEncode(tuple: (Array[T], Array[T])) = {
      val (head, tail) = tuple
      assert(head.size == firstBlockSize, "The size of the first array must be %d but is %s" format (firstBlockSize, head.size))
      head ++ tail
    }

    // splitAt(n) == (take(n), drop(n)): recovers the original pair.
    def doDecode(block: Array[T]) =
      block.splitAt(firstBlockSize)
  }
}
| jrudolph/codecs | src/main/scala/net/virtualvoid/codec/ScalaCodecs.scala | Scala | bsd-2-clause | 3,614 |
/*
* Derived from Twitter Finagle.
*
* Original source:
* https://github.com/twitter/finagle/blob/6e2462acc32ac753bf4e9d8e672f9f361be6b2da/finagle-http/src/test/scala/com/twitter/finagle/http/path/PathSpec.scala
*/
package org.http4s
package dsl
import cats.effect.IO
import org.http4s.Uri.uri
import org.http4s.dsl.io._
import org.scalacheck.Arbitrary
import org.scalacheck.Arbitrary.arbitrary
/**
 * Tests for the http4s DSL path matchers: construction from strings, the
 * `/`, `/:`, `~` and `->` extractors, `Root`, percent-encoding round-trips,
 * and the typed `IntVar` / `LongVar` / `UUIDVar` segment extractors.
 */
class PathSpec extends Http4sSpec {
  // Generator for arbitrary paths, needed by the property checks at the bottom.
  implicit val arbitraryPath: Arbitrary[Path] =
    Arbitrary {
      arbitrary[List[String]].map(Path(_))
    }

  "Path" should {
    "/foo/bar" in {
      Path("/foo/bar") must_== Path("foo", "bar")
    }

    "foo/bar" in {
      Path("foo/bar") must_== Path("foo", "bar")
    }

    // A leading double slash yields an empty first segment.
    "//foo/bar" in {
      Path("//foo/bar") must_== Path("", "foo", "bar")
    }

    // `~` splits a final segment into base name and extension.
    "~ extractor on Path" in {
      (Path("/foo.json") match {
        case Root / "foo" ~ "json" => true
        case _ => false
      }) must beTrue
    }

    "~ extractor on filename foo.json" in {
      ("foo.json" match {
        case "foo" ~ "json" => true
        case _ => false
      }) must beTrue
    }

    // A filename without a dot matches with an empty extension.
    "~ extractor on filename foo" in {
      ("foo" match {
        case "foo" ~ "" => true
        case _ => false
      }) must beTrue
    }

    // `->` splits a Request into (method, path).
    "-> extractor /test.json" in {
      val req = Request[IO](method = Method.GET, uri = uri("/test.json"))
      (req match {
        case GET -> Root / "test.json" => true
        case _ => false
      }) must beTrue
    }

    "-> extractor /foo/test.json" in {
      val req = Request[IO](method = Method.GET, uri = uri("/foo/test.json"))
      (req match {
        case GET -> Root / "foo" / "test.json" => true
        case _ => false
      }) must beTrue
    }

    // Unicode alias of `->`.
    "→ extractor /test.json" in {
      val req = Request[IO](method = Method.GET, uri = uri("/test.json"))
      (req match {
        case GET → (Root / "test.json") => true
        case _ => false
      }) must beTrue
    }

    "request path info extractor for /" in {
      val req = Request[IO](method = Method.GET, uri = uri("/"))
      (req match {
        case _ -> Root => true
        case _ => false
      }) must beTrue
    }

    "Root extractor" in {
      (Path("/") match {
        case Root => true
        case _ => false
      }) must beTrue
    }

    // Root must match the whole path, not just its prefix.
    "Root extractor, no partial match" in {
      (Path("/test.json") match {
        case Root => true
        case _ => false
      }) must beFalse
    }

    "Root extractor, empty path" in {
      (Path("") match {
        case Root => true
        case _ => false
      }) must beTrue
    }

    "/ extractor" in {
      (Path("/1/2/3/test.json") match {
        case Root / "1" / "2" / "3" / "test.json" => true
        case _ => false
      }) must beTrue
    }

    // `/:` peels segments off the front, leaving the remaining Path.
    "/: extractor" in {
      (Path("/1/2/3/test.json") match {
        case "1" /: "2" /: path => Some(path)
        case _ => None
      }) must_== Some(Path("/3/test.json"))
    }

    "/: should not crash without trailing slash" in {
      // Bug reported on Gitter
      Path("/cameras/1NJDOI") match {
        case "cameras" /: _ /: "events" /: _ /: "exports" /: _ => false
        case _ => true
      }
    }

    // A trailing slash produces an empty final segment.
    "trailing slash" in {
      (Path("/1/2/3/") match {
        case Root / "1" / "2" / "3" / "" => true
        case _ => false
      }) must beTrue
    }

    // Percent-encoded segments are decoded before matching...
    "encoded chars" in {
      (Path("/foo%20bar/and%2For/1%2F2") match {
        case Root / "foo bar" / "and/or" / "1/2" => true
        case _ => false
      }) must beTrue
    }

    // ...and re-encoded when rendering back to a string.
    "encode chars in toString" in {
      (Root / "foo bar" / "and/or" / "1/2").toString must_==
        "/foo%20bar/and%2For/1%2F2"
    }

    "Int extractor" in {
      (Path("/user/123") match {
        case Root / "user" / IntVar(userId) => userId == 123
        case _ => false
      }) must beTrue
    }

    "Int extractor, invalid int" in {
      (Path("/user/invalid") match {
        case Root / "user" / IntVar(userId @ _) => true
        case _ => false
      }) must beFalse
    }

    // 2147483648 == Int.MaxValue + 1, so IntVar must not match.
    "Int extractor, number format error" in {
      (Path("/user/2147483648") match {
        case Root / "user" / IntVar(userId @ _) => true
        case _ => false
      }) must beFalse
    }

    "Long extractor" >> {
      "valid" >> {
        "small positive number" in {
          (Path("/user/123") match {
            case Root / "user" / LongVar(userId) => userId == 123
            case _ => false
          }) must beTrue
        }
        "negative number" in {
          (Path("/user/-432") match {
            case Root / "user" / LongVar(userId) => userId == -432
            case _ => false
          }) must beTrue
        }
      }
      "invalid" >> {
        "a word" in {
          (Path("/user/invalid") match {
            case Root / "user" / LongVar(userId @ _) => true
            case _ => false
          }) must beFalse
        }
        // 9223372036854775808 == Long.MaxValue + 1, so LongVar must not match.
        "number but out of domain" in {
          (Path("/user/9223372036854775808") match {
            case Root / "user" / LongVar(userId @ _) => true
            case _ => false
          }) must beFalse
        }
      }
    }

    "UUID extractor" >> {
      "valid" >> {
        "a UUID" in {
          (Path("/user/13251d88-7a73-4fcf-b935-54dfae9f023e") match {
            case Root / "user" / UUIDVar(userId) =>
              userId.toString == "13251d88-7a73-4fcf-b935-54dfae9f023e"
            case _ => false
          }) must beTrue
        }
      }
      "invalid" >> {
        "a number" in {
          (Path("/user/123") match {
            case Root / "user" / UUIDVar(userId @ _) => true
            case _ => false
          }) must beFalse
        }
        "a word" in {
          (Path("/user/invalid") match {
            case Root / "user" / UUIDVar(userId @ _) => true
            case _ => false
          }) must beFalse
        }
        "a bad UUID" in {
          (Path("/user/13251d88-7a73-4fcf-b935") match {
            case Root / "user" / UUIDVar(userId @ _) => true
            case _ => false
          }) must beFalse
        }
      }
    }

    // Round-trip property: rebuilding a Path from its segment list is the identity.
    "consistent apply / toList" in prop { p: Path =>
      Path(p.toList) must_== p
    }

    "Path.apply is stack safe" in {
      Path("/" * 1000000) must beAnInstanceOf[Path]
    }
  }
}
| aeons/http4s | dsl/src/test/scala/org/http4s/dsl/PathSpec.scala | Scala | apache-2.0 | 6,266 |
import sbt._
/** sbt 0.7-style project definition for MTS, using a non-standard source layout. */
class MTSProject(info: ProjectInfo) extends DefaultProject(info)
{
  // ScalaTest dependency used by the test sources.
  val scalatest = "org.scalatest" % "scalatest" % "1.2"
  //override val mainClass = Some("aggreg.Exp")
  // Sources/resources live directly under src/, resources/, test-src/ and test-resources/.
  override def mainScalaSourcePath = "src"
  override def mainResourcesPath = "resources"
  override def testScalaSourcePath = "test-src"
  override def testResourcesPath = "test-resources"
}
| paradigmatic/mts | project/build/Project.scala | Scala | gpl-3.0 | 391 |
package com.bostontechnologies.quickfixs.messages
import quickfix.Message
import quickfix.fix50.BusinessMessageReject
import quickfix.field._
/**
 * Typed read/write wrapper around a FIX BusinessMessageReject message.
 * Construction is private; use the companion object's factory methods.
 *
 * @param self the underlying QuickFIX/J message; must have the
 *             BusinessMessageReject message type (enforced by the require below)
 */
class RichBusinessMessageReject private(self: Message)
  extends RichMessage(self) {

  require(RichMessage.isA(self, RichBusinessMessageReject.msgType))

  // Free-form explanation text (optional field).
  def hasText: Boolean = self.isSetField(Text.FIELD)

  def text: String = self.getString(Text.FIELD)

  def text_=(value: String) {
    self.setString(Text.FIELD, value)
  }

  // Message type of the message being rejected.
  def refMessageType: String = self.getString(RefMsgType.FIELD)

  def refMessageType_=(value: String) {
    self.setString(RefMsgType.FIELD, value)
  }

  // Sequence number of the message being rejected.
  def refMessageSequenceNumber: Int = self.getInt(RefSeqNum.FIELD)

  def refMessageSequenceNumber_=(value: Int) {
    self.setInt(RefSeqNum.FIELD, value)
  }

  // Code identifying the reason for the reject.
  def businessRejectReason: Int = self.getInt(BusinessRejectReason.FIELD)

  def businessRejectReason_=(value: Int) {
    self.setInt(BusinessRejectReason.FIELD, value)
  }

  // Identifier of the rejected message's offending field/value (optional field).
  def hasRefTag: Boolean = self.isSetField(BusinessRejectRefID.FIELD)

  def refTag: Int = self.getInt(BusinessRejectRefID.FIELD)

  def refTag_=(value: Int) {
    self.setInt(BusinessRejectRefID.FIELD, value)
  }
}
/** Factory methods for [[RichBusinessMessageReject]] wrappers over FIX 4.4 / 5.0 messages. */
object RichBusinessMessageReject extends RichMessageExtractor[RichBusinessMessageReject, BusinessMessageReject] {

  /** FIX MsgType code identifying a BusinessMessageReject. */
  val msgType = MsgType.BUSINESS_MESSAGE_REJECT

  /** Wraps an existing FIX 5.0 BusinessMessageReject. */
  def apply(message: quickfix.fix50.BusinessMessageReject): RichBusinessMessageReject =
    new RichBusinessMessageReject(message)

  /** Creates an empty FIX 5.0 reject. */
  def new50Message: RichBusinessMessageReject = this(new quickfix.fix50.BusinessMessageReject())

  /** Wraps an existing FIX 4.4 BusinessMessageReject. */
  def apply(message: quickfix.fix44.BusinessMessageReject): RichBusinessMessageReject =
    new RichBusinessMessageReject(message)

  /** Creates an empty FIX 4.4 reject. */
  def new44Message: RichBusinessMessageReject = this(new quickfix.fix44.BusinessMessageReject())

  /** Creates an empty reject through the generic RichMessage factory. */
  def newMessage: RichBusinessMessageReject = new RichBusinessMessageReject(RichMessage.newMessage(msgType).self)

  /**
   * Builds a reject referencing the given request message, copying its message
   * type and sequence number, and setting the reject reason. The text field is
   * only populated when a non-empty text is supplied.
   */
  def apply(request: RichMessage, refTag: Int, reason: Int, text: String = ""): RichBusinessMessageReject = {
    val reject = RichBusinessMessageReject.newMessage
    reject.refMessageType = request.messageType
    reject.refMessageSequenceNumber = request.sequenceNumber
    reject.refTag = refTag
    reject.businessRejectReason = reason
    if (text.nonEmpty) {
      reject.text = text
    }
    reject
  }
}
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.crossdata.connector.mongodb
import com.mongodb.DBCollection
import com.mongodb.casbah.Imports.DBObject
import com.mongodb.casbah.MongoDB
import com.stratio.crossdata.connector.TableInventory.Table
import com.stratio.crossdata.connector.{TableInventory, TableManipulation}
import com.stratio.datasource.mongodb.config.{MongodbConfig, MongodbConfigBuilder, MongodbCredentials, MongodbSSLOptions}
import com.stratio.datasource.mongodb.{DefaultSource => ProviderDS, MongodbConnection, MongodbRelation}
import com.stratio.datasource.util.Config._
import com.stratio.datasource.util.{Config, ConfigBuilder}
import org.apache.spark.sql.SaveMode._
import org.apache.spark.sql.sources.{BaseRelation, DataSourceRegister}
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, SQLContext, SaveMode}
import scala.util.{Failure, Try}
/**
* Allows creation of MongoDB based tables using
* the syntax CREATE TEMPORARY TABLE ... USING com.stratio.deep.mongodb.
* Required options are detailed in [[com.stratio.datasource.mongodb.config.MongodbConfig]]
*/
class DefaultSource extends ProviderDS with TableInventory with DataSourceRegister with TableManipulation{

  import MongodbConfig._

  // Collection-creation option: whether the collection is capped.
  val MongoCollectionPropertyCapped:String= "capped"
  // Collection-creation option: maximum collection size (bytes), parsed as Int below.
  val MongoCollectionPropertySize:String= "size"
  // Collection-creation option: maximum number of documents, parsed as Int below.
  val MongoCollectionPropertyMax:String= "max"

  /** Short alias usable in `USING mongodb` clauses. */
  override def shortName(): String = "mongodb"

  /** Creates a relation whose schema will be inferred from the collection. */
  override def createRelation(
                               sqlContext: SQLContext,
                               parameters: Map[String, String]): BaseRelation = {

    MongodbXDRelation(
      MongodbConfigBuilder(parseParameters(parameters))
        .build())(sqlContext)
  }

  /** Creates a relation with a user-provided schema (skips schema inference). */
  override def createRelation(
                               sqlContext: SQLContext,
                               parameters: Map[String, String],
                               schema: StructType): BaseRelation = {

    MongodbXDRelation(
      MongodbConfigBuilder(parseParameters(parameters))
        .build(),Some(schema))(sqlContext)
  }

  /**
   * Creates a relation and writes `data` into it according to `mode`:
   * Append/Overwrite always write; ErrorIfExists fails on a non-empty collection;
   * Ignore silently skips the write when the collection is non-empty.
   */
  override def createRelation(
                               sqlContext: SQLContext,
                               mode: SaveMode,
                               parameters: Map[String, String],
                               data: DataFrame): BaseRelation = {

    val mongodbRelation = MongodbXDRelation(
      MongodbConfigBuilder(parseParameters(parameters))
        .build())(sqlContext)

    mode match{
      case Append => mongodbRelation.insert(data, overwrite = false)
      case Overwrite => mongodbRelation.insert(data, overwrite = true)
      case ErrorIfExists => if(mongodbRelation.isEmptyCollection) mongodbRelation.insert(data, overwrite = false)
      else throw new UnsupportedOperationException("Writing in a non-empty collection.")
      case Ignore => if(mongodbRelation.isEmptyCollection) mongodbRelation.insert(data, overwrite = false)
    }

    mongodbRelation
  }

  /**
   * @inheritdoc
   */
  override def generateConnectorOpts(item: Table, userOpts: Map[String, String]): Map[String, String] = Map(
    Database -> item.database.get,
    Collection -> item.tableName
  ) ++ userOpts

  /**
   * @inheritdoc
   * Enumerates all databases/collections reachable through the configured hosts,
   * optionally filtered by the Database/Collection options.
   */
  override def listTables(context: SQLContext, options: Map[String, String]): Seq[Table] = {

    Seq(Host).foreach { opName =>
      if (!options.contains(opName)) sys.error( s"""Option "$opName" is mandatory for IMPORT TABLES""")
    }

    MongodbConnection.withClientDo(parseParametersWithoutValidation(options)) { mongoClient =>

      def extractAllDatabases: Seq[MongoDB] =
        mongoClient.getDatabaseNames().map(mongoClient.getDB)

      def extractAllCollections(db: MongoDB): Seq[DBCollection] =
        db.getCollectionNames().map(db.getCollection).toSeq

      val tablesIt: Iterable[Table] = for {
        database: MongoDB <- extractAllDatabases
        collection: DBCollection <- extractAllCollections(database)
        if options.get(Database).forall( _ == collection.getDB.getName)
        if options.get(Collection).forall(_ == collection.getName)
      } yield {
        collectionToTable(context, options, database.getName, collection.getName)
      }
      tablesIt.toSeq
    }
  }

  // Avoids importing system tables ("system.*" collections and the "local" database).
  override def exclusionFilter(t: TableInventory.Table): Boolean =
    !t.tableName.startsWith("""system.""") && !t.database.get.equals("local")

  // Builds a Table descriptor for one collection, inferring its schema via MongodbRelation.
  private def collectionToTable(context: SQLContext, options: Map[String, String], database: String, collection: String): Table = {

    val collectionConfig = MongodbConfigBuilder()
      .apply(parseParameters(options + (Database -> database) + (Collection -> collection)))
      .build()

    Table(collection, Some(database), Some(new MongodbRelation(collectionConfig)(context).schema))
  }

  /**
   * Creates the backing MongoDB collection for an external table, honoring the
   * capped/size/max collection options when present.
   * NOTE(review): `sys.error` throws, so the trailing `None` in the catch branch
   * is unreachable dead code.
   */
  override def createExternalTable(context: SQLContext,
                                   tableName: String,
                                   databaseName: Option[String],
                                   schema: StructType,
                                   options: Map[String, String]): Option[Table] = {

    val database: String = options.get(Database).orElse(databaseName).
      getOrElse(throw new RuntimeException(s"$Database required when use CREATE EXTERNAL TABLE command"))
    val collection: String = options.getOrElse(Collection, tableName)
    val mongoOptions = DBObject()
    options.map {
      case (MongoCollectionPropertyCapped, value) => mongoOptions.put(MongoCollectionPropertyCapped, value)
      case (MongoCollectionPropertySize, value) => mongoOptions.put(MongoCollectionPropertySize, value.toInt)
      case (MongoCollectionPropertyMax, value) => mongoOptions.put(MongoCollectionPropertyMax, value.toInt)
      case _ =>
    }
    try {
      MongodbConnection.withClientDo(parseParametersWithoutValidation(options)) { mongoClient =>
        mongoClient.getDB(database).createCollection(collection, mongoOptions)
      }
      Option(Table(collection, Option(database), Option(schema)))
    } catch {
      case e: Exception =>
        sys.error(e.getMessage)
        None
    }
  }

  /** Drops the backing collection; fails when Database/Collection options are missing. */
  override def dropExternalTable(context: SQLContext,
                                 options: Map[String, String]): Try[Unit] = {

    val tupleDbColl = for {
      db <- options.get(Database)
      coll <- options.get(Collection)
    } yield (db, coll)

    tupleDbColl.fold[Try[Unit]](
      ifEmpty = Failure(throw new RuntimeException(s"Required options not found ${Set(Database, Collection) -- options.keys}"))
    ) { case (dbName, collName) =>
      Try {
        MongodbConnection.withClientDo(parseParametersWithoutValidation(options)) { mongoClient =>
          mongoClient.getDB(dbName).getCollection(collName).drop()
        }
      }
    }
  }

  // TODO refactor datasource -> avoid duplicated method
  // Parses connection parameters without enforcing the full datasource validation.
  def parseParametersWithoutValidation(parameters : Map[String,String]): Config = {

    // required properties
    /** We will assume hosts are provided like 'host:port,host2:port2,...' */
    val properties: Map[String, Any] = parameters.updated(Host, parameters.getOrElse(Host, notFound[String](Host)).split(",").toList)

    //optional parseable properties
    val optionalProperties: List[String] = List(Credentials,SSLOptions, UpdateFields)

    val finalProperties = (properties /: optionalProperties){

      /** We will assume credentials are provided like 'user,database,password;user,database,password;...' */
      case (properties,Credentials) =>
        parameters.get(Credentials).map{ credentialInput =>
          val credentials = credentialInput.split(";").map(_.split(",")).toList
            .map(credentials => MongodbCredentials(credentials(0), credentials(1), credentials(2).toCharArray))
          properties + (Credentials -> credentials)
        } getOrElse properties

      /** We will assume ssloptions are provided like '/path/keystorefile,keystorepassword,/path/truststorefile,truststorepassword' */
      case (properties,SSLOptions) =>
        parameters.get(SSLOptions).map{ ssloptionsInput =>
          val ssloption = ssloptionsInput.split(",")
          val ssloptions = MongodbSSLOptions(Some(ssloption(0)), Some(ssloption(1)), ssloption(2), Some(ssloption(3)))
          properties + (SSLOptions -> ssloptions)
        } getOrElse properties

      /** We will assume fields are provided like 'user,database,password...' */
      case (properties, UpdateFields) =>
        parameters.get(UpdateFields).map{ updateInputs =>
          val updateFields = updateInputs.split(",")
          properties + (UpdateFields -> updateFields)
        } getOrElse properties
    }

    MongodbConnectorConfigBuilder(finalProperties).build()
  }

  // TODO refactor datasource -> avoid duplicated config builder
  // Minimal config builder requiring only the Host property.
  case class MongodbConnectorConfigBuilder(props: Map[Property, Any] = Map()) extends {
    override val properties = Map() ++ props
  } with ConfigBuilder[MongodbConnectorConfigBuilder](properties) {
    val requiredProperties: List[Property] = MongodbConfig.Host :: Nil

    def apply(props: Map[Property, Any]) = MongodbConnectorConfigBuilder(props)
  }
}
package breeze.stats.distributions
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import org.scalacheck._
import org.scalatest._
import org.scalatest.funsuite._
import org.scalatestplus.scalacheck._
// VonMises variance depends on some reasonable handling of % 2 * pi, so we'll not include it.
/**
 * Exponential-family tests for the VonMises distribution. The mean parameter mu
 * is generated in [0, 2*pi); the concentration parameter is kept in a bounded
 * positive range so estimation stays stable.
 */
class VonMisesTest
  extends AnyFunSuite
    with Checkers
    with UnivariateContinuousDistrTestBase
    with ExpFamTest[VonMises, Double] {
  import Arbitrary.arbitrary

  val expFam: VonMises.type = VonMises

  // (mu, k) pairs: mu in [0, 2*pi), k in [1.5, 4.5).
  def arbParameter: Arbitrary[(Double, Double)] = Arbitrary {
    for {
      mu <- arbitrary[Double].map(d => d.abs % (2 * math.Pi))
      k <- arbitrary[Double].map(d => d.abs % 3.0 + 1.5)
    } yield (mu, k)
  }

  // Relative-error comparison: mu values are compared through sin to account for
  // angular wrap-around; k values are compared directly.
  def paramsClose(p: (Double, Double), b: (Double, Double)) = {
    val muClose = (math.sin(p._1) - math.sin(b._1)).abs / (math.sin(p._1).abs / 2 + math.sin(b._1).abs / 2 + 1) < 1E-1
    val kClose = (p._2 - b._2).abs / (p._2.abs / 2 + b._2.abs / 2 + 1) < 1E-1
    muClose && kClose
  }

  def asDouble(x: Double) = x

  def fromDouble(x: Double) = x

  // Distribution generator: mu in [0, 2*pi), concentration in [1.1, 4.1).
  implicit def arbDistr = Arbitrary {
    for {
      mu <- arbitrary[Double].map(d => math.abs(d) % (2 * math.Pi))
      kappa <- arbitrary[Double].map(d => math.abs(d) % 3.0 + 1.1)
    } yield new VonMises(mu, kappa)
  }

  type Distr = VonMises
}
| scalanlp/breeze | math/src/test/scala/breeze/stats/distributions/VonMisesTest.scala | Scala | apache-2.0 | 1,911 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeMap}
import org.apache.spark.sql.catalyst.plans.{Inner, PlanTest}
import org.apache.spark.sql.catalyst.plans.logical.{ColumnStat, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.RuleExecutor
import org.apache.spark.sql.catalyst.statsEstimation.{StatsEstimationTestBase, StatsTestPlan}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.{CASE_SENSITIVE, CBO_ENABLED, JOIN_REORDER_ENABLED}
class JoinReorderSuite extends PlanTest with StatsEstimationTestBase {
override val conf = new SQLConf().copy(
CASE_SENSITIVE -> true, CBO_ENABLED -> true, JOIN_REORDER_ENABLED -> true)
object Optimize extends RuleExecutor[LogicalPlan] {
val batches =
Batch("Operator Optimizations", FixedPoint(100),
CombineFilters,
PushDownPredicate,
ReorderJoin(conf),
PushPredicateThroughJoin,
ColumnPruning,
CollapseProject) ::
Batch("Join Reorder", Once,
CostBasedJoinReorder(conf)) :: Nil
}
/** Set up tables and columns for testing */
private val columnInfo: AttributeMap[ColumnStat] = AttributeMap(Seq(
attr("t1.k-1-2") -> ColumnStat(distinctCount = 2, min = Some(1), max = Some(2),
nullCount = 0, avgLen = 4, maxLen = 4),
attr("t1.v-1-10") -> ColumnStat(distinctCount = 10, min = Some(1), max = Some(10),
nullCount = 0, avgLen = 4, maxLen = 4),
attr("t2.k-1-5") -> ColumnStat(distinctCount = 5, min = Some(1), max = Some(5),
nullCount = 0, avgLen = 4, maxLen = 4),
attr("t3.v-1-100") -> ColumnStat(distinctCount = 100, min = Some(1), max = Some(100),
nullCount = 0, avgLen = 4, maxLen = 4),
attr("t4.k-1-2") -> ColumnStat(distinctCount = 2, min = Some(1), max = Some(2),
nullCount = 0, avgLen = 4, maxLen = 4),
attr("t4.v-1-10") -> ColumnStat(distinctCount = 10, min = Some(1), max = Some(10),
nullCount = 0, avgLen = 4, maxLen = 4),
attr("t5.k-1-5") -> ColumnStat(distinctCount = 5, min = Some(1), max = Some(5),
nullCount = 0, avgLen = 4, maxLen = 4),
attr("t5.v-1-5") -> ColumnStat(distinctCount = 5, min = Some(1), max = Some(5),
nullCount = 0, avgLen = 4, maxLen = 4)
))
private val nameToAttr: Map[String, Attribute] = columnInfo.map(kv => kv._1.name -> kv._1)
private val nameToColInfo: Map[String, (Attribute, ColumnStat)] =
columnInfo.map(kv => kv._1.name -> kv)
// Table t1/t4: big table with two columns
private val t1 = StatsTestPlan(
outputList = Seq("t1.k-1-2", "t1.v-1-10").map(nameToAttr),
rowCount = 1000,
// size = rows * (overhead + column length)
size = Some(1000 * (8 + 4 + 4)),
attributeStats = AttributeMap(Seq("t1.k-1-2", "t1.v-1-10").map(nameToColInfo)))
private val t4 = StatsTestPlan(
outputList = Seq("t4.k-1-2", "t4.v-1-10").map(nameToAttr),
rowCount = 2000,
size = Some(2000 * (8 + 4 + 4)),
attributeStats = AttributeMap(Seq("t4.k-1-2", "t4.v-1-10").map(nameToColInfo)))
// Table t2/t3: small table with only one column
private val t2 = StatsTestPlan(
outputList = Seq("t2.k-1-5").map(nameToAttr),
rowCount = 20,
size = Some(20 * (8 + 4)),
attributeStats = AttributeMap(Seq("t2.k-1-5").map(nameToColInfo)))
private val t3 = StatsTestPlan(
outputList = Seq("t3.v-1-100").map(nameToAttr),
rowCount = 100,
size = Some(100 * (8 + 4)),
attributeStats = AttributeMap(Seq("t3.v-1-100").map(nameToColInfo)))
// Table t5: small table with two columns
private val t5 = StatsTestPlan(
outputList = Seq("t5.k-1-5", "t5.v-1-5").map(nameToAttr),
rowCount = 20,
size = Some(20 * (8 + 4)),
attributeStats = AttributeMap(Seq("t5.k-1-5", "t5.v-1-5").map(nameToColInfo)))
test("reorder 3 tables") {
val originalPlan =
t1.join(t2).join(t3).where((nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5")) &&
(nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100")))
// The cost of original plan (use only cardinality to simplify explanation):
// cost = cost(t1 J t2) = 1000 * 20 / 5 = 4000
// In contrast, the cost of the best plan:
// cost = cost(t1 J t3) = 1000 * 100 / 100 = 1000 < 4000
// so (t1 J t3) J t2 is better (has lower cost, i.e. intermediate result size) than
// the original order (t1 J t2) J t3.
val bestPlan =
t1.join(t3, Inner, Some(nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100")))
.join(t2, Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5")))
assertEqualPlans(originalPlan, bestPlan)
}
test("put unjoinable item at the end and reorder 3 joinable tables") {
  // The ReorderJoin rule puts the unjoinable item at the end, and then CostBasedJoinReorder
  // reorders other joinable items.
  // t4 has no join condition with the others, so it must end up as the last
  // (cross) join while t1/t2/t3 are reordered as in the 3-table test above.
  val originalPlan =
    t1.join(t2).join(t4).join(t3).where((nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5")) &&
      (nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100")))
  val bestPlan =
    t1.join(t3, Inner, Some(nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100")))
      .join(t2, Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5")))
      .join(t4)
  assertEqualPlans(originalPlan, bestPlan)
}
// A projection that only references attributes should not block join reordering;
// the optimizer may insert intermediate projections to keep outputs minimal.
test("reorder 3 tables with pure-attribute project") {
  val originalPlan =
    t1.join(t2).join(t3).where((nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5")) &&
      (nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100")))
      .select(nameToAttr("t1.v-1-10"))
  val bestPlan =
    t1.join(t3, Inner, Some(nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100")))
      .select(nameToAttr("t1.k-1-2"), nameToAttr("t1.v-1-10"))
      .join(t2, Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5")))
      .select(nameToAttr("t1.v-1-10"))
  assertEqualPlans(originalPlan, bestPlan)
}
// Same as above, but here one of the reorderable leaf items is itself a
// Project node (a projection of t5), not a bare relation.
test("reorder 3 tables - one of the leaf items is a project") {
  val originalPlan =
    t1.join(t5).join(t3).where((nameToAttr("t1.k-1-2") === nameToAttr("t5.k-1-5")) &&
      (nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100")))
      .select(nameToAttr("t1.v-1-10"))
  // Items: t1, t3, project(t5.k-1-5, t5)
  val bestPlan =
    t1.join(t3, Inner, Some(nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100")))
      .select(nameToAttr("t1.k-1-2"), nameToAttr("t1.v-1-10"))
      .join(t5.select(nameToAttr("t5.k-1-5")), Inner,
        Some(nameToAttr("t1.k-1-2") === nameToAttr("t5.k-1-5")))
      .select(nameToAttr("t1.v-1-10"))
  assertEqualPlans(originalPlan, bestPlan)
}
// A projection containing a non-attribute expression ("key" is computed from
// two columns) prevents reordering, so the plan must come out unchanged.
test("don't reorder if project contains non-attribute") {
  val originalPlan =
    t1.join(t2, Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5")))
      .select((nameToAttr("t1.k-1-2") + nameToAttr("t2.k-1-5")) as "key", nameToAttr("t1.v-1-10"))
      .join(t3, Inner, Some(nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100")))
      .select("key".attr)
  // Expected plan == original plan: no reordering may happen.
  assertEqualPlans(originalPlan, originalPlan)
}
// Verifies that the reorderer can produce a bushy tree (joins on both sides),
// not just left-deep plans.
test("reorder 4 tables (bushy tree)") {
  val originalPlan =
    t1.join(t4).join(t2).join(t3).where((nameToAttr("t1.k-1-2") === nameToAttr("t4.k-1-2")) &&
      (nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5")) &&
      (nameToAttr("t4.v-1-10") === nameToAttr("t3.v-1-100")))
  // The cost of original plan (use only cardinality to simplify explanation):
  // cost(t1 J t4) = 1000 * 2000 / 2 = 1000000, cost(t1t4 J t2) = 1000000 * 20 / 5 = 4000000,
  // cost = cost(t1 J t4) + cost(t1t4 J t2) = 5000000
  // In contrast, the cost of the best plan (a bushy tree):
  // cost(t1 J t2) = 1000 * 20 / 5 = 4000, cost(t4 J t3) = 2000 * 100 / 100 = 2000,
  // cost = cost(t1 J t2) + cost(t4 J t3) = 6000 << 5000000.
  val bestPlan =
    t1.join(t2, Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5")))
      .join(t4.join(t3, Inner, Some(nameToAttr("t4.v-1-10") === nameToAttr("t3.v-1-100"))),
        Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t4.k-1-2")))
  assertEqualPlans(originalPlan, bestPlan)
}
// Join reordering must never change the order of attributes in the final
// projection: for every permutation of the output attributes, the plan should
// survive optimization unchanged.
test("keep the order of attributes in the final output") {
  Seq("t1.k-1-2", "t1.v-1-10", "t3.v-1-100").permutations.foreach { permutation =>
    val expectedOrder = permutation.map(nameToAttr)
    val expectedPlan =
      t1.join(t3, Inner, Some(nameToAttr("t1.v-1-10") === nameToAttr("t3.v-1-100")))
        .join(t2, Inner, Some(nameToAttr("t1.k-1-2") === nameToAttr("t2.k-1-5")))
        .select(expectedOrder: _*)
    // The plan should not change after optimization
    assertEqualPlans(expectedPlan, expectedPlan)
  }
}
// Optimizes `originalPlan` and checks that its join order matches the
// hand-written ground-truth plan.
private def assertEqualPlans(
    originalPlan: LogicalPlan,
    groundTruthBestPlan: LogicalPlan): Unit = {
  val actual = Optimize.execute(originalPlan.analyze)
  compareJoinOrder(actual, groundTruthBestPlan.analyze)
}
}
| JerryLead/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/JoinReorderSuite.scala | Scala | apache-2.0 | 9,849 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.sql
import org.apache.spark.sql.SaveMode
// $example on:init_session$
import org.apache.spark.sql.SparkSession
// $example off:init_session$
// One way to define a schema is to create a case class: its field names and
// types become the column names and types (inferred via Scala reflection).
case class Record(key: Int, value: String)
// Example program demonstrating SQL queries, the DataFrame DSL and Parquet
// read/write through a SparkSession. The $example$ markers delimit snippets
// that are extracted into the documentation — do not remove them.
object RDDRelation {
  def main(args: Array[String]): Unit = {
    // $example on:init_session$
    val spark = SparkSession
      .builder
      .appName("Spark Examples")
      .config("spark.some.config.option", "some-value")
      .getOrCreate()

    // Importing the SparkSession gives access to all the SQL functions and implicit conversions.
    import spark.implicits._
    // $example off:init_session$

    val df = spark.createDataFrame((1 to 100).map(i => Record(i, s"val_$i")))
    // Any RDD containing case classes can be used to create a temporary view. The schema of the
    // view is automatically inferred using scala reflection.
    df.createOrReplaceTempView("records")

    // Once tables have been registered, you can run SQL queries over them.
    println("Result of SELECT *:")
    spark.sql("SELECT * FROM records").collect().foreach(println)

    // Aggregation queries are also supported.
    val count = spark.sql("SELECT COUNT(*) FROM records").collect().head.getLong(0)
    println(s"COUNT(*): $count")

    // The results of SQL queries are themselves RDDs and support all normal RDD functions. The
    // items in the RDD are of type Row, which allows you to access each column by ordinal.
    val rddFromSql = spark.sql("SELECT key, value FROM records WHERE key < 10")

    println("Result of RDD.map:")
    rddFromSql.rdd.map(row => s"Key: ${row(0)}, Value: ${row(1)}").collect().foreach(println)

    // Queries can also be written using a LINQ-like Scala DSL.
    df.where($"key" === 1).orderBy($"value".asc).select($"key").collect().foreach(println)

    // Write out an RDD as a parquet file with overwrite mode.
    df.write.mode(SaveMode.Overwrite).parquet("pair.parquet")

    // Read in parquet file. Parquet files are self-describing so the schema is preserved.
    val parquetFile = spark.read.parquet("pair.parquet")

    // Queries can be run using the DSL on parquet files just like the original RDD.
    parquetFile.where($"key" === 1).select($"value".as("a")).collect().foreach(println)

    // These files can also be used to create a temporary view.
    parquetFile.createOrReplaceTempView("parquetFile")
    spark.sql("SELECT * FROM parquetFile").collect().foreach(println)

    spark.stop()
  }
}
// scalastyle:on println
| lhfei/spark-in-action | spark-3.x/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala | Scala | apache-2.0 | 3,444 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.util.concurrent.{ExecutionException, TimeUnit, TimeoutException}
import java.util.{Properties, Random}
import kafka.common.Topic
import kafka.consumer.SimpleConsumer
import kafka.integration.KafkaServerTestHarness
import kafka.server.KafkaConfig
import kafka.utils.{ShutdownableThread, TestUtils}
import org.apache.kafka.clients.producer._
import org.apache.kafka.clients.producer.internals.ErrorLoggingCallback
import org.apache.kafka.common.KafkaException
import org.apache.kafka.common.errors.{InvalidTopicException, NotEnoughReplicasAfterAppendException, NotEnoughReplicasException}
import org.junit.Assert._
import org.junit.{After, Before, Test}
/**
 * Integration tests for the Java producer's failure handling against an
 * embedded two-broker Kafka cluster: oversized records, non-existent topics,
 * bad broker lists, unresponsive brokers, invalid partitions, sends after
 * close, internal topics, and min.insync.replicas violations.
 */
class ProducerFailureHandlingTest extends KafkaServerTestHarness {
  private val producerBufferSize = 30000
  // Server-side max message size is set below the producer buffer size so a
  // "too large" record still fits in the producer's local buffer.
  private val serverMessageMaxBytes = producerBufferSize/2

  val numServers = 2

  val overridingProps = new Properties()
  overridingProps.put(KafkaConfig.AutoCreateTopicsEnableProp, false.toString)
  overridingProps.put(KafkaConfig.MessageMaxBytesProp, serverMessageMaxBytes.toString)
  // Set a smaller value for the number of partitions for the offset commit topic (__consumer_offset topic)
  // so that the creation of that topic/partition(s) and subsequent leader assignment doesn't take relatively long
  overridingProps.put(KafkaConfig.OffsetsTopicPartitionsProp, 1.toString)

  def generateConfigs() =
    TestUtils.createBrokerConfigs(numServers, zkConnect, false).map(KafkaConfig.fromProps(_, overridingProps))

  private var consumer1: SimpleConsumer = null
  private var consumer2: SimpleConsumer = null

  // Producers created with acks = 0, 1 and -1 respectively; producer4 is
  // created on demand inside individual tests.
  private var producer1: KafkaProducer[Array[Byte],Array[Byte]] = null
  private var producer2: KafkaProducer[Array[Byte],Array[Byte]] = null
  private var producer3: KafkaProducer[Array[Byte],Array[Byte]] = null
  private var producer4: KafkaProducer[Array[Byte],Array[Byte]] = null

  private val topic1 = "topic-1"
  private val topic2 = "topic-2"

  @Before
  override def setUp() {
    super.setUp()

    producer1 = TestUtils.createNewProducer(brokerList, acks = 0, blockOnBufferFull = false, bufferSize = producerBufferSize)
    producer2 = TestUtils.createNewProducer(brokerList, acks = 1, blockOnBufferFull = false, bufferSize = producerBufferSize)
    producer3 = TestUtils.createNewProducer(brokerList, acks = -1, blockOnBufferFull = false, bufferSize = producerBufferSize)
  }

  @After
  override def tearDown() {
    // Close whichever producers the test actually created.
    if (producer1 != null) producer1.close
    if (producer2 != null) producer2.close
    if (producer3 != null) producer3.close
    if (producer4 != null) producer4.close

    super.tearDown()
  }

  /**
   * With ack == 0 the future metadata will have no exceptions with offset -1
   */
  @Test
  def testTooLargeRecordWithAckZero() {
    // create topic
    TestUtils.createTopic(zkClient, topic1, 1, numServers, servers)

    // send a too-large record
    val record = new ProducerRecord[Array[Byte],Array[Byte]](topic1, null, "key".getBytes, new Array[Byte](serverMessageMaxBytes + 1))
    assertEquals("Returned metadata should have offset -1", producer1.send(record).get.offset, -1L)
  }

  /**
   * With ack == 1 the future metadata will throw ExecutionException caused by RecordTooLargeException
   */
  @Test
  def testTooLargeRecordWithAckOne() {
    // create topic
    TestUtils.createTopic(zkClient, topic1, 1, numServers, servers)

    // send a too-large record
    val record = new ProducerRecord[Array[Byte],Array[Byte]](topic1, null, "key".getBytes, new Array[Byte](serverMessageMaxBytes + 1))
    intercept[ExecutionException] {
      producer2.send(record).get
    }
  }

  /**
   * With non-exist-topic the future metadata should return ExecutionException caused by TimeoutException
   */
  @Test
  def testNonExistentTopic() {
    // send a record with non-exist topic
    // (auto topic creation is disabled in overridingProps, so the send fails)
    val record = new ProducerRecord[Array[Byte],Array[Byte]](topic2, null, "key".getBytes, "value".getBytes)
    intercept[ExecutionException] {
      producer1.send(record).get
    }
  }

  /**
   * With incorrect broker-list the future metadata should return ExecutionException caused by TimeoutException
   *
   * TODO: other exceptions that can be thrown in ExecutionException:
   *    UnknownTopicOrPartitionException
   *    NotLeaderForPartitionException
   *    LeaderNotAvailableException
   *    CorruptRecordException
   *    TimeoutException
   */
  @Test
  def testWrongBrokerList() {
    // create topic
    TestUtils.createTopic(zkClient, topic1, 1, numServers, servers)

    // producer with incorrect broker list
    producer4 = TestUtils.createNewProducer("localhost:8686,localhost:4242", acks = 1, blockOnBufferFull = false, bufferSize = producerBufferSize)

    // send a record with incorrect broker list
    val record = new ProducerRecord[Array[Byte],Array[Byte]](topic1, null, "key".getBytes, "value".getBytes)
    intercept[ExecutionException] {
      producer4.send(record).get
    }
  }

  /**
   * 1. With ack=0, the future metadata should not be blocked.
   * 2. With ack=1, the future metadata should block,
   *    and subsequent calls will eventually cause buffer full
   */
  @Test
  def testNoResponse() {
    // create topic
    TestUtils.createTopic(zkClient, topic1, 1, numServers, servers)

    // first send a message to make sure the metadata is refreshed
    val record1 = new ProducerRecord[Array[Byte],Array[Byte]](topic1, null, "key".getBytes, "value".getBytes)
    producer1.send(record1).get
    producer2.send(record1).get

    // stop IO threads and request handling, but leave networking operational
    // any requests should be accepted and queue up, but not handled
    servers.foreach(server => server.requestHandlerPool.shutdown())

    // acks=0 completes without waiting for a broker response
    producer1.send(record1).get(5000, TimeUnit.MILLISECONDS)

    // acks=1 needs a response, which never arrives -> times out
    intercept[TimeoutException] {
      producer2.send(record1).get(5000, TimeUnit.MILLISECONDS)
    }

    // TODO: expose producer configs after creating them
    // send enough messages to get buffer full
    val tooManyRecords = 10
    val msgSize = producerBufferSize / tooManyRecords
    val value = new Array[Byte](msgSize)
    new Random().nextBytes(value)
    val record2 = new ProducerRecord[Array[Byte],Array[Byte]](topic1, null, "key".getBytes, value)

    intercept[KafkaException] {
      for (i <- 1 to tooManyRecords)
        producer2.send(record2)
    }

    // do not close produce2 since it will block
    // TODO: can we do better?
    producer2 = null
  }

  /**
   *  The send call with invalid partition id should throw KafkaException caused by IllegalArgumentException
   */
  @Test
  def testInvalidPartition() {
    // create topic
    TestUtils.createTopic(zkClient, topic1, 1, numServers, servers)

    // create a record with incorrect partition id, send should fail
    // (topic1 has a single partition, so partition 1 does not exist)
    val record = new ProducerRecord[Array[Byte],Array[Byte]](topic1, new Integer(1), "key".getBytes, "value".getBytes)
    intercept[IllegalArgumentException] {
      producer1.send(record)
    }
    intercept[IllegalArgumentException] {
      producer2.send(record)
    }
    intercept[IllegalArgumentException] {
      producer3.send(record)
    }
  }

  /**
   * The send call after producer closed should throw KafkaException cased by IllegalStateException
   */
  @Test
  def testSendAfterClosed() {
    // create topic
    TestUtils.createTopic(zkClient, topic1, 1, numServers, servers)

    val record = new ProducerRecord[Array[Byte],Array[Byte]](topic1, null, "key".getBytes, "value".getBytes)

    // first send a message to make sure the metadata is refreshed
    producer1.send(record).get
    producer2.send(record).get
    producer3.send(record).get

    intercept[IllegalStateException] {
      producer1.close
      producer1.send(record)
    }
    intercept[IllegalStateException] {
      producer2.close
      producer2.send(record)
    }
    intercept[IllegalStateException] {
      producer3.close
      producer3.send(record)
    }

    // re-close producer is fine
  }

  // Producing to an internal topic (e.g. __consumer_offsets) must be rejected.
  @Test
  def testCannotSendToInternalTopic() {
    val thrown = intercept[ExecutionException] {
      producer2.send(new ProducerRecord[Array[Byte],Array[Byte]](Topic.InternalTopics.head, "test".getBytes, "test".getBytes)).get
    }
    assertTrue("Unexpected exception while sending to an invalid topic " + thrown.getCause, thrown.getCause.isInstanceOf[InvalidTopicException])
  }

  // min.insync.replicas > broker count: acks=-1 sends must fail immediately.
  @Test
  def testNotEnoughReplicas() {
    val topicName = "minisrtest"
    val topicProps = new Properties()
    topicProps.put("min.insync.replicas",(numServers+1).toString)

    TestUtils.createTopic(zkClient, topicName, 1, numServers, servers, topicProps)

    val record = new ProducerRecord[Array[Byte],Array[Byte]](topicName, null, "key".getBytes, "value".getBytes)
    try {
      producer3.send(record).get
      fail("Expected exception when producing to topic with fewer brokers than min.insync.replicas")
    } catch {
      case e: ExecutionException =>
        if (!e.getCause.isInstanceOf[NotEnoughReplicasException]) {
          fail("Expected NotEnoughReplicasException when producing to topic with fewer brokers than min.insync.replicas")
        }
    }
  }

  // min.insync.replicas == broker count: sends succeed until a broker is shut
  // down, after which acks=-1 sends must fail.
  @Test
  def testNotEnoughReplicasAfterBrokerShutdown() {
    val topicName = "minisrtest2"
    val topicProps = new Properties()
    topicProps.put("min.insync.replicas",numServers.toString)

    TestUtils.createTopic(zkClient, topicName, 1, numServers, servers,topicProps)

    val record = new ProducerRecord[Array[Byte],Array[Byte]](topicName, null, "key".getBytes, "value".getBytes)
    // this should work with all brokers up and running
    producer3.send(record).get

    // shut down one broker
    servers.head.shutdown()
    servers.head.awaitShutdown()
    try {
      producer3.send(record).get
      fail("Expected exception when producing to topic with fewer brokers than min.insync.replicas")
    } catch {
      case e: ExecutionException =>
        if (!e.getCause.isInstanceOf[NotEnoughReplicasException]  &&
            !e.getCause.isInstanceOf[NotEnoughReplicasAfterAppendException]) {
          fail("Expected NotEnoughReplicasException or NotEnoughReplicasAfterAppendException when producing to topic " +
            "with fewer brokers than min.insync.replicas, but saw " + e.getCause)
        }
    }

    // restart the server
    servers.head.startup()
  }

  // Background thread that keeps producing batches of records, tracking how
  // many were acknowledged and whether any batch failed.
  private class ProducerScheduler extends ShutdownableThread("daemon-producer", false)
  {
    val numRecords = 1000
    var sent = 0
    var failed = false

    val producer = TestUtils.createNewProducer(brokerList, bufferSize = producerBufferSize, retries = 10)

    override def doWork(): Unit = {
      val responses =
        for (i <- sent+1 to sent+numRecords)
        yield producer.send(new ProducerRecord[Array[Byte],Array[Byte]](topic1, null, null, i.toString.getBytes),
                            new ErrorLoggingCallback(topic1, null, null, true))
      val futures = responses.toList

      try {
        futures.map(_.get)
        sent += numRecords
      } catch {
        case e : Exception => failed = true
      }
    }

    override def shutdown(){
      super.shutdown()
      producer.close
    }
  }
}
| usakey/kafka | core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala | Scala | apache-2.0 | 11,949 |
/*
* Copyright 2013 - 2020 Outworkers Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.outworkers.phantom.builder.primitives
import com.outworkers.phantom.PhantomSuite
import com.outworkers.phantom.dsl._
import com.outworkers.phantom.tables.Recipe
import com.outworkers.phantom.tables.bugs.NpeRecipe
import com.outworkers.util.samplers._
/**
 * Round-trip tests for primitive column serialization in the recipes tables:
 * insert/select, partial update, and reading back unset collection columns.
 */
class PrimitiveColumnRecipesTest extends PhantomSuite {

  override def beforeAll(): Unit = {
    super.beforeAll()
    database.recipes.createSchema()
    database.collectionNpeTable.createSchema()
  }

  it should "insert a new record in the recipes table and retrieve it" in {
    val sample = gen[NpeRecipe]

    val chain = for {
      _ <- database.collectionNpeTable.storeRecord(sample)
      res <- database.collectionNpeTable.select.where(_.id eqs sample.id).one()
    } yield res

    whenReady(chain) { res =>
      res shouldBe defined
      res.value shouldEqual sample
    }
  }

  it should "update the author of a recipe" in {
    val sample = gen[Recipe]
    // NOTE(review): the test name says "author" but the modify below sets the
    // `description` column — confirm which field was intended.
    val newAuthor = Some(gen[ShortString].value)

    val chain = for {
      _ <- database.recipes.storeRecord(sample)
      res <- database.recipes.select.where(_.url eqs sample.url).one()
      _ <- database.recipes
        .update.where(_.url eqs sample.url)
        .modify(_.description setTo newAuthor)
        .future()
      res2 <- database.recipes.select.where(_.url eqs sample.url).one()
    } yield (res, res2)

    whenReady(chain) { case (res, res2) =>
      // Before the update the stored record equals the sample...
      res shouldBe defined
      res.value shouldEqual sample

      // ...after the update only the description differs.
      res2 shouldBe defined
      res2.value shouldEqual sample.copy(description = newAuthor)
    }
  }

  it should "retrieve an empty ingredients set" in {
    val sample = gen[NpeRecipe]

    // Insert only scalar columns, leaving the collection columns unset; they
    // must come back as empty collections, not null.
    val chain = for {
      _ <- database.collectionNpeTable.insert()
        .value(_.id, sample.id)
        .value(_.name, sample.name)
        .value(_.title, sample.title)
        .value(_.author, sample.author)
        .value(_.description, sample.description)
        .value(_.timestamp, sample.timestamp)
        .future()
      res <- database.collectionNpeTable.findRecipeById(sample.id)
    } yield res

    whenReady(chain) { res =>
      res shouldBe defined
      res.value.ingredients shouldBe empty
      res.value.props shouldBe empty
    }
  }
} | outworkers/phantom | phantom-dsl/src/test/scala/com/outworkers/phantom/builder/primitives/PrimitiveColumnRecipesTest.scala | Scala | apache-2.0 | 2,827 |
package filter
import akka.actor.{Actor, ActorRef, Props}
import akka.event.Logging
import kuger.loganalyzer.core.api.Filter
import messages.{InputDrainedEvent, LogStatementEvent, LoganalyzerEvent}
object FilterActor {
  /** Builds `Props` for a [[FilterActor]] wired to the given filter and downstream actors. */
  def props(filter: Filter, downstream: Array[ActorRef]): Props =
    Props(new FilterActor(filter, downstream))
}
/**
 * Actor that applies a [[Filter]] to incoming log statement events, forwarding
 * the events that are NOT filtered out to all downstream actors. Drained
 * events are always passed through unchanged.
 */
class FilterActor(filter: Filter, downstream: Array[ActorRef]) extends Actor {

  val log = Logging(context.system, this)

  override def receive = {
    case LogStatementEvent(message) =>
      // Forward only messages the filter does not reject.
      if (!filter.filter(message)) {
        passMessage(LogStatementEvent(message))
      }
    case InputDrainedEvent(input) =>
      log.info("Drained event received. passing on..")
      passMessage(InputDrainedEvent(input))
    case default =>
      log.warning("Could not handle message: " + default)
  }

  // Broadcasts an event to every downstream actor.
  def passMessage(message: LoganalyzerEvent) =
    downstream.foreach(_ ! message)
}
| MichaelKuger/LogAnalyzer | src/main/scala/filter/FilterActor.scala | Scala | apache-2.0 | 974 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.internal
/** An abstraction over the `ForkJoinPool` implementation, meant
* to target multiple Scala versions.
*/
package object forkJoin {
  // Aliases pointing at the Scala 2.11 location of the fork-join classes
  // (scala.concurrent.forkjoin); callers use these names so the actual target
  // can vary per Scala version.
  private[monix] type ForkJoinPool =
    scala.concurrent.forkjoin.ForkJoinPool

  private[monix] type ForkJoinWorkerThreadFactory =
    scala.concurrent.forkjoin.ForkJoinPool.ForkJoinWorkerThreadFactory

  private[monix] type ForkJoinWorkerThread =
    scala.concurrent.forkjoin.ForkJoinWorkerThread

  private[monix] type ManagedBlocker =
    scala.concurrent.forkjoin.ForkJoinPool.ManagedBlocker

  private[monix] type ForkJoinTask[V] =
    scala.concurrent.forkjoin.ForkJoinTask[V]

  // Companion mirroring the static helpers used by Monix.
  private[monix] object ForkJoinPool {
    /** Delegates to `ForkJoinPool.managedBlock`, possibly compensating with extra threads. */
    def managedBlock(blocker: ManagedBlocker): Unit =
      scala.concurrent.forkjoin.ForkJoinPool.managedBlock(blocker)
  }

  private[monix] def defaultForkJoinWorkerThreadFactory: ForkJoinWorkerThreadFactory =
    scala.concurrent.forkjoin.ForkJoinPool.defaultForkJoinWorkerThreadFactory
}
| alexandru/monifu | monix-execution/jvm/src/main/scala_2.11/monix/execution/internal/forkJoin/package.scala | Scala | apache-2.0 | 1,659 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kafka.tools
import com.beust.jcommander.Parameter
import org.locationtech.geomesa.kafka.data.KafkaDataStoreFactory
/**
* Shared Kafka-specific command line parameters
*/
trait KafkaDataStoreParams {
  @Parameter(names = Array("-b", "--brokers"), description = "Brokers (host:port, comma separated)", required = true)
  var brokers: String = _

  @Parameter(names = Array("-z", "--zookeepers"), description = "Zookeepers (host[:port], comma separated)", required = true)
  var zookeepers: String = _

  @Parameter(names = Array("-p", "--zkpath"), description = "Zookeeper path where feature schemas are saved")
  var zkPath: String = KafkaDataStoreFactory.DefaultZkPath

  // Abstract knobs fixed by the producer/consumer/status sub-traits below.
  def numConsumers: Int
  def replication: Int
  def partitions: Int
  def fromBeginning: Boolean
}
// Parameters for producer (writer) commands: topic layout is configurable,
// consumer-side knobs are fixed to inert values.
trait ProducerDataStoreParams extends KafkaDataStoreParams {
  @Parameter(names = Array("--replication"), description = "Replication factor for Kafka topic")
  var replication: Int = 1 // note: can't use override modifier since it's a var

  @Parameter(names = Array("--partitions"), description = "Number of partitions for the Kafka topic")
  var partitions: Int = 1 // note: can't use override modifier since it's a var

  override val numConsumers: Int = 0
  override val fromBeginning: Boolean = false
}
// Parameters for consumer (reader) commands: consumption is configurable,
// topic layout knobs are fixed to defaults.
trait ConsumerDataStoreParams extends KafkaDataStoreParams {
  @Parameter(names = Array("--num-consumers"), description = "Number of consumer threads used for reading from Kafka")
  var numConsumers: Int = 1 // note: can't use override modifier since it's a var

  // TODO support from oldest+n, oldest+t, newest-n, newest-t, time=t, offset=o
  @Parameter(names = Array("--from-beginning"), description = "Consume from the beginning or end of the topic")
  var fromBeginning: Boolean = false

  override val replication: Int = 1
  override val partitions: Int = 1
}
// Parameters for status commands: all knobs fixed, only connection params used.
trait StatusDataStoreParams extends KafkaDataStoreParams {
  override val numConsumers: Int = 0
  override val replication: Int = 1
  override val partitions: Int = 1
  override val fromBeginning: Boolean = false
} | ddseapy/geomesa | geomesa-kafka/geomesa-kafka-tools/src/main/scala/org/locationtech/geomesa/kafka/tools/KafkaParams.scala | Scala | apache-2.0 | 2,566 |
package org.apache.spark
import org.apache.spark.serializer._
import org.apache.spark.util._
import scala.reflect._
/**
 * Lives inside the `org.apache.spark` package in order to expose Spark's
 * package-private utilities (`ClosureCleaner`, `Utils`) to external code.
 */
object ExposedUtils {
  /** Cleans a closure for serialization and returns the same instance. */
  def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = {
    ClosureCleaner.clean(f, checkSerializable)
    f
  }

  /** Deep-copies a value using the serializer configured on the SparkContext. */
  def clone[T: ClassTag](value: T, sc: SparkContext): T =
    clone(value, sc.env.serializer.newInstance())

  /** Deep-copies a value via serialize/deserialize round trip. */
  def clone[T: ClassTag](value: T, serializer: SerializerInstance): T =
    Utils.clone(value, serializer)
}
| hail-is/hail | hail/src/main/scala/org/apache/spark/ExposedUtils.scala | Scala | mit | 490 |
/** Copyright 2015 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prediction.workflow
import java.io.PrintWriter
import java.io.Serializable
import java.io.StringWriter
import java.util.concurrent.TimeUnit
import akka.actor._
import akka.event.Logging
import akka.io.IO
import akka.pattern.ask
import akka.util.Timeout
import com.github.nscala_time.time.Imports.DateTime
import com.twitter.bijection.Injection
import com.twitter.chill.KryoBase
import com.twitter.chill.KryoInjection
import com.twitter.chill.ScalaKryoInstantiator
import de.javakaffee.kryoserializers.SynchronizedCollectionsSerializer
import grizzled.slf4j.Logging
import io.prediction.controller.Engine
import io.prediction.controller.Params
import io.prediction.controller.Utils
import io.prediction.controller.WithPrId
import io.prediction.core.BaseAlgorithm
import io.prediction.core.BaseServing
import io.prediction.core.Doer
import io.prediction.data.storage.EngineInstance
import io.prediction.data.storage.EngineManifest
import io.prediction.data.storage.Storage
import io.prediction.workflow.JsonExtractorOption.JsonExtractorOption
import org.json4s._
import org.json4s.native.JsonMethods._
import org.json4s.native.Serialization.write
import spray.can.Http
import spray.http.MediaTypes._
import spray.http._
import spray.httpx.Json4sSupport
import spray.routing._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.concurrent.future
import scala.language.existentials
import scala.util.Failure
import scala.util.Random
import scala.util.Success
/**
 * Chill Kryo instantiator that pins deserialization to the supplied class
 * loader and registers serializers for `java.util.Collections` synchronized
 * wrappers.
 */
class KryoInstantiator(classLoader: ClassLoader) extends ScalaKryoInstantiator {
  override def newKryo(): KryoBase = {
    val instance = super.newKryo()
    instance.setClassLoader(classLoader)
    SynchronizedCollectionsSerializer.registerSerializers(instance)
    instance
  }
}
object KryoInstantiator extends Serializable {
  /** Builds a fresh Kryo-backed injection using this class's class loader. */
  def newKryoInjection : Injection[Any, Array[Byte]] =
    KryoInjection.instance(new KryoInstantiator(getClass.getClassLoader))
}
/**
 * Command-line configuration for the engine server, populated by the scopt
 * parser in `CreateServer.main`. Defaults match the parser's documented
 * defaults (port 8000, event server port 7070, JSON extractor Both).
 */
case class ServerConfig(
  batch: String = "",
  engineInstanceId: String = "",
  engineId: Option[String] = None,
  engineVersion: Option[String] = None,
  engineVariant: String = "",
  env: Option[String] = None,
  ip: String = "0.0.0.0",
  port: Int = 8000,
  feedback: Boolean = false,
  eventServerIp: String = "0.0.0.0",
  eventServerPort: Int = 7070,
  accessKey: Option[String] = None,
  logUrl: Option[String] = None,
  logPrefix: Option[String] = None,
  logFile: Option[String] = None,
  verbose: Boolean = false,
  debug: Boolean = false,
  jsonExtractor: JsonExtractorOption = JsonExtractorOption.Both)
// Control messages exchanged by the server actors.
case class StartServer()
case class BindServer()
case class StopServer()
case class ReloadServer()
case class UpgradeCheck()
/**
 * Entry point for deploying a trained engine instance as an HTTP prediction
 * server. Resolves the engine instance and manifest from metadata storage,
 * then starts a MasterActor that owns the spray HTTP server lifecycle.
 */
object CreateServer extends Logging {
  // Actor system hosting the master/server/upgrade actors for this process.
  val actorSystem = ActorSystem("pio-server")
  // Metadata storage accessors and the persisted-model store.
  val engineInstances = Storage.getMetaDataEngineInstances
  val engineManifests = Storage.getMetaDataEngineManifests
  val modeldata = Storage.getModelDataModels

  /**
   * Parses command-line options into a [[ServerConfig]], looks up the engine
   * instance and manifest, schedules a daily upgrade check, starts the master
   * actor, and blocks until the actor system terminates.
   */
  def main(args: Array[String]): Unit = {
    val parser = new scopt.OptionParser[ServerConfig]("CreateServer") {
      opt[String]("batch") action { (x, c) =>
        c.copy(batch = x)
      } text("Batch label of the deployment.")
      opt[String]("engineId") action { (x, c) =>
        c.copy(engineId = Some(x))
      } text("Engine ID.")
      opt[String]("engineVersion") action { (x, c) =>
        c.copy(engineVersion = Some(x))
      } text("Engine version.")
      opt[String]("engine-variant") required() action { (x, c) =>
        c.copy(engineVariant = x)
      } text("Engine variant JSON.")
      opt[String]("ip") action { (x, c) =>
        c.copy(ip = x)
      }
      opt[String]("env") action { (x, c) =>
        c.copy(env = Some(x))
      } text("Comma-separated list of environmental variables (in 'FOO=BAR' " +
        "format) to pass to the Spark execution environment.")
      opt[Int]("port") action { (x, c) =>
        c.copy(port = x)
      } text("Port to bind to (default: 8000).")
      opt[String]("engineInstanceId") required() action { (x, c) =>
        c.copy(engineInstanceId = x)
      } text("Engine instance ID.")
      opt[Unit]("feedback") action { (_, c) =>
        c.copy(feedback = true)
      } text("Enable feedback loop to event server.")
      opt[String]("event-server-ip") action { (x, c) =>
        c.copy(eventServerIp = x)
      }
      opt[Int]("event-server-port") action { (x, c) =>
        c.copy(eventServerPort = x)
      } text("Event server port. Default: 7070")
      opt[String]("accesskey") action { (x, c) =>
        c.copy(accessKey = Some(x))
      } text("Event server access key.")
      opt[String]("log-url") action { (x, c) =>
        c.copy(logUrl = Some(x))
      }
      opt[String]("log-prefix") action { (x, c) =>
        c.copy(logPrefix = Some(x))
      }
      opt[String]("log-file") action { (x, c) =>
        c.copy(logFile = Some(x))
      }
      opt[Unit]("verbose") action { (x, c) =>
        c.copy(verbose = true)
      } text("Enable verbose output.")
      opt[Unit]("debug") action { (x, c) =>
        c.copy(debug = true)
      } text("Enable debug output.")
      opt[String]("json-extractor") action { (x, c) =>
        c.copy(jsonExtractor = JsonExtractorOption.withName(x))
      }
    }

    parser.parse(args, ServerConfig()) map { sc =>
      WorkflowUtils.modifyLogging(sc.verbose)
      engineInstances.get(sc.engineInstanceId) map { engineInstance =>
        // Engine ID/version default to those recorded on the engine instance.
        val engineId = sc.engineId.getOrElse(engineInstance.engineId)
        val engineVersion = sc.engineVersion.getOrElse(
          engineInstance.engineVersion)
        engineManifests.get(engineId, engineVersion) map { manifest =>
          val engineFactoryName = engineInstance.engineFactory
          // Schedule an upgrade check immediately and then once per day.
          val upgrade = actorSystem.actorOf(Props(
            classOf[UpgradeActor],
            engineFactoryName))
          actorSystem.scheduler.schedule(
            0.seconds,
            1.days,
            upgrade,
            UpgradeCheck())
          // MasterActor owns the HTTP server lifecycle (bind/stop/reload).
          val master = actorSystem.actorOf(Props(
            classOf[MasterActor],
            sc,
            engineInstance,
            engineFactoryName,
            manifest),
            "master")
          implicit val timeout = Timeout(5.seconds)
          master ? StartServer()
          // Block until the actor system shuts down (e.g. via the /stop route).
          actorSystem.awaitTermination
        } getOrElse {
          error(s"Invalid engine ID or version. Aborting server.")
        }
      } getOrElse {
        error(s"Invalid engine instance ID. Aborting server.")
      }
    }
  }

  /**
   * Builds a [[ServerActor]] wired with a fully prepared engine: deserializes
   * the persisted models via Kryo, recreates a Spark context in "Serving"
   * mode, prepares models for deployment, and instantiates the algorithm and
   * serving components.
   *
   * @return ActorRef of the newly created ServerActor
   */
  def createServerActorWithEngine[TD, EIN, PD, Q, P, A](
    sc: ServerConfig,
    engineInstance: EngineInstance,
    engine: Engine[TD, EIN, PD, Q, P, A],
    engineLanguage: EngineLanguage.Value,
    manifest: EngineManifest): ActorRef = {
    val engineParams = engine.engineInstanceToEngineParams(engineInstance, sc.jsonExtractor)

    // Deserialize the persisted model blobs for this engine instance.
    // NOTE(review): both .get calls throw if the model data is missing or
    // fails to deserialize — confirm this fail-fast behavior is intended.
    val kryo = KryoInstantiator.newKryoInjection
    val modelsFromEngineInstance =
      kryo.invert(modeldata.get(engineInstance.id).get.models).get.
        asInstanceOf[Seq[Any]]

    val batch = if (engineInstance.batch.nonEmpty) {
      s"${engineInstance.engineFactory} (${engineInstance.batch})"
    } else {
      engineInstance.engineFactory
    }
    val sparkContext = WorkflowContext(
      batch = batch,
      executorEnv = engineInstance.env,
      mode = "Serving",
      sparkEnv = engineInstance.sparkConf)
    val models = engine.prepareDeploy(
      sparkContext,
      engineParams,
      engineInstance.id,
      modelsFromEngineInstance,
      params = WorkflowParams()
    )

    // Instantiate one algorithm per (name, params) pair.
    val algorithms = engineParams.algorithmParamsList.map { case (n, p) =>
      Doer(engine.algorithmClassMap(n), p)
    }

    val servingParamsWithName = engineParams.servingParams

    val serving = Doer(engine.servingClassMap(servingParamsWithName._1),
      servingParamsWithName._2)
    actorSystem.actorOf(
      Props(
        classOf[ServerActor[Q, P]],
        sc,
        engineInstance,
        engine,
        engineLanguage,
        manifest,
        engineParams.dataSourceParams._2,
        engineParams.preparatorParams._2,
        algorithms,
        engineParams.algorithmParamsList.map(_._2),
        models,
        serving,
        engineParams.servingParams._2))
  }
}
/**
 * Actor that responds to [[UpgradeCheck]] messages by invoking
 * WorkflowUtils.checkUpgrade for the deployment component.
 *
 * @param engineClass fully qualified engine factory class name reported
 *                    alongside the check
 */
class UpgradeActor(engineClass: String) extends Actor {
  val log = Logging(context.system, this)
  implicit val system = context.system

  def receive: Actor.Receive = {
    case _: UpgradeCheck =>
      WorkflowUtils.checkUpgrade("deployment", engineClass)
  }
}
/**
 * Supervises the HTTP prediction server lifecycle: creates the backend
 * [[ServerActor]], binds/unbinds the spray HTTP listener, and handles
 * stop/reload commands. Before starting, asks any engine server already
 * bound to the target ip:port to undeploy itself.
 */
class MasterActor(
    sc: ServerConfig,
    engineInstance: EngineInstance,
    engineFactoryName: String,
    manifest: EngineManifest) extends Actor {
  val log = Logging(context.system, this)
  implicit val system = context.system
  // Listener ref captured from Http.Bound; needed later to unbind.
  var sprayHttpListener: Option[ActorRef] = None
  // Currently active backend server actor; replaced on reload.
  var currentServerActor: Option[ActorRef] = None
  // Remaining bind retries before giving up and shutting down.
  var retry = 3

  /**
   * Asks any engine server already running at ip:port to stop by calling its
   * /stop endpoint. Failures are logged, never thrown.
   */
  def undeploy(ip: String, port: Int): Unit = {
    val serverUrl = s"http://${ip}:${port}"
    log.info(
      s"Undeploying any existing engine instance at $serverUrl")
    try {
      val code = scalaj.http.Http(s"$serverUrl/stop").asString.code
      code match {
        case 200 => ()
        case 404 => log.error(
          s"Another process is using $serverUrl. Unable to undeploy.")
        case _ => log.error(
          s"Another process is using $serverUrl, or an existing " +
          s"engine server is not responding properly (HTTP $code). " +
          "Unable to undeploy.")
      }
    } catch {
      case e: java.net.ConnectException =>
        log.warning(s"Nothing at $serverUrl")
      // NOTE(review): this also swallows fatal errors (e.g. OutOfMemoryError);
      // consider narrowing to scala.util.control.NonFatal.
      case _: Throwable =>
        log.error("Another process might be occupying " +
          s"$ip:$port. Unable to undeploy.")
    }
  }

  def receive: Actor.Receive = {
    case _: StartServer =>
      // Build the backend, clear out any previous deployment on the same
      // address, then ask ourselves to bind.
      val actor = createServerActor(
        sc,
        engineInstance,
        engineFactoryName,
        manifest)
      currentServerActor = Some(actor)
      undeploy(sc.ip, sc.port)
      self ! BindServer()
    case _: BindServer =>
      currentServerActor map { actor =>
        IO(Http) ! Http.Bind(actor, interface = sc.ip, port = sc.port)
      } getOrElse {
        log.error("Cannot bind a non-existing server backend.")
      }
    case _: StopServer =>
      log.info("Stop server command received.")
      sprayHttpListener.map { l =>
        log.info("Server is shutting down.")
        l ! Http.Unbind(5.seconds)
        system.shutdown
      } getOrElse {
        log.warning("No active server is running.")
      }
    case _: ReloadServer =>
      log.info("Reload server command received.")
      val latestEngineInstance =
        CreateServer.engineInstances.getLatestCompleted(
          manifest.id,
          manifest.version,
          engineInstance.engineVariant)
      latestEngineInstance map { lr =>
        // Swap in a fresh backend built from the latest completed engine
        // instance, rebind, then kill the old backend.
        val actor = createServerActor(sc, lr, engineFactoryName, manifest)
        sprayHttpListener.map { l =>
          l ! Http.Unbind(5.seconds)
          IO(Http) ! Http.Bind(actor, interface = sc.ip, port = sc.port)
          currentServerActor.get ! Kill
          currentServerActor = Some(actor)
        } getOrElse {
          log.warning("No active server is running. Abort reloading.")
        }
      } getOrElse {
        log.warning(
          s"No latest completed engine instance for ${manifest.id} " +
          s"${manifest.version}. Abort reloading.")
      }
    case _: Http.Bound =>
      val serverUrl = s"http://${sc.ip}:${sc.port}"
      log.info(s"Engine is deployed and running. Engine API is live at ${serverUrl}.")
      sprayHttpListener = Some(sender)
    case _: Http.CommandFailed =>
      // Bind failed; retry a bounded number of times before shutting down.
      if (retry > 0) {
        retry -= 1
        log.error(s"Bind failed. Retrying... ($retry more trial(s))")
        context.system.scheduler.scheduleOnce(1.seconds) {
          self ! BindServer()
        }
      } else {
        log.error("Bind failed. Shutting down.")
        system.shutdown
      }
  }

  /**
   * Instantiates the engine from its factory and delegates to
   * [[CreateServer.createServerActorWithEngine]].
   *
   * @throws NoSuchMethodException if the factory returns a base engine that
   *         is not a deployable [[Engine]]
   */
  def createServerActor(
    sc: ServerConfig,
    engineInstance: EngineInstance,
    engineFactoryName: String,
    manifest: EngineManifest): ActorRef = {
    val (engineLanguage, engineFactory) =
      WorkflowUtils.getEngine(engineFactoryName, getClass.getClassLoader)
    val engine = engineFactory()

    // EngineFactory returns a base engine, which may not be deployable.
    if (!engine.isInstanceOf[Engine[_,_,_,_,_,_]]) {
      throw new NoSuchMethodException(s"Engine $engine is not deployable")
    }

    val deployableEngine = engine.asInstanceOf[Engine[_,_,_,_,_,_]]

    CreateServer.createServerActorWithEngine(
      sc,
      engineInstance,
      deployableEngine,
      engineLanguage,
      manifest)
  }
}
/**
 * Spray HTTP service actor serving a deployed engine.
 *
 * Routes:
 *  - GET  /              : HTML status page
 *  - POST /queries.json  : run a query through serving + algorithms
 *  - GET  /reload        : ask the master actor to reload the engine
 *  - GET  /stop          : schedule a server shutdown
 *  - GET  /assets/...    : static resources
 *  - GET  /plugins.json  : list engine server plugins
 *  - GET  /plugins/...   : dispatch REST calls to output sniffer plugins
 */
class ServerActor[Q, P](
  val args: ServerConfig,
  val engineInstance: EngineInstance,
  val engine: Engine[_, _, _, Q, P, _],
  val engineLanguage: EngineLanguage.Value,
  val manifest: EngineManifest,
  val dataSourceParams: Params,
  val preparatorParams: Params,
  val algorithms: Seq[BaseAlgorithm[_, _, Q, P]],
  val algorithmsParams: Seq[Params],
  val models: Seq[Any],
  val serving: BaseServing[Q, P],
  val servingParams: Params) extends Actor with HttpService {
  val serverStartTime = DateTime.now
  val log = Logging(context.system, this)

  // Serving statistics shown on the status page.
  // NOTE(review): these vars are mutated inside detach()ed route bodies,
  // which may execute off the actor thread — updates are not synchronized;
  // confirm whether occasional lost updates are acceptable here.
  var requestCount: Int = 0
  var avgServingSec: Double = 0.0
  var lastServingSec: Double = 0.0

  /** The following is required by HttpService */
  def actorRefFactory: ActorContext = context

  implicit val timeout = Timeout(5, TimeUnit.SECONDS)

  // Plugin dispatch actor and shared plugin context for this engine variant.
  val pluginsActorRef =
    context.actorOf(Props(classOf[PluginsActor], args.engineVariant), "PluginsActor")
  val pluginContext = EngineServerPluginContext(log, args.engineVariant)

  def receive: Actor.Receive = runRoute(myRoute)

  // Feedback requires both the --feedback flag and a non-empty access key.
  val feedbackEnabled = if (args.feedback) {
    if (args.accessKey.isEmpty) {
      log.error("Feedback loop cannot be enabled because accessKey is empty.")
      false
    } else {
      true
    }
  } else false

  /**
   * Posts a prefixed log message (with the engine instance serialized as
   * JSON) to a remote log collector URL. Failures are logged, never thrown.
   */
  def remoteLog(logUrl: String, logPrefix: String, message: String): Unit = {
    implicit val formats = Utils.json4sDefaultFormats
    try {
      scalaj.http.Http(logUrl).postData(
        logPrefix + write(Map(
          "engineInstance" -> engineInstance,
          "message" -> message))).asString
    } catch {
      case e: Throwable =>
        log.error(s"Unable to send remote log: ${e.getMessage}")
    }
  }

  /** Renders a throwable's stack trace as a string. */
  def getStackTraceString(e: Throwable): String = {
    val writer = new StringWriter()
    val printWriter = new PrintWriter(writer)
    e.printStackTrace(printWriter)
    writer.toString
  }

  val myRoute =
    // Status page: renders deployment details and serving statistics.
    path("") {
      get {
        respondWithMediaType(`text/html`) {
          detach() {
            complete {
              html.index(
                args,
                manifest,
                engineInstance,
                algorithms.map(_.toString),
                algorithmsParams.map(_.toString),
                models.map(_.toString),
                dataSourceParams.toString,
                preparatorParams.toString,
                servingParams.toString,
                serverStartTime,
                feedbackEnabled,
                args.eventServerIp,
                args.eventServerPort,
                requestCount,
                avgServingSec,
                lastServingSec
              ).toString
            }
          }
        }
      }
    } ~
    // Prediction endpoint: supplement -> predict (per algorithm) -> serve.
    path("queries.json") {
      post {
        detach() {
          entity(as[String]) { queryString =>
            try {
              val servingStartTime = DateTime.now
              val jsonExtractorOption = args.jsonExtractor
              val queryTime = DateTime.now
              // Extract Query from Json
              val query = JsonExtractor.extract(
                jsonExtractorOption,
                queryString,
                algorithms.head.queryClass,
                algorithms.head.querySerializer,
                algorithms.head.gsonTypeAdapterFactories
              )
              val queryJValue = JsonExtractor.toJValue(
                jsonExtractorOption,
                query,
                algorithms.head.querySerializer,
                algorithms.head.gsonTypeAdapterFactories)
              // Deploy logic. First call Serving.supplement, then Algo.predict,
              // finally Serving.serve.
              val supplementedQuery = serving.supplementBase(query)
              // TODO: Parallelize the following.
              val predictions = algorithms.zipWithIndex.map { case (a, ai) =>
                a.predictBase(models(ai), supplementedQuery)
              }
              // Notice that it is by design to call Serving.serve with the
              // *original* query.
              val prediction = serving.serveBase(query, predictions)
              val predictionJValue = JsonExtractor.toJValue(
                jsonExtractorOption,
                prediction,
                algorithms.head.querySerializer,
                algorithms.head.gsonTypeAdapterFactories)
              /** Handle feedback to Event Server
                * Send the following back to the Event Server
                * - appId
                * - engineInstanceId
                * - query
                * - prediction
                * - prId
                */
              val result = if (feedbackEnabled) {
                implicit val formats =
                  algorithms.headOption map { alg =>
                    alg.querySerializer
                  } getOrElse {
                    Utils.json4sDefaultFormats
                  }
                // val genPrId = Random.alphanumeric.take(64).mkString
                def genPrId: String = Random.alphanumeric.take(64).mkString
                val newPrId = prediction match {
                  case id: WithPrId =>
                    val org = id.prId
                    if (org.isEmpty) genPrId else org
                  case _ => genPrId
                }
                // also save Query's prId as prId of this pio_pr predict events
                val queryPrId =
                  query match {
                    case id: WithPrId =>
                      Map("prId" -> id.prId)
                    case _ =>
                      Map()
                  }
                val data = Map(
                  // "appId" -> dataSourceParams.asInstanceOf[ParamsWithAppId].appId,
                  "event" -> "predict",
                  "eventTime" -> queryTime.toString(),
                  "entityType" -> "pio_pr", // prediction result
                  "entityId" -> newPrId,
                  "properties" -> Map(
                    "engineInstanceId" -> engineInstance.id,
                    "query" -> query,
                    "prediction" -> prediction)) ++ queryPrId
                // At this point args.accessKey should be Some(String).
                val accessKey = args.accessKey.getOrElse("")
                // Fire-and-forget POST of the predict event to the event
                // server; only logs on non-201 or failure.
                val f: Future[Int] = future {
                  scalaj.http.Http(
                    s"http://${args.eventServerIp}:${args.eventServerPort}/" +
                    s"events.json?accessKey=$accessKey").postData(
                    write(data)).header(
                    "content-type", "application/json").asString.code
                }
                f onComplete {
                  case Success(code) => {
                    if (code != 201) {
                      log.error(s"Feedback event failed. Status code: $code."
                        + s"Data: ${write(data)}.")
                    }
                  }
                  case Failure(t) => {
                    log.error(s"Feedback event failed: ${t.getMessage}") }
                }
                // overwrite prId in predictedResult
                // - if it is WithPrId,
                //   then overwrite with new prId
                // - if it is not WithPrId, no prId injection
                if (prediction.isInstanceOf[WithPrId]) {
                  predictionJValue merge parse(s"""{"prId" : "$newPrId"}""")
                } else {
                  predictionJValue
                }
              } else predictionJValue
              // Let output-blocker plugins post-process the response JSON.
              val pluginResult =
                pluginContext.outputBlockers.values.foldLeft(result) { case (r, p) =>
                  p.process(engineInstance, queryJValue, r, pluginContext)
                }
              // Bookkeeping
              val servingEndTime = DateTime.now
              lastServingSec =
                (servingEndTime.getMillis - servingStartTime.getMillis) / 1000.0
              avgServingSec =
                ((avgServingSec * requestCount) + lastServingSec) /
                (requestCount + 1)
              requestCount += 1
              respondWithMediaType(`application/json`) {
                complete(compact(render(pluginResult)))
              }
            } catch {
              // Malformed query JSON -> 400, optionally mirrored to remote log.
              case e: MappingException =>
                log.error(
                  s"Query '$queryString' is invalid. Reason: ${e.getMessage}")
                args.logUrl map { url =>
                  remoteLog(
                    url,
                    args.logPrefix.getOrElse(""),
                    s"Query:\\n$queryString\\n\\nStack Trace:\\n" +
                    s"${getStackTraceString(e)}\\n\\n")
                }
                complete(StatusCodes.BadRequest, e.getMessage)
              // Any other failure -> 500 with stack trace in the body.
              case e: Throwable =>
                val msg = s"Query:\\n$queryString\\n\\nStack Trace:\\n" +
                  s"${getStackTraceString(e)}\\n\\n"
                log.error(msg)
                args.logUrl map { url =>
                  remoteLog(
                    url,
                    args.logPrefix.getOrElse(""),
                    msg)
                }
                complete(StatusCodes.InternalServerError, msg)
            }
          }
        }
      }
    } ~
    path("reload") {
      get {
        complete {
          context.actorSelection("/user/master") ! ReloadServer()
          "Reloading..."
        }
      }
    } ~
    path("stop") {
      get {
        complete {
          // Delay the stop so this response can be delivered first.
          context.system.scheduler.scheduleOnce(1.seconds) {
            context.actorSelection("/user/master") ! StopServer()
          }
          "Shutting down..."
        }
      }
    } ~
    pathPrefix("assets") {
      getFromResourceDirectory("assets")
    } ~
    // Lists registered output-blocker and output-sniffer plugins.
    path("plugins.json") {
      import EngineServerJson4sSupport._
      get {
        respondWithMediaType(MediaTypes.`application/json`) {
          complete {
            Map("plugins" -> Map(
              "outputblockers" -> pluginContext.outputBlockers.map { case (n, p) =>
                n -> Map(
                  "name" -> p.pluginName,
                  "description" -> p.pluginDescription,
                  "class" -> p.getClass.getName,
                  "params" -> pluginContext.pluginParams(p.pluginName))
              },
              "outputsniffers" -> pluginContext.outputSniffers.map { case (n, p) =>
                n -> Map(
                  "name" -> p.pluginName,
                  "description" -> p.pluginDescription,
                  "class" -> p.getClass.getName,
                  "params" -> pluginContext.pluginParams(p.pluginName))
              }
            ))
          }
        }
      }
    } ~
    // URL shape: /plugins/<type>/<name>/<args...>; only the output sniffer
    // type is dispatched (other types fall through as a MatchError).
    path("plugins" / Segments) { segments =>
      import EngineServerJson4sSupport._
      get {
        respondWithMediaType(MediaTypes.`application/json`) {
          complete {
            val pluginArgs = segments.drop(2)
            val pluginType = segments(0)
            val pluginName = segments(1)
            pluginType match {
              case EngineServerPlugin.outputSniffer =>
                pluginsActorRef ? PluginsActor.HandleREST(
                  pluginName = pluginName,
                  pluginArgs = pluginArgs) map {
                  _.asInstanceOf[String]
                }
            }
          }
        }
      }
    }
}
/**
 * json4s (un)marshalling support for the engine server's JSON routes,
 * using the default json4s formats.
 */
object EngineServerJson4sSupport extends Json4sSupport {
  implicit def json4sFormats: Formats = DefaultFormats
}
| ch33hau/PredictionIO | core/src/main/scala/io/prediction/workflow/CreateServer.scala | Scala | apache-2.0 | 24,736 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.rdd.read
import org.seqdoop.hadoop_bam.{ SAMRecordWritable, KeyIgnoringAnySAMOutputFormat, SAMFormat }
import htsjdk.samtools.SAMFileHeader
import org.apache.spark.rdd.InstrumentedOutputFormat
import org.bdgenomics.adam.instrumentation.Timers
import org.apache.hadoop.mapreduce.OutputFormat
/**
 * Companion holding the SAM header that [[ADAMSAMOutputFormat]] instances
 * attach on construction. The header is process-wide mutable state that must
 * be set (via addHeader) before any output format is instantiated.
 */
object ADAMSAMOutputFormat extends Serializable {

  // NOTE(review): access to this var is not synchronized; assumed to be set
  // once before output format instantiation — confirm setup is single-threaded.
  private[read] var header: Option[SAMFileHeader] = None

  /**
   * Attaches a header to the ADAMSAMOutputFormat Hadoop writer. If a header has previously
   * been attached, the header must be cleared first.
   *
   * @throws Exception Exception thrown if a SAM header has previously been attached, and not cleared.
   *
   * @param samHeader Header to attach.
   *
   * @see clearHeader
   */
  def addHeader(samHeader: SAMFileHeader) {
    assert(header.isEmpty, "Cannot attach a new SAM header without first clearing the header.")
    header = Some(samHeader)
  }

  /**
   * Clears the attached header.
   *
   * @see addHeader
   */
  def clearHeader() {
    header = None
  }

  /**
   * Returns the current header.
   *
   * @return Current SAM header.
   *
   * @throws AssertionError if no header has been attached.
   */
  private[read] def getHeader: SAMFileHeader = {
    assert(header.isDefined, "Cannot return header if not attached.")
    header.get
  }
}
/**
 * SAM-format Hadoop output format. On construction, attaches the header
 * previously registered via [[ADAMSAMOutputFormat.addHeader]]; instantiation
 * fails (assertion in getHeader) if no header has been attached.
 */
class ADAMSAMOutputFormat[K]
  extends KeyIgnoringAnySAMOutputFormat[K](SAMFormat.valueOf("SAM")) with Serializable {
  setSAMHeader(ADAMSAMOutputFormat.getHeader)
}
/**
 * Instrumented wrapper around [[ADAMSAMOutputFormat]] that records SAM record
 * write timings under Timers.WriteSAMRecord.
 */
class InstrumentedADAMSAMOutputFormat[K] extends InstrumentedOutputFormat[K, org.seqdoop.hadoop_bam.SAMRecordWritable] {
  override def timerName(): String = Timers.WriteSAMRecord.timerName
  override def outputFormatClass(): Class[_ <: OutputFormat[K, SAMRecordWritable]] = classOf[ADAMSAMOutputFormat[K]]
}
| VinACE/adam | adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/ADAMSAMOutputFormat.scala | Scala | apache-2.0 | 2,554 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a basic overview of the dataset's contents without deeper analysis.