code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
// See LICENSE for license details.
package sifive.blocks.devices.spi
import Chisel.{defaultCompileOptions => _, _}
import freechips.rocketchip.util.CompileOptions.NotStrictInferReset
import freechips.rocketchip.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.regmapper._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.interrupts._
import freechips.rocketchip.subsystem._
import freechips.rocketchip.util.HeterogeneousBag
import sifive.blocks.util.{NonBlockingEnqueue, NonBlockingDequeue}
import freechips.rocketchip.diplomaticobjectmodel.model.{OMComponent, OMRegister}
import freechips.rocketchip.diplomaticobjectmodel.logicaltree.{LogicalModuleTree, LogicalTreeNode}
import freechips.rocketchip.diplomaticobjectmodel.DiplomaticObjectModelAddressing
import sifive.blocks.util._
/** Base parameter bundle for the SPI controller.
  * Concrete configurations (see SPIParams) supply the abstract vals; the lazy
  * vals derive register-field widths from them.
  */
trait SPIParamsBase {
// Control-register base address and size of the memory-mapped window.
val rAddress: BigInt
val rSize: BigInt
// Receive / transmit FIFO depths, in entries.
val rxDepth: Int
val txDepth: Int
// Number of chip-select lines.
val csWidth: Int
// Maximum number of bits per SPI frame.
val frameBits: Int
// Width of the programmable delay fields (cssck, sckcs, intercs, interxfr).
val delayBits: Int
// Width of the serial-clock divisor register.
val divisorBits: Int
// Widths of the fine-grain and sample delay fields, plus the reset value of
// the sample delay register.
val fineDelayBits: Int
val sampleDelayBits: Int
val defaultSampleDel:Int
val oeDisableDummy: Boolean
// Derived widths: bits needed to address a chip select, to count frame bits,
// and to express FIFO occupancy (depth + 1 values, hence log2Floor + 1).
lazy val csIdBits = log2Up(csWidth)
lazy val lengthBits = log2Floor(frameBits) + 1
lazy val countBits = math.max(lengthBits, delayBits)
lazy val txDepthBits = log2Floor(txDepth) + 1
lazy val rxDepthBits = log2Floor(rxDepth) + 1
}
/** Default SPI controller configuration.
  * See [[SPIParamsBase]] for the meaning of each field; the requires below
  * enforce the combinations the hardware supports.
  */
case class SPIParams(
rAddress: BigInt,
rSize: BigInt = 0x1000,
rxDepth: Int = 8,
txDepth: Int = 8,
csWidth: Int = 1,
frameBits: Int = 8,
delayBits: Int = 8,
divisorBits: Int = 12,
fineDelayBits: Int = 0,
sampleDelayBits: Int = 5,
defaultSampleDel: Int = 3,
oeDisableDummy: Boolean = false
)
extends SPIParamsBase with DeviceParams {
// Frames shorter than 4 bits are not supported.
require(frameBits >= 4)
require((fineDelayBits == 0) | (fineDelayBits == 5), s"Require fine delay bits to be 0 or 5 and not $fineDelayBits")
require(sampleDelayBits >= 0)
require(defaultSampleDel >= 0)
// oeDisableDummy is declared but must currently stay false (unsupported).
require(!oeDisableDummy)
}
/** Implementation module for the SPI controller: instantiates the control
  * register state, the TX/RX FIFO and the pin-level media block, wires them
  * together, and defines the memory-mapped register layout (regmapBase).
  */
class SPITopModule(c: SPIParamsBase, outer: TLSPIBase)
extends LazyModuleImp(outer) {
// Software-visible control state, reset to SPIControl.init(c).
val ctrl = Reg(init = SPIControl.init(c))
val fifo = Module(new SPIFIFO(c))
val mac = Module(new SPIMedia(c))
// External SPI pins come from the media block.
outer.port <> mac.io.port
// Fan the control register fields out to the FIFO and media blocks.
fifo.io.ctrl.fmt := ctrl.fmt
fifo.io.ctrl.cs <> ctrl.cs
fifo.io.ctrl.wm := ctrl.wm
mac.io.ctrl.sck := ctrl.sck
mac.io.ctrl.extradel := ctrl.extradel
mac.io.ctrl.sampledel := ctrl.sampledel
mac.io.ctrl.dla := ctrl.dla
mac.io.ctrl.cs <> ctrl.cs
// ie: interrupt enables (software-writable); ip: pending bits from the FIFO.
val ie = Reg(init = new SPIInterrupts().fromBits(Bits(0)))
val ip = fifo.io.ip
// A single interrupt line, asserted when any enabled watermark is pending.
outer.interrupts(0) := (ip.txwm && ie.txwm) || (ip.rxwm && ie.rxwm)
// Register map: offsets come from SPICRs; widths come from the params `c`.
val regmapBase = Seq(
SPICRs.sckdiv -> Seq(RegField(c.divisorBits, ctrl.sck.div,
RegFieldDesc("sckdiv", "Serial clock divisor", reset=Some(3)))),
SPICRs.sckmode -> RegFieldGroup("sckmode", Some("Serial clock mode"), Seq(
RegField(1, ctrl.sck.pha,
RegFieldDesc("sckmode_pha", "Serial clock phase", reset=Some(0))),
RegField(1, ctrl.sck.pol,
RegFieldDesc("sckmode_pol", "Serial clock polarity", reset=Some(0))))),
SPICRs.csid -> Seq(RegField(c.csIdBits, ctrl.cs.id,
RegFieldDesc("csid", "Chip select id", reset=Some(0)))),
// One default-value bit per chip-select line.
SPICRs.csdef -> ctrl.cs.dflt.zipWithIndex.map{ case (x, i) => RegField(1, x,
RegFieldDesc(s"csdef$i", s"Chip select ${i} default", group = Some("csdef"), groupDesc = Some("Chip select default"), reset=Some(1)))},
SPICRs.csmode -> Seq(RegField(SPICSMode.width, ctrl.cs.mode,
RegFieldDesc("csmode", "Chip select mode", reset=Some(SPICSMode.Auto.litValue())))),
// Programmable delays, all c.delayBits wide.
SPICRs.dcssck -> Seq(RegField(c.delayBits, ctrl.dla.cssck,
RegFieldDesc("cssck", "CS to SCK delay", reset=Some(1)))),
SPICRs.dsckcs -> Seq(RegField(c.delayBits, ctrl.dla.sckcs,
RegFieldDesc("sckcs", "SCK to CS delay", reset=Some(1)))),
SPICRs.dintercs -> Seq(RegField(c.delayBits, ctrl.dla.intercs,
RegFieldDesc("intercs", "Minimum CS inactive time", reset=Some(1)))),
SPICRs.dinterxfr -> Seq(RegField(c.delayBits, ctrl.dla.interxfr,
RegFieldDesc("interxfr", "Minimum interframe delay", reset=Some(0)))),
SPICRs.fmt -> RegFieldGroup("fmt",Some("Serial frame format"),Seq(
RegField(SPIProtocol.width, ctrl.fmt.proto,
RegFieldDesc("proto","SPI Protocol", reset=Some(SPIProtocol.Single.litValue()))),
RegField(SPIEndian.width, ctrl.fmt.endian,
RegFieldDesc("endian","SPI Endianness", reset=Some(SPIEndian.MSB.litValue()))),
RegField(SPIDirection.width, ctrl.fmt.iodir,
RegFieldDesc("iodir","SPI I/O Direction", reset=Some(SPIDirection.Rx.litValue()))))),
SPICRs.len -> Seq(RegField(c.lengthBits, ctrl.fmt.len,
RegFieldDesc("len","Number of bits per frame", reset=Some(math.min(c.frameBits, 8))))),
// FIFO data registers use non-blocking enqueue/dequeue helpers so reads and
// writes never stall the bus.
SPICRs.txfifo -> RegFieldGroup("txdata",Some("Transmit data"),
NonBlockingEnqueue(fifo.io.tx)),
SPICRs.rxfifo -> RegFieldGroup("rxdata",Some("Receive data"),
NonBlockingDequeue(fifo.io.rx)),
SPICRs.txmark -> Seq(RegField(c.txDepthBits, ctrl.wm.tx,
RegFieldDesc("txmark","Transmit watermark", reset=Some(0)))),
SPICRs.rxmark -> Seq(RegField(c.rxDepthBits, ctrl.wm.rx,
RegFieldDesc("rxmark","Receive watermark", reset=Some(0)))),
SPICRs.ie -> RegFieldGroup("ie",Some("SPI interrupt enable"),Seq(
RegField(1, ie.txwm,
RegFieldDesc("txwm_ie","Transmit watermark interrupt enable", reset=Some(0))),
RegField(1, ie.rxwm,
RegFieldDesc("rxwm_ie","Receive watermark interrupt enable", reset=Some(0))))),
// Pending bits are read-only; hardware clears/sets them.
SPICRs.ip -> RegFieldGroup("ip",Some("SPI interrupt pending"),Seq(
RegField.r(1, ip.txwm,
RegFieldDesc("txwm_ip","Transmit watermark interrupt pending", volatile=true)),
RegField.r(1, ip.rxwm,
RegFieldDesc("rxwm_ip","Receive watermark interrupt pending", volatile=true)))),
// Coarse sample delay reuses the divisor width (c.divisorBits).
SPICRs.extradel -> RegFieldGroup("extradel",Some("delay from the sck edge"),Seq(
RegField(c.divisorBits, ctrl.extradel.coarse,
RegFieldDesc("extradel_coarse","Coarse grain sample delay", reset=Some(0))),
RegField(c.fineDelayBits, ctrl.extradel.fine,
RegFieldDesc("extradel_fine","Fine grain sample delay", reset=Some(0))))),
SPICRs.sampledel -> RegFieldGroup("sampledel",Some("Number of delay stages from slave to SPI controller"),Seq(
RegField(c.sampleDelayBits, ctrl.sampledel.sd,
RegFieldDesc("sampledel_sd","Number of delay stages from slave to the SPI controller", reset=Some(c.defaultSampleDel))))))
}
/** Device-tree description for an MMC/SD card connected over this SPI bus.
  *
  * @param spi    the parent SPI controller device node
  * @param maxMHz maximum SPI clock for the card, in MHz (default 20)
  */
class MMCDevice(spi: Device, maxMHz: Double = 20) extends SimpleDevice("mmc", Seq("mmc-spi-slot")) {
  override def parent = Some(spi)
  override def describe(resources: ResourceBindings): Description = {
    val Description(name, mapping) = super.describe(resources)
    val extra = Map(
      // Fixed 3.3V supply range, expressed twice per the mmc-spi-slot binding.
      "voltage-ranges" -> Seq(ResourceInt(3300), ResourceInt(3300)),
      "disable-wp" -> Nil,
      // ResourceInt carries an integral value: convert MHz (Double) to Hz
      // explicitly instead of passing the raw Double product.
      "spi-max-frequency" -> Seq(ResourceInt((maxMHz * 1000000).toInt)))
    Description(name, mapping ++ extra)
  }
}
/** Device-tree description for a JEDEC SPI-NOR flash chip on this SPI bus.
  *
  * @param spi    the parent SPI controller device node
  * @param bits   bus width used for data transfer; must be 1, 2 or 4
  * @param maxMHz maximum SPI clock for the flash, in MHz (default 50)
  * @param compat extra compatible strings, prepended to "jedec,spi-nor"
  */
class FlashDevice(spi: Device, bits: Int = 4, maxMHz: Double = 50, compat: Seq[String] = Nil) extends SimpleDevice("flash", compat :+ "jedec,spi-nor") {
  require (bits == 1 || bits == 2 || bits == 4)
  override def parent = Some(spi)
  override def describe(resources: ResourceBindings): Description = {
    val Description(name, mapping) = super.describe(resources)
    val extra = Map(
      "m25p,fast-read" -> Nil,
      "spi-tx-bus-width" -> Seq(ResourceInt(bits)),
      "spi-rx-bus-width" -> Seq(ResourceInt(bits)),
      // ResourceInt carries an integral value: convert MHz (Double) to Hz
      // explicitly instead of passing the raw Double product.
      "spi-max-frequency" -> Seq(ResourceInt((maxMHz * 1000000).toInt)))
    Description(name, mapping ++ extra)
  }
}
/** Diplomacy register-router base for the SPI controller.
  * Declares the "sifive,spi0" device at c.rAddress with a c.rSize window,
  * a SPIPortIO pin bundle, and a single interrupt source.
  *
  * @param w beat bytes of the control interconnect
  * @param c SPI configuration parameters
  */
abstract class TLSPIBase(w: Int, c: SPIParamsBase)(implicit p: Parameters) extends IORegisterRouter(
RegisterRouterParams(
name = "spi",
compat = Seq("sifive,spi0"),
base = c.rAddress,
size = c.rSize,
beatBytes = w),
new SPIPortIO(c))
with HasInterruptSources {
// The register window must be power-of-two sized for address decoding.
require(isPow2(c.rSize))
// Child device-tree nodes (e.g. MMC/flash slaves) are addressed by reg only.
override def extraResources(resources: ResourceBindings) = Map(
"#address-cells" -> Seq(ResourceInt(1)),
"#size-cells" -> Seq(ResourceInt(0)))
override def nInterrupts = 1
}
/** TileLink-attached SPI controller: hooks the SPITopModule implementation up
  * to a TileLink control register map and publishes an object-model (OMSPI)
  * description of the device.
  */
class TLSPI(w: Int, c: SPIParams)(implicit p: Parameters)
extends TLSPIBase(w,c)(p) with HasTLControlRegMap {
lazy val module = new SPITopModule(c, this) {
// Connect the FIFO's serial link to the media block.
mac.io.link <> fifo.io.link
val mapping = (regmapBase)
regmap(mapping:_*)
// Keep an object-model view of the same register mapping for OMSPI below.
val omRegMap = OMRegister.convert(mapping:_*)
}
// Logical-tree node describing this SPI instance in the object model.
val logicalTreeNode = new LogicalTreeNode(() => Some(device)) {
def getOMComponents(resourceBindings: ResourceBindings, children: Seq[OMComponent] = Nil): Seq[OMComponent] = {
Seq(
OMSPI(
rxDepth = c.rxDepth,
txDepth = c.txDepth,
csWidthBits = c.csWidth,
frameBits = c.frameBits,
delayBits = c.delayBits,
divisorBits = c.divisorBits,
// The coarse sample-delay field is c.divisorBits wide (see extradel regmap).
coarseDelayBits = c.divisorBits,
fineDelayBits = c.fineDelayBits,
sampleDelayBits = c.sampleDelayBits,
defaultSampleDelay = c.defaultSampleDel,
memoryRegions = DiplomaticObjectModelAddressing.getOMMemoryRegions("SPI", resourceBindings, Some(module.omRegMap)),
interrupts = DiplomaticObjectModelAddressing.describeGlobalInterrupts(device.describe(resourceBindings).name, resourceBindings)
)
)
}
}
}
| sifive/sifive-blocks | src/main/scala/devices/spi/TLSPI.scala | Scala | apache-2.0 | 9,683 |
package scodec
package codecs
import scalaz.\\/
import scodec.bits.BitVector
/** Codec for a List[A], delegating element-wise to `codec`.
  * Encoding concatenates the encodings of all elements; decoding repeatedly
  * applies `codec` until the buffer is exhausted, so it always consumes the
  * entire input and returns an empty remainder.
  */
private[codecs] final class ListCodec[A](codec: Codec[A]) extends Codec[List[A]] {
  def encode(list: List[A]): String \\/ BitVector = Encoder.encodeSeq(codec)(list)
  def decode(buffer: BitVector): String \\/ (BitVector, List[A]) =
    Decoder.decodeCollect[List, A](codec)(buffer).map { res => (BitVector.empty, res) }
  override def toString = s"list($codec)"
}
| ceedubs/scodec | src/main/scala/scodec/codecs/ListCodec.scala | Scala | bsd-3-clause | 445 |
package org.jscala
import scala.reflect.macros.Context
import scala.collection.generic.{MapFactory, SeqFactory}
/**
* Author: Alexander Nemish
* Date: 10/25/13
* Time: 10:50 PM
*/
/** Building blocks for the JsScala macro: partial functions that translate
  * Scala AST literal/identifier trees into the corresponding JsAst nodes.
  * Each `ToExpr[...]` value matches a class of trees and reifies the matching
  * JsAst constructor call.
  */
trait JsBasis[C <: Context] extends MacroHelpers[C] {
  import c.universe._
  // Supported unary operators, keyed by their encoded Scala method names
  // (e.g. unary_! -> "!").
  protected val unaryOps = Seq("+", "-", "!")
  protected val encodedUnaryOpsMap = unaryOps.map(op => newTermName(s"unary_$op").encodedName -> op).toMap
  // Supported binary operators, keyed by their encoded method names.
  protected val binOps = Seq("*", "/", "%", "+", "-", "<<", ">>", ">>>",
    "<", ">", "<=", ">=",
    "==", "!=", "&", "|", "^", "&&", "||")
  protected val encodedBinOpsMap = binOps.map(op => newTermName(op).encodedName -> op).toMap
  // Chars are converted to single-character JS strings.
  protected lazy val jsString: PFT[String] = {
    case Literal(Constant(value: Char)) => value.toString
    case Literal(Constant(value: String)) => value
  }
  protected lazy val jsStringLit: ToExpr[JsString] = jsString.andThen(s => reify(JsString(c.literal(s).splice)))
  // Integral literals map to JsNum(isFloat = false), Double to isFloat = true.
  protected lazy val jsNumLit: ToExpr[JsNum] = {
    case Literal(Constant(value: Byte)) => reify(JsNum(c.literal(value).splice, isFloat = false))
    case Literal(Constant(value: Short)) => reify(JsNum(c.literal(value).splice, isFloat = false))
    case Literal(Constant(value: Int)) => reify(JsNum(c.literal(value).splice, isFloat = false))
    case Literal(Constant(value: Long)) => reify(JsNum(c.literal(value).splice, isFloat = false))
    case Literal(Constant(value: Double)) => reify(JsNum(c.literal(value).splice, isFloat = true))
  }
  protected lazy val jsBoolLit: ToExpr[JsBool] = {
    case Literal(Constant(value: Boolean)) => reify(JsBool(c.literal(value).splice))
  }
  // Unit and null have no literal Constant pattern, so these are hand-rolled
  // PartialFunctions using the isUnit/isNull helpers from MacroHelpers.
  protected object jsUnitLit extends PartialFunction[Tree, Expr[JsUnit.type]] {
    def apply(v1: Tree) = reify(JsUnit)
    def isDefinedAt(x: Tree) = isUnit(x)
  }
  protected object jsNullLit extends PartialFunction[Tree, Expr[JsNull.type]] {
    def apply(v1: Tree) = reify(JsNull)
    def isDefinedAt(x: Tree) = isNull(x)
  }
  // Any literal: try string, number, boolean, null, then unit.
  protected val jsLit: ToExpr[JsLit] = {
    jsStringLit orElse jsNumLit orElse jsBoolLit orElse jsNullLit orElse jsUnitLit
  }
  protected lazy val jsThis: ToExpr[JsIdent] = {
    case This(name) => reify(JsIdent("this"))
  }
  protected lazy val jsIdent: ToExpr[JsIdent] = {
    case Ident(name) => reify(JsIdent(c.literal(name.decoded).splice))
  }
  // Matches `new org.jscala.JString("...")` and lowers it to a plain JsString.
  protected lazy val jsJStringExpr: ToExpr[JsExpr] = {
    case Apply(Select(New(Select(Select(Ident(Name("org")), Name("jscala")), Name("JString"))), _), List(Literal(Constant(str: String)))) =>
      reify(JsString(c.literal(str).splice))
  }
}
| xeno-by/jscala | jscala/src/main/scala/org/jscala/JsBasis.scala | Scala | mit | 2,552 |
/**
* This file is part of objc2swift.
* https://github.com/yahoojapan/objc2swift
*
* Copyright (c) 2015 Yahoo Japan Corporation
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
package org.objc2swift
import java.io.InputStream
import org.antlr.v4.runtime.ParserRuleContext
import org.antlr.v4.runtime.tree.ParseTreeWalker
/** Objective-C to Swift converter: stacks the per-construct visitor traits on
  * top of BaseConverter and installs itself as the ANTLR error listener.
  */
class ObjC2SwiftConverter(input: InputStream) extends BaseConverter(input)
  with ClassVisitor
  with CategoryVisitor
  with ProtocolVisitor
  with PropertyVisitor
  with MethodVisitor
  with DeclarationVisitor
  with StatementVisitor
  with ExpressionVisitor
  with MessageVisitor
  with OperatorVisitor
  with TypeVisitor
  with EnumVisitor
  with ErrorHandler {
  // Replace ANTLR's default console listener with this ErrorHandler.
  parser.removeErrorListeners()
  parser.addErrorListener(this)
  /** Renders the parse tree as an indented, newline-joined debug string:
    * one line per rule, showing depth, rule name and the first token's text.
    */
  def getParseTree() = {
    val lines = List.newBuilder[String]
    new ParseTreeWalker().walk(new ObjCBaseListener() {
      override def enterEveryRule(ctx: ParserRuleContext): Unit = {
        lines +=
          (ctx.depth - 1) + " " * ctx.depth +
          // NOTE(review): String.replace matches the exact character sequence
          // given here, not each whitespace character individually — confirm
          // this is the intended sanitization of the token text.
          parser.getRuleNames()(ctx.getRuleIndex) + ": " + "'" + ctx.getStart.getText.replace("\\n\\r\\t", " ") + "'"
      }
    }, root)
    lines.result().mkString("\\n")
  }
}
| johndpope/objc2swift | src/main/scala/org/objc2swift/ObjC2SwiftConverter.scala | Scala | mit | 1,278 |
package com.aluxian.tweeather.scripts
import com.aluxian.tweeather.transformers.{ColumnDropper, FeatureReducer, StringSanitizer}
import org.apache.spark.Logging
import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.classification.NaiveBayes
import org.apache.spark.ml.feature.{HashingTF, StopWordsRemover, Tokenizer}
import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics
import org.apache.spark.sql.Row
/**
* Created by Najeefa Nikhat Choudhury
* This script trains a Naive Bayes classifier with the dataset of tweets
* collected by [[TwitterEmoCountryCollector]] and parsed by [[TwitterEmoCountryParser]].
*
* To test the accuracy, it uses the Sentiment140 manually-labelled dataset.
* After the model is created, it can be tested with [[TwitterEmoCountryRepl]].
*/
object TwitterEmoCountryTrainer extends Script with Logging {
  override def main(args: Array[String]) {
    super.main(args)

    // Prepare data sets: 90% of the parsed tweets for training, 10% held out.
    logInfo("Getting datasets")
    val Array(trainingData, testData) = sqlc.read.parquet("tw/sentiment/emoByCountry/parsed/data.parquet")
      .randomSplit(Array(0.9, 0.1))

    // Configure the pipeline: text clean-up -> tokenization -> stop-word
    // removal -> term-frequency features -> Naive Bayes, then drop the
    // intermediate columns so only prediction/label remain.
    val pipeline = new Pipeline().setStages(Array(
      new FeatureReducer().setInputCol("raw_text").setOutputCol("reduced_text"),
      new StringSanitizer().setInputCol("reduced_text").setOutputCol("text"),
      new Tokenizer().setInputCol("text").setOutputCol("raw_words"),
      new StopWordsRemover().setInputCol("raw_words").setOutputCol("words"),
      new HashingTF().setInputCol("words").setOutputCol("features"),
      // new IDF().setInputCol("raw_features").setOutputCol("features"),
      new NaiveBayes().setSmoothing(0.5).setFeaturesCol("features"),
      new ColumnDropper().setColumns("raw_text", "reduced_text", "text",
        "raw_words", "words", "features")
    ))

    // Fit the pipeline
    logInfo(s"Training model on ${trainingData.count()} records")
    val model = pipeline.fit(trainingData)

    // Test the model accuracy on the held-out split.
    logInfo("Testing model")
    val predicted = model
      .transform(testData)
      .select("prediction", "label")
      .map { case Row(prediction: Double, label: Double) => (prediction, label) }
    // Summing 0/1 indicators counts correct predictions.
    val matches = predicted.map({ case (prediction, label) => if (prediction == label) 1 else 0 }).sum()
    logInfo(s"Test dataset accuracy: ${matches / predicted.count()}")
    val metrics = new BinaryClassificationMetrics(predicted)
    logInfo(s"Test dataset ROC: ${metrics.areaUnderROC()}")
    logInfo(s"Test dataset PR: ${metrics.areaUnderPR()}")
    metrics.unpersist()

    // Save the model for later use by the REPL/predictor.
    logInfo("Saving model")
    model.write.overwrite().save("tw/sentiment/models/emoCountry.model")

    logInfo("Training finished")
    sc.stop()
  }
}
| cnajeefa/Tourism-Sentiment-Analysis | src/main/scala/com/aluxian/tweeather/scripts/TwitterEmoCountryTrainer.scala | Scala | apache-2.0 | 2,781 |
/*
* Copyright (c) <2015-2016>, see CONTRIBUTORS
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package ch.usi.inf.l3.sana.primj.modifiers
import ch.usi.inf.l3.sana
import sana.tiny
import sana.calcj
import tiny.modifiers.Flag
// Modifier flags for primj declarations: marks a symbol as a method
// parameter, a local variable, a field, or as final (non-reassignable).
case object PARAM extends Flag
case object LOCAL_VARIABLE extends Flag
case object FIELD extends Flag
case object FINAL extends Flag
| amanjpro/languages-a-la-carte | primj/src/main/scala/modifiers/flags.scala | Scala | bsd-3-clause | 1,842 |
package de.about.scalatrain
import org.specs2.mutable._
import org.scalacheck.Gen._
/** Specification for the Time.fromMinutes factory. */
class TimeSpec extends Specification {
  "Calling fromMinutes" should {
    // Negative inputs are rejected outright.
    "throw an IAE for negative minutes" in {
      forall(List(Int.MinValue, -1337, -1)) {
        (minutes: Int) => Time fromMinutes minutes must throwA[IllegalArgumentException]
      }
    }
    // NOTE(review): the description says the half-open range [0, 24*60 - 1)
    // but the checked range below is the inclusive 0 to (24*60 - 1) —
    // confirm which boundary is intended.
    "return a correctly initialized Time instance for minutes within [0, 24 * 60 - 1)" in {
      forall(0 to (24 * 60 - 1)) {
        (minutes: Int) => Time fromMinutes minutes must not beNull
      }
    }
  }
}
| atamanroman/scalatrain | src/test/scala/de/aboutco/scalatrain/TimeSpec.scala | Scala | unlicense | 566 |
package com.twitter.scalding
import cascading.tap.Tap
import cascading.tuple.{ Fields, Tuple }
import scala.collection.mutable.Buffer
import org.scalatest.{ Matchers, WordSpec }
/** Verifies that TestTapFactory fails with a descriptive message when asked
  * to build a tap for a source/sink that is absent from the test buffer map.
  */
class TestTapFactoryTest extends WordSpec with Matchers {
  "A test tap created by TestTapFactory" should {
    "error helpfully when a source is not in the map for test buffers" in {
      // Source to use for this test.
      val testSource = Tsv("path")

      // Map of sources to use when creating the tap-- does not contain testSource
      val emptySourceMap = Map[Source, Buffer[Tuple]]()
      val testMode = Test { emptySourceMap.get(_) }
      val testTapFactory = TestTapFactory(testSource, new Fields())

      // Helper: building a tap for either access mode should trip the
      // `require` inside createTap because the map has no entry.
      def createIllegalTap(accessMode: AccessMode): Tap[Any, Any, Any] =
        testTapFactory.createTap(accessMode)(testMode).asInstanceOf[Tap[Any, Any, Any]]

      the[IllegalArgumentException] thrownBy {
        createIllegalTap(Read)
      } should have message ("requirement failed: " + TestTapFactory.sourceNotFoundError.format(testSource))

      the[IllegalArgumentException] thrownBy {
        createIllegalTap(Write)
      } should have message ("requirement failed: " + TestTapFactory.sinkNotFoundError.format(testSource))
    }
  }
}
| jzmq/scalding | scalding-core/src/test/scala/com/twitter/scalding/TestTapFactoryTest.scala | Scala | apache-2.0 | 1,247 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.arrow.io.records
import java.io.ByteArrayInputStream
import java.nio.channels.Channels
import org.apache.arrow.memory.BufferAllocator
import org.apache.arrow.vector.file.ReadChannel
import org.apache.arrow.vector.schema.ArrowRecordBatch
import org.apache.arrow.vector.stream.MessageSerializer
import org.apache.arrow.vector.types.pojo.Field
import org.apache.arrow.vector.{VectorLoader, VectorSchemaRoot}
import org.locationtech.geomesa.utils.io.WithClose
/** Loads serialized Arrow record batches into a vector for a single field.
  *
  * The channel and deserialized batch are closed per call via WithClose.
  * NOTE(review): `vector`/`root` allocate from `allocator` and are never
  * released here — presumably the caller owns and closes `vector`; confirm.
  */
class RecordBatchLoader(field: Field)(implicit allocator: BufferAllocator) {

  import scala.collection.JavaConversions._

  // Vector backing the field's data; exposed so callers can read loaded values.
  val vector = field.createVector(allocator)
  private val root = new VectorSchemaRoot(Seq(field), Seq(vector), 0)
  private val loader = new VectorLoader(root)

  /** Deserializes one Arrow record batch from `bytes` and loads it into `vector`. */
  def load(bytes: Array[Byte]): Unit = {
    WithClose(new ReadChannel(Channels.newChannel(new ByteArrayInputStream(bytes)))) { in =>
      WithClose(MessageSerializer.deserializeMessageBatch(in, allocator).asInstanceOf[ArrowRecordBatch]) { recordBatch =>
        loader.load(recordBatch)
      }
    }
  }
}
| ronq/geomesa | geomesa-arrow/geomesa-arrow-gt/src/main/scala/org/locationtech/geomesa/arrow/io/records/RecordBatchLoader.scala | Scala | apache-2.0 | 1,550 |
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.iterators
import java.util.UUID
import org.apache.accumulo.core.client.{IteratorSetting, ScannerBase}
import org.apache.accumulo.core.data.{Key, Value}
import org.apache.accumulo.core.iterators.{IteratorEnvironment, SortedKeyValueIterator, WrappingIterator}
object RowOnlyIterator {
  /** Attaches a RowOnlyIterator to the scanner at the given priority, using a
    * random name suffix so repeated setups on one scanner don't collide.
    */
  def setupRowOnlyIterator(scanner: ScannerBase, priority: Int) {
    val iteratorName = "RowOnlyIterator-" + UUID.randomUUID.toString.subSequence(0, 5)
    scanner.addScanIterator(new IteratorSetting(priority, iteratorName, classOf[RowOnlyIterator]))
  }
}
/** Server-side Accumulo iterator that strips every key down to its row,
  * dropping column family/qualifier/visibility/timestamp.
  */
class RowOnlyIterator
    extends WrappingIterator {
  @Override
  override def getTopKey: Key = new Key(super.getTopKey.getRow)

  // NOTE(review): deepCopy returns null rather than copying the wrapped
  // source — any code path that deep-copies this iterator will NPE; confirm
  // deep copies are never requested for this iterator.
  @Override
  override def deepCopy(env: IteratorEnvironment): SortedKeyValueIterator[Key, Value] = null
}
| mmatz-ccri/geomesa | geomesa-core/src/main/scala/org/locationtech/geomesa/core/iterators/RowOnlyIterator.scala | Scala | apache-2.0 | 1,437 |
package com.wavesplatform.history
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.features.BlockchainFeatures
import com.wavesplatform.history.Domain.BlockchainUpdaterExt
import com.wavesplatform.settings.{BlockchainSettings, WavesSettings}
import com.wavesplatform.state.diffs.{ENOUGH_AMT, produce}
import com.wavesplatform.test.PropSpec
import com.wavesplatform.transaction.assets.{BurnTransaction, IssueTransaction, ReissueTransaction}
import com.wavesplatform.transaction.transfer.TransferTransaction
import com.wavesplatform.transaction.{Asset, GenesisTransaction, TxVersion}
import org.scalacheck.Gen
/** Checks that once an asset is issued non-reissuable, a burn is accepted but
  * a subsequent reissue is rejected — both when the transactions arrive in
  * separate blocks and when burn+reissue share one block.
  */
class BlockchainUpdaterBurnTest extends PropSpec with DomainScenarioDrivenPropertyCheck {
  // 1 WAVES in its smallest units (10^8).
  val Waves: Long = 100000000

  type Setup =
    (Long, GenesisTransaction, TransferTransaction, IssueTransaction, BurnTransaction, ReissueTransaction)

  // Generator: genesis funds `master`, master funds `alice`, alice issues a
  // NON-reissuable asset, burns half of it, then attempts to reissue the
  // burnt amount (which must fail).
  val preconditions: Gen[Setup] = for {
    master <- accountGen
    ts <- timestampGen
    transferAssetWavesFee <- smallFeeGen
    alice <- accountGen
    (_, assetName, description, quantity, decimals, _, _, _) <- issueParamGen
    genesis: GenesisTransaction = GenesisTransaction.create(master.toAddress, ENOUGH_AMT, ts).explicitGet()
    masterToAlice: TransferTransaction = TransferTransaction
      .selfSigned(1.toByte, master, alice.toAddress, Asset.Waves, 3 * Waves, Asset.Waves, transferAssetWavesFee, ByteStr.empty, ts + 1)
      .explicitGet()
    issue: IssueTransaction = IssueTransaction(
      TxVersion.V1,
      alice.publicKey,
      assetName,
      description,
      quantity,
      decimals,
      false,
      script = None,
      Waves,
      ts + 100
    ).signWith(alice.privateKey)
    burn: BurnTransaction = BurnTransaction.selfSigned(1.toByte, alice, issue.asset, quantity / 2, Waves, ts + 200).explicitGet()
    reissue: ReissueTransaction = ReissueTransaction
      .selfSigned(1.toByte, alice, issue.asset, burn.quantity, true, Waves, ts + 300)
      .explicitGet()
  } yield (ts, genesis, masterToAlice, issue, burn, reissue)

  // NG and DataTransaction pre-activated from height 0 with a 1-block feature
  // window, so the scenarios run under next-generation block rules.
  val localBlockchainSettings: BlockchainSettings = DefaultBlockchainSettings.copy(
    functionalitySettings = DefaultBlockchainSettings.functionalitySettings
      .copy(
        featureCheckBlocksPeriod = 1,
        blocksForFeatureActivation = 1,
        preActivatedFeatures = Map(BlockchainFeatures.NG.id -> 0, BlockchainFeatures.DataTransaction.id -> 0)
      )
  )
  val localWavesSettings: WavesSettings = settings.copy(blockchainSettings = localBlockchainSettings)

  property("issue -> burn -> reissue in sequential blocks works correctly") {
    scenario(preconditions, localWavesSettings) {
      case (domain, (ts, genesis, masterToAlice, issue, burn, reissue)) =>
        val block0 = customBuildBlockOfTxs(randomSig, Seq(genesis), defaultSigner, 1.toByte, ts)
        val block1 = customBuildBlockOfTxs(block0.id(), Seq(masterToAlice), defaultSigner, TxVersion.V1, ts + 150)
        val block2 = customBuildBlockOfTxs(block1.id(), Seq(issue), defaultSigner, TxVersion.V1, ts + 250)
        val block3 = customBuildBlockOfTxs(block2.id(), Seq(burn), defaultSigner, TxVersion.V1, ts + 350)
        val block4 = customBuildBlockOfTxs(block3.id(), Seq(reissue), defaultSigner, TxVersion.V1, ts + 450)

        domain.appendBlock(block0)
        domain.appendBlock(block1)

        // After issue: full quantity, not reissuable.
        domain.appendBlock(block2)
        val assetDescription1 = domain.blockchainUpdater.assetDescription(issue.asset).get
        assetDescription1.reissuable should be(false)
        assetDescription1.totalVolume should be(issue.quantity)

        // After burn: volume reduced by the burnt amount.
        domain.appendBlock(block3)
        val assetDescription2 = domain.blockchainUpdater.assetDescription(issue.asset).get
        assetDescription2.reissuable should be(false)
        assetDescription2.totalVolume should be(issue.quantity - burn.quantity)

        // Reissue of a non-reissuable asset must be rejected.
        domain.blockchainUpdater.processBlock(block4) should produce("Asset is not reissuable")
    }
  }

  property("issue -> burn -> reissue in micro blocks works correctly") {
    scenario(preconditions, localWavesSettings) {
      case (domain, (ts, genesis, masterToAlice, issue, burn, reissue)) =>
        val block0 = customBuildBlockOfTxs(randomSig, Seq(genesis), defaultSigner, TxVersion.V1, ts)
        val block1 = customBuildBlockOfTxs(block0.id(), Seq(masterToAlice), defaultSigner, TxVersion.V1, ts + 150)
        val block2 = customBuildBlockOfTxs(block1.id(), Seq(issue), defaultSigner, TxVersion.V1, ts + 250)
        // burn and reissue packed into the same block: the whole block fails.
        val block3 = customBuildBlockOfTxs(block2.id(), Seq(burn, reissue), defaultSigner, TxVersion.V1, ts + 350)

        domain.appendBlock(block0)
        domain.appendBlock(block1)

        domain.appendBlock(block2)
        val assetDescription1 = domain.blockchainUpdater.assetDescription(issue.asset).get
        assetDescription1.reissuable should be(false)
        assetDescription1.totalVolume should be(issue.quantity)

        domain.blockchainUpdater.processBlock(block3) should produce("Asset is not reissuable")
    }
  }
}
| wavesplatform/Waves | node/src/test/scala/com/wavesplatform/history/BlockchainUpdaterBurnTest.scala | Scala | mit | 5,237 |
package com.whisk.finagle.mysql
// Single import point for circe support: mixes the value decoders and row
// implicits so `import com.whisk.finagle.mysql.circe._` brings in everything.
package object circe extends CirceValueDecoders with CirceRowImplicits {}
| whisklabs/mysql-util | mysql-util-circe/src/main/scala/com/whisk/finagle/mysql/circe/package.scala | Scala | mit | 107 |
package com.wavesplatform.utils
/** Wall-clock [[Time]] source that also hands out strictly increasing
  * transaction timestamps.
  */
object SystemTime extends Time {
  /** Current wall-clock time in milliseconds. */
  def correctedTime(): Long = System.currentTimeMillis()

  @volatile
  private[this] var txTime: Long = 0

  /** Returns a timestamp that is never earlier than the wall clock and is
    * strictly greater than any previously returned value.
    *
    * The update of `txTime` is a compound read-modify-write, which `@volatile`
    * alone does not make atomic; the whole step is synchronized so concurrent
    * callers cannot observe duplicate timestamps.
    */
  def getTimestamp(): Long = synchronized {
    txTime = Math.max(correctedTime(), txTime + 1)
    txTime
  }
}
| wavesplatform/Waves | node/src/test/scala/com/wavesplatform/utils/SystemTime.scala | Scala | mit | 273 |
package org.bowlerframework.view.json
import net.liftweb.json.{MappingException, TypeInfo, Formats, Serializer}
import net.liftweb.json.JsonAST._
/**
* Created by IntelliJ IDEA.
* User: wfaler
* Date: 05/05/2011
* Time: 22:18
* To change this template use File | Settings | File Templates.
*/
/** lift-json custom serializer for scala.math.BigDecimal.
  * Serializes BigDecimal as a JSON string to avoid precision loss; accepts
  * JInt, JDouble or JString on deserialization.
  */
class BigDecimalSerializer extends Serializer[BigDecimal] {
  private val Class = classOf[BigDecimal]

  def deserialize(implicit format: Formats): PartialFunction[(TypeInfo, JValue), BigDecimal] = {
    case (TypeInfo(Class, _), json) => json match {
      case JInt(iv) => BigDecimal(iv)
      case JDouble(dv) => BigDecimal(dv)
      // Strings go through java.math.BigDecimal to parse the full precision.
      case JString(s) => new BigDecimal(new java.math.BigDecimal(s))
      case x => throw new MappingException("Can't convert " + Class + " to BigDecimal" + json)
    }
  }

  // Emit as a JSON string so arbitrary-precision values survive round-trips.
  def serialize(implicit format: Formats): PartialFunction[Any, JValue] = {
    case x: BigDecimal => JString(x.toString)
  }
} | rkpandey/Bowler | core/src/main/scala/org/bowlerframework/view/json/BigDecimalSerializer.scala | Scala | bsd-3-clause | 936 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.io._
import java.util.{GregorianCalendar, UUID}
import scala.Array.canBuildFrom
import scala.collection.mutable.ArrayBuffer
import scala.language.implicitConversions
import scala.util.parsing.combinator.RegexParsers
import org.apache.spark
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.command.{AggregateTableAttributes, Partitioner}
import org.apache.spark.sql.hive.client.ClientInterface
import org.apache.spark.sql.types._
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.carbon.CarbonTableIdentifier
import org.apache.carbondata.core.carbon.metadata.CarbonMetadata
import org.apache.carbondata.core.carbon.metadata.converter.ThriftWrapperSchemaConverterImpl
import org.apache.carbondata.core.carbon.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.carbon.path.{CarbonStorePath, CarbonTablePath}
import org.apache.carbondata.core.carbon.querystatistics.{QueryStatistic, QueryStatisticsConstants}
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastorage.store.filesystem.CarbonFile
import org.apache.carbondata.core.datastorage.store.impl.FileFactory
import org.apache.carbondata.core.datastorage.store.impl.FileFactory.FileType
import org.apache.carbondata.core.reader.ThriftReader
import org.apache.carbondata.core.util.{CarbonProperties, CarbonTimeStatisticsFactory, CarbonUtil}
import org.apache.carbondata.core.writer.ThriftWriter
import org.apache.carbondata.format.{SchemaEvolutionEntry, TableInfo}
import org.apache.carbondata.lcm.locks.ZookeeperInit
import org.apache.carbondata.spark.util.CarbonScalaUtil.CarbonSparkUtil
// Mutable holder for the metadata of every Carbon table known to the catalog.
case class MetaData(var tablesMeta: ArrayBuffer[TableMeta])

// Cached per-table view: dimension and measure column names, the resolved
// CarbonTable, and the per-column dictionary-encoding map.
case class CarbonMetaData(dims: Seq[String],
    msrs: Seq[String],
    carbonTable: CarbonTable,
    dictionaryMap: DictionaryMap)

// Identity + storage location of one table; `carbonTable` is a var because it
// is reloaded when the schema changes.
case class TableMeta(carbonTableIdentifier: CarbonTableIdentifier, storePath: String,
    var carbonTable: CarbonTable, partitioner: Partitioner)
/**
 * Companion helpers for (de)serializing a table's thrift schema file.
 */
object CarbonMetastoreCatalog {

  /**
   * Reads the thrift [[TableInfo]] stored at `schemaFilePath`.
   * The reader is always closed, even when open/read fails.
   */
  def readSchemaFileToThriftTable(schemaFilePath: String): TableInfo = {
    val creator = new ThriftReader.TBaseCreator() {
      override def create(): org.apache.thrift.TBase[TableInfo, TableInfo._Fields] = {
        new TableInfo()
      }
    }
    val reader = new ThriftReader(schemaFilePath, creator)
    try {
      reader.open()
      reader.read().asInstanceOf[TableInfo]
    } finally {
      reader.close()
    }
  }

  /**
   * Writes `tableInfo` to `schemaFilePath` (non-append mode).
   * The writer is always closed, even when open/write fails.
   */
  def writeThriftTableToSchemaFile(schemaFilePath: String, tableInfo: TableInfo): Unit = {
    val writer = new ThriftWriter(schemaFilePath, false)
    try {
      writer.open()
      writer.write(tableInfo)
    } finally {
      writer.close()
    }
  }
}
/**
 * Case-insensitive lookup of per-column dictionary-encoding flags.
 *
 * @param dictionaryMap mapping from lower-cased column name to whether that
 *                      column is dictionary encoded
 */
case class DictionaryMap(dictionaryMap: Map[String, Boolean]) {
  /** Returns the flag for `name` (compared in lower case), or `None` if unknown. */
  def get(name: String): Option[Boolean] = dictionaryMap.get(name.toLowerCase)
}
/**
 * Hive metastore catalog extension that manages Carbon table metadata stored
 * under `storePath`. Keeps an in-memory [[MetaData]] cache and reloads it when
 * the on-disk schema timestamp file changes (e.g. modified by another driver).
 */
class CarbonMetastoreCatalog(hiveContext: HiveContext, val storePath: String,
    client: ClientInterface, queryId: String)
  extends HiveMetastoreCatalog(client, hiveContext)
  with spark.Logging {

  @transient val LOGGER = LogServiceFactory
    .getLogService("org.apache.spark.sql.CarbonMetastoreCatalog")

  // Last-seen modification time of the schema timestamp file, keyed by the
  // default database name; compared against the file to detect stale caches.
  val tableModifiedTimeStore = new java.util.HashMap[String, Long]()
  tableModifiedTimeStore
    .put(CarbonCommonConstants.DATABASE_DEFAULT_NAME, System.currentTimeMillis())

  // Eagerly populated table cache; refreshed by checkSchemasModifiedTimeAndReloadTables().
  val metadata = loadMetadata(storePath)

  /**
   * Returns the last-updated time of the given table.
   * NOTE(review): uses `head` on the filter result, so an unknown table throws
   * NoSuchElementException — confirm callers only pass existing tables.
   */
  def getTableCreationTime(databaseName: String, tableName: String): Long = {
    val tableMeta = metadata.tablesMeta.filter(
      c => c.carbonTableIdentifier.getDatabaseName.equalsIgnoreCase(databaseName) &&
        c.carbonTableIdentifier.getTableName.equalsIgnoreCase(tableName))
    val tableCreationTime = tableMeta.head.carbonTable.getTableLastUpdatedTime
    tableCreationTime
  }

  /** Convenience overload building a TableIdentifier from an optional db name. */
  def lookupRelation1(dbName: Option[String],
      tableName: String)(sqlContext: SQLContext): LogicalPlan = {
    lookupRelation1(TableIdentifier(tableName, dbName))(sqlContext)
  }

  /**
   * Resolves a Carbon table to a logical plan, reloading cached metadata first
   * if the schema timestamp changed. Throws NoSuchTableException when absent.
   */
  def lookupRelation1(tableIdentifier: TableIdentifier,
      alias: Option[String] = None)(sqlContext: SQLContext): LogicalPlan = {
    checkSchemasModifiedTimeAndReloadTables()
    val database = tableIdentifier.database.getOrElse(getDB.getDatabaseName(None, sqlContext))
    val tables = metadata.tablesMeta.filter(
      c => c.carbonTableIdentifier.getDatabaseName.equalsIgnoreCase(database) &&
        c.carbonTableIdentifier.getTableName.equalsIgnoreCase(tableIdentifier.table))
    if (tables.nonEmpty) {
      CarbonRelation(database, tableIdentifier.table,
        CarbonSparkUtil.createSparkMeta(tables.head.carbonTable), tables.head, alias)(sqlContext)
    } else {
      LOGGER.audit(s"Table Not Found: ${tableIdentifier.table}")
      throw new NoSuchTableException
    }
  }

  /** True when the (case-insensitively matched) table exists in the cache. */
  def tableExists(tableIdentifier: TableIdentifier)(sqlContext: SQLContext): Boolean = {
    checkSchemasModifiedTimeAndReloadTables()
    val database = tableIdentifier.database.getOrElse(getDB.getDatabaseName(None, sqlContext))
    val tables = metadata.tablesMeta.filter(
      c => c.carbonTableIdentifier.getDatabaseName.equalsIgnoreCase(database) &&
        c.carbonTableIdentifier.getTableName.equalsIgnoreCase(tableIdentifier.table))
    tables.nonEmpty
  }

  /**
   * Scans the store path and loads every table's schema into a [[MetaData]].
   * Also configures zookeeper-based locking if a zookeeper URL is set.
   * NOTE(review): returns null when `metadataPath` is null — callers appear to
   * rely on `storePath` being non-null; confirm.
   */
  def loadMetadata(metadataPath: String): MetaData = {
    val recorder = CarbonTimeStatisticsFactory.createDriverRecorder()
    val statistic = new QueryStatistic()
    // creating zookeeper instance once.
    // if zookeeper is configured as carbon lock type.
    val zookeeperUrl: String = hiveContext.getConf(CarbonCommonConstants.ZOOKEEPER_URL, null)
    if (zookeeperUrl != null) {
      CarbonProperties.getInstance.addProperty(CarbonCommonConstants.ZOOKEEPER_URL, zookeeperUrl)
      ZookeeperInit.getInstance(zookeeperUrl)
      LOGGER.info("Zookeeper url is configured. Taking the zookeeper as lock type.")
      var configuredLockType = CarbonProperties.getInstance
        .getProperty(CarbonCommonConstants.LOCK_TYPE)
      // Default the lock type to zookeeper only when the user configured none.
      if (null == configuredLockType) {
        configuredLockType = CarbonCommonConstants.CARBON_LOCK_TYPE_ZOOKEEPER
        CarbonProperties.getInstance
          .addProperty(CarbonCommonConstants.LOCK_TYPE,
            configuredLockType)
      }
    }
    if (metadataPath == null) {
      return null
    }
    val fileType = FileFactory.getFileType(metadataPath)
    val metaDataBuffer = new ArrayBuffer[TableMeta]
    fillMetaData(metadataPath, fileType, metaDataBuffer)
    // Record the timestamp-file state so later staleness checks have a baseline.
    updateSchemasUpdatedTime("", "")
    statistic.addStatistics(QueryStatisticsConstants.LOAD_META,
      System.currentTimeMillis())
    recorder.recordStatisticsForDriver(statistic, queryId)
    MetaData(metaDataBuffer)
  }

  /**
   * Walks `basePath` (layout: database folder / table folder / schema file),
   * deserializes each table's thrift schema and appends a [[TableMeta]] to
   * `metaDataBuffer`. Creates the base folder when it does not exist.
   */
  private def fillMetaData(basePath: String, fileType: FileType,
      metaDataBuffer: ArrayBuffer[TableMeta]): Unit = {
    val databasePath = basePath // + "/schemas"
    try {
      if (FileFactory.isFileExist(databasePath, fileType)) {
        val file = FileFactory.getCarbonFile(databasePath, fileType)
        val databaseFolders = file.listFiles()
        databaseFolders.foreach(databaseFolder => {
          if (databaseFolder.isDirectory) {
            val dbName = databaseFolder.getName
            val tableFolders = databaseFolder.listFiles()
            tableFolders.foreach(tableFolder => {
              if (tableFolder.isDirectory) {
                val carbonTableIdentifier = new CarbonTableIdentifier(databaseFolder.getName,
                  tableFolder.getName, UUID.randomUUID().toString)
                val carbonTablePath = CarbonStorePath.getCarbonTablePath(basePath,
                  carbonTableIdentifier)
                val tableMetadataFile = carbonTablePath.getSchemaFilePath
                if (FileFactory.isFileExist(tableMetadataFile, fileType)) {
                  val tableName = tableFolder.getName
                  val tableUniqueName = databaseFolder.getName + "_" + tableFolder.getName
                  val createTBase = new ThriftReader.TBaseCreator() {
                    override def create(): org.apache.thrift.TBase[TableInfo, TableInfo._Fields] = {
                      new TableInfo()
                    }
                  }
                  // NOTE(review): reader is not closed in a finally block; a read
                  // failure here leaks the handle — consider hardening.
                  val thriftReader = new ThriftReader(tableMetadataFile, createTBase)
                  thriftReader.open()
                  val tableInfo: TableInfo = thriftReader.read().asInstanceOf[TableInfo]
                  thriftReader.close()
                  // Convert external (thrift) schema to the wrapper model and register
                  // it in the global CarbonMetadata singleton.
                  val schemaConverter = new ThriftWrapperSchemaConverterImpl
                  val wrapperTableInfo = schemaConverter
                    .fromExternalToWrapperTableInfo(tableInfo, dbName, tableName, basePath)
                  val schemaFilePath = CarbonStorePath
                    .getCarbonTablePath(storePath, carbonTableIdentifier).getSchemaFilePath
                  wrapperTableInfo.setStorePath(storePath)
                  wrapperTableInfo
                    .setMetaDataFilepath(CarbonTablePath.getFolderContainingFile(schemaFilePath))
                  CarbonMetadata.getInstance().loadTableMetadata(wrapperTableInfo)
                  val carbonTable =
                    org.apache.carbondata.core.carbon.metadata.CarbonMetadata.getInstance()
                      .getCarbonTable(tableUniqueName)
                  metaDataBuffer += TableMeta(
                    carbonTable.getCarbonTableIdentifier,
                    storePath,
                    carbonTable,
                    // TODO: Need to update Database thirft to hold partitioner
                    // information and reload when required.
                    Partitioner("org.apache.carbondata.spark.partition.api.impl." +
                      "SampleDataPartitionerImpl",
                      Array(""), 1, Array("")))
                }
              }
            })
          }
        })
      }
      else {
        // Create folders and files.
        FileFactory.mkdirs(databasePath, fileType)
      }
    }
    catch {
      case s: java.io.FileNotFoundException =>
        // Create folders and files.
        FileFactory.mkdirs(databasePath, fileType)
    }
  }

  /**
   *
   * Prepare Thrift Schema from wrapper TableInfo and write to Schema file.
   * Load CarbonTable from wrapper tableinfo
   *
   * @return the table path under the store
   */
  def createTableFromThrift(
      tableInfo: org.apache.carbondata.core.carbon.metadata.schema.table.TableInfo,
      dbName: String, tableName: String, partitioner: Partitioner)
      (sqlContext: SQLContext): String = {
    if (tableExists(TableIdentifier(tableName, Some(dbName)))(sqlContext)) {
      sys.error(s"Table [$tableName] already exists under Database [$dbName]")
    }
    val schemaConverter = new ThriftWrapperSchemaConverterImpl
    val thriftTableInfo = schemaConverter
      .fromWrapperToExternalTableInfo(tableInfo, dbName, tableName)
    // Record the creation as the first schema-evolution entry.
    val schemaEvolutionEntry = new SchemaEvolutionEntry(tableInfo.getLastUpdatedTime)
    thriftTableInfo.getFact_table.getSchema_evolution.getSchema_evolution_history
      .add(schemaEvolutionEntry)
    val carbonTableIdentifier = new CarbonTableIdentifier(dbName, tableName,
      tableInfo.getFactTable.getTableId)
    val carbonTablePath = CarbonStorePath.getCarbonTablePath(storePath, carbonTableIdentifier)
    val schemaFilePath = carbonTablePath.getSchemaFilePath
    val schemaMetadataPath = CarbonTablePath.getFolderContainingFile(schemaFilePath)
    tableInfo.setMetaDataFilepath(schemaMetadataPath)
    tableInfo.setStorePath(storePath)
    CarbonMetadata.getInstance().loadTableMetadata(tableInfo)
    val tableMeta = TableMeta(
      carbonTableIdentifier,
      storePath,
      CarbonMetadata.getInstance().getCarbonTable(dbName + "_" + tableName),
      Partitioner("org.apache.carbondata.spark.partition.api.impl.SampleDataPartitionerImpl",
        Array(""), 1, DistributionUtil.getNodeList(hiveContext.sparkContext)))
    val fileType = FileFactory.getFileType(schemaMetadataPath)
    if (!FileFactory.isFileExist(schemaMetadataPath, fileType)) {
      FileFactory.mkdirs(schemaMetadataPath, fileType)
    }
    // Persist the thrift schema, then expose the table through the in-memory cache.
    val thriftWriter = new ThriftWriter(schemaFilePath, false)
    thriftWriter.open()
    thriftWriter.write(thriftTableInfo)
    thriftWriter.close()
    metadata.tablesMeta += tableMeta
    logInfo(s"Table $tableName for Database $dbName created successfully.")
    LOGGER.info("Table " + tableName + " for Database " + dbName + " created successfully.")
    updateSchemasUpdatedTime(dbName, tableName)
    carbonTablePath.getPath
  }

  /**
   * Reloads the given wrapper schema into the CarbonMetadata singleton and
   * swaps the refreshed CarbonTable into the matching cache entries.
   */
  private def updateMetadataByWrapperTable(
      wrapperTableInfo: org.apache.carbondata.core.carbon.metadata.schema.table.TableInfo): Unit = {
    CarbonMetadata.getInstance().loadTableMetadata(wrapperTableInfo)
    val carbonTable = CarbonMetadata.getInstance().getCarbonTable(
      wrapperTableInfo.getTableUniqueName)
    for (i <- metadata.tablesMeta.indices) {
      if (wrapperTableInfo.getTableUniqueName.equals(
        metadata.tablesMeta(i).carbonTableIdentifier.getTableUniqueName)) {
        metadata.tablesMeta(i).carbonTable = carbonTable
      }
    }
  }

  /**
   * Stamps the first schema-evolution entry with "now", converts the thrift
   * schema to the wrapper model and refreshes the in-memory cache with it.
   */
  def updateMetadataByThriftTable(schemaFilePath: String,
      tableInfo: TableInfo, dbName: String, tableName: String, storePath: String): Unit = {
    tableInfo.getFact_table.getSchema_evolution.getSchema_evolution_history.get(0)
      .setTime_stamp(System.currentTimeMillis())
    val schemaConverter = new ThriftWrapperSchemaConverterImpl
    val wrapperTableInfo = schemaConverter
      .fromExternalToWrapperTableInfo(tableInfo, dbName, tableName, storePath)
    wrapperTableInfo
      .setMetaDataFilepath(CarbonTablePath.getFolderContainingFile(schemaFilePath))
    wrapperTableInfo.setStorePath(storePath)
    updateMetadataByWrapperTable(wrapperTableInfo)
  }

  /**
   * Validates that every aggregate attribute without an aggregation function is
   * a known, non-duplicate dimension of the fact table, and returns their names.
   * Fails fast via sys.error for measures, unknown columns, or duplicates.
   */
  def getDimensions(carbonTable: CarbonTable,
      aggregateAttributes: List[AggregateTableAttributes]): Array[String] = {
    var dimArray = Array[String]()
    aggregateAttributes.filter { agg => null == agg.aggType }.foreach { agg =>
      val colName = agg.colName
      if (null != carbonTable.getMeasureByName(carbonTable.getFactTableName, colName)) {
        sys
          .error(s"Measure must be provided along with aggregate function :: $colName")
      }
      if (null == carbonTable.getDimensionByName(carbonTable.getFactTableName, colName)) {
        sys
          .error(s"Invalid column name. Cannot create an aggregate table :: $colName")
      }
      if (dimArray.contains(colName)) {
        sys.error(s"Duplicate column name. Cannot create an aggregate table :: $colName")
      }
      dimArray :+= colName
    }
    dimArray
  }

  /**
   * Shows all schemas which has Database name like
   * (substring match on the optional pattern; may contain duplicates).
   */
  def showDatabases(schemaLike: Option[String]): Seq[String] = {
    checkSchemasModifiedTimeAndReloadTables()
    metadata.tablesMeta.map { c =>
      schemaLike match {
        case Some(name) =>
          if (c.carbonTableIdentifier.getDatabaseName.contains(name)) {
            c.carbonTableIdentifier
              .getDatabaseName
          }
          else {
            null
          }
        case _ => c.carbonTableIdentifier.getDatabaseName
      }
    }.filter(f => f != null)
  }

  /**
   * Shows all tables for given schema.
   * The Boolean in each pair is "isTemporary", always false for Carbon tables.
   */
  def getTables(databaseName: Option[String])(sqlContext: SQLContext): Seq[(String, Boolean)] = {
    val dbName =
      databaseName.getOrElse(sqlContext.asInstanceOf[HiveContext].catalog.client.currentDatabase)
    checkSchemasModifiedTimeAndReloadTables()
    metadata.tablesMeta.filter { c =>
      c.carbonTableIdentifier.getDatabaseName.equalsIgnoreCase(dbName)
    }.map { c => (c.carbonTableIdentifier.getTableName, false) }
  }

  /**
   * Shows all tables in all schemas.
   */
  def getAllTables()(sqlContext: SQLContext): Seq[TableIdentifier] = {
    checkSchemasModifiedTimeAndReloadTables()
    metadata.tablesMeta.map { c =>
      TableIdentifier(c.carbonTableIdentifier.getTableName,
        Some(c.carbonTableIdentifier.getDatabaseName))
    }
  }

  /** True when the table's folder exists under this catalog's store path. */
  def isTablePathExists(tableIdentifier: TableIdentifier)(sqlContext: SQLContext): Boolean = {
    val dbName = tableIdentifier.database.getOrElse(getDB.getDatabaseName(None, sqlContext))
    val tableName = tableIdentifier.table
    val tablePath = CarbonStorePath.getCarbonTablePath(this.storePath,
      new CarbonTableIdentifier(dbName, tableName, "")).getPath
    val fileType = FileFactory.getFileType(tablePath)
    FileFactory.isFileExist(tablePath, fileType)
  }

  /**
   * Deletes the table's folder tree, removes it from every cache (in-memory
   * buffer, CarbonMetadata singleton, Hive) and bumps the schema timestamp.
   * NOTE(review): tableIdentifier.database.get assumes the db is always set here.
   */
  def dropTable(tableStorePath: String, tableIdentifier: TableIdentifier)
      (sqlContext: SQLContext) {
    val dbName = tableIdentifier.database.get
    val tableName = tableIdentifier.table
    val metadataFilePath = CarbonStorePath.getCarbonTablePath(tableStorePath,
      new CarbonTableIdentifier(dbName, tableName, "")).getMetadataDirectoryPath
    val fileType = FileFactory.getFileType(metadataFilePath)
    if (FileFactory.isFileExist(metadataFilePath, fileType)) {
      // getParentFile is the table folder; delete it wholesale.
      val file = FileFactory.getCarbonFile(metadataFilePath, fileType)
      CarbonUtil.deleteFoldersAndFilesSilent(file.getParentFile)
      // Remove the first matching cache entry (throws if the table is not cached).
      metadata.tablesMeta -= metadata.tablesMeta.filter(
        c => c.carbonTableIdentifier.getDatabaseName.equalsIgnoreCase(dbName) &&
          c.carbonTableIdentifier.getTableName.equalsIgnoreCase(tableName))(0)
      org.apache.carbondata.core.carbon.metadata.CarbonMetadata.getInstance
        .removeTable(dbName + "_" + tableName)
      CarbonHiveMetadataUtil.invalidateAndDropTable(dbName, tableName, sqlContext)
      updateSchemasUpdatedTime(dbName, tableName)
      // discard cached table info in cachedDataSourceTables
      sqlContext.catalog.refreshTable(tableIdentifier)
    }
  }

  /**
   * Resolves the store-wide schema timestamp file and its FileType.
   * The databaseName/tableName parameters are currently unused: there is a
   * single timestamp file for the whole store.
   */
  private def getTimestampFileAndType(databaseName: String, tableName: String) = {
    val timestampFile = storePath + "/" + CarbonCommonConstants.SCHEMAS_MODIFIED_TIME_FILE
    val timestampFileType = FileFactory.getFileType(timestampFile)
    (timestampFile, timestampFileType)
  }

  /**
   * Creates the timestamp file if needed, touches it, and records its new
   * modification time in the in-memory store for later staleness checks.
   */
  def updateSchemasUpdatedTime(databaseName: String, tableName: String) {
    val (timestampFile, timestampFileType) = getTimestampFileAndType(databaseName, tableName)
    if (!FileFactory.isFileExist(timestampFile, timestampFileType)) {
      LOGGER.audit(s"Creating timestamp file for $databaseName.$tableName")
      FileFactory.createNewFile(timestampFile, timestampFileType)
    }
    touchSchemasTimestampFile(databaseName, tableName)
    tableModifiedTimeStore.put(CarbonCommonConstants.DATABASE_DEFAULT_NAME,
      FileFactory.getCarbonFile(timestampFile, timestampFileType).getLastModifiedTime)
  }

  /** Sets the timestamp file's modification time to "now". */
  def touchSchemasTimestampFile(databaseName: String, tableName: String) {
    val (timestampFile, timestampFileType) = getTimestampFileAndType(databaseName, tableName)
    FileFactory.getCarbonFile(timestampFile, timestampFileType)
      .setLastModifiedTime(System.currentTimeMillis())
  }

  /**
   * Reloads the whole metadata cache when the timestamp file's modification
   * time differs from the value recorded in tableModifiedTimeStore.
   */
  def checkSchemasModifiedTimeAndReloadTables() {
    val (timestampFile, timestampFileType) = getTimestampFileAndType("", "")
    if (FileFactory.isFileExist(timestampFile, timestampFileType)) {
      if (!(FileFactory.getCarbonFile(timestampFile, timestampFileType).
        getLastModifiedTime ==
        tableModifiedTimeStore.get(CarbonCommonConstants.DATABASE_DEFAULT_NAME))) {
        refreshCache()
      }
    }
  }

  /** Re-reads all table metadata from disk into the cache. */
  def refreshCache() {
    metadata.tablesMeta = loadMetadata(storePath).tablesMeta
  }

  /**
   * Last modification time of the schema timestamp file, or "now" when the
   * file does not exist yet.
   */
  def getSchemaLastUpdatedTime(databaseName: String, tableName: String): Long = {
    var schemaLastUpdatedTime = System.currentTimeMillis
    val (timestampFile, timestampFileType) = getTimestampFileAndType(databaseName, tableName)
    if (FileFactory.isFileExist(timestampFile, timestampFileType)) {
      schemaLastUpdatedTime = FileFactory.getCarbonFile(timestampFile, timestampFileType)
        .getLastModifiedTime
    }
    schemaLastUpdatedTime
  }

  /**
   * Reads a legacy binary "metadata" file from the table folder. The record
   * layout is length-prefixed fields in order: database name, table name,
   * data path, version, schema, java-serialized Partitioner, then an optional
   * creation-time long before the next record length.
   * Returns (databaseName, tableName, dataPath, schema, partitioner, creationTime);
   * defaults (empty strings / 2011-02-01) are returned when the file is absent.
   */
  def readTableMetaDataFile(tableFolder: CarbonFile,
      fileType: FileFactory.FileType):
  (String, String, String, String, Partitioner, Long) = {
    val tableMetadataFile = tableFolder.getAbsolutePath + "/metadata"
    var schema: String = ""
    var databaseName: String = ""
    var tableName: String = ""
    var dataPath: String = ""
    var partitioner: Partitioner = null
    val cal = new GregorianCalendar(2011, 1, 1)
    var tableCreationTime = cal.getTime.getTime
    if (FileFactory.isFileExist(tableMetadataFile, fileType)) {
      // load metadata
      val in = FileFactory.getDataInputStream(tableMetadataFile, fileType)
      var len = 0
      try {
        len = in.readInt()
      } catch {
        // An empty file has no leading length; treat as "no records".
        case others: EOFException => len = 0
      }
      while (len > 0) {
        val databaseNameBytes = new Array[Byte](len)
        in.readFully(databaseNameBytes)
        databaseName = new String(databaseNameBytes, "UTF8")
        val tableNameLen = in.readInt()
        val tableNameBytes = new Array[Byte](tableNameLen)
        in.readFully(tableNameBytes)
        tableName = new String(tableNameBytes, "UTF8")
        val dataPathLen = in.readInt()
        val dataPathBytes = new Array[Byte](dataPathLen)
        in.readFully(dataPathBytes)
        dataPath = new String(dataPathBytes, "UTF8")
        val versionLength = in.readInt()
        val versionBytes = new Array[Byte](versionLength)
        in.readFully(versionBytes)
        val schemaLen = in.readInt()
        val schemaBytes = new Array[Byte](schemaLen)
        in.readFully(schemaBytes)
        schema = new String(schemaBytes, "UTF8")
        val partitionLength = in.readInt()
        val partitionBytes = new Array[Byte](partitionLength)
        in.readFully(partitionBytes)
        val inStream = new ByteArrayInputStream(partitionBytes)
        val objStream = new ObjectInputStream(inStream)
        partitioner = objStream.readObject().asInstanceOf[Partitioner]
        objStream.close()
        try {
          tableCreationTime = in.readLong()
          len = in.readInt()
        } catch {
          // EOF here means this was the last record.
          case others: EOFException => len = 0
        }
      }
      in.close()
    }
    (databaseName, tableName, dataPath, schema, partitioner, tableCreationTime)
  }
}
/**
 * Parser combinators that translate Hive-metastore type strings (e.g.
 * "array<struct<a:int,b:string>>") to Spark SQL DataTypes, and back.
 */
object CarbonMetastoreTypes extends RegexParsers {

  // NOTE(review): "tinyint" maps to ShortType here (not ByteType), mirroring
  // toMetastoreType below which renders ShortType as "tinyint".
  protected lazy val primitiveType: Parser[DataType] =
    "string" ^^^ StringType |
      "float" ^^^ FloatType |
      "int" ^^^ IntegerType |
      "tinyint" ^^^ ShortType |
      "short" ^^^ ShortType |
      "double" ^^^ DoubleType |
      "long" ^^^ LongType |
      "binary" ^^^ BinaryType |
      "boolean" ^^^ BooleanType |
      fixedDecimalType |
      // bare "decimal" without precision/scale defaults to (18, 2)
      "decimal" ^^^ "decimal" ^^^ DecimalType(18, 2) |
      // varchar(n) is treated as a plain string
      "varchar\\\\((\\\\d+)\\\\)".r ^^^ StringType |
      "timestamp" ^^^ TimestampType

  /** Parses "decimal(p,s)" into a DecimalType with explicit precision/scale. */
  protected lazy val fixedDecimalType: Parser[DataType] =
    "decimal" ~> "(" ~> "^[1-9]\\\\d*".r ~ ("," ~> "^[0-9]\\\\d*".r <~ ")") ^^ {
      case precision ~ scale =>
        DecimalType(precision.toInt, scale.toInt)
    }

  /** Parses "array<T>". */
  protected lazy val arrayType: Parser[DataType] =
    "array" ~> "<" ~> dataType <~ ">" ^^ {
      case tpe => ArrayType(tpe)
    }

  /** Parses "map<K,V>". */
  protected lazy val mapType: Parser[DataType] =
    "map" ~> "<" ~> dataType ~ "," ~ dataType <~ ">" ^^ {
      case t1 ~ _ ~ t2 => MapType(t1, t2)
    }

  /** Parses a single "name:type" struct member; fields are always nullable. */
  protected lazy val structField: Parser[StructField] =
    "[a-zA-Z0-9_]*".r ~ ":" ~ dataType ^^ {
      case name ~ _ ~ tpe => StructField(name, tpe, nullable = true)
    }

  /** Parses "struct<f1:T1,f2:T2,...>". */
  protected lazy val structType: Parser[DataType] =
    "struct" ~> "<" ~> repsep(structField, ",") <~ ">" ^^ {
      case fields => StructType(fields)
    }

  /** Entry grammar: complex types first so their keywords win over primitives. */
  protected lazy val dataType: Parser[DataType] =
    arrayType |
      mapType |
      structType |
      primitiveType

  /** Parses a full metastore type string; sys.error on unsupported input. */
  def toDataType(metastoreType: String): DataType = {
    parseAll(dataType, metastoreType) match {
      case Success(result, _) => result
      case failure: NoSuccess => sys.error(s"Unsupported dataType: $metastoreType")
    }
  }

  /**
   * Renders a DataType back to metastore syntax.
   * NOTE(review): not a perfect inverse of toDataType — LongType renders as
   * "bigint" which the parser does not accept, and DecimalType loses its
   * precision/scale. MatchError for unlisted types (e.g. ByteType, DateType).
   */
  def toMetastoreType(dt: DataType): String = {
    dt match {
      case ArrayType(elementType, _) => s"array<${ toMetastoreType(elementType) }>"
      case StructType(fields) =>
        s"struct<${
          fields.map(f => s"${ f.name }:${ toMetastoreType(f.dataType) }")
            .mkString(",")
        }>"
      case StringType => "string"
      case FloatType => "float"
      case IntegerType => "int"
      case ShortType => "tinyint"
      case DoubleType => "double"
      case LongType => "bigint"
      case BinaryType => "binary"
      case BooleanType => "boolean"
      case DecimalType() => "decimal"
      case TimestampType => "timestamp"
    }
  }
}
| foryou2030/incubator-carbondata | integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala | Scala | apache-2.0 | 25,725 |
import avrohugger.format.Standard
import avrohugger.tool.{Main, Directory, GeneratorTool}
import org.apache.avro.tool.Tool
import org.specs2._
import mutable._
import specification._
import scala.collection.JavaConversions._
import scala.util.Try
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.Arrays;
/**
* Verifies that the GeneratorTool generates Scala source properly
*/
/**
 * Verifies that the GeneratorTool generates Scala source properly
 */
class StandardGeneratorToolSpec extends mutable.Specification {

  /**
   * Runs the actual generator tool with the given input args, capturing any
   * exception in the returned Try instead of letting it escape the spec.
   */
  private def doCompile(args: Array[String]) = {
    val tool = new GeneratorTool(Standard)
    Try {
      tool.run(null, null, null, Arrays.asList((args: _*)))
    }
  }

  /**
   * Reads the whole file into a single string, joining lines with "\\n".
   *
   * Fixes two defects in the previous version:
   *  - the `first` flag was only flipped inside the `if (!first)` branch, so it
   *    stayed `true` forever and a separator was never emitted between lines;
   *  - the BufferedReader was never closed (resource leak). It is now closed
   *    in a `finally` block.
   */
  private def readFile(file: File) = {
    Try {
      val reader: BufferedReader = new BufferedReader(new FileReader(file))
      try {
        val sb: StringBuilder = new StringBuilder()
        var first: Boolean = true
        var line: String = reader.readLine()
        while (line != null) {
          // Emit the separator before every line except the first.
          if (!first) {
            sb.append("\\n")
          }
          sb.append(line)
          first = false
          line = reader.readLine()
        }
        sb.toString()
      } finally {
        reader.close()
      }
    }
  }

  "the generated Scala files" should {
    /* //TODO in Java an interface is generated in addition to the types, how to represent in Scala? Trait?
    "match the expected single protocol file" in {
      doCompile(Array[String] ("protocol",
        Directory.TEST_INPUT_DIR.toString() + "/mail.avpr",
        Directory.TEST_OUTPUT_SCALA_DIR.getPath()
      ));
      readFile(Directory.TEST_OUTPUT_MESSAGE) === readFile(Directory.TEST_EXPECTED_MESSAGE)
    }
    */
    "match the expected single datafile file" in {
      doCompile(Array[String]("datafile",
        Directory.TEST_INPUT_DIR.toString() + "/twitter.avro",
        Directory.TEST_OUTPUT_BASE_DIR.getPath()
      ))
      readFile(Directory.TEST_OUTPUT_TWITTER) === readFile(Directory.TEST_EXPECTED_TWITTER)
    }
    "match the expected single schema file" in {
      doCompile(Array[String]("schema",
        Directory.TEST_INPUT_DIR.toString() + "/nickname.avsc",
        Directory.TEST_OUTPUT_BASE_DIR.getPath()
      ))
      readFile(Directory.TEST_OUTPUT_NICKNAME) === readFile(Directory.TEST_EXPECTED_NICKNAME)
    }
    "match the expected dependent files" in {
      doCompile(Array[String]("schema",
        Directory.TEST_INPUT_DIR.toString() + "/nickname.avsc",
        Directory.TEST_INPUT_DIR.toString() + "/player.avsc",
        Directory.TEST_OUTPUT_BASE_DIR.getPath()
      ))
      readFile(Directory.TEST_OUTPUT_PLAYER) === readFile(Directory.TEST_EXPECTED_PLAYER)
    }
    "match the expected file and directory" in {
      doCompile(Array[String]("schema",
        Directory.TEST_INPUT_DIR.toString() + "/nickname.avsc",
        Directory.TEST_INPUT_DIR.toString(),
        Directory.TEST_OUTPUT_BASE_DIR.getPath()
      ))
      readFile(Directory.TEST_OUTPUT_PLAYER) === readFile(Directory.TEST_EXPECTED_PLAYER)
    }
    "match the expected using the -string option" in {
      doCompile(Array[String]("-string", "schema",
        Directory.TEST_INPUT_DIR.toString() + "/nickname.avsc",
        Directory.TEST_INPUT_DIR.toString() + "/player.avsc",
        Directory.TEST_INPUT_DIR.toString() + "/twitter_schema.avro",
        Directory.TEST_OUTPUT_STRING_BASE_DIR.getPath()
      ))
      readFile(Directory.TEST_OUTPUT_STRING_PLAYER) === readFile(Directory.TEST_EXPECTED_STRING_PLAYER)
    }
  }
}
| ppearcy/avrohugger | avrohugger-tools/src/test/scala/StandardGeneratorToolSpec.scala | Scala | apache-2.0 | 3,622 |
package com.nutomic.ensichat.activities
import android.app.Activity
import android.app.AlertDialog.Builder
import android.content.DialogInterface.OnClickListener
import android.content._
import android.os.Bundle
import android.support.v4.app.NavUtils
import android.support.v4.content.LocalBroadcastManager
import android.view._
import android.widget.AdapterView.OnItemClickListener
import android.widget._
import com.google.zxing.integration.android.IntentIntegrator
import com.nutomic.ensichat.R
import com.nutomic.ensichat.core.routing.Address
import com.nutomic.ensichat.service.CallbackHandler
import com.nutomic.ensichat.views.UsersAdapter
/**
* Lists all nearby, connected devices and allows adding them to be added as contacts.
*/
/**
 * Lists all nearby, connected devices and allows them to be added as contacts,
 * either by tapping a list entry, entering an address manually, or scanning a
 * QR code.
 */
class ConnectionsActivity extends EnsichatActivity with OnItemClickListener {

  private lazy val adapter = new UsersAdapter(this)

  /**
   * Initializes layout, registers connection and message listeners.
   */
  override def onCreate(savedInstanceState: Bundle): Unit = {
    super.onCreate(savedInstanceState)
    getSupportActionBar.setDisplayHomeAsUpEnabled(true)
    setContentView(R.layout.activity_connections)
    val list = findViewById(android.R.id.list).asInstanceOf[ListView]
    list.setAdapter(adapter)
    list.setOnItemClickListener(this)
    list.setEmptyView(findViewById(android.R.id.empty))
    // Refresh the device list whenever connections or contacts change.
    val filter = new IntentFilter()
    filter.addAction(CallbackHandler.ActionConnectionsChanged)
    filter.addAction(CallbackHandler.ActionContactsUpdated)
    LocalBroadcastManager.getInstance(this)
      .registerReceiver(onContactsUpdatedReceiver, filter)
  }

  override def onResume(): Unit = {
    super.onResume()
    // The service may not be bound yet; defer the first refresh until it is.
    runOnServiceConnected(() => {
      updateConnections()
    })
  }

  override def onDestroy(): Unit = {
    super.onDestroy()
    LocalBroadcastManager.getInstance(this).unregisterReceiver(onContactsUpdatedReceiver)
  }

  override def onCreateOptionsMenu(menu: Menu): Boolean = {
    getMenuInflater.inflate(R.menu.connections, menu)
    true
  }

  /** Handles menu actions: manual address entry, QR scan, and up navigation. */
  override def onOptionsItemSelected(item: MenuItem): Boolean = item.getItemId match {
    case R.id.add_contact =>
      val et = new EditText(this)
      new Builder(this)
        .setTitle(R.string.enter_id)
        .setView(et)
        .setPositiveButton(android.R.string.ok, new OnClickListener {
          override def onClick(dialog: DialogInterface, which: Int): Unit = {
            addContact(et.getText.toString)
          }
        })
        .setNegativeButton(android.R.string.cancel, null)
        .show()
      true
    case R.id.scan_qr =>
      new IntentIntegrator(this).initiateScan
      true
    case android.R.id.home =>
      NavUtils.navigateUpFromSameTask(this)
      true
    case _ =>
      super.onOptionsItemSelected(item)
  }

  /**
   * Initiates adding the device as contact if it hasn't been added yet.
   */
  override def onItemClick(parent: AdapterView[_], view: View, position: Int, id: Long): Unit =
    addContact(adapter.getItem(position).address.toString)

  /**
   * Receives value of scanned QR code and sets it as device ID.
   */
  override def onActivityResult(requestCode: Int, resultCode: Int, intent: Intent) {
    val scanResult = IntentIntegrator.parseActivityResult(requestCode, resultCode, intent)
    if (scanResult != null && resultCode == Activity.RESULT_OK) {
      addContact(scanResult.getContents)
    }
  }

  /**
   * Parses the address, and shows a dialog to add the user as a contact.
   *
   * Displays a warning toast if the address is invalid or if the user is already a contact.
   */
  private def addContact(address: String): Unit = {
    val parsedAddress =
      try {
        new Address(address)
      } catch {
        case e: IllegalArgumentException =>
          Toast.makeText(this, R.string.invalid_address, Toast.LENGTH_LONG).show()
          return
      }
    val user = service.get.getUser(parsedAddress)
    if (database.get.getContacts.map(_.address).contains(user.address)) {
      val text = getString(R.string.contact_already_added, user.name)
      Toast.makeText(this, text, Toast.LENGTH_SHORT).show()
      return
    }
    new Builder(this)
      .setMessage(getString(R.string.dialog_add_contact, user.name))
      .setPositiveButton(android.R.string.yes, new OnClickListener {
        override def onClick(dialog: DialogInterface, which: Int): Unit = {
          service.get.addContact(user)
          Toast.makeText(ConnectionsActivity.this, R.string.toast_contact_added, Toast.LENGTH_SHORT)
            .show()
        }
      })
      .setNegativeButton(android.R.string.no, null)
      .show()
  }

  /**
   * Fetches connections and displays them (excluding contacts).
   */
  private val onContactsUpdatedReceiver = new BroadcastReceiver() {
    override def onReceive(context: Context, intent: Intent): Unit = {
      // Broadcasts may arrive off the UI thread; adapter mutation must not.
      runOnUiThread(new Runnable {
        override def run(): Unit = updateConnections()
      })
    }
  }

  /** Rebuilds the adapter from the service's current connection list. */
  private def updateConnections(): Unit = {
    adapter.clear()
    service.get.connections().map(a => service.get.getUser(a))
      .foreach(adapter.add)
  }
}
| Nutomic/ensichat | android/src/main/scala/com/nutomic/ensichat/activities/ConnectionsActivity.scala | Scala | mpl-2.0 | 5,147 |
package xbar.modifier.abilities
// Marker abilities (type-level capabilities) for the X-bar DSL; they carry no
// members and only exist as evidence in implicit resolution.
trait CanComplementU[X]
trait CanAdjoinU_[X]
trait CanSpecifyU[X] | hannahlindsley/xbar-dsl | xbar/src/main/scala/xbar/modifier/abilities/CanModifyU.scala | Scala | gpl-3.0 | 99 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.support.matching
/**
* Base trait for matching strings.
*/
trait StringMatchBounds extends MatchBounds {
  // Option controlling how string comparison is performed.
  // NOTE(review): exact semantics are defined by StringMatchOption elsewhere — confirm.
  val stringMatchOption: StringMatchOption
}
| adarro/ddo-calc | subprojects/common/ddo-util/src/main/scala/io/truthencode/ddo/support/matching/StringMatchBounds.scala | Scala | apache-2.0 | 824 |
package org.dsa.iot.rx.core
import org.dsa.iot.rx.RxTransformer
/**
* A shortcut for [[Count]] with a trivial predicate that always returns `true`, i.e. it
* counts all the items in the source sequence.
*/
class Length extends Count[Any] {
  // Wire a constant-true predicate into the inherited port so every item counts.
  this.predicate <~ (_ => true)
}
/**
* Factory for [[Length]] instances.
*/
object Length {
  /**
   * Creates a new Length instance for either outputting running totals for each item,
   * or just the final value.
   *
   * @param rolling `true` to emit a running count per item, `false` for only
   *                the final count
   */
  def apply(rolling: Boolean = true): Length = {
    val block = new Length
    block.rolling <~ rolling
    block
  }
}
} | IOT-DSA/dslink-scala-ignition | src/main/scala/org/dsa/iot/rx/core/Length.scala | Scala | apache-2.0 | 591 |
package ru.maizy.dislk.app.ui
/**
* Copyright (c) Nikita Kovaliov, maizy.ru, 2017
* See LICENSE.txt for details.
*/
import java.awt._
import java.net.URL
import javax.swing._
/** Small helpers shared by Swing-based UI components. */
trait SwingUtils {

  /**
   * Loads an image from the classpath.
   *
   * @param path        classpath-relative resource path
   * @param description accessibility description attached to the icon
   * @throws Exception when the resource cannot be found on the classpath
   */
  protected def createImage(path: String, description: String): Image = {
    Option(getClass.getClassLoader.getResource(path)) match {
      case Some(url) => new ImageIcon(url, description).getImage
      case None => throw new Exception(s"Resource not found: $path")
    }
  }
}
| maizy/dislk | dislk-app/src/main/scala/ru/maizy/dislk/app/ui/SwingUtils.scala | Scala | apache-2.0 | 502 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package kafka.manager
import java.io.File
import java.{util => ju}
import javax.management._
import javax.management.remote.{JMXConnectorFactory, JMXServiceURL}
import scala.collection.JavaConverters._
import scala.util.matching.Regex
import com.yammer.metrics.reporting.JmxReporter.GaugeMBean
import kafka.manager.ActorModel.BrokerMetrics
import org.slf4j.LoggerFactory
import scala.util.{Failure, Try}
import scala.math
/** Helpers for opening short-lived JMX connections to a Kafka broker. */
object KafkaJMX {

  private[this] lazy val logger = LoggerFactory.getLogger(this.getClass)

  // Aggressive 3s timeouts so a dead broker cannot stall metric polling.
  private[this] val jmxConnectorProperties : java.util.Map[String, _] = {
    import scala.collection.JavaConverters._
    Map(
      "jmx.remote.x.request.waiting.timeout" -> "3000",
      "jmx.remote.x.notification.fetch.timeout" -> "3000",
      "sun.rmi.transport.connectionTimeout" -> "3000",
      "sun.rmi.transport.tcp.handshakeTimeout" -> "3000",
      "sun.rmi.transport.tcp.responseTimeout" -> "3000"
    ).asJava
  }

  /**
   * Connects to the broker's JMX endpoint, runs `fn` against the MBean server
   * connection, and always closes the connector. Connection failures (and the
   * jmxPort precondition) are returned as Failure rather than thrown.
   */
  def doWithConnection[T](jmxHost: String, jmxPort: Int)(fn: MBeanServerConnection => T) : Try[T] = {
    val urlString = s"service:jmx:rmi:///jndi/rmi://$jmxHost:$jmxPort/jmxrmi"
    val url = new JMXServiceURL(urlString)
    try {
      require(jmxPort > 0, "No jmx port but jmx polling enabled!")
      val jmxc = JMXConnectorFactory.connect(url, jmxConnectorProperties)
      try {
        Try {
          fn(jmxc.getMBeanServerConnection)
        }
      } finally {
        jmxc.close()
      }
    } catch {
      case e: Exception =>
        logger.error(s"Failed to connect to $urlString",e)
        Failure(e)
    }
  }
}
object KafkaMetrics {
/** Meter metric for broker/topic inbound bytes per second. */
def getBytesInPerSec(kafkaVersion: KafkaVersion, mbsc: MBeanServerConnection, topicOption: Option[String] = None) = {
  getBrokerTopicMeterMetrics(kafkaVersion, mbsc, "BytesInPerSec", topicOption)
}
/** Meter metric for broker/topic outbound bytes per second. */
def getBytesOutPerSec(kafkaVersion: KafkaVersion, mbsc: MBeanServerConnection, topicOption: Option[String] = None) = {
  getBrokerTopicMeterMetrics(kafkaVersion, mbsc, "BytesOutPerSec", topicOption)
}
def getBytesRejectedPerSec(kafkaVersion: KafkaVersion, mbsc: MBeanServerConnection, topicOption: Option[String] = None) = {
getBrokerTopicMeterMetrics(kafkaVersion, mbsc, "BytesRejectedPerSec", topicOption)
}
def getFailedFetchRequestsPerSec(kafkaVersion: KafkaVersion, mbsc: MBeanServerConnection, topicOption: Option[String] = None) = {
getBrokerTopicMeterMetrics(kafkaVersion, mbsc, "FailedFetchRequestsPerSec", topicOption)
}
def getFailedProduceRequestsPerSec(kafkaVersion: KafkaVersion, mbsc: MBeanServerConnection, topicOption: Option[String] = None) = {
getBrokerTopicMeterMetrics(kafkaVersion, mbsc, "FailedProduceRequestsPerSec", topicOption)
}
def getMessagesInPerSec(kafkaVersion: KafkaVersion, mbsc: MBeanServerConnection, topicOption: Option[String] = None) = {
getBrokerTopicMeterMetrics(kafkaVersion, mbsc, "MessagesInPerSec", topicOption)
}
private def getBrokerTopicMeterMetrics(kafkaVersion: KafkaVersion, mbsc: MBeanServerConnection, metricName: String, topicOption: Option[String]) = {
getMeterMetric(mbsc, getObjectName(kafkaVersion, metricName, topicOption))
}
private def getSep(kafkaVersion: KafkaVersion) : String = {
kafkaVersion match {
case Kafka_0_8_1_1 => "\\""
case _ => ""
}
}
def getObjectName(kafkaVersion: KafkaVersion, name: String, topicOption: Option[String] = None) = {
val sep = getSep(kafkaVersion)
val topicAndName = kafkaVersion match {
case Kafka_0_8_1_1 =>
topicOption.map( topic => s"${sep}$topic-$name${sep}").getOrElse(s"${sep}AllTopics$name${sep}")
case _ =>
val topicProp = topicOption.map(topic => s",topic=$topic").getOrElse("")
s"$name$topicProp"
}
new ObjectName(s"${sep}kafka.server${sep}:type=${sep}BrokerTopicMetrics${sep},name=$topicAndName")
}
/* Gauge, Value : 0 */
private val replicaFetcherManagerMinFetchRate = new ObjectName(
"kafka.server:type=ReplicaFetcherManager,name=MinFetchRate,clientId=Replica")
/* Gauge, Value : 0 */
private val replicaFetcherManagerMaxLag = new ObjectName(
"kafka.server:type=ReplicaFetcherManager,name=MaxLag,clientId=Replica")
/* Gauge, Value : 0 */
private val kafkaControllerActiveControllerCount = new ObjectName(
"kafka.controller:type=KafkaController,name=ActiveControllerCount")
/* Gauge, Value : 0 */
private val kafkaControllerOfflinePartitionsCount = new ObjectName(
"kafka.controller:type=KafkaController,name=OfflinePartitionsCount")
/* Timer*/
private val logFlushStats = new ObjectName(
"kafka.log:type=LogFlushStats,name=LogFlushRateAndTimeMs")
/* Operating System */
private val operatingSystemObjectName = new ObjectName("java.lang:type=OperatingSystem")
/* Log Segments */
private val logSegmentObjectName = new ObjectName("kafka.log:type=Log,name=*-LogSegments")
private val directoryObjectName = new ObjectName("kafka.log:type=Log,name=*-Directory")
private val LogSegmentsNameRegex = new Regex("%s-LogSegments".format("""(.*)-(\\d*)"""), "topic", "partition")
private val DirectoryNameRegex = new Regex("%s-Directory".format("""(.*)-(\\d*)"""), "topic", "partition")
val LogSegmentRegex = new Regex(
"baseOffset=(.*), created=(.*), logSize=(.*), indexSize=(.*)",
"baseOffset", "created", "logSize", "indexSize"
)
private def getOSMetric(mbsc: MBeanServerConnection) = {
import scala.collection.JavaConverters._
try {
val attributes = mbsc.getAttributes(
operatingSystemObjectName,
Array("ProcessCpuLoad", "SystemCpuLoad")
).asList().asScala.toSeq
OSMetric(
getDoubleValue(attributes, "ProcessCpuLoad"),
getDoubleValue(attributes, "SystemCpuload")
)
} catch {
case _: InstanceNotFoundException => OSMetric(0D, 0D)
}
}
private def getMeterMetric(mbsc: MBeanServerConnection, name: ObjectName) = {
import scala.collection.JavaConverters._
try {
val attributeList = mbsc.getAttributes(name, Array("Count", "FifteenMinuteRate", "FiveMinuteRate", "OneMinuteRate", "MeanRate"))
val attributes = attributeList.asList().asScala.toSeq
MeterMetric(getLongValue(attributes, "Count"),
getDoubleValue(attributes, "FifteenMinuteRate"),
getDoubleValue(attributes, "FiveMinuteRate"),
getDoubleValue(attributes, "OneMinuteRate"),
getDoubleValue(attributes, "MeanRate"))
} catch {
case _: InstanceNotFoundException => MeterMetric(0,0,0,0,0)
}
}
private def getLongValue(attributes: Seq[Attribute], name: String) = {
attributes.find(_.getName == name).map(_.getValue.asInstanceOf[Long]).getOrElse(0L)
}
private def getDoubleValue(attributes: Seq[Attribute], name: String) = {
attributes.find(_.getName == name).map(_.getValue.asInstanceOf[Double]).getOrElse(0D)
}
private def topicAndPartition(name: String, regex: Regex) = {
try {
val matches = regex.findAllIn(name).matchData.toSeq
require(matches.size == 1)
val m = matches.head
val topic = m.group("topic")
val partition = m.group("partition").toInt
(topic, partition)
}
catch {
case e: Exception =>
throw new IllegalStateException("Can't parse topic and partition from: <%s>".format(name), e)
}
}
private def queryValues[K, V](
mbsc: MBeanServerConnection,
objectName: ObjectName,
keyConverter: String => K,
valueConverter: Object => V
) = {
val logsSizeObjectNames = mbsc.queryNames(objectName, null).asScala.toSeq
logsSizeObjectNames.par.map {
objectName => queryValue(mbsc, objectName, keyConverter, valueConverter)
}.seq.toSeq
}
private def queryValue[K, V](
mbsc: MBeanServerConnection,
objectName: ObjectName,
keyConverter: String => K,
valueConverter: Object => V
) = {
val name = objectName.getKeyProperty("name")
val mbean = MBeanServerInvocationHandler.newProxyInstance(mbsc, objectName, classOf[GaugeMBean], true)
(keyConverter(name), valueConverter(mbean.getValue))
}
private def parseLogSegment(str: String): LogSegment = {
try {
val matches = LogSegmentRegex.findAllIn(str).matchData.toSeq
require(matches.size == 1)
val m = matches.head
LogSegment(
baseOffset = m.group("baseOffset").toLong,
created = m.group("created").toLong,
logBytes = m.group("logSize").toLong,
indexBytes = m.group("indexSize").toLong
)
} catch {
case e: Exception =>
throw new IllegalStateException("Can't parse segment info from: <%s>".format(str), e)
}
}
def getLogSegmentsInfo(mbsc: MBeanServerConnection) = {
val logSegmentsMap = {
queryValues(
mbsc,
logSegmentObjectName,
key => topicAndPartition(key, LogSegmentsNameRegex),
value => {
val lst = value.asInstanceOf[ju.List[String]]
lst.asScala.map(parseLogSegment).toSeq
}
)
}.toMap
val directoryMap = {
queryValues(
mbsc,
directoryObjectName,
key => topicAndPartition(key, DirectoryNameRegex),
value => value.asInstanceOf[String]
)
}.toMap
val stats: Seq[(String, (Int, LogInfo))] = for (
key <- (logSegmentsMap.keySet ++ directoryMap.keySet).toSeq;
directory <- directoryMap.get(key);
logSegments <- logSegmentsMap.get(key)
) yield {
val directoryFile = new File(directory)
val dir = directoryFile.getParentFile.getAbsolutePath
val (topic, partition) = key
(topic, (partition, LogInfo(dir, logSegments)))
}
stats.groupBy(_._1).mapValues(_.map(_._2).toMap).toMap
}
// return broker metrics with segment metric only when it's provided. if not, it will contain segment metric with value 0L
def getBrokerMetrics(kafkaVersion: KafkaVersion, mbsc: MBeanServerConnection, segmentsMetric: Option[SegmentsMetric] = None, topic: Option[String] = None) : BrokerMetrics = {
BrokerMetrics(
KafkaMetrics.getBytesInPerSec(kafkaVersion, mbsc, topic),
KafkaMetrics.getBytesOutPerSec(kafkaVersion, mbsc, topic),
KafkaMetrics.getBytesRejectedPerSec(kafkaVersion, mbsc, topic),
KafkaMetrics.getFailedFetchRequestsPerSec(kafkaVersion, mbsc, topic),
KafkaMetrics.getFailedProduceRequestsPerSec(kafkaVersion, mbsc, topic),
KafkaMetrics.getMessagesInPerSec(kafkaVersion, mbsc, topic),
KafkaMetrics.getOSMetric(mbsc),
segmentsMetric.getOrElse(SegmentsMetric(0L))
)
}
}
/** Wraps a single JMX gauge reading. */
case class GaugeMetric(value: Double)
/** CPU load readings taken from the JVM's OperatingSystem MBean. */
case class OSMetric(processCpuLoad: Double, systemCpuLoad: Double) {
  /** Process CPU load rendered for display. */
  def formatProcessCpuLoad = FormatMetric.rateFormat(processCpuLoad, 0)

  /** System CPU load rendered for display. */
  def formatSystemCpuLoad = FormatMetric.rateFormat(systemCpuLoad, 0)
}
/** Total on-disk size of a set of log segments. */
case class SegmentsMetric(bytes: Long) {
  /** Combines two metrics by summing their byte counts. */
  def +(o: SegmentsMetric) : SegmentsMetric = SegmentsMetric(bytes + o.bytes)

  /** Byte count rendered for display. */
  def formatSize = FormatMetric.sizeFormat(bytes)
}
/** Snapshot of a meter MBean: total event count plus moving-average rates. */
case class MeterMetric(count: Long,
                       fifteenMinuteRate: Double,
                       fiveMinuteRate: Double,
                       oneMinuteRate: Double,
                       meanRate: Double) {
  /** Fifteen-minute rate rendered for display. */
  def formatFifteenMinuteRate = FormatMetric.rateFormat(fifteenMinuteRate, 0)

  /** Five-minute rate rendered for display. */
  def formatFiveMinuteRate = FormatMetric.rateFormat(fiveMinuteRate, 0)

  /** One-minute rate rendered for display. */
  def formatOneMinuteRate = FormatMetric.rateFormat(oneMinuteRate, 0)

  /** Mean rate rendered for display. */
  def formatMeanRate = FormatMetric.rateFormat(meanRate, 0)

  /** Pointwise sum of two meters (e.g. to aggregate across brokers). */
  def +(o: MeterMetric) : MeterMetric =
    MeterMetric(
      count + o.count,
      fifteenMinuteRate + o.fifteenMinuteRate,
      fiveMinuteRate + o.fiveMinuteRate,
      oneMinuteRate + o.oneMinuteRate,
      meanRate + o.meanRate)
}
/** Directory and segment listing for a single partition's log. */
case class LogInfo(dir: String, logSegments: Seq[LogSegment]) {
  // Total bytes (log + index) across all segments.
  val bytes = logSegments.foldLeft(0L)(_ + _.bytes)
}
/** A single on-disk log segment as reported over JMX. */
case class LogSegment(
  baseOffset: Long,
  created: Long,
  logBytes: Long,
  indexBytes: Long) {
  // Combined footprint of the segment's log file and index file.
  val bytes = logBytes + indexBytes
}
/** Human-friendly rendering of rates and byte sizes. */
object FormatMetric {
  private[this] val UNIT = Array[Char]('k', 'm', 'b', 't')

  // See: http://stackoverflow.com/a/4753866
  /** Renders a rate: two decimals below 100, otherwise scaled with k/m/b/t suffixes. */
  def rateFormat(rate: Double, iteration: Int): String = {
    if (rate < 100) {
      BigDecimal(rate).setScale(2, BigDecimal.RoundingMode.HALF_UP).toString
    } else {
      // One decimal of the rate expressed in units of 1000^(iteration+1).
      val scaled = (rate.toLong / 100) / 10.0
      if (scaled >= 1000) {
        // Still too large for this suffix; move to the next one.
        rateFormat(scaled, iteration + 1)
      } else {
        // Drop the decimal when it is zero (it would be trimmed anyway) or
        // when the integer part is already wide enough.
        val wholeNumber = (scaled * 10) % 10 == 0
        val text =
          if (scaled > 99.9 || wholeNumber || scaled > 9.99) scaled.toInt.toString
          else scaled.toString
        text + UNIT(iteration)
      }
    }
  }

  // See: http://stackoverflow.com/a/3758880
  /** Renders a byte count with SI (factor 1000) unit prefixes, e.g. "1.5 kB". */
  def sizeFormat(bytes: Long): String = {
    val unit = 1000
    if (bytes < unit) {
      s"$bytes B"
    } else {
      val exp = (math.log(bytes) / math.log(unit)).toInt
      "%.1f %sB".format(bytes / math.pow(unit, exp), "kMGTPE".charAt(exp - 1))
    }
  }
}
| metamx/kafka-manager | app/kafka/manager/KafkaJMX.scala | Scala | apache-2.0 | 13,479 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.spark.impl.optimization.accumulator
import org.apache.ignite.spark.impl.optimization.IgniteQueryContext
import org.apache.spark.sql.catalyst.expressions.{Expression, NamedExpression, SortOrder}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
/**
 * Generic query info accumulator interface.
 *
 * Implementations collect pieces of a logical plan and can compile the
 * accumulated information into a SQL query string (see `compileQuery`).
 * All `withX` methods return a modified copy rather than mutating this node.
 */
private[apache] trait QueryAccumulator extends LogicalPlan {
    /**
      * @return Ignite query context.
      */
    def igniteQueryContext: IgniteQueryContext

    /**
      * @return Generated output.
      */
    def outputExpressions: Seq[NamedExpression]

    /**
      * @return Ordering info.
      */
    def orderBy: Option[Seq[SortOrder]]

    /**
      * @param outputExpressions New output expressions.
      * @return Copy of this accumulator with new output.
      */
    def withOutputExpressions(outputExpressions: Seq[NamedExpression]): QueryAccumulator

    /**
      * @param orderBy New ordering.
      * @return Copy of this accumulator with new order.
      */
    def withOrderBy(orderBy: Seq[SortOrder]): QueryAccumulator

    /**
      * @return Copy of this accumulator with `limit` expression.
      */
    def withLimit(limit: Expression): QueryAccumulator

    /**
      * @return Copy of this accumulator with `localLimit` expression.
      */
    def withLocalLimit(localLimit: Expression): QueryAccumulator

    /**
      * @param prettyPrint If true human readable query will be generated.
      * @param nestedQuery If true the query is rendered as a sub-query.
      * @return SQL query.
      */
    def compileQuery(prettyPrint: Boolean = false, nestedQuery: Boolean = false): String

    /**
      * @return Qualifier that should be use to select data from this accumulator.
      */
    def qualifier: String

    /**
      * All expressions are resolved when extra optimization executed.
      */
    override lazy val resolved = true
}
| samaitra/ignite | modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/accumulator/QueryAccumulator.scala | Scala | apache-2.0 | 2,651 |
package wdl
import org.scalatest.{FlatSpec, Matchers}
/** Checks that parsing the three-step sample workflow yields the expected AST nodes. */
class AstSpec extends FlatSpec with Matchers {

  val namespace = WdlNamespace.loadUsingSource(SampleWdl.ThreeStep.workflowSource(), None, None).get

  "Parser" should "produce AST with 3 Task nodes" in {
    AstTools.findAsts(namespace.ast, "Task").size shouldBe 3
  }

  it should "produce AST with 1 Workflow node" in {
    AstTools.findAsts(namespace.ast, "Workflow").size shouldBe 1
  }

  it should "produce AST with 3 Call nodes in the Workflow node" in {
    val workflowAst = AstTools.findAsts(namespace.ast, "Workflow").head
    AstTools.findAsts(workflowAst, "Call").size shouldBe 3
  }
}
| ohsu-comp-bio/cromwell | wdl/src/test/scala/wdl/AstSpec.scala | Scala | bsd-3-clause | 648 |
package mesosphere.marathon
package core.launcher.impl
import mesosphere.marathon.core.instance.Instance
import mesosphere.marathon.core.task.Task
import mesosphere.util.state.FrameworkId
import org.apache.mesos.{ Protos => MesosProtos }
object TaskLabels {
  private[this] final val FRAMEWORK_ID_LABEL = "marathon_framework_id"

  /**
    * For backwards compatibility reasons this label still carries a task id: reservations and
    * persistent volumes created before the introduction of instances were labeled with task ids.
    * When a resident instance is relaunched, Marathon keeps the instance id but launches a task
    * with a new task id; the instance id can always be derived from the labeled task id.
    */
  private[this] final val TASK_ID_LABEL = "marathon_task_id"

  /**
    * Returns the instance id this reservation was performed for, provided the
    * reservation was labeled by this framework.
    */
  def instanceIdForResource(frameworkId: FrameworkId, resource: MesosProtos.Resource): Option[Instance.Id] = {
    val labels = ReservationLabels(resource)
    for {
      ownerFrameworkId <- labels.get(FRAMEWORK_ID_LABEL)
      if ownerFrameworkId == frameworkId.id
      taskIdValue <- labels.get(TASK_ID_LABEL)
    } yield Task.Id(taskIdValue).instanceId
  }

  /** Labels to attach to a reservation made for the given task. */
  def labelsForTask(frameworkId: FrameworkId, taskId: Task.Id): ReservationLabels =
    ReservationLabels(Map(
      FRAMEWORK_ID_LABEL -> frameworkId.id,
      // taskId.reservationId matches the id originally used to create the reservation
      TASK_ID_LABEL -> taskId.reservationId
    ))

  /** Label keys that mark a resource as reserved by Marathon. */
  def labelKeysForReservations: Set[String] = Set(FRAMEWORK_ID_LABEL, TASK_ID_LABEL)
}
| Caerostris/marathon | src/main/scala/mesosphere/marathon/core/launcher/impl/TaskLabels.scala | Scala | apache-2.0 | 1,753 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.admin
import junit.framework.Assert._
import org.apache.kafka.common.errors.InvalidTopicException
import org.apache.kafka.common.metrics.Quota
import org.apache.kafka.common.protocol.ApiKeys
import org.junit.Test
import java.util.Properties
import kafka.utils._
import kafka.log._
import kafka.zk.ZooKeeperTestHarness
import kafka.utils.{Logging, ZkUtils, TestUtils}
import kafka.common.{TopicExistsException, TopicAndPartition}
import kafka.server.{ConfigType, KafkaServer, KafkaConfig}
import java.io.File
import TestUtils._
import scala.collection.{Map, immutable}
class AdminTest extends ZooKeeperTestHarness with Logging {
@Test
def testReplicaAssignment() {
val brokerList = List(0, 1, 2, 3, 4)
// test 0 replication factor
intercept[AdminOperationException] {
AdminUtils.assignReplicasToBrokers(brokerList, 10, 0)
}
// test wrong replication factor
intercept[AdminOperationException] {
AdminUtils.assignReplicasToBrokers(brokerList, 10, 6)
}
// correct assignment
val expectedAssignment = Map(
0 -> List(0, 1, 2),
1 -> List(1, 2, 3),
2 -> List(2, 3, 4),
3 -> List(3, 4, 0),
4 -> List(4, 0, 1),
5 -> List(0, 2, 3),
6 -> List(1, 3, 4),
7 -> List(2, 4, 0),
8 -> List(3, 0, 1),
9 -> List(4, 1, 2))
val actualAssignment = AdminUtils.assignReplicasToBrokers(brokerList, 10, 3, 0)
val e = (expectedAssignment.toList == actualAssignment.toList)
assertTrue(expectedAssignment.toList == actualAssignment.toList)
}
  /**
   * Manually supplied replica assignments: duplicate brokers and inconsistent
   * replication factors must be rejected; a valid assignment must round-trip
   * through ZooKeeper unchanged.
   */
  @Test
  def testManualReplicaAssignment() {
    val brokers = List(0, 1, 2, 3, 4)
    TestUtils.createBrokersInZk(zkUtils, brokers)

    // duplicate brokers
    intercept[IllegalArgumentException] {
      AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, "test", Map(0->Seq(0,0)))
    }

    // inconsistent replication factor
    intercept[IllegalArgumentException] {
      AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, "test", Map(0->Seq(0,1), 1->Seq(0)))
    }

    // good assignment
    val assignment = Map(0 -> List(0, 1, 2),
                         1 -> List(1, 2, 3))
    AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, "test", assignment)
    // read the assignment back from ZK and compare with what was written
    val found = zkUtils.getPartitionAssignmentForTopics(Seq("test"))
    assertEquals(assignment, found("test"))
  }
  /**
   * Topic creation in ZooKeeper: the written per-partition replica lists must
   * be readable back intact, and creating an already-existing topic must fail.
   */
  @Test
  def testTopicCreationInZK() {
    val expectedReplicaAssignment = Map(
      0  -> List(0, 1, 2),
      1  -> List(1, 2, 3),
      2  -> List(2, 3, 4),
      3  -> List(3, 4, 0),
      4  -> List(4, 0, 1),
      5  -> List(0, 2, 3),
      6  -> List(1, 3, 4),
      7  -> List(2, 4, 0),
      8  -> List(3, 0, 1),
      9  -> List(4, 1, 2),
      10 -> List(1, 2, 3),
      11 -> List(1, 3, 4)
    )
    val leaderForPartitionMap = immutable.Map(
      0 -> 0,
      1 -> 1,
      2 -> 2,
      3 -> 3,
      4 -> 4,
      5 -> 0,
      6 -> 1,
      7 -> 2,
      8 -> 3,
      9 -> 4,
      10 -> 1,
      11 -> 1
    )
    val topic = "test"
    TestUtils.createBrokersInZk(zkUtils, List(0, 1, 2, 3, 4))
    // create the topic
    AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
    // create leaders for all partitions
    TestUtils.makeLeaderForPartition(zkUtils, topic, leaderForPartitionMap, 1)
    // read every partition's replica list back from ZK and compare
    val actualReplicaList = leaderForPartitionMap.keys.toArray.map(p => (p -> zkUtils.getReplicasForPartition(topic, p))).toMap
    assertEquals(expectedReplicaAssignment.size, actualReplicaList.size)
    for(i <- 0 until actualReplicaList.size)
      assertEquals(expectedReplicaAssignment.get(i).get, actualReplicaList(i))

    intercept[TopicExistsException] {
      // shouldn't be able to create a topic that already exists
      AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
    }
  }
  /**
   * Creating a topic whose name collides with an existing topic
   * ("test.topic" vs "test_topic") must be rejected with InvalidTopicException.
   */
  @Test
  def testTopicCreationWithCollision() {
    val topic = "test.topic"
    val collidingTopic = "test_topic"
    TestUtils.createBrokersInZk(zkUtils, List(0, 1, 2, 3, 4))
    // create the topic
    AdminUtils.createTopic(zkUtils, topic, 3, 1)

    intercept[InvalidTopicException] {
      // shouldn't be able to create a topic that collides
      AdminUtils.createTopic(zkUtils, collidingTopic, 3, 1)
    }
  }
private def getBrokersWithPartitionDir(servers: Iterable[KafkaServer], topic: String, partitionId: Int): Set[Int] = {
servers.filter(server => new File(server.config.logDirs.head, topic + "-" + partitionId).exists)
.map(_.config.brokerId)
.toSet
}
  /**
   * Reassigns a partition to a replica set that still contains the current
   * leader (broker 0) and verifies ZK state, ISR and on-disk replica dirs.
   */
  @Test
  def testPartitionReassignmentWithLeaderInNewReplicas() {
    val expectedReplicaAssignment = Map(0  -> List(0, 1, 2))
    val topic = "test"
    // create brokers
    val servers = TestUtils.createBrokerConfigs(4, zkConnect, false).map(b => TestUtils.createServer(KafkaConfig.fromProps(b)))
    // create the topic
    AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
    // reassign partition 0
    val newReplicas = Seq(0, 2, 3)
    val partitionToBeReassigned = 0
    val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
    val reassignPartitionsCommand = new ReassignPartitionsCommand(zkUtils, Map(topicAndPartition -> newReplicas))
    assertTrue("Partition reassignment attempt failed for [test, 0]", reassignPartitionsCommand.reassignPartitions())
    // wait until reassignment is completed
    TestUtils.waitUntilTrue(() => {
        val partitionsBeingReassigned = zkUtils.getPartitionsBeingReassigned().mapValues(_.newReplicas);
        ReassignPartitionsCommand.checkIfPartitionReassignmentSucceeded(zkUtils, topicAndPartition, newReplicas,
        Map(topicAndPartition -> newReplicas), partitionsBeingReassigned) == ReassignmentCompleted;
      },
      "Partition reassignment should complete")
    val assignedReplicas = zkUtils.getReplicasForPartition(topic, partitionToBeReassigned)
    // in sync replicas should not have any replica that is not in the new assigned replicas
    checkForPhantomInSyncReplicas(zkUtils, topic, partitionToBeReassigned, assignedReplicas)
    assertEquals("Partition should have been reassigned to 0, 2, 3", newReplicas, assignedReplicas)
    ensureNoUnderReplicatedPartitions(zkUtils, topic, partitionToBeReassigned, assignedReplicas, servers)
    // the new replicas must also materialize as partition directories on disk
    TestUtils.waitUntilTrue(() => getBrokersWithPartitionDir(servers, topic, 0) == newReplicas.toSet,
                            "New replicas should exist on brokers")
    servers.foreach(_.shutdown())
  }
@Test
def testPartitionReassignmentWithLeaderNotInNewReplicas() {
val expectedReplicaAssignment = Map(0 -> List(0, 1, 2))
val topic = "test"
// create brokers
val servers = TestUtils.createBrokerConfigs(4, zkConnect, false).map(b => TestUtils.createServer(KafkaConfig.fromProps(b)))
// create the topic
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
// reassign partition 0
val newReplicas = Seq(1, 2, 3)
val partitionToBeReassigned = 0
val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
val reassignPartitionsCommand = new ReassignPartitionsCommand(zkUtils, Map(topicAndPartition -> newReplicas))
assertTrue("Partition reassignment failed for test, 0", reassignPartitionsCommand.reassignPartitions())
// wait until reassignment is completed
TestUtils.waitUntilTrue(() => {
val partitionsBeingReassigned = zkUtils.getPartitionsBeingReassigned().mapValues(_.newReplicas);
ReassignPartitionsCommand.checkIfPartitionReassignmentSucceeded(zkUtils, topicAndPartition, newReplicas,
Map(topicAndPartition -> newReplicas), partitionsBeingReassigned) == ReassignmentCompleted;
},
"Partition reassignment should complete")
val assignedReplicas = zkUtils.getReplicasForPartition(topic, partitionToBeReassigned)
assertEquals("Partition should have been reassigned to 0, 2, 3", newReplicas, assignedReplicas)
checkForPhantomInSyncReplicas(zkUtils, topic, partitionToBeReassigned, assignedReplicas)
ensureNoUnderReplicatedPartitions(zkUtils, topic, partitionToBeReassigned, assignedReplicas, servers)
TestUtils.waitUntilTrue(() => getBrokersWithPartitionDir(servers, topic, 0) == newReplicas.toSet,
"New replicas should exist on brokers")
servers.foreach(_.shutdown())
}
  /**
   * Reassigns a partition to a replica set (2, 3) that shares no broker with
   * the original assignment (0, 1) and verifies ZK state, ISR and replica dirs.
   */
  @Test
  def testPartitionReassignmentNonOverlappingReplicas() {
    val expectedReplicaAssignment = Map(0  -> List(0, 1))
    val topic = "test"
    // create brokers
    val servers = TestUtils.createBrokerConfigs(4, zkConnect, false).map(b => TestUtils.createServer(KafkaConfig.fromProps(b)))
    // create the topic
    AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
    // reassign partition 0
    val newReplicas = Seq(2, 3)
    val partitionToBeReassigned = 0
    val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
    val reassignPartitionsCommand = new ReassignPartitionsCommand(zkUtils, Map(topicAndPartition -> newReplicas))
    assertTrue("Partition reassignment failed for test, 0", reassignPartitionsCommand.reassignPartitions())
    // wait until reassignment is completed
    TestUtils.waitUntilTrue(() => {
        val partitionsBeingReassigned = zkUtils.getPartitionsBeingReassigned().mapValues(_.newReplicas);
        ReassignPartitionsCommand.checkIfPartitionReassignmentSucceeded(zkUtils, topicAndPartition, newReplicas,
        Map(topicAndPartition -> newReplicas), partitionsBeingReassigned) == ReassignmentCompleted;
      },
      "Partition reassignment should complete")
    val assignedReplicas = zkUtils.getReplicasForPartition(topic, partitionToBeReassigned)
    assertEquals("Partition should have been reassigned to 2, 3", newReplicas, assignedReplicas)
    checkForPhantomInSyncReplicas(zkUtils, topic, partitionToBeReassigned, assignedReplicas)
    ensureNoUnderReplicatedPartitions(zkUtils, topic, partitionToBeReassigned, assignedReplicas, servers)
    TestUtils.waitUntilTrue(() => getBrokersWithPartitionDir(servers, topic, 0) == newReplicas.toSet,
                            "New replicas should exist on brokers")
    servers.foreach(_.shutdown())
  }
@Test
def testReassigningNonExistingPartition() {
val topic = "test"
// create brokers
val servers = TestUtils.createBrokerConfigs(4, zkConnect, false).map(b => TestUtils.createServer(KafkaConfig.fromProps(b)))
// reassign partition 0
val newReplicas = Seq(2, 3)
val partitionToBeReassigned = 0
val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
val reassignPartitionsCommand = new ReassignPartitionsCommand(zkUtils, Map(topicAndPartition -> newReplicas))
assertFalse("Partition reassignment failed for test, 0", reassignPartitionsCommand.reassignPartitions())
val reassignedPartitions = zkUtils.getPartitionsBeingReassigned()
assertFalse("Partition should not be reassigned", reassignedPartitions.contains(topicAndPartition))
servers.foreach(_.shutdown())
}
  /**
   * Writes a reassignment whose target equals the current assignment BEFORE
   * starting the brokers; once brokers come up, the controller must treat the
   * reassignment as completed and clear the ZK reassignment path.
   */
  @Test
  def testResumePartitionReassignmentThatWasCompleted() {
    val expectedReplicaAssignment = Map(0  -> List(0, 1))
    val topic = "test"
    // create the topic
    AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
    // put the partition in the reassigned path as well
    // reassign partition 0
    val newReplicas = Seq(0, 1)
    val partitionToBeReassigned = 0
    val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
    val reassignPartitionsCommand = new ReassignPartitionsCommand(zkUtils, Map(topicAndPartition -> newReplicas))
    reassignPartitionsCommand.reassignPartitions
    // create brokers
    val servers = TestUtils.createBrokerConfigs(2, zkConnect, false).map(b => TestUtils.createServer(KafkaConfig.fromProps(b)))

    // wait until reassignment completes
    TestUtils.waitUntilTrue(() => !checkIfReassignPartitionPathExists(zkUtils),
                            "Partition reassignment should complete")
    val assignedReplicas = zkUtils.getReplicasForPartition(topic, partitionToBeReassigned)
    assertEquals("Partition should have been reassigned to 0, 1", newReplicas, assignedReplicas)
    checkForPhantomInSyncReplicas(zkUtils, topic, partitionToBeReassigned, assignedReplicas)
    // ensure that there are no under replicated partitions
    ensureNoUnderReplicatedPartitions(zkUtils, topic, partitionToBeReassigned, assignedReplicas, servers)
    TestUtils.waitUntilTrue(() => getBrokersWithPartitionDir(servers, topic, 0) == newReplicas.toSet,
                            "New replicas should exist on brokers")
    servers.foreach(_.shutdown())
  }
  /**
   * Round-trips the preferred-replica-election JSON through ZK: the set that
   * was serialized and written must parse back to an equal set.
   */
  @Test
  def testPreferredReplicaJsonData() {
    // write preferred replica json data to zk path
    val partitionsForPreferredReplicaElection = Set(TopicAndPartition("test", 1), TopicAndPartition("test2", 1))
    PreferredReplicaLeaderElectionCommand.writePreferredReplicaElectionData(zkUtils, partitionsForPreferredReplicaElection)
    // try to read it back and compare with what was written
    val preferredReplicaElectionZkData = zkUtils.readData(ZkUtils.PreferredReplicaLeaderElectionPath)._1
    val partitionsUndergoingPreferredReplicaElection =
      PreferredReplicaLeaderElectionCommand.parsePreferredReplicaElectionData(preferredReplicaElectionZkData)
    assertEquals("Preferred replica election ser-de failed", partitionsForPreferredReplicaElection,
      partitionsUndergoingPreferredReplicaElection)
  }
  /**
   * Triggers a preferred replica election and verifies that leadership moves
   * to the preferred replica (broker 0, the first entry of the replica list).
   */
  @Test
  def testBasicPreferredReplicaElection() {
    val expectedReplicaAssignment = Map(1  -> List(0, 1, 2))
    val topic = "test"
    val partition = 1
    val preferredReplica = 0
    // create brokers
    val serverConfigs = TestUtils.createBrokerConfigs(3, zkConnect, false).map(KafkaConfig.fromProps)
    // create the topic
    AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
    // brokers are started in reverse id order, so broker 2 comes up first
    val servers = serverConfigs.reverseMap(s => TestUtils.createServer(s))
    // broker 2 should be the leader since it was started first
    val currentLeader = TestUtils.waitUntilLeaderIsElectedOrChanged(zkUtils, topic, partition, oldLeaderOpt = None).get
    // trigger preferred replica election
    val preferredReplicaElection = new PreferredReplicaLeaderElectionCommand(zkUtils, Set(TopicAndPartition(topic, partition)))
    preferredReplicaElection.moveLeaderToPreferredReplica()
    val newLeader = TestUtils.waitUntilLeaderIsElectedOrChanged(zkUtils, topic, partition, oldLeaderOpt = Some(currentLeader)).get
    assertEquals("Preferred replica election failed", preferredReplica, newLeader)
    servers.foreach(_.shutdown())
  }
  /**
   * Controlled shutdown: shutting down brokers 2 then 1 must move leadership
   * to broker 0 and shrink the ISR accordingly; shutting down the last replica
   * (broker 0) must leave the partition behind and keep the old leader.
   */
  @Test
  def testShutdownBroker() {
    val expectedReplicaAssignment = Map(1  -> List(0, 1, 2))
    val topic = "test"
    val partition = 1
    // create brokers
    val serverConfigs = TestUtils.createBrokerConfigs(3, zkConnect, false).map(KafkaConfig.fromProps)
    val servers = serverConfigs.reverseMap(s => TestUtils.createServer(s))
    // create the topic
    TestUtils.createTopic(zkUtils, topic, partitionReplicaAssignment = expectedReplicaAssignment, servers = servers)

    val controllerId = zkUtils.getController()
    val controller = servers.find(p => p.config.brokerId == controllerId).get.kafkaController
    var partitionsRemaining = controller.shutdownBroker(2)
    var activeServers = servers.filter(s => s.config.brokerId != 2)
    try {
      // wait for the update metadata request to trickle to the brokers
      TestUtils.waitUntilTrue(() =>
        activeServers.forall(_.apis.metadataCache.getPartitionInfo(topic,partition).get.leaderIsrAndControllerEpoch.leaderAndIsr.isr.size != 3),
        "Topic test not created after timeout")
      assertEquals(0, partitionsRemaining.size)
      var partitionStateInfo = activeServers.head.apis.metadataCache.getPartitionInfo(topic,partition).get
      var leaderAfterShutdown = partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leader
      assertEquals(0, leaderAfterShutdown)
      // ISR shrinks to the two remaining replicas
      assertEquals(2, partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.isr.size)
      assertEquals(List(0,1), partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.isr)

      // shut down the second follower; leadership must stay on broker 0
      partitionsRemaining = controller.shutdownBroker(1)
      assertEquals(0, partitionsRemaining.size)
      activeServers = servers.filter(s => s.config.brokerId == 0)
      partitionStateInfo = activeServers.head.apis.metadataCache.getPartitionInfo(topic,partition).get
      leaderAfterShutdown = partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leader
      assertEquals(0, leaderAfterShutdown)

      assertTrue(servers.forall(_.apis.metadataCache.getPartitionInfo(topic,partition).get.leaderIsrAndControllerEpoch.leaderAndIsr.leader == 0))
      // the last replica cannot be moved anywhere, so one partition remains
      partitionsRemaining = controller.shutdownBroker(0)
      assertEquals(1, partitionsRemaining.size)
      // leader doesn't change since all the replicas are shut down
      assertTrue(servers.forall(_.apis.metadataCache.getPartitionInfo(topic,partition).get.leaderIsrAndControllerEpoch.leaderAndIsr.leader == 0))
    }
    finally {
      servers.foreach(_.shutdown())
    }
  }
/**
* This test creates a topic with a few config overrides and checks that the configs are applied to the new topic
* then changes the config and checks that the new values take effect.
*/
@Test
def testTopicConfigChange() {
  val partitions = 3
  val topic = "my-topic"
  val server = TestUtils.createServer(KafkaConfig.fromProps(TestUtils.createBrokerConfig(0, zkConnect)))

  // Builds a topic-level config overriding max message size and retention time.
  def makeConfig(messageSize: Int, retentionMs: Long) = {
    val props = new Properties()
    props.setProperty(LogConfig.MaxMessageBytesProp, messageSize.toString)
    props.setProperty(LogConfig.RetentionMsProp, retentionMs.toString)
    props
  }

  // Retries (up to 10s) until every partition's log reports the expected
  // config values, since config propagation is asynchronous.
  def checkConfig(messageSize: Int, retentionMs: Long) {
    TestUtils.retry(10000) {
      for(part <- 0 until partitions) {
        val logOpt = server.logManager.getLog(TopicAndPartition(topic, part))
        assertTrue(logOpt.isDefined)
        assertEquals(retentionMs, logOpt.get.config.retentionMs)
        assertEquals(messageSize, logOpt.get.config.maxMessageSize)
      }
    }
  }

  try {
    // create a topic with a few config overrides and check that they are applied
    val maxMessageSize = 1024
    val retentionMs = 1000*1000
    AdminUtils.createTopic(server.zkUtils, topic, partitions, 1, makeConfig(maxMessageSize, retentionMs))
    checkConfig(maxMessageSize, retentionMs)

    // now double the config values for the topic and check that it is applied
    val newConfig: Properties = makeConfig(2*maxMessageSize, 2 * retentionMs)
    // Reuse newConfig rather than rebuilding an identical Properties instance,
    // so the value written is exactly the one compared against ZK below.
    AdminUtils.changeTopicConfig(server.zkUtils, topic, newConfig)
    checkConfig(2*maxMessageSize, 2 * retentionMs)

    // Verify that the same config can be read from ZK
    val configInZk = AdminUtils.fetchEntityConfig(server.zkUtils, ConfigType.Topic, topic)
    assertEquals(newConfig, configInZk)
  } finally {
    server.shutdown()
    server.config.logDirs.foreach(CoreUtils.rm(_))
  }
}
/**
* This test simulates a client config change in ZK whose notification has been purged.
* Basically, it asserts that notifications are bootstrapped from ZK
*/
@Test
def testBootstrapClientIdConfig() {
  val clientId = "my-client"
  // Expected per-client quota overrides, used later to compare against what
  // is read back from ZooKeeper.
  val props = new Properties()
  props.setProperty("producer_byte_rate", "1000")
  props.setProperty("consumer_byte_rate", "2000")
  // Write config without notification to ZK.
  // This simulates a change notification that has already been purged: the
  // config path exists, but no change-notification znode was created.
  val configMap = Map[String, String] ("producer_byte_rate" -> "1000", "consumer_byte_rate" -> "2000")
  val map = Map("version" -> 1, "config" -> configMap)
  zkUtils.updatePersistentPath(ZkUtils.getEntityConfigPath(ConfigType.Client, clientId), Json.encode(map))
  // Sanity check: the raw config is readable from ZK before any broker starts.
  val configInZk: Map[String, Properties] = AdminUtils.fetchAllEntityConfigs(zkUtils, ConfigType.Client)
  assertEquals("Must have 1 overriden client config", 1, configInZk.size)
  assertEquals(props, configInZk(clientId))
  // Test that the existing clientId overrides are read
  // A freshly started broker must bootstrap the overrides from ZK even though
  // the notification is gone.
  val server = TestUtils.createServer(KafkaConfig.fromProps(TestUtils.createBrokerConfig(0, zkConnect)))
  try {
    assertEquals(new Quota(1000, true), server.apis.quotaManagers(ApiKeys.PRODUCE.id).quota(clientId))
    assertEquals(new Quota(2000, true), server.apis.quotaManagers(ApiKeys.FETCH.id).quota(clientId))
  } finally {
    server.shutdown()
    server.config.logDirs.foreach(CoreUtils.rm(_))
  }
}
}
| samaitra/kafka | core/src/test/scala/unit/kafka/admin/AdminTest.scala | Scala | apache-2.0 | 21,610 |
package mesosphere.marathon
package core.launcher
import org.rogach.scallop.ScallopConf
/**
 * Scallop command-line options controlling how Mesos offers are processed:
 * how long to match tasks against an offer, how long to wait for matched
 * tasks to be persisted, and for how long declined offers stay declined.
 */
trait OfferProcessorConfig extends ScallopConf {

  // Upper bound (ms) on the time spent matching tasks against a single offer.
  lazy val offerMatchingTimeout = opt[Int](
    "offer_matching_timeout",
    descr = "Offer matching timeout (ms). Stop trying to match additional tasks for this offer after this time.",
    default = Some(1000))

  lazy val saveTasksToLaunchTimeout = opt[Int](
    "save_tasks_to_launch_timeout",
    descr = "Timeout (ms) after matching an offer for saving all matched tasks that we are about to launch. " +
      "When reaching the timeout, only the tasks that we could save within the timeout are also launched. " +
      "All other task launches are temporarily rejected and retried later.",
    default = Some(3000))

  // Note: default of 120000 ms matches the "120 seconds" stated in descr.
  lazy val declineOfferDuration = opt[Long](
    "decline_offer_duration",
    descr = "(Default: 120 seconds) " +
      "The duration (milliseconds) for which to decline offers by default",
    default = Some(120000))
}
| natemurthy/marathon | src/main/scala/mesosphere/marathon/core/launcher/OfferProcessorConfig.scala | Scala | apache-2.0 | 986 |
package com.github.bomgar.sns.domain
/**
 * Base class for paginated API results.
 * Implementations expose the continuation token for the next page,
 * or None when there are no further pages.
 */
abstract class PagedResult {
  def nextPageToken: Option[String]
}
| mathissimo/reactive-aws | sns/src/main/scala/com/github/bomgar/sns/domain/PagedResult.scala | Scala | apache-2.0 | 105 |
package test
import edu.mit.csail.cap.query._
/**
 * Synthesis regression scenarios around Eclipse editor code folding
 * ("toggleExpansionState") for several editor plugins (JDT, PyDev, Ant, TeX).
 */
class EclipseFold extends SynthesisTest {
  // Tweak the inherited defaults: omit string values but keep primitives
  // in the synthesized output.
  override def default = super.default.copy(
    PrintStrings = false,
    PrintPrimitives = true)

  // Name of the metadata set backing these scenarios.
  def metadata = "meta_eclipse"

  test("JDT fold") {
    // Deeper cover depth than the default is used for the JDT trace.
    run("eclipse_jdt_fold", "toggleExpansionState", default.copy(CoverDepth = 8))
  }
  test("Py fold") {
    run("eclipse_py_fold", "toggleExpansionState", default.copy(CoverDepth = 6))
  }
  test("ANT fold") {
    run("eclipse_ant_fold", "toggleExpansionState")
  }
  test("TeX fold") {
    run("eclipse_tex_fold", "toggleExpansionState")
  }
}
| kyessenov/semeru | src/test/scala/test/EclipseFold.scala | Scala | gpl-3.0 | 593 |
package com.weez.mercury.macros
import scala.language.experimental.macros
import scala.reflect.macros.whitebox
/*
class dbtype() extends StaticAnnotation {
def macroTransform(annottees: Any*): Any = macro DBTypeMacro.impl
}*/
/**
 * Macro bundle that rewrites an annotated case class into a database entity:
 * it guarantees an `Entity` super type and a `Long` id field, and generates a
 * companion object carrying the DB name and a `Packer` instance.
 */
class DBTypeMacro(val c: whitebox.Context) extends MacroHelper {
  import c.universe._

  def impl(annottees: c.Tree*): c.Tree = {
    // Appends `Entity` to the parent list unless it is already inherited.
    def ensureEntityAsSuperType(parents: List[Tree]) = {
      val entityType = tq"_root_.com.weez.mercury.common.Entity"
      findInherit(parents, entityType) match {
        case Some(_) => parents
        case None => parents :+ entityType
      }
    }

    // Prepends a `val id: Long` case-class parameter if the user did not
    // declare one themselves.
    def ensureIdParam(paramss: List[List[Tree]]) = {
      findParam(paramss, "id", Some(tq"Long")) match {
        case Some(_) => paramss
        case None =>
          val mods = Modifiers(Flag.CASEACCESSOR | Flag.PARAMACCESSOR)
          (q"$mods val id: Long" :: paramss.head) :: paramss.tail
      }
    }

    // Generates an `apply` overload matching the ORIGINAL parameter lists
    // (without the synthesized id), delegating to the full apply with id = 0L.
    def addExtraApply(paramss: List[List[Tree]], name: TypeName) = {
      val argss =
        paramss map { params =>
          params map {
            case q"$mod val $name: $tpe = $rhs" =>
              ValDef(Modifiers(Flag.PARAM), name, tpe, rhs)
          }
        }
      var applyArgss =
        argss map { params =>
          params map {
            case ValDef(_, name, _, _) => Ident(name): Tree
          }
        }
      applyArgss = (Literal(Constant(0L)) :: applyArgss.head) :: applyArgss.tail
      q"def apply(...$argss): ${Ident(name)} = apply(...$applyArgss)"
    }

    withException {
      annottees.head match {
        case q"$mods class $name[..$tparams](...$paramss) extends ..$parents { ..$body }" if mods.hasFlag(Flag.CASE) =>
          val caseClassParents = ensureEntityAsSuperType(parents)
          val caseClassParams = ensureIdParam(paramss)
          val dbObjectType = tq"_root_.com.weez.mercury.common.DBObjectType[$name]"
          // With a single parameter list the companion can itself serve as the
          // construction function expected by Packer; otherwise only DBObjectType.
          val companionType =
            if (caseClassParams.length == 1) {
              val functionType = makeFunctionTypeWithParamList(caseClassParams, Ident(name))
              functionType :: dbObjectType :: Nil
            } else
              dbObjectType :: Nil
          val packer =
            if (caseClassParams.length == 1)
              q"this"
            else
              flattenFunction(q"apply", caseClassParams)
          // Companion body: DB name derived from the class name plus the Packer.
          var companionBody =
            q"def nameInDB = ${Literal(Constant(camelCase2sepStyle(name.toString)))}" ::
              q"implicit val packer = _root_.com.weez.mercury.common.Packer($packer)" :: Nil
          // Only add the convenience apply when an id param was synthesized
          // (i.e. the visible head param list grew).
          if (paramss.head.length < caseClassParams.head.length)
            companionBody = addExtraApply(paramss, name) :: companionBody
          val tree = q"""
          $mods class $name(...$caseClassParams) extends ..$caseClassParents { ..$body }
          object ${name.toTermName} extends ..$companionType { ..$companionBody }
          """
          //println(show(tree))
          tree
        case _ => throw new PositionedException(annottees.head.pos, "expect case class")
      }
    }
  }
}
| weeztech/weez-mercury | macros/src/main/scala/com/weez/mercury/macros/DBTypeMacro.scala | Scala | apache-2.0 | 3,055 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import org.apache.spark.{TaskContext, Partition}
/**
 * RDD transformation that flat-maps only the VALUE side of key/value pairs:
 * each (k, v) yields one (k, x) pair for every x produced by f(v).
 * Because keys are untouched, partitioning is preserved.
 */
private[spark]
class FlatMappedValuesRDD[K, V, U](prev: RDD[_ <: Product2[K, V]], f: V => TraversableOnce[U])
  extends RDD[(K, U)](prev) {

  // Partitions and partitioner come straight from the parent RDD.
  override def getPartitions = firstParent[Product2[K, V]].partitions

  override val partitioner = firstParent[Product2[K, V]].partitioner

  // Lazily expand each value with f, pairing every result with the original key.
  override def compute(split: Partition, context: TaskContext) = {
    firstParent[Product2[K, V]].iterator(split, context).flatMap { case Product2(k, v) =>
      f(v).map(x => (k, x))
    }
  }
}
| mkolod/incubator-spark | core/src/main/scala/org/apache/spark/rdd/FlatMappedValuesRDD.scala | Scala | apache-2.0 | 1,362 |
package oo
/**
* OO Interpreter with inheritance. The case class Class is extended by
* a field containing its super class. For method calls and field access
* one has to lookup the path up to the super class and return the first
* catch.
*/
/**
 * OO interpreter with inheritance. Each Class carries the name of its super
 * class; field access and method invocation walk the inheritance chain
 * ('Object is the implicit root) and use the first match found.
 *
 * Note: Invoke evaluates ALL argument expressions eagerly before dispatch,
 * so both branches passed to an 'ifThenElse method are evaluated.
 */
object OOWithInheritanceInterp extends App {

  /** Expressions of the object language. */
  sealed abstract class Expr
  /** Instantiate className; field initializer expressions in declaration order. */
  case class New(className: Symbol, fields: List[Expr]) extends Expr
  /** Read a field of the object denoted by objExpr. */
  case class FAcc(objExpr: Expr, fieldName: Symbol) extends Expr
  /** Invoke a method on the object denoted by objExpr. */
  case class Invoke(objExpr: Expr, methodName: Symbol, args: List[Expr]) extends Expr
  /** Variable reference (method parameter or 'this). */
  case class Id(id: Symbol) extends Expr

  /** Class declaration: super class name, own fields, own methods. */
  case class Class(
    superClass: Symbol,
    fields: List[Symbol],
    methods: Map[Symbol, Method])

  /** Method declaration: parameter names and body expression. */
  case class Method(params: List[Symbol], body: Expr)

  /** Runtime values: only objects exist in this language. */
  sealed abstract class Value
  case class Object(className: Symbol, fields: List[Value]) extends Value

  type Env = Map[Symbol, Value]

  /**
   * Resolves fieldName for an instance of className. fieldVals lists the
   * instance's field values with the MOST DERIVED class's fields first;
   * when the field is not declared locally, the local fields' slots are
   * dropped and the search continues in the super class.
   */
  def lookupField(
    fieldName: Symbol,
    className: Symbol,
    fieldVals: List[Value],
    classes: Map[Symbol, Class]): Value = className match {
    case 'Object => sys.error("Unknown field %s".format(fieldName))
    case _ => {
      val clazz = classes.getOrElse(className, sys.error("Unknown class %s".format(className)))
      val index = clazz.fields.indexOf(fieldName)
      if (index >= 0)
        fieldVals(index)
      else
        lookupField(fieldName, clazz.superClass, fieldVals.drop(clazz.fields.size), classes)
    }
  }

  /**
   * Returns the first method found walking from className up to the root of
   * the inheritance tree, or None when no class in the chain declares it.
   */
  def lookupMethod(
    methodName: Symbol,
    className: Symbol,
    classes: Map[Symbol, Class]): Option[Method] = className match {
    // We reached the top and found no method ('Object is root of all classes)
    case 'Object => None
    case _ => {
      // Retrieve Class definition for className
      val clazz = classes.getOrElse(
        className,
        sys.error("Unknown class %s".format(className)))
      // Search for method. If not present, search in super class
      if (clazz.methods.contains(methodName)) Some(clazz.methods(methodName))
      else lookupMethod(methodName, clazz.superClass, classes)
    }
  }

  /** Evaluates e under environment env with class table classes. */
  def interp(e: Expr, env: Env, classes: Map[Symbol, Class]): Value = e match {
    case New(className, args) => {
      if (!classes.contains(className))
        sys.error("Can not initialize unknown class %s".format(className))
      Object(className, args map { interp(_, env, classes) })
    }
    case FAcc(objExpr, fieldName) => {
      val maybeObj = interp(objExpr, env, classes)
      maybeObj match {
        case Object(className, fields) =>
          lookupField(fieldName, className, fields, classes)
        case _ => sys.error("Expected object, but got %s".format(maybeObj))
      }
    }
    case Invoke(objExpr, methodName, args) => {
      val maybeObj = interp(objExpr, env, classes)
      maybeObj match {
        case Object(className, fieldVals) => {
          val method = lookupMethod(methodName, className, classes) getOrElse
            sys.error("Unknown method %s for class %s".format(methodName, className))
          // Arguments are evaluated in the CALLER's environment; the method
          // body only sees its parameters plus 'this (static scoping).
          val argVals = args map { interp(_, env, classes) }
          val argBindings = method.params zip argVals
          val thisBinding = 'this -> maybeObj
          val newEnv = Map() ++ argBindings + thisBinding
          interp(method.body, newEnv, classes)
        }
        case _ => sys.error("Expected object, but got %s".format(maybeObj))
      }
    }
    // Fail with a descriptive message (consistent with the other error cases)
    // instead of a bare NoSuchElementException on unbound identifiers.
    case Id(id) => env.getOrElse(id, sys.error("Unbound identifier %s".format(id)))
  }

  val testclasses = Map(
    'True -> Class('Object, List.empty, Map(
      'ifThenElse -> Method(List('thenExp, 'elseExp), Id('thenExp)),
      'and -> Method(List('x), Id('x)))),
    'False -> Class('Object, List.empty, Map(
      'ifThenElse -> Method(List('thenExp, 'elseExp), Id('elseExp)),
      'and -> Method(List('x), Id('this)))),
    'Food -> Class('Object, List('organic), Map(
      'tastesBetterThan ->
        Method(
          // Parameter of tastesBetterThan
          List('other),
          // Body: dispatch ifThenElse on this.organic (a True/False object).
          // BUGFIX: the parameter is named 'other, so the else-branch must
          // reference Id('other); the former Id('otherFood) was unbound and
          // crashed whenever this method was invoked (args are eager).
          Invoke(FAcc(Id('this), 'organic),
            'ifThenElse,
            List(
              New('True, List.empty), // then-branch value
              FAcc(Id('other), 'organic)))))), // else-branch value
    'Pizza -> Class('Food, List('hasCheese), Map(
      'tastesBetterThan ->
        Method(
          List('other),
          Invoke(
            // Pizza overrides: organic AND hasCheese, ignoring 'other.
            FAcc(Id('this), 'organic),
            'and, List(FAcc(Id('this), 'hasCheese)))))))

  val testRes =
    interp(
      Invoke(
        New('Pizza, List(New('True, List.empty), New('True, List.empty))),
        'tastesBetterThan,
        List(New('Food, List(New('True, List.empty))))),
      Map(), testclasses)
  println(testRes)
  assert(
    testRes
      ==
      Object('True, List.empty))
}
| Tooa/interpreters | src/V6/OOWithInheritanceInterp.scala | Scala | apache-2.0 | 5,337 |
package org.scalamacros.xml
import scala.reflect.api.Universe
/**
 * Unliftable instances that pattern-match the trees produced by the Scala XML
 * literal desugaring and reconstruct the corresponding scala.xml values.
 * This is the inverse direction of lifting: trees -> xml nodes, with unquoted
 * (spliced) sub-expressions wrapped in Unquote.
 */
trait Unliftables extends Nodes {
  protected val __universe: Universe
  import __universe._
  import __universe.internal.reificationSupport.{SyntacticBlock => SynBlock}

  // Recognizes references to the scala.xml package in both term and type position.
  object XML {
    private val xmlpackage = rootMirror.staticPackage("scala.xml")
    def unapply(tree: Tree) = tree match {
      case q"_root_.scala.xml" => true
      case tq"_root_.scala.xml" => true
      case rt: RefTree if rt.symbol == xmlpackage => true
      case _ => false
    }
  }

  implicit val UnliftComment = Unliftable[xml.Comment] {
    case q"new ${XML()}.Comment(${text: String})" => xml.Comment(text)
  }
  implicit val UnliftText = Unliftable[xml.Text] {
    case q"new ${XML()}.Text(${text: String})" => xml.Text(text)
  }
  implicit val UnliftEntityRef = Unliftable[xml.EntityRef] {
    case q"new ${XML()}.EntityRef(${name: String})" => xml.EntityRef(name)
  }
  implicit val UnliftProcInstr = Unliftable[xml.ProcInstr] {
    case q"new ${XML()}.ProcInstr(${target: String}, ${proctext: String})" =>
      xml.ProcInstr(target, proctext)
  }
  implicit val UnliftUnparsed = Unliftable[xml.Unparsed] {
    case q"new ${XML()}.Unparsed(${data: String})" => xml.Unparsed(data)
  }
  implicit val UnliftPCData = Unliftable[xml.PCData] {
    case q"new ${XML()}.PCData(${data: String})" => xml.PCData(data)
  }

  // extract string literal or null
  private object Str {
    def unapply(tree: Tree): Option[String] = tree match {
      case Literal(Constant(s: String)) => Some(s)
      case Literal(Constant(null)) => Some(null)
      case _ => None
    }
  }

  // Runs f with an escape hatch: calling the provided thunk aborts f and
  // evaluates orElse instead. Used to bail out of partially-matched folds.
  private def withRetreat[T](f: (() => Nothing) => T)(orElse: => T): T = {
    class Stop extends Exception
    try f(() => throw new Stop) catch {
      case _: Stop => orElse
    }
  }

  // Matches references to the synthetic $scope value used by XML desugaring.
  private object DDScope {
    def unapply(tree: Tree) = tree match {
      case q"$$scope" => true
      case q"${XML()}.$$scope" => true
      case _ => false
    }
  }

  // Extracts namespace bindings accumulated via $tmpscope reassignments,
  // folding them onto the outer NamespaceBinding; yields the new scope plus
  // the remaining tree. Falls back to (outer, tree) when the shape differs.
  private object Scoped {
    def unapply(tree: Tree)(implicit outer: xml.NamespaceBinding): Option[(xml.NamespaceBinding, Tree)] = tree match {
      case q"""
        var $$tmpscope: ${XML()}.NamespaceBinding = ${DDScope()}
        ..$scopes
        ${SynBlock(q"val $$scope: ${XML()}.NamespaceBinding = $$tmpscope" :: last)}
      """ =>
        withRetreat { retreat =>
          Some((scopes.foldLeft[xml.NamespaceBinding](outer) {
            case (ns, q"$$tmpscope = new ${XML()}.NamespaceBinding(${Str(prefix)}, ${uri: String}, $$tmpscope)") =>
              xml.NamespaceBinding(prefix, uri, ns)
            case _ =>
              retreat()
          }, q"..$last"))
        } {
          Some((outer, tree))
        }
      case q"..$stats" =>
        Some((outer, q"..$stats"))
    }
  }

  // extract a sequence of $md = FooAttribute(..., $md) as metadata
  private object Attributed {
    def unapply(tree: Tree)(implicit outer: xml.NamespaceBinding): Option[(xml.MetaData, Tree)] = tree match {
      case q"""
        var $$md: ${XML()}.MetaData = ${XML()}.Null
        ..$attributes
        $last
      """ =>
        withRetreat { retreat =>
          Some((attributes.foldLeft[xml.MetaData](xml.Null) {
            case (md, q"$$md = new ${XML()}.UnprefixedAttribute(${key: String}, ${value: xml.Node}, $$md)") =>
              new xml.UnprefixedAttribute(key, value, md)
            case (md, q"$$md = new ${XML()}.UnprefixedAttribute(${key: String}, $expr, $$md)") =>
              new xml.UnprefixedAttribute(key, Unquote(expr), md)
            case (md, q"$$md = new ${XML()}.PrefixedAttribute(${pre: String}, ${key: String}, ${value: xml.Node}, $$md)") =>
              new xml.PrefixedAttribute(pre, key, value, md)
            case (md, q"$$md = new ${XML()}.PrefixedAttribute(${pre: String}, ${key: String}, $expr, $$md)") =>
              new xml.PrefixedAttribute(pre, key, Unquote(expr), md)
            case _ =>
              retreat()
          }, last))
        } {
          Some((xml.Null, tree))
        }
      case q"..$stats" =>
        Some((xml.Null, q"..$stats"))
    }
  }

  // extract a seq of nodes from mutable nodebuffer-based construction
  private object Children {
    def unapply(children: List[Tree])(implicit outer: xml.NamespaceBinding): Option[Seq[xml.Node]] = children match {
      case Nil => Some(Nil)
      case q"{ val $$buf = new ${XML()}.NodeBuffer; ..$additions; $$buf }: _*" :: Nil =>
        try Some(additions.map {
          case q"$$buf &+ ${node: xml.Node}" => node
          case q"$$buf &+ $unquote" => Unquote(unquote)
        }) catch {
          case _: MatchError => None
        }
      case _ => None
    }
  }

  // Checks that the attribute reference in the Elem constructor is consistent
  // with the extracted metadata: Null must pair with XML.Null, non-empty
  // metadata must pair with the synthetic $md variable.
  private def correspondsAttrRef(attrs: xml.MetaData, attrref: Tree): Boolean = (attrs, attrref) match {
    case (xml.Null, q"${XML()}.Null") => true
    case (metadata, q"$$md") if metadata.nonEmpty => true
    case _ => false
  }

  implicit def UnliftElem(implicit outer: xml.NamespaceBinding = xml.TopScope): Unliftable[xml.Elem] = new Unliftable[xml.Elem] {
    def unapply(tree: Tree): Option[xml.Elem] = {
      val Scoped(scope, inner) = tree;
      {
        // Shadow the implicit `outer` so nested extractors use the element's
        // own scope rather than the enclosing one.
        val outer = 'shadowed
        implicit val current = scope
        inner match {
          case Attributed(attrs,
            q"new ${XML()}.Elem(${Str(prefix)}, ${Str(label)}, $attrref, ${DDScope()}, ${minimizeEmpty: Boolean}, ..${Children(children)})")
            if correspondsAttrRef(attrs, attrref) =>
            Some(xml.Elem(prefix, label, attrs, scope, minimizeEmpty, children: _*))
          case _ =>
            None
        }
      }
    }
  }

  implicit val UnliftAtom = Unliftable[xml.Atom[String]] {
    case UnliftPCData(pcdata) => pcdata
    case UnliftText(text) => text
    case UnliftUnparsed(unparsed) => unparsed
  }
  implicit val UnliftSpecialNode: Unliftable[xml.SpecialNode] = Unliftable[xml.SpecialNode] {
    case UnliftAtom(atom) => atom
    case UnliftComment(comment) => comment
    case UnliftProcInstr(procinstr) => procinstr
    case UnliftEntityRef(entityref) => entityref
  }
  implicit def UnliftNode(implicit outer: xml.NamespaceBinding = xml.TopScope): Unliftable[xml.Node] = Unliftable[xml.Node] {
    case q"${elem: xml.Elem}" => elem
    case UnliftSpecialNode(snode) => snode
  }
}
| scalamacros/xml | xml/src/main/scala/Unliftables.scala | Scala | bsd-3-clause | 6,566 |
package com.criteo.qwebmon
/**
 * Interface a monitored database backend must implement:
 * an identifying name and a snapshot of its currently running queries.
 */
trait DbDriver {
  // Backend identifier — presumably displayed to users; confirm with callers.
  def name: String
  // Queries executing on the backend at the time of the call.
  def runningQueries: Seq[RunningQuery]
}
case class RunningQuery(user: String, runSeconds: Int, query: String, hostname: String) | jqcoffey/qwebmon | src/main/scala/com/criteo/qwebmon/DbDriver.scala | Scala | apache-2.0 | 197 |
package doodle
package jvm
import java.awt.{Font => JFont}
import java.awt.font.{FontRenderContext, TextLayout}
import doodle.core.font._
import doodle.backend.BoundingBox
/**
 * Measures text extents for a given AWT FontRenderContext.
 */
final case class FontMetrics(context: FontRenderContext) {
  /** Returns the bounding box of `characters` rendered in `font`,
    * re-centered so the origin sits at the middle of the text. */
  def boundingBox(font: Font, characters: String): BoundingBox = {
    val jFont = FontMetrics.toJFont(font)
    // This bounding box has its origin at the top left corner of the text. We
    // move it so it is in the center of the text, in keeping with the rest of
    // Doodle's builtins.
    val jBox = new TextLayout(characters, jFont, context).getBounds
    val bb = BoundingBox(jBox.getMinX, jBox.getMaxY, jBox.getMaxX, jBox.getMinY)
    BoundingBox(- bb.width/2, bb.height/2, bb.width/2, - bb.height/2)
  }
}
object FontMetrics {
  import FontFamily._
  import FontFace._
  import FontSize._

  /** Translates a Doodle font description into the equivalent java.awt.Font. */
  def toJFont(font: Font): JFont = {
    val Font(family, face, size) = font

    // Map the abstract family onto AWT's logical family names.
    val awtFamily = family match {
      case Serif       => JFont.SERIF
      case SansSerif   => JFont.SANS_SERIF
      case Monospaced  => JFont.MONOSPACED
      case Named(name) => name
    }

    // Map the face onto AWT style constants.
    val awtStyle = face match {
      case Bold   => JFont.BOLD
      case Italic => JFont.ITALIC
      case Normal => JFont.PLAIN
    }

    // Only point sizes exist at present.
    val awtSize = size match {
      case Points(pts) => pts
    }

    new JFont(awtFamily, awtStyle, awtSize)
  }
}
| Angeldude/doodle | jvm/src/main/scala/doodle/jvm/FontMetrics.scala | Scala | apache-2.0 | 1,471 |
package com.estus.distribution
import routine._
import org.apache.commons.math3.distribution.LaplaceDistribution
object laplace {
  /** *
   *
   * Laplace Distribution
   *
   * d_ - probability density function
   * p_ - cumulative density function
   * q_ - inverse cumulative density function
   * r_ - random number from that distribution
   *
   */

  // Density at q; returns log-density when log_p is set.
  // NaN parameters or input yield NaN (unlike plaplace_internal, which
  // throws — NOTE(review): inconsistent NaN handling across d/p/q; confirm intent).
  private def dlaplace_internal(q: Double, log_p: Boolean, dist: LaplaceDistribution): Double = {
    (dist.getLocation, dist.getScale) match {
      case (a, b) if a.isNaN || b.isNaN || q.isNaN => Double.NaN
      case _ =>
        if(log_p) dist.logDensity(q) else dist.density(q)
    }
  }

  def dlaplace(q: Double, location: Double, scale: Double, log_p: Boolean): Double = {
    val dist = new LaplaceDistribution(location, scale)
    dlaplace_internal(q, log_p, dist)
  }

  // Vectorized density: one result per input value; rejects empty input.
  def dlaplace(q: List[Double], location: Double, scale: Double, log_p: Boolean): List[Double] = {
    if(q.isEmpty) throw new IllegalArgumentException
    val dist = new LaplaceDistribution(location, scale)
    q.map(tup => dlaplace_internal(tup, log_p, dist))
  }

  // CDF at q with the usual R-style lower_tail/log_p switches.
  private def plaplace_internal(q: Double, lower_tail: Boolean, log_p: Boolean, dist: LaplaceDistribution): Double = {
    val a = dist.getLocation
    val b = dist.getScale
    if(q.isNaN || a.isNaN || b.isNaN) throw new IllegalArgumentException
    val cumprob = dist.cumulativeProbability(q)
    (lower_tail, log_p) match {
      case (true, false) => cumprob
      case (true, true) => math.log(cumprob)
      case (false, false) => 1 - cumprob
      case (false, true) => math.log(1 - cumprob)
    }
  }

  def plaplace(q: Double, location: Double, scale: Double, lower_tail: Boolean, log_p: Boolean): Double = {
    val dist = new LaplaceDistribution(location, scale)
    plaplace_internal(q, lower_tail, log_p, dist)
  }

  def plaplace(q: List[Double], location: Double, scale: Double, lower_tail: Boolean, log_p: Boolean): List[Double] = {
    if(q.isEmpty) throw new IllegalArgumentException
    val dist = new LaplaceDistribution(location, scale)
    q.map(tup => plaplace_internal(tup, lower_tail, log_p, dist))
  }

  // Quantile function: normalizes p according to lower_tail/log_p, then
  // delegates to the distribution's inverse CDF.
  private def qlaplace_internal(p: Double, lower_tail: Boolean, log_p: Boolean, dist: LaplaceDistribution): Double = {
    val p_tmp = (lower_tail, log_p) match {
      case (true, false) => p
      case (true, true) => math.exp(p)
      case (false, false) => 1 - p
      case (false, true) => 1 - math.exp(p)
    }
    dist.inverseCumulativeProbability(p_tmp)
  }

  def qlaplace(p: Double, location: Double, scale: Double, lower_tail: Boolean, log_p: Boolean): Double = {
    val dist = new LaplaceDistribution(location, scale)
    qlaplace_internal(p, lower_tail, log_p, dist)
  }

  def qlaplace(p: List[Double], location: Double, scale: Double, lower_tail: Boolean, log_p: Boolean): List[Double] = {
    if(p.isEmpty) throw new IllegalArgumentException
    val dist = new LaplaceDistribution(location, scale)
    p.map(tup => qlaplace_internal(tup, lower_tail, log_p, dist))
  }

  // Draws n samples; rejects non-positive n and NaN parameters.
  def rlaplace(n: Int, location: Double, scale: Double): List[Double] = {
    if(n < 1 || (location + scale).isNaN) throw new IllegalArgumentException
    val dist = new LaplaceDistribution(location, scale)
    List.fill(n)(dist.sample)
  }
}
| EstusDev/Estus | estus-distribution/src/main/scala/laplace.scala | Scala | apache-2.0 | 3,276 |
package models
import java.io.InputStreamReader
import java.io.ByteArrayInputStream
import play.api.Play.current
import play.api.libs.json._
import play.api.Logger
import play.api.mvc.Request
import com.typesafe.config.ConfigRenderOptions
import com.google.api.client.json.jackson2.JacksonFactory
import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport
import com.google.api.client.googleapis.auth.oauth2.{GoogleClientSecrets, GoogleAuthorizationCodeFlow, GoogleRefreshTokenRequest, GoogleTokenResponse}
import com.google.api.client.auth.oauth2.Credential
/**
* The OAuth2 object contains all methods and value necessary for communicating
* with Google's OAuth end points. This baby is definitely a bit more raw than
* the rest of the application, as it is doing a lot of the config work.
*/
object OAuth2 {
  /**
   * retrieves config object from application conf as a
   * JSON Object.
   */
  private val config: JsObject = current.configuration.getObject("google-oauth-2") match {
    case Some(configObj) =>
      Logger.info("Successfully found OAuth 2.0 configuration")
      Json.parse(
        configObj.render(
          ConfigRenderOptions.concise()
        )
      ).as[JsObject]
    case None =>
      Logger.error("Error finding OAuth 2.0 configuration")
      Json.obj()
  }

  /**
   * Serverside credentials for refresh process.
   * NOTE(review): `.head` throws NoSuchElementException when the key is
   * absent (e.g. when config fell back to the empty object above).
   */
  private val CLIENT_ID : String = (config \\ "client_id").head.as[String]
  private val CLIENT_SECRET : String = (config \\ "client_secret").head.as[String]

  /**
   * Scopes, JSON Factory, and HTTP Transport for token
   * request process.
   *
   * SCOPES: The one thing that you might have to
   * change, should you want to access different services
   * that require OAuth.
   */
  private val JSON_FACTORY = JacksonFactory.getDefaultInstance
  private val httpTransport = GoogleNetHttpTransport.newTrustedTransport
  private val SCOPES = java.util.Arrays.asList(
    "https://docs.google.com/feeds",
    "https://spreadsheets.google.com/feeds"
  )

  /**
   * Transforming config object into a InputStream, reads it in, and loads
   * the object as a GoogleClientSecrets objects.
   */
  private val configBytes = new ByteArrayInputStream(Json.stringify(config).getBytes)
  private val configStream = new InputStreamReader(configBytes)
  private val secrets: GoogleClientSecrets = GoogleClientSecrets.load(JSON_FACTORY, configStream)

  /**
   * This is the thing that actually communicates with authentication
   * severs and returns tokens. It is where all the magic happens.
   *
   * SCOPES: The one thing that you might have to change, should we
   * want to access different services that require OAuth.
   */
  val flow = new GoogleAuthorizationCodeFlow.Builder(
    httpTransport,
    JSON_FACTORY,
    secrets,
    SCOPES
  ).setAccessType("offline").build

  /**
   * Takes raw credential data as basic datatypes and returns a Google
   * Credential object composed of the raw credentials.
   *
   * @param accessToken  Temporary token retrieved via the refresh token.
   *                     This token is what actually allows us access.
   * @param refreshToken Permanent token retrieved at the time of initial
   *                     authentication. This is what we use to retrieve
   *                     new access tokens.
   * @param expiresAt    All tokens expires after 3600 seconds.
   * @return A Google credential object for accessing client services.
   */
  def apply(accessToken: String, refreshToken: String, expiresAt: Long): Credential = {
    val tokenPart = new GoogleTokenResponse()
    val tokenPartOne = tokenPart.setAccessToken(accessToken)
    val tokenPartTwo = tokenPartOne.setExpiresInSeconds(expiresAt)
    val tokenPartThree = tokenPartTwo.setRefreshToken(refreshToken)
    flow.createAndStoreCredential(tokenPartThree, null)
  }

  /**
   * Refreshes a credential. Returns an empty one if the refresh process fails.
   * @param cred The expired Credential.
   * @return A fresh Credential.
   */
  def refresh(cred: Credential): Credential = {
    val refreshRequest = new GoogleRefreshTokenRequest(
      httpTransport,
      JSON_FACTORY,
      cred.getRefreshToken,
      CLIENT_ID,
      CLIENT_SECRET
    )
    try {
      val tokenResponse = refreshRequest.execute
      flow.createAndStoreCredential(tokenResponse, null)
    } catch {
      // On any failure, log and return a credential built from empty tokens
      // rather than propagating the exception.
      case e: Exception =>
        Logger.error("Unable to refresh session token. Please re-authenticate.")
        Logger.error(s"${e.getMessage}")
        apply("", "", 0)
    }
  }

  /**
   * Tries to extract the tokens from the session and make a fresh credential.
   * @tparam T      The type of request. This parameter is really just here
   *                to make the compiler happy.
   * @param request The request that is passed in from the Application controller.
   * @return A optional fresh credential.
   */
  def session[T](implicit request: Request[T]): Option[Credential] = request.session.get("refreshToken") match {
    case Some(refreshToken) => Some(refresh(apply("", refreshToken, 0)))
    case None => None
  }
}
| marinatedpork/google-oauth-ember-play-scala | app/models/OAuth2.scala | Scala | mit | 5,138 |
/**********************************************************************************************************************
* This file is part of Scrupal, a Scalable Reactive Web Application Framework for Content Management *
* *
* Copyright (c) 2015, Reactific Software LLC. All Rights Reserved. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance *
* with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed *
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for *
* the specific language governing permissions and limitations under the License. *
**********************************************************************************************************************/
package scrupal.test
import java.util.concurrent.TimeUnit
import akka.actor.ActorSystem
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import play.api.{Environment, Configuration}
import scrupal.api.{ConfiguredAssetsLocator, AssetsLocator, Site, Scrupal}
import scrupal.storage.api.{Storage, StoreContext}
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
/**
 * Test double for Scrupal: builds configuration from the supplied overrides,
 * opens a default storage context, spins up an actor system, and loads no sites.
 */
class FakeScrupal(
  name : String,
  config_overrides : Map[String,AnyRef]) extends Scrupal(name) {

  // Simple environment configuration with the caller's overrides applied.
  implicit val _configuration : Configuration = {
    Configuration.load(Environment.simple(), config_overrides)
  }

  implicit val _executionContext = {
    scala.concurrent.ExecutionContext.Implicits.global
  }

  // Blocks up to 2 seconds while creating/opening the default store.
  implicit val _storeContext = {
    val configToSearch = _configuration.getConfig("scrupal.storage.default")
    Await.result(Storage.fromConfiguration(configToSearch, "scrupal", create=true), 2.seconds)
  }

  implicit val _actorSystem : ActorSystem = ActorSystem(name, _configuration.underlying)

  // A fake Scrupal hosts no sites.
  protected def load(config: Configuration, context: StoreContext): Future[Seq[Site]] = {
    Future.successful(Seq.empty[Site])
  }

  // Response timeout from config; falls back to 16 seconds.
  implicit val _timeout = Timeout(
    _configuration.getMilliseconds("scrupal.response.timeout").getOrElse(16000L), TimeUnit.MILLISECONDS
  )

  implicit val _assetsLocator : AssetsLocator = new ConfiguredAssetsLocator(_configuration)
}
/** Factory for [[FakeScrupal]] with a default instance name. */
object FakeScrupal {
  def apply( nm : String = "Scrupal",
    config_overrides : Map[String,AnyRef]) : FakeScrupal = {
    new FakeScrupal(nm, config_overrides)
  }
}
| scrupal/scrupal | scrupal-api/src/test/scala/scrupal/test/FakeScrupal.scala | Scala | apache-2.0 | 3,246 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features.avro.serde
import java.io._
import java.nio.charset.StandardCharsets.UTF_8
import java.text.SimpleDateFormat
import java.util.UUID
import com.vividsolutions.jts.geom.{Geometry, GeometryFactory}
import org.apache.avro.io.DecoderFactory
import org.geotools.data.DataUtilities
import org.geotools.filter.identity.FeatureIdImpl
import org.junit.Assert
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.avro.{AvroSimpleFeature, FeatureSpecificReader}
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.text.WKTUtils
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.io.Codec.UTF8
import scala.io.Source
import scala.util.Random
// Backwards-compatibility suite: features serialized with the version-1 Avro
// layout (Version1ASF) must remain readable by the current
// FeatureSpecificReader, including attribute-subset projection, geometry
// round-trips, and null-geometry handling.
@RunWith(classOf[JUnitRunner])
class Version1BackwardsCompatTest extends Specification {
  // NOTE(review): appears unused in this suite — candidate for removal; verify.
  val geoFac = new GeometryFactory()
  // Builds a version-1 feature carrying Point/Polygon/LineString attributes.
  def createTypeWithGeo: Version1ASF = {
    val sft = SimpleFeatureTypes.createType("test","f0:Point,f1:Polygon,f2:LineString")
    val sf = new Version1ASF(new FeatureIdImpl("fakeid"), sft)
    sf.setAttribute("f0", WKTUtils.read("POINT(45.0 49.0)"))
    sf.setAttribute("f1", WKTUtils.read("POLYGON((-80 30,-80 23,-70 30,-70 40,-80 40,-80 30))"))
    sf.setAttribute("f2", WKTUtils.read("LINESTRING(47.28515625 25.576171875, 48 26, 49 27)"))
    sf
  }
  // Serializes the features, back to back, into a temp file (deleted on JVM exit).
  def writeAvroFile(sfList: Seq[Version1ASF]): File = {
    val f = File.createTempFile("avro", ".tmp")
    f.deleteOnExit()
    val fos = new FileOutputStream(f)
    sfList.foreach { sf => sf.write(fos) }
    fos.close()
    f
  }
  // Reads features back with no schema projection (old type == new type).
  def readAvroWithFsr(f: File, oldType: SimpleFeatureType): Seq[AvroSimpleFeature] =
    readAvroWithFsr(f, oldType, oldType)
  // Reads features back, projecting from oldType to newType. The do/while
  // assumes the stream contains at least one record.
  def readAvroWithFsr(f: File, oldType: SimpleFeatureType, newType: SimpleFeatureType) = {
    val fis = new FileInputStream(f)
    val decoder = DecoderFactory.get().binaryDecoder(fis, null)
    val fsr = new FeatureSpecificReader(oldType, newType)
    val sfList = new ListBuffer[AvroSimpleFeature]()
    do {
      sfList += fsr.read(null, decoder)
    } while(!decoder.isEnd)
    fis.close()
    sfList.toList
  }
  // Despite the name, this returns the fieldId digits repeated `len` times and
  // ignores the Random parameter. The assertions below depend on this exact
  // output (e.g. field 0 with len 8 -> "00000000"), so do not "fix" it.
  def randomString(fieldId: Int, len: Int, r:Random) = {
    val sb = new mutable.StringBuilder()
    for (i <- 0 until len) {
      sb.append(fieldId)
    }
    sb.toString()
  }
  // Builds a feature whose i-th attribute is the digit string produced by
  // randomString(i, 8, _) for the given all-String schema.
  def createStringFeatures(schema: String, size: Int, id: String): Version1ASF = {
    val sft = SimpleFeatureTypes.createType("test", schema)
    val r = new Random()
    r.setSeed(0)
    var lst = new mutable.MutableList[String]
    for (i <- 0 until size) {
      lst += randomString(i, 8, r)
    }
    val sf = new Version1ASF(new FeatureIdImpl(id), sft)
    for (i <- 0 until lst.size) {
      sf.setAttribute(i, lst(i))
    }
    sf
  }
  // Round-trips 10 sixty-field records through an Avro file and reads them
  // back projected onto a 5-field subset type. Recomputed on every call.
  def getSubsetData = {
    val numFields = 60
    val numRecords = 10
    val geoSchema = (0 until numFields).map { i => f"f$i%d:String" }.mkString(",")
    val sfSeq = for (i <- (0 until numRecords).toList) yield createStringFeatures(geoSchema, numFields,i.toString)
    sfSeq.foreach { sf => sf must beAnInstanceOf[Version1ASF] }
    val oldType = sfSeq(0).getType
    val f = writeAvroFile(sfSeq)
    val subsetType = SimpleFeatureTypes.createType("subsetType", "f0:String,f1:String,f3:String,f30:String,f59:String")
    val subsetList = readAvroWithFsr(f, oldType, subsetType)
    subsetList
  }
  // Builds "f0:String,f1:String,..." schema spec with numFields fields.
  def buildStringSchema(numFields: Int) = (0 until numFields).map { i => f"f$i%d:String" }.mkString(",")
  // Writes features in GeoTools' pipe-delimited text encoding, one per line.
  def writePipeFile(sfList: Seq[SimpleFeature]) = {
    val f = File.createTempFile("pipe", ".tmp")
    f.deleteOnExit()
    val writer = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(f), UTF_8))
    sfList.foreach { f =>
      writer.write(DataUtilities.encodeFeature(f, true))
      writer.newLine()
    }
    writer.close()
    f
  }
  // Parses a pipe file back into features of the given type.
  // NOTE(review): the Source is never closed — acceptable for a test.
  def readPipeFile(f: File, sft: SimpleFeatureType) =
    Source.fromFile(f)(UTF8).getLines.map { line => DataUtilities.createFeature(sft, line) }.toList
  // Builds features covering every supported attribute class (String, Integer,
  // Double, Float, Boolean, UUID, Date, Point, Polygon); seeded so runs repeat.
  def createComplicatedFeatures(numFeatures : Int): Seq[Version1ASF] = {
    val geoSchema = "f0:String,f1:Integer,f2:Double,f3:Float,f4:Boolean,f5:UUID,f6:Date,f7:Point:srid=4326,f8:Polygon:srid=4326"
    val sft = SimpleFeatureTypes.createType("test", geoSchema)
    val r = new Random()
    r.setSeed(0)
    (0 until numFeatures).map { i =>
      val fid = new FeatureIdImpl(r.nextString(5))
      val sf = new Version1ASF(fid, sft)
      sf.setAttribute("f0", r.nextString(10).asInstanceOf[Object])
      sf.setAttribute("f1", r.nextInt().asInstanceOf[Object])
      sf.setAttribute("f2", r.nextDouble().asInstanceOf[Object])
      sf.setAttribute("f3", r.nextFloat().asInstanceOf[Object])
      sf.setAttribute("f4", r.nextBoolean().asInstanceOf[Object])
      sf.setAttribute("f5", UUID.fromString("12345678-1234-1234-1234-123456789012"))
      sf.setAttribute("f6", new SimpleDateFormat("yyyyMMdd").parse("20140102"))
      sf.setAttribute("f7", WKTUtils.read("POINT(45.0 49.0)"))
      sf.setAttribute("f8", WKTUtils.read("POLYGON((-80 30,-80 23,-70 30,-70 40,-80 40,-80 30))"))
      sf
    }
  }
  "FeatureSpecificReader" should {
    // Projection onto a subset type yields only the requested attributes,
    // with values matching the randomString fixture.
    "do subset data" in {
      val subset = getSubsetData
      subset.size mustEqual 10
      subset.foreach { sf =>
        // parsed as the new AvroSimpleFeature
        sf must beAnInstanceOf[AvroSimpleFeature]
        sf.getAttributeCount mustEqual 5
        sf.getAttributes.size mustEqual 5
        import scala.collection.JavaConversions._
        sf.getAttributes.foreach { a =>
          a must not beNull
        }
        sf.getAttribute("f0") mustEqual "0"*8
        sf.getAttribute("f1") mustEqual "1"*8
        sf.getAttribute("f3") mustEqual "3"*8
        sf.getAttribute("f30") mustEqual "30"*8
        sf.getAttribute("f59") mustEqual "59"*8
      }
      success
    }
    // An attribute dropped by the subset projection reads back as null.
    "ensure a member in subset is null" in {
      getSubsetData(0).getAttribute("f20") must beNull
    }
    // Geometry attributes round-trip unchanged when no projection is applied.
    "handle geotypes" in {
      val orig = createTypeWithGeo
      val f = writeAvroFile(List(orig))
      val fsrList = readAvroWithFsr(f, orig.getType, orig.getType)
      fsrList.size mustEqual 1
      fsrList(0).getAttributeCount mustEqual 3
      fsrList(0).getAttributeCount mustEqual orig.getAttributeCount
      List("f0", "f1", "f2").foreach { f =>
        fsrList(0).getAttribute(f) mustEqual orig.getAttribute(f)
      }
      success
    }
    // Cross-checks Avro deserialization against GeoTools' pipe-text encoding
    // for the same features; the Avro read is subset-projected to 5 fields.
    "deserialize properly compared to a pipe file" in {
      val numFields = 60
      val numRecords = 100
      val geoSchema = buildStringSchema(numFields)
      val sfList = for (i <- (0 until numRecords)) yield createStringFeatures(geoSchema, numFields, i.toString)
      val oldType = sfList(0).getType
      val avroFile = writeAvroFile(sfList)
      val pipeFile = writePipeFile(sfList)
      val subsetType = SimpleFeatureTypes.createType("subsetType", "f0:String,f1:String,f3:String,f30:String,f59:String")
      val fsrList = readAvroWithFsr(avroFile, oldType, subsetType)
      val pipeList = readPipeFile(pipeFile, oldType)
      sfList.size mustEqual pipeList.size
      fsrList.size mustEqual pipeList.size
      for(i <- 0 until sfList.size) {
        val f1 = sfList(i)
        val f2 = fsrList(i)
        val f3 = pipeList(i)
        f1.getID mustEqual f2.getID
        f1.getID mustEqual f3.getID
        f1.getAttributeCount mustEqual numFields
        f2.getAttributeCount mustEqual 5 //subset
        f3.getAttributeCount mustEqual numFields
        List("f0","f1", "f3", "f30", "f59").foreach { s =>
          f1.getAttribute(s) mustEqual f2.getAttribute(s)
          f2.getAttribute(s) mustEqual f3.getAttribute(s)
        }
        f1 mustNotEqual f2
      }
      success
    }
    // Mixed-type features (numbers, UUID, Date, geometries) agree between the
    // pipe-text decode, the subset Avro decode, and the original in-memory value.
    "deserialize complex feature" in {
      val numRecords = 1
      val sfList = createComplicatedFeatures(numRecords)
      val oldType = sfList(0).getType
      val avroFile = writeAvroFile(sfList)
      val pipeFile = writePipeFile(sfList)
      val subsetType = SimpleFeatureTypes.createType("subsetType", "f0:String,f3:Float,f5:UUID,f6:Date")
      val pipeList = readPipeFile(pipeFile, oldType)
      val avroList = readAvroWithFsr(avroFile, oldType, subsetType)
      avroList.size mustEqual pipeList.size
      avroList.size mustEqual numRecords
      for(i <- 0 until numRecords){
        val a = pipeList(i)
        val b = avroList(i)
        List("f0","f3", "f5", "f6").foreach { s =>
          Assert.assertEquals(a.getAttribute(s), b.getAttribute(s))
          Assert.assertEquals(a.getAttribute(s), sfList(i).getAttribute(s))
        }
      }
      success
    }
    // Projecting away a leading geometry attribute must skip its bytes and
    // still decode the retained geometry correctly.
    "properly skip geoms from version 1" in {
      val sft = SimpleFeatureTypes.createType("test", "a:Point,b:Point")
      val bOnly = SimpleFeatureTypes.createType("bonly", "b:Point")
      val v1 = new Version1ASF(new FeatureIdImpl("fakeid"), sft)
      v1.setAttribute("a", WKTUtils.read("POINT(2 2)"))
      v1.setAttribute("b", WKTUtils.read("POINT(45 56)"))
      val baos = new ByteArrayOutputStream()
      v1.write(baos)
      val bais = new ByteArrayInputStream(baos.toByteArray)
      val fsr = new FeatureSpecificReader(sft, bOnly)
      val asf = fsr.read(null, DecoderFactory.get.directBinaryDecoder(bais, null))
      asf.getAttributeCount mustEqual 1
      asf.getAttribute(0).asInstanceOf[Geometry] mustEqual WKTUtils.read("POINT(45 56)")
    }
    // A never-set geometry attribute deserializes as null without corrupting
    // the attributes that follow it.
    "properly handle null geoms from version 1" in {
      val sft = SimpleFeatureTypes.createType("test", "a:Point,b:Point")
      val v2 = new Version1ASF(new FeatureIdImpl("fake2"), sft)
      v2.setAttribute("b", WKTUtils.read("POINT(45 56)"))
      val baos2 = new ByteArrayOutputStream()
      v2.write(baos2)
      val bais2 = new ByteArrayInputStream(baos2.toByteArray)
      val fsr2 = new FeatureSpecificReader(sft)
      val asf2 = fsr2.read(null, DecoderFactory.get.directBinaryDecoder(bais2, null))
      asf2.getAttributeCount mustEqual 2
      asf2.getAttribute(0) must beNull
      asf2.getAttribute(1).asInstanceOf[Geometry] mustEqual WKTUtils.read("POINT(45 56)")
    }
  }
}
| ronq/geomesa | geomesa-features/geomesa-feature-avro/src/test/scala/org/locationtech/geomesa/features/avro/serde/Version1BackwardsCompatTest.scala | Scala | apache-2.0 | 10,679 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.math.{BigDecimal => JBigDecimal}
import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import org.apache.parquet.filter2.predicate.{FilterApi, FilterPredicate, Operators}
import org.apache.parquet.filter2.predicate.FilterApi._
import org.apache.parquet.filter2.predicate.Operators.{Column => _, _}
import org.apache.parquet.schema.MessageType
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.optimizer.InferFiltersFromConstraints
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.execution.datasources.{DataSourceStrategy, HadoopFsRelation, LogicalRelation}
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2ScanRelation
import org.apache.spark.sql.execution.datasources.v2.parquet.ParquetScan
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.ParquetOutputTimestampType
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.util.{AccumulatorContext, AccumulatorV2}
/**
* A test suite that tests Parquet filter2 API based filter pushdown optimization.
*
* NOTE:
*
* 1. `!(a cmp b)` is always transformed to its negated form `a cmp' b` by the
* `BooleanSimplification` optimization rule whenever possible. As a result, predicate `!(a < 1)`
* results in a `GtEq` filter predicate rather than a `Not`.
*
* 2. `Tuple1(Option(x))` is used together with `AnyVal` types like `Int` to ensure the inferred
* data type is nullable.
*
* NOTE:
*
 * This file intentionally enables record-level filtering explicitly. If new test cases
 * depend on this configuration, remember to set it explicitly
 * within the test.
*/
abstract class ParquetFilterSuite extends QueryTest with ParquetTest with SharedSparkSession {
  // Builds a ParquetFilters wired to the session's pushdown settings; tests
  // may override case sensitivity instead of taking it from the conf.
  protected def createParquetFilters(
      schema: MessageType,
      caseSensitive: Option[Boolean] = None): ParquetFilters =
    new ParquetFilters(schema, conf.parquetFilterPushDownDate, conf.parquetFilterPushDownTimestamp,
      conf.parquetFilterPushDownDecimal, conf.parquetFilterPushDownStringStartWith,
      conf.parquetFilterPushDownInFilterThreshold,
      caseSensitive.getOrElse(conf.caseSensitiveAnalysis))
  // Enables Parquet record-level filtering before every test; many tests in
  // this suite assume it is on.
  override def beforeEach(): Unit = {
    super.beforeEach()
    // Note that there are many tests here that require record-level filtering set to be true.
    spark.conf.set(SQLConf.PARQUET_RECORD_FILTER_ENABLED.key, "true")
  }
  // Restores the record-filter setting after each test; the finally ensures
  // the parent teardown always runs even if unset fails.
  override def afterEach(): Unit = {
    try {
      spark.conf.unset(SQLConf.PARQUET_RECORD_FILTER_ENABLED.key)
    } finally {
      super.afterEach()
    }
  }
  // Abstract hook implemented by V1/V2 subclasses: verifies that `predicate`
  // is pushed down as `filterClass` and that `checker` accepts the result
  // rows against `expected`.
  def checkFilterPredicate(
      df: DataFrame,
      predicate: Predicate,
      filterClass: Class[_ <: FilterPredicate],
      checker: (DataFrame, Seq[Row]) => Unit,
      expected: Seq[Row]): Unit
  // Convenience overload: uses checkAnswer as the row checker on the
  // implicitly supplied DataFrame.
  private def checkFilterPredicate
      (predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: Seq[Row])
      (implicit df: DataFrame): Unit = {
    checkFilterPredicate(df, predicate, filterClass, checkAnswer(_, _: Seq[Row]), expected)
  }
  // Convenience overload for a single expected scalar value, wrapped into a
  // one-row result.
  private def checkFilterPredicate[T]
      (predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: T)
      (implicit df: DataFrame): Unit = {
    checkFilterPredicate(predicate, filterClass, Seq(Row(expected)))(df)
  }
  // Binary-column variant: Array[Byte] lacks value equality, so rows are
  // compared via a sorted, comma-joined rendering of the byte arrays.
  private def checkBinaryFilterPredicate
      (predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: Seq[Row])
      (implicit df: DataFrame): Unit = {
    def checkBinaryAnswer(df: DataFrame, expected: Seq[Row]) = {
      assertResult(expected.map(_.getAs[Array[Byte]](0).mkString(",")).sorted) {
        df.rdd.map(_.getAs[Array[Byte]](0).mkString(",")).collect().toSeq.sorted
      }
    }
    checkFilterPredicate(df, predicate, filterClass, checkBinaryAnswer _, expected)
  }
  // Convenience overload for a single expected byte-array value.
  private def checkBinaryFilterPredicate
      (predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: Array[Byte])
      (implicit df: DataFrame): Unit = {
    checkBinaryFilterPredicate(predicate, filterClass, Seq(Row(expected)))(df)
  }
  // Exercises the full comparison-operator matrix (null checks, =, <=>, !=,
  // <, >, <=, >=, literal-on-left forms, negation, OR) for a timestamp
  // column. Expects exactly four distinct, ordered timestamps.
  private def testTimestampPushdown(data: Seq[Timestamp]): Unit = {
    assert(data.size === 4)
    val ts1 = data.head
    val ts2 = data(1)
    val ts3 = data(2)
    val ts4 = data(3)
    withParquetDataFrame(data.map(i => Tuple1(i))) { implicit df =>
      checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], data.map(i => Row.apply(i)))
      checkFilterPredicate('_1 === ts1, classOf[Eq[_]], ts1)
      checkFilterPredicate('_1 <=> ts1, classOf[Eq[_]], ts1)
      checkFilterPredicate('_1 =!= ts1, classOf[NotEq[_]],
        Seq(ts2, ts3, ts4).map(i => Row.apply(i)))
      checkFilterPredicate('_1 < ts2, classOf[Lt[_]], ts1)
      checkFilterPredicate('_1 > ts1, classOf[Gt[_]], Seq(ts2, ts3, ts4).map(i => Row.apply(i)))
      checkFilterPredicate('_1 <= ts1, classOf[LtEq[_]], ts1)
      checkFilterPredicate('_1 >= ts4, classOf[GtEq[_]], ts4)
      checkFilterPredicate(Literal(ts1) === '_1, classOf[Eq[_]], ts1)
      checkFilterPredicate(Literal(ts1) <=> '_1, classOf[Eq[_]], ts1)
      checkFilterPredicate(Literal(ts2) > '_1, classOf[Lt[_]], ts1)
      checkFilterPredicate(Literal(ts3) < '_1, classOf[Gt[_]], ts4)
      checkFilterPredicate(Literal(ts1) >= '_1, classOf[LtEq[_]], ts1)
      checkFilterPredicate(Literal(ts4) <= '_1, classOf[GtEq[_]], ts4)
      // !(x < ts4) is rewritten to x >= ts4 by BooleanSimplification.
      checkFilterPredicate(!('_1 < ts4), classOf[GtEq[_]], ts4)
      checkFilterPredicate('_1 < ts2 || '_1 > ts3, classOf[Operators.Or], Seq(Row(ts1), Row(ts4)))
    }
  }
private def testDecimalPushDown(data: DataFrame)(f: DataFrame => Unit): Unit = {
withTempPath { file =>
data.write.parquet(file.getCanonicalPath)
readParquetFile(file.toString)(f)
}
}
  // Verifies that a startsWith filter exercises the `canDrop` / `inverseCanDrop`
  // row-group pruning paths: with pushdown enabled, every row group should be
  // dropped (accumulator stays 0); with it disabled, row groups are scanned.
  private def testStringStartsWith(dataFrame: DataFrame, filter: String): Unit = {
    withTempPath { dir =>
      val path = dir.getCanonicalPath
      // Small block size forces multiple row groups so pruning is observable.
      dataFrame.write.option("parquet.block.size", 512).parquet(path)
      Seq(true, false).foreach { pushDown =>
        withSQLConf(
          SQLConf.PARQUET_FILTER_PUSHDOWN_STRING_STARTSWITH_ENABLED.key -> pushDown.toString) {
          val accu = new NumRowGroupsAcc
          sparkContext.register(accu)
          val df = spark.read.parquet(path).filter(filter)
          df.foreachPartition((it: Iterator[Row]) => it.foreach(v => accu.add(0)))
          if (pushDown) {
            assert(accu.value == 0)
          } else {
            assert(accu.value > 0)
          }
          AccumulatorContext.remove(accu.id)
        }
      }
    }
  }
  // Boolean columns: only equality-style predicates are meaningful.
  test("filter pushdown - boolean") {
    withParquetDataFrame((true :: false :: Nil).map(b => Tuple1.apply(Option(b)))) { implicit df =>
      checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], Seq(Row(true), Row(false)))
      checkFilterPredicate('_1 === true, classOf[Eq[_]], true)
      checkFilterPredicate('_1 <=> true, classOf[Eq[_]], true)
      checkFilterPredicate('_1 =!= true, classOf[NotEq[_]], false)
    }
  }
  // Byte columns: full comparison-operator matrix over values 1..4.
  test("filter pushdown - tinyint") {
    withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toByte)))) { implicit df =>
      assert(df.schema.head.dataType === ByteType)
      checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 === 1.toByte, classOf[Eq[_]], 1)
      checkFilterPredicate('_1 <=> 1.toByte, classOf[Eq[_]], 1)
      checkFilterPredicate('_1 =!= 1.toByte, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 < 2.toByte, classOf[Lt[_]], 1)
      checkFilterPredicate('_1 > 3.toByte, classOf[Gt[_]], 4)
      checkFilterPredicate('_1 <= 1.toByte, classOf[LtEq[_]], 1)
      checkFilterPredicate('_1 >= 4.toByte, classOf[GtEq[_]], 4)
      checkFilterPredicate(Literal(1.toByte) === '_1, classOf[Eq[_]], 1)
      checkFilterPredicate(Literal(1.toByte) <=> '_1, classOf[Eq[_]], 1)
      checkFilterPredicate(Literal(2.toByte) > '_1, classOf[Lt[_]], 1)
      checkFilterPredicate(Literal(3.toByte) < '_1, classOf[Gt[_]], 4)
      checkFilterPredicate(Literal(1.toByte) >= '_1, classOf[LtEq[_]], 1)
      checkFilterPredicate(Literal(4.toByte) <= '_1, classOf[GtEq[_]], 4)
      // Negation is simplified to GtEq before pushdown.
      checkFilterPredicate(!('_1 < 4.toByte), classOf[GtEq[_]], 4)
      checkFilterPredicate('_1 < 2.toByte || '_1 > 3.toByte,
        classOf[Operators.Or], Seq(Row(1), Row(4)))
    }
  }
  // Short columns: same operator matrix as tinyint.
  test("filter pushdown - smallint") {
    withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toShort)))) { implicit df =>
      assert(df.schema.head.dataType === ShortType)
      checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 === 1.toShort, classOf[Eq[_]], 1)
      checkFilterPredicate('_1 <=> 1.toShort, classOf[Eq[_]], 1)
      checkFilterPredicate('_1 =!= 1.toShort, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 < 2.toShort, classOf[Lt[_]], 1)
      checkFilterPredicate('_1 > 3.toShort, classOf[Gt[_]], 4)
      checkFilterPredicate('_1 <= 1.toShort, classOf[LtEq[_]], 1)
      checkFilterPredicate('_1 >= 4.toShort, classOf[GtEq[_]], 4)
      checkFilterPredicate(Literal(1.toShort) === '_1, classOf[Eq[_]], 1)
      checkFilterPredicate(Literal(1.toShort) <=> '_1, classOf[Eq[_]], 1)
      checkFilterPredicate(Literal(2.toShort) > '_1, classOf[Lt[_]], 1)
      checkFilterPredicate(Literal(3.toShort) < '_1, classOf[Gt[_]], 4)
      checkFilterPredicate(Literal(1.toShort) >= '_1, classOf[LtEq[_]], 1)
      checkFilterPredicate(Literal(4.toShort) <= '_1, classOf[GtEq[_]], 4)
      checkFilterPredicate(!('_1 < 4.toShort), classOf[GtEq[_]], 4)
      checkFilterPredicate('_1 < 2.toShort || '_1 > 3.toShort,
        classOf[Operators.Or], Seq(Row(1), Row(4)))
    }
  }
  // Int columns: same operator matrix, no explicit type conversions needed.
  test("filter pushdown - integer") {
    withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i)))) { implicit df =>
      checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
      checkFilterPredicate('_1 <=> 1, classOf[Eq[_]], 1)
      checkFilterPredicate('_1 =!= 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
      checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
      checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
      checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
      checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
      checkFilterPredicate(Literal(1) <=> '_1, classOf[Eq[_]], 1)
      checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
      checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
      checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
      checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
      checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
      checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
    }
  }
  // Long columns: same operator matrix; Int literals are implicitly widened.
  test("filter pushdown - long") {
    withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toLong)))) { implicit df =>
      checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
      checkFilterPredicate('_1 <=> 1, classOf[Eq[_]], 1)
      checkFilterPredicate('_1 =!= 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
      checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
      checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
      checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
      checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
      checkFilterPredicate(Literal(1) <=> '_1, classOf[Eq[_]], 1)
      checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
      checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
      checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
      checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
      checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
      checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
    }
  }
  // Float columns: same operator matrix with widened numeric literals.
  test("filter pushdown - float") {
    withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toFloat)))) { implicit df =>
      checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
      checkFilterPredicate('_1 <=> 1, classOf[Eq[_]], 1)
      checkFilterPredicate('_1 =!= 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
      checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
      checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
      checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
      checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
      checkFilterPredicate(Literal(1) <=> '_1, classOf[Eq[_]], 1)
      checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
      checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
      checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
      checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
      checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
      checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
    }
  }
  // Double columns: same operator matrix with widened numeric literals.
  test("filter pushdown - double") {
    withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toDouble)))) { implicit df =>
      checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
      checkFilterPredicate('_1 <=> 1, classOf[Eq[_]], 1)
      checkFilterPredicate('_1 =!= 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
      checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
      checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
      checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
      checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
      checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
      checkFilterPredicate(Literal(1) <=> '_1, classOf[Eq[_]], 1)
      checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
      checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
      checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
      checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
      checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
      checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
    }
  }
  // String columns: comparisons use lexicographic ordering of "1".."4".
  test("filter pushdown - string") {
    withParquetDataFrame((1 to 4).map(i => Tuple1(i.toString))) { implicit df =>
      checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkFilterPredicate(
        '_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(i => Row.apply(i.toString)))
      checkFilterPredicate('_1 === "1", classOf[Eq[_]], "1")
      checkFilterPredicate('_1 <=> "1", classOf[Eq[_]], "1")
      checkFilterPredicate(
        '_1 =!= "1", classOf[NotEq[_]], (2 to 4).map(i => Row.apply(i.toString)))
      checkFilterPredicate('_1 < "2", classOf[Lt[_]], "1")
      checkFilterPredicate('_1 > "3", classOf[Gt[_]], "4")
      checkFilterPredicate('_1 <= "1", classOf[LtEq[_]], "1")
      checkFilterPredicate('_1 >= "4", classOf[GtEq[_]], "4")
      checkFilterPredicate(Literal("1") === '_1, classOf[Eq[_]], "1")
      checkFilterPredicate(Literal("1") <=> '_1, classOf[Eq[_]], "1")
      checkFilterPredicate(Literal("2") > '_1, classOf[Lt[_]], "1")
      checkFilterPredicate(Literal("3") < '_1, classOf[Gt[_]], "4")
      checkFilterPredicate(Literal("1") >= '_1, classOf[LtEq[_]], "1")
      checkFilterPredicate(Literal("4") <= '_1, classOf[GtEq[_]], "4")
      checkFilterPredicate(!('_1 < "4"), classOf[GtEq[_]], "4")
      checkFilterPredicate('_1 < "2" || '_1 > "3", classOf[Operators.Or], Seq(Row("1"), Row("4")))
    }
  }
  // Binary columns: values are UTF-8 byte renderings of "1".."4"; uses the
  // byte-array-aware checker since Array[Byte] lacks value equality.
  test("filter pushdown - binary") {
    implicit class IntToBinary(int: Int) {
      def b: Array[Byte] = int.toString.getBytes(StandardCharsets.UTF_8)
    }
    withParquetDataFrame((1 to 4).map(i => Tuple1(i.b))) { implicit df =>
      checkBinaryFilterPredicate('_1 === 1.b, classOf[Eq[_]], 1.b)
      checkBinaryFilterPredicate('_1 <=> 1.b, classOf[Eq[_]], 1.b)
      checkBinaryFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkBinaryFilterPredicate(
        '_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(i => Row.apply(i.b)).toSeq)
      checkBinaryFilterPredicate(
        '_1 =!= 1.b, classOf[NotEq[_]], (2 to 4).map(i => Row.apply(i.b)).toSeq)
      checkBinaryFilterPredicate('_1 < 2.b, classOf[Lt[_]], 1.b)
      checkBinaryFilterPredicate('_1 > 3.b, classOf[Gt[_]], 4.b)
      checkBinaryFilterPredicate('_1 <= 1.b, classOf[LtEq[_]], 1.b)
      checkBinaryFilterPredicate('_1 >= 4.b, classOf[GtEq[_]], 4.b)
      checkBinaryFilterPredicate(Literal(1.b) === '_1, classOf[Eq[_]], 1.b)
      checkBinaryFilterPredicate(Literal(1.b) <=> '_1, classOf[Eq[_]], 1.b)
      checkBinaryFilterPredicate(Literal(2.b) > '_1, classOf[Lt[_]], 1.b)
      checkBinaryFilterPredicate(Literal(3.b) < '_1, classOf[Gt[_]], 4.b)
      checkBinaryFilterPredicate(Literal(1.b) >= '_1, classOf[LtEq[_]], 1.b)
      checkBinaryFilterPredicate(Literal(4.b) <= '_1, classOf[GtEq[_]], 4.b)
      checkBinaryFilterPredicate(!('_1 < 4.b), classOf[GtEq[_]], 4.b)
      checkBinaryFilterPredicate(
        '_1 < 2.b || '_1 > 3.b, classOf[Operators.Or], Seq(Row(1.b), Row(4.b)))
    }
  }
  // Date columns: full operator matrix over four consecutive dates
  // (requires spark.sql.parquet.filterPushdown.date, on by default).
  test("filter pushdown - date") {
    implicit class StringToDate(s: String) {
      def date: Date = Date.valueOf(s)
    }
    val data = Seq("2018-03-18", "2018-03-19", "2018-03-20", "2018-03-21")
    withParquetDataFrame(data.map(i => Tuple1(i.date))) { implicit df =>
      checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], data.map(i => Row.apply(i.date)))
      checkFilterPredicate('_1 === "2018-03-18".date, classOf[Eq[_]], "2018-03-18".date)
      checkFilterPredicate('_1 <=> "2018-03-18".date, classOf[Eq[_]], "2018-03-18".date)
      checkFilterPredicate('_1 =!= "2018-03-18".date, classOf[NotEq[_]],
        Seq("2018-03-19", "2018-03-20", "2018-03-21").map(i => Row.apply(i.date)))
      checkFilterPredicate('_1 < "2018-03-19".date, classOf[Lt[_]], "2018-03-18".date)
      checkFilterPredicate('_1 > "2018-03-20".date, classOf[Gt[_]], "2018-03-21".date)
      checkFilterPredicate('_1 <= "2018-03-18".date, classOf[LtEq[_]], "2018-03-18".date)
      checkFilterPredicate('_1 >= "2018-03-21".date, classOf[GtEq[_]], "2018-03-21".date)
      checkFilterPredicate(
        Literal("2018-03-18".date) === '_1, classOf[Eq[_]], "2018-03-18".date)
      checkFilterPredicate(
        Literal("2018-03-18".date) <=> '_1, classOf[Eq[_]], "2018-03-18".date)
      checkFilterPredicate(
        Literal("2018-03-19".date) > '_1, classOf[Lt[_]], "2018-03-18".date)
      checkFilterPredicate(
        Literal("2018-03-20".date) < '_1, classOf[Gt[_]], "2018-03-21".date)
      checkFilterPredicate(
        Literal("2018-03-18".date) >= '_1, classOf[LtEq[_]], "2018-03-18".date)
      checkFilterPredicate(
        Literal("2018-03-21".date) <= '_1, classOf[GtEq[_]], "2018-03-21".date)
      checkFilterPredicate(!('_1 < "2018-03-21".date), classOf[GtEq[_]], "2018-03-21".date)
      checkFilterPredicate(
        '_1 < "2018-03-19".date || '_1 > "2018-03-20".date,
        classOf[Operators.Or],
        Seq(Row("2018-03-18".date), Row("2018-03-21".date)))
    }
  }
  // Timestamps: pushdown works for MILLIS and MICROS output types but must be
  // refused entirely (createFilter returns None) for INT96.
  test("filter pushdown - timestamp") {
    // spark.sql.parquet.outputTimestampType = TIMESTAMP_MILLIS
    val millisData = Seq(Timestamp.valueOf("2018-06-14 08:28:53.123"),
      Timestamp.valueOf("2018-06-15 08:28:53.123"),
      Timestamp.valueOf("2018-06-16 08:28:53.123"),
      Timestamp.valueOf("2018-06-17 08:28:53.123"))
    withSQLConf(SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE.key ->
      ParquetOutputTimestampType.TIMESTAMP_MILLIS.toString) {
      testTimestampPushdown(millisData)
    }
    // spark.sql.parquet.outputTimestampType = TIMESTAMP_MICROS
    val microsData = Seq(Timestamp.valueOf("2018-06-14 08:28:53.123456"),
      Timestamp.valueOf("2018-06-15 08:28:53.123456"),
      Timestamp.valueOf("2018-06-16 08:28:53.123456"),
      Timestamp.valueOf("2018-06-17 08:28:53.123456"))
    withSQLConf(SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE.key ->
      ParquetOutputTimestampType.TIMESTAMP_MICROS.toString) {
      testTimestampPushdown(microsData)
    }
    // spark.sql.parquet.outputTimestampType = INT96 doesn't support pushdown
    withSQLConf(SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE.key ->
      ParquetOutputTimestampType.INT96.toString) {
      withParquetDataFrame(millisData.map(i => Tuple1(i))) { implicit df =>
        val schema = new SparkToParquetSchemaConverter(conf).convert(df.schema)
        assertResult(None) {
          createParquetFilters(schema).createFilter(sources.IsNull("_1"))
        }
      }
    }
  }
  // Decimals: exercises all four physical encodings Parquet uses depending on
  // precision and the legacy-format flag.
  test("filter pushdown - decimal") {
    Seq(
      (false, Decimal.MAX_INT_DIGITS), // int32Writer
      (false, Decimal.MAX_LONG_DIGITS), // int64Writer
      (true, Decimal.MAX_LONG_DIGITS), // binaryWriterUsingUnscaledLong
      (false, DecimalType.MAX_PRECISION) // binaryWriterUsingUnscaledBytes
    ).foreach { case (legacyFormat, precision) =>
      withSQLConf(SQLConf.PARQUET_WRITE_LEGACY_FORMAT.key -> legacyFormat.toString) {
        val schema = StructType.fromDDL(s"a decimal($precision, 2)")
        val rdd =
          spark.sparkContext.parallelize((1 to 4).map(i => Row(new java.math.BigDecimal(i))))
        val dataFrame = spark.createDataFrame(rdd, schema)
        testDecimalPushDown(dataFrame) { implicit df =>
          assert(df.schema === schema)
          checkFilterPredicate('a.isNull, classOf[Eq[_]], Seq.empty[Row])
          checkFilterPredicate('a.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
          checkFilterPredicate('a === 1, classOf[Eq[_]], 1)
          checkFilterPredicate('a <=> 1, classOf[Eq[_]], 1)
          checkFilterPredicate('a =!= 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
          checkFilterPredicate('a < 2, classOf[Lt[_]], 1)
          checkFilterPredicate('a > 3, classOf[Gt[_]], 4)
          checkFilterPredicate('a <= 1, classOf[LtEq[_]], 1)
          checkFilterPredicate('a >= 4, classOf[GtEq[_]], 4)
          checkFilterPredicate(Literal(1) === 'a, classOf[Eq[_]], 1)
          checkFilterPredicate(Literal(1) <=> 'a, classOf[Eq[_]], 1)
          checkFilterPredicate(Literal(2) > 'a, classOf[Lt[_]], 1)
          checkFilterPredicate(Literal(3) < 'a, classOf[Gt[_]], 4)
          checkFilterPredicate(Literal(1) >= 'a, classOf[LtEq[_]], 1)
          checkFilterPredicate(Literal(4) <= 'a, classOf[GtEq[_]], 4)
          checkFilterPredicate(!('a < 4), classOf[GtEq[_]], 4)
          checkFilterPredicate('a < 2 || 'a > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
        }
      }
    }
  }
  // A decimal filter value must match the column's scale exactly; a value with
  // a different scale cannot be pushed down (createFilter returns None).
  test("Ensure that filter value matched the parquet file schema") {
    val scale = 2
    val schema = StructType(Seq(
      StructField("cint", IntegerType),
      StructField("cdecimal1", DecimalType(Decimal.MAX_INT_DIGITS, scale)),
      StructField("cdecimal2", DecimalType(Decimal.MAX_LONG_DIGITS, scale)),
      StructField("cdecimal3", DecimalType(DecimalType.MAX_PRECISION, scale))
    ))
    val parquetSchema = new SparkToParquetSchemaConverter(conf).convert(schema)
    // decimal matches the column scale; decimal1 deliberately does not.
    val decimal = new JBigDecimal(10).setScale(scale)
    val decimal1 = new JBigDecimal(10).setScale(scale + 1)
    assert(decimal.scale() === scale)
    assert(decimal1.scale() === scale + 1)
    val parquetFilters = createParquetFilters(parquetSchema)
    // int32-backed decimal: pushed as an Integer comparison (10.00 -> 1000).
    assertResult(Some(lt(intColumn("cdecimal1"), 1000: Integer))) {
      parquetFilters.createFilter(sources.LessThan("cdecimal1", decimal))
    }
    assertResult(None) {
      parquetFilters.createFilter(sources.LessThan("cdecimal1", decimal1))
    }
    // int64-backed decimal: pushed as a Long comparison.
    assertResult(Some(lt(longColumn("cdecimal2"), 1000L: java.lang.Long))) {
      parquetFilters.createFilter(sources.LessThan("cdecimal2", decimal))
    }
    assertResult(None) {
      parquetFilters.createFilter(sources.LessThan("cdecimal2", decimal1))
    }
    // binary-backed decimal: pushdown accepted for matching scale only.
    assert(parquetFilters.createFilter(sources.LessThan("cdecimal3", decimal)).isDefined)
    assertResult(None) {
      parquetFilters.createFilter(sources.LessThan("cdecimal3", decimal1))
    }
  }
// Predicates on partition columns (here "part", derived from the directory name
// "part=1") must not be pushed into the Parquet reader, because the column does not
// exist in the physical file.
test("SPARK-6554: don't push down predicates which reference partition columns") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/part=1"
(1 to 3).map(i => (i, i.toString)).toDF("a", "b").write.parquet(path)
// If the "part = 1" filter gets pushed down, this query will throw an exception since
// "part" is not a valid column in the actual Parquet file
checkAnswer(
spark.read.parquet(dir.getCanonicalPath).filter("part = 1"),
(1 to 3).map(i => Row(i, i.toString, 1)))
}
}
}
// Same setup as above, but the filter mixes the partition column with a data column;
// only the data-column part may reach the Parquet reader.
test("SPARK-10829: Filter combine partition key and attribute doesn't work in DataSource scan") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/part=1"
(1 to 3).map(i => (i, i.toString)).toDF("a", "b").write.parquet(path)
// If the "part = 1" filter gets pushed down, this query will throw an exception since
// "part" is not a valid column in the actual Parquet file
checkAnswer(
spark.read.parquet(dir.getCanonicalPath).filter("a > 0 and (part = 0 or a > 1)"),
(2 to 3).map(i => Row(i, i.toString, 1)))
}
}
}
// Regression tests for SPARK-12231: scans over partitioned data where the filter
// mixes partition and data columns must not miscompute the internal projection.
test("SPARK-12231: test the filter and empty project in partitioned DataSource scan") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}"
(1 to 3).map(i => (i, i + 1, i + 2, i + 3)).toDF("a", "b", "c", "d").
write.partitionBy("a").parquet(path)
// "a > 1 or b < 2" references both the partition column "a" and the data column
// "b", so it cannot be pushed down, and count() needs no output columns. Before
// the SPARK-12231 fix this combination built a mismatched projection and threw;
// here the query just has to complete and return the two matching rows.
val df1 = spark.read.parquet(dir.getCanonicalPath)
assert(df1.filter("a > 1 or b < 2").count() == 2)
}
}
}
test("SPARK-12231: test the new projection in partitioned DataSource scan") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}"
(1 to 3).map(i => (i, i + 1, i + 2, i + 3)).toDF("a", "b", "c", "d").
write.partitionBy("a").parquet(path)
// test the generate new projection case
// when projects != partitionAndNormalColumnProjs
val df1 = spark.read.parquet(dir.getCanonicalPath)
checkAnswer(
df1.filter("a > 1 or b > 2").orderBy("a").selectExpr("a", "b", "c", "d"),
(2 to 3).map(i => Row(i, i + 1, i + 2, i + 3)))
}
}
}
// With schema merging enabled, a filter may reference a column that exists in only
// one of the merged files (top-level "c", and nested "s.c"). The query must succeed
// regardless of whether the predicate is pushed down, filling missing columns with
// null. Exercised with both the vectorized and non-vectorized readers.
test("Filter applied on merged Parquet schema with new column should work") {
import testImplicits._
Seq("true", "false").foreach { vectorized =>
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true",
SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "true",
SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> vectorized) {
withTempPath { dir =>
val path1 = s"${dir.getCanonicalPath}/table1"
(1 to 3).map(i => (i, i.toString)).toDF("a", "b").write.parquet(path1)
val path2 = s"${dir.getCanonicalPath}/table2"
(1 to 3).map(i => (i, i.toString)).toDF("c", "b").write.parquet(path2)
// No matter "c = 1" gets pushed down or not, this query should work without exception.
val df = spark.read.parquet(path1, path2).filter("c = 1").selectExpr("c", "b", "a")
checkAnswer(
df,
Row(1, "1", null))
val path3 = s"${dir.getCanonicalPath}/table3"
val dfStruct = sparkContext.parallelize(Seq((1, 1))).toDF("a", "b")
dfStruct.select(struct("a").as("s")).write.parquet(path3)
val path4 = s"${dir.getCanonicalPath}/table4"
val dfStruct2 = sparkContext.parallelize(Seq((1, 1))).toDF("c", "b")
dfStruct2.select(struct("c").as("s")).write.parquet(path4)
// No matter "s.c = 1" gets pushed down or not, this query should work without exception.
val dfStruct3 = spark.read.parquet(path3, path4).filter("s.c = 1")
.selectExpr("s")
checkAnswer(dfStruct3, Row(Row(null, 1)))
}
}
}
}
// The unsafe row RecordReader does not support row by row filtering so run it with it disabled.
test("SPARK-11661 Still pushdown filters returned by unhandledFilters") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/part=1"
(1 to 3).map(i => (i, i.toString)).toDF("a", "b").write.parquet(path)
val df = spark.read.parquet(path).filter("a = 2")
// The result should be single row.
// When a filter is pushed to Parquet, Parquet can apply it to every row.
// So, we can check the number of rows returned from the Parquet
// to make sure our filter pushdown work.
assert(stripSparkFilter(df).count == 1)
}
}
}
}
// Ensures Not-wrapped predicates still produce correct answers: both queries below
// are tautologies over the generated data, so every row must come back even if a
// Not filter is (or is not) pushed into Parquet.
test("SPARK-12218: 'Not' is included in Parquet filter pushdown") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/table1"
(1 to 5).map(i => (i, (i % 2).toString)).toDF("a", "b").write.parquet(path)
checkAnswer(
spark.read.parquet(path).where("not (a = 2) or not(b in ('1'))"),
(1 to 5).map(i => Row(i, (i % 2).toString)))
checkAnswer(
spark.read.parquet(path).where("not (a = 2 and b in ('1'))"),
(1 to 5).map(i => Row(i, (i % 2).toString)))
}
}
}
// Unit-level checks of ParquetFilters.createFilter on And/Not trees. Column "b" uses
// StringContains, which createFilter cannot convert, so it drives the
// `canRemoveOneSideInAnd` branches: inside a plain And the unconvertible side may be
// dropped, but under a Not the whole predicate must be rejected (dropping one side of
// an And under negation would change semantics, see SPARK-25559).
test("SPARK-12218 and SPARK-25559 Converting conjunctions into Parquet filter predicates") {
val schema = StructType(Seq(
StructField("a", IntegerType, nullable = false),
StructField("b", StringType, nullable = true),
StructField("c", DoubleType, nullable = true)
))
val parquetSchema = new SparkToParquetSchemaConverter(conf).convert(schema)
val parquetFilters = createParquetFilters(parquetSchema)
// Both sides convertible: the whole And survives.
assertResult(Some(and(
lt(intColumn("a"), 10: Integer),
gt(doubleColumn("c"), 1.5: java.lang.Double)))
) {
parquetFilters.createFilter(
sources.And(
sources.LessThan("a", 10),
sources.GreaterThan("c", 1.5D)))
}
// Testing when `canRemoveOneSideInAnd == true`
// case sources.And(lhs, rhs) =>
// ...
// case (Some(lhsFilter), None) if canRemoveOneSideInAnd => Some(lhsFilter)
assertResult(Some(lt(intColumn("a"), 10: Integer))) {
parquetFilters.createFilter(
sources.And(
sources.LessThan("a", 10),
sources.StringContains("b", "prefix")))
}
// Testing when `canRemoveOneSideInAnd == true`
// case sources.And(lhs, rhs) =>
// ...
// case (None, Some(rhsFilter)) if canRemoveOneSideInAnd => Some(rhsFilter)
assertResult(Some(lt(intColumn("a"), 10: Integer))) {
parquetFilters.createFilter(
sources.And(
sources.StringContains("b", "prefix"),
sources.LessThan("a", 10)))
}
// Testing complex And conditions
assertResult(Some(
FilterApi.and(lt(intColumn("a"), 10: Integer), gt(intColumn("a"), 5: Integer)))) {
parquetFilters.createFilter(
sources.And(
sources.And(
sources.LessThan("a", 10),
sources.StringContains("b", "prefix")
),
sources.GreaterThan("a", 5)))
}
// Testing complex And conditions
assertResult(Some(
FilterApi.and(gt(intColumn("a"), 5: Integer), lt(intColumn("a"), 10: Integer)))) {
parquetFilters.createFilter(
sources.And(
sources.GreaterThan("a", 5),
sources.And(
sources.StringContains("b", "prefix"),
sources.LessThan("a", 10)
)))
}
// Testing
// case sources.Not(pred) =>
// createFilterHelper(nameToParquetField, pred, canRemoveOneSideInAnd = false)
// .map(FilterApi.not)
//
// and
//
// Testing when `canRemoveOneSideInAnd == false`
// case sources.And(lhs, rhs) =>
// ...
// case (Some(lhsFilter), None) if canRemoveOneSideInAnd => Some(lhsFilter)
assertResult(None) {
parquetFilters.createFilter(
sources.Not(
sources.And(
sources.GreaterThan("a", 1),
sources.StringContains("b", "prefix"))))
}
// Testing
// case sources.Not(pred) =>
// createFilterHelper(nameToParquetField, pred, canRemoveOneSideInAnd = false)
// .map(FilterApi.not)
//
// and
//
// Testing when `canRemoveOneSideInAnd == false`
// case sources.And(lhs, rhs) =>
// ...
// case (None, Some(rhsFilter)) if canRemoveOneSideInAnd => Some(rhsFilter)
assertResult(None) {
parquetFilters.createFilter(
sources.Not(
sources.And(
sources.StringContains("b", "prefix"),
sources.GreaterThan("a", 1))))
}
// Testing
// case sources.Not(pred) =>
// createFilterHelper(nameToParquetField, pred, canRemoveOneSideInAnd = false)
// .map(FilterApi.not)
//
// and
//
// Testing passing `canRemoveOneSideInAnd = false` into
// case sources.And(lhs, rhs) =>
// val lhsFilterOption = createFilterHelper(nameToParquetField, lhs, canRemoveOneSideInAnd)
assertResult(None) {
parquetFilters.createFilter(
sources.Not(
sources.And(
sources.And(
sources.GreaterThan("a", 1),
sources.StringContains("b", "prefix")),
sources.GreaterThan("a", 2))))
}
// Testing
// case sources.Not(pred) =>
// createFilterHelper(nameToParquetField, pred, canRemoveOneSideInAnd = false)
// .map(FilterApi.not)
//
// and
//
// Testing passing `canRemoveOneSideInAnd = false` into
// case sources.And(lhs, rhs) =>
// val rhsFilterOption = createFilterHelper(nameToParquetField, rhs, canRemoveOneSideInAnd)
assertResult(None) {
parquetFilters.createFilter(
sources.Not(
sources.And(
sources.GreaterThan("a", 2),
sources.And(
sources.GreaterThan("a", 1),
sources.StringContains("b", "prefix")))))
}
}
// Companion to the conjunction test above: inside an Or, each side is converted with
// `canRemoveOneSideInAnd = true`, so an And containing an unconvertible
// StringContains collapses to its convertible half and the Or as a whole survives.
test("SPARK-27699 Converting disjunctions into Parquet filter predicates") {
val schema = StructType(Seq(
StructField("a", IntegerType, nullable = false),
StructField("b", StringType, nullable = true),
StructField("c", DoubleType, nullable = true)
))
val parquetSchema = new SparkToParquetSchemaConverter(conf).convert(schema)
val parquetFilters = createParquetFilters(parquetSchema)
// Testing
// case sources.Or(lhs, rhs) =>
// ...
// lhsFilter <- createFilterHelper(nameToParquetField, lhs, canRemoveOneSideInAnd = true)
assertResult(Some(
FilterApi.or(gt(intColumn("a"), 1: Integer), gt(intColumn("a"), 2: Integer)))) {
parquetFilters.createFilter(
sources.Or(
sources.And(
sources.GreaterThan("a", 1),
sources.StringContains("b", "prefix")),
sources.GreaterThan("a", 2)))
}
// Testing
// case sources.Or(lhs, rhs) =>
// ...
// rhsFilter <- createFilterHelper(nameToParquetField, rhs, canRemoveOneSideInAnd = true)
assertResult(Some(
FilterApi.or(gt(intColumn("a"), 2: Integer), gt(intColumn("a"), 1: Integer)))) {
parquetFilters.createFilter(
sources.Or(
sources.GreaterThan("a", 2),
sources.And(
sources.GreaterThan("a", 1),
sources.StringContains("b", "prefix"))))
}
// Testing
// case sources.Or(lhs, rhs) =>
// ...
// lhsFilter <- createFilterHelper(nameToParquetField, lhs, canRemoveOneSideInAnd = true)
// rhsFilter <- createFilterHelper(nameToParquetField, rhs, canRemoveOneSideInAnd = true)
assertResult(Some(
FilterApi.or(gt(intColumn("a"), 1: Integer), lt(intColumn("a"), 0: Integer)))) {
parquetFilters.createFilter(
sources.Or(
sources.And(
sources.GreaterThan("a", 1),
sources.StringContains("b", "prefix")),
sources.And(
sources.LessThan("a", 0),
sources.StringContains("b", "foobar"))))
}
}
// Checks ParquetFilters.convertibleFilters, which rewrites source filters to the
// convertible subset (rather than producing Parquet predicates). Mirrors the
// createFilter tests above: unconvertible StringContains sides are pruned from Ands,
// Ors survive when both pruned sides remain convertible, and anything under Not is
// dropped entirely.
test("SPARK-27698 Convertible Parquet filter predicates") {
val schema = StructType(Seq(
StructField("a", IntegerType, nullable = false),
StructField("b", StringType, nullable = true),
StructField("c", DoubleType, nullable = true)
))
val parquetSchema = new SparkToParquetSchemaConverter(conf).convert(schema)
val parquetFilters = createParquetFilters(parquetSchema)
assertResult(Seq(sources.And(sources.LessThan("a", 10), sources.GreaterThan("c", 1.5D)))) {
parquetFilters.convertibleFilters(
Seq(sources.And(
sources.LessThan("a", 10),
sources.GreaterThan("c", 1.5D))))
}
assertResult(Seq(sources.LessThan("a", 10))) {
parquetFilters.convertibleFilters(
Seq(sources.And(
sources.LessThan("a", 10),
sources.StringContains("b", "prefix"))))
}
assertResult(Seq(sources.LessThan("a", 10))) {
parquetFilters.convertibleFilters(
Seq(sources.And(
sources.StringContains("b", "prefix"),
sources.LessThan("a", 10))))
}
// Testing complex And conditions
assertResult(Seq(sources.And(sources.LessThan("a", 10), sources.GreaterThan("a", 5)))) {
parquetFilters.convertibleFilters(
Seq(sources.And(
sources.And(
sources.LessThan("a", 10),
sources.StringContains("b", "prefix")
),
sources.GreaterThan("a", 5))))
}
// Testing complex And conditions
assertResult(Seq(sources.And(sources.GreaterThan("a", 5), sources.LessThan("a", 10)))) {
parquetFilters.convertibleFilters(
Seq(sources.And(
sources.GreaterThan("a", 5),
sources.And(
sources.StringContains("b", "prefix"),
sources.LessThan("a", 10)
))))
}
// Testing complex And conditions
assertResult(Seq(sources.Or(sources.GreaterThan("a", 1), sources.GreaterThan("a", 2)))) {
parquetFilters.convertibleFilters(
Seq(sources.Or(
sources.And(
sources.GreaterThan("a", 1),
sources.StringContains("b", "prefix")),
sources.GreaterThan("a", 2))))
}
// Testing complex And/Or conditions, the And condition under Or condition can't be pushed down.
assertResult(Seq(sources.And(sources.LessThan("a", 10),
sources.Or(sources.GreaterThan("a", 1), sources.GreaterThan("a", 2))))) {
parquetFilters.convertibleFilters(
Seq(sources.And(
sources.LessThan("a", 10),
sources.Or(
sources.And(
sources.GreaterThan("a", 1),
sources.StringContains("b", "prefix")),
sources.GreaterThan("a", 2)))))
}
assertResult(Seq(sources.Or(sources.GreaterThan("a", 2), sources.GreaterThan("c", 1.1)))) {
parquetFilters.convertibleFilters(
Seq(sources.Or(
sources.GreaterThan("a", 2),
sources.And(
sources.GreaterThan("c", 1.1),
sources.StringContains("b", "prefix")))))
}
// Testing complex Not conditions.
assertResult(Seq.empty) {
parquetFilters.convertibleFilters(
Seq(sources.Not(
sources.And(
sources.GreaterThan("a", 1),
sources.StringContains("b", "prefix")))))
}
assertResult(Seq.empty) {
parquetFilters.convertibleFilters(
Seq(sources.Not(
sources.And(
sources.StringContains("b", "prefix"),
sources.GreaterThan("a", 1)))))
}
assertResult(Seq.empty) {
parquetFilters.convertibleFilters(
Seq(sources.Not(
sources.And(
sources.And(
sources.GreaterThan("a", 1),
sources.StringContains("b", "prefix")),
sources.GreaterThan("a", 2)))))
}
assertResult(Seq.empty) {
parquetFilters.convertibleFilters(
Seq(sources.Not(
sources.And(
sources.GreaterThan("a", 2),
sources.And(
sources.GreaterThan("a", 1),
sources.StringContains("b", "prefix"))))))
}
}
test("SPARK-16371 Do not push down filters when inner name and outer name are the same") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Tuple1(i)))) { implicit df =>
// Here the schema becomes as below:
//
// root
// |-- _1: struct (nullable = true)
// | |-- _1: integer (nullable = true)
//
// The inner column name, `_1` and outer column name `_1` are the same.
// Obviously this should not push down filters because the outer column is struct.
assert(df.filter("_1 IS NOT NULL").count() === 4)
}
}
// Writes 1024 rows where "a" is always 101 and filters on "a < 100": with pushdown
// enabled the single row group is skipped entirely (accumulator stays 0); with it
// disabled every row still reaches the Spark-side filter (accumulator > 0).
test("Filters should be pushed down for vectorized Parquet reader at row group level") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "true",
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/table"
(1 to 1024).map(i => (101, i)).toDF("a", "b").write.parquet(path)
Seq(true, false).foreach { enablePushDown =>
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> enablePushDown.toString) {
// Counts rows surviving the Parquet-side filter via a custom accumulator.
val accu = new NumRowGroupsAcc
sparkContext.register(accu)
val df = spark.read.parquet(path).filter("a < 100")
df.foreachPartition((it: Iterator[Row]) => it.foreach(v => accu.add(0)))
if (enablePushDown) {
assert(accu.value == 0)
} else {
assert(accu.value > 0)
}
// Deregister so repeated config iterations start from a clean slate.
AccumulatorContext.remove(accu.id)
}
}
}
}
}
// SPARK-17213: Parquet stores strings as UTF-8 binary, so pushed-down string
// comparisons must use unsigned byte-wise ordering; "é" (0xC3 0xA9 in UTF-8) must
// compare greater than ASCII "a" for the four counts below to hold.
// NOTE(review): the literals here previously read "Γ©" — the UTF-8 bytes of "é"
// decoded with the wrong charset (mojibake). Restored to the intended "é", which
// matches the surrounding `scalastyle:off nonascii` guards.
test("SPARK-17213: Broken Parquet filter push-down for string columns") {
  Seq(true, false).foreach { vectorizedEnabled =>
    withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> vectorizedEnabled.toString) {
      withTempPath { dir =>
        import testImplicits._
        val path = dir.getCanonicalPath
        // scalastyle:off nonascii
        Seq("a", "é").toDF("name").write.parquet(path)
        // scalastyle:on nonascii
        assert(spark.read.parquet(path).where("name > 'a'").count() == 1)
        assert(spark.read.parquet(path).where("name >= 'a'").count() == 2)
        // scalastyle:off nonascii
        assert(spark.read.parquet(path).where("name < 'é'").count() == 1)
        assert(spark.read.parquet(path).where("name <= 'é'").count() == 2)
        // scalastyle:on nonascii
      }
    }
  }
}
// Parquet's filter API interprets dots in column names as nesting, so pushdown must
// be disabled for columns like "col.dots"; the query must still return the correct
// answer through the Spark-side filter.
test("SPARK-20364: Disable Parquet predicate pushdown for fields having dots in the names") {
import testImplicits._
Seq(true, false).foreach { vectorized =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> vectorized.toString,
SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> true.toString,
SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
withTempPath { path =>
Seq(Some(1), None).toDF("col.dots").write.parquet(path.getAbsolutePath)
val readBack = spark.read.parquet(path.getAbsolutePath).where("`col.dots` IS NOT NULL")
assert(readBack.count() == 1)
}
}
}
}
// With the small 512-byte block size the 1024 rows span multiple row groups; row
// group pruning alone should drop some but not all groups, so the Parquet-side
// result is strictly between 1 row and the full data set.
test("Filters should be pushed down for Parquet readers at row group level") {
import testImplicits._
withSQLConf(
// Makes sure disabling 'spark.sql.parquet.recordFilter' still enables
// row group level filtering.
SQLConf.PARQUET_RECORD_FILTER_ENABLED.key -> "false",
SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true",
SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
withTempPath { path =>
val data = (1 to 1024)
data.toDF("a").coalesce(1)
.write.option("parquet.block.size", 512)
.parquet(path.getAbsolutePath)
val df = spark.read.parquet(path.getAbsolutePath).filter("a == 500")
// Here, we strip the Spark side filter and check the actual results from Parquet.
val actual = stripSparkFilter(df).collect().length
// Since those are filtered at row group level, the result count should be less
// than the total length but should not be a single record.
// Note that, if record level filtering is enabled, it should be a single record.
// If no filter is pushed down to Parquet, it should be the total length of data.
assert(actual > 1 && actual < data.length)
}
}
}
test("SPARK-23852: Broken Parquet push-down for partially-written stats") {
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
// parquet-1217.parquet contains a single column with values -1, 0, 1, 2 and null.
// The row-group statistics include null counts, but not min and max values, which
// triggers PARQUET-1217.
val df = readResourceParquetFile("test-data/parquet-1217.parquet")
// Will return 0 rows if PARQUET-1217 is not fixed.
assert(df.where("col > 0").count() === 2)
}
}
// StringStartsWith is pushed down as a user-defined Parquet predicate
// (UserDefinedByInstance); checks positive/negative prefixes, their negations, a
// null prefix (no pushdown), null input rows, and that canDrop/inverseCanDrop on
// the user-defined predicate actually prune data.
test("filter pushdown - StringStartsWith") {
withParquetDataFrame((1 to 4).map(i => Tuple1(i + "str" + i))) { implicit df =>
// Empty prefix matches every row.
checkFilterPredicate(
'_1.startsWith("").asInstanceOf[Predicate],
classOf[UserDefinedByInstance[_, _]],
Seq("1str1", "2str2", "3str3", "4str4").map(Row(_)))
// Prefixes of "2str2", from 1 char up to the full string.
Seq("2", "2s", "2st", "2str", "2str2").foreach { prefix =>
checkFilterPredicate(
'_1.startsWith(prefix).asInstanceOf[Predicate],
classOf[UserDefinedByInstance[_, _]],
"2str2")
}
// Non-matching prefixes (wrong case, literal "null", longer than the value).
Seq("2S", "null", "2str22").foreach { prefix =>
checkFilterPredicate(
'_1.startsWith(prefix).asInstanceOf[Predicate],
classOf[UserDefinedByInstance[_, _]],
Seq.empty[Row])
}
// Negated variants of the three cases above.
checkFilterPredicate(
!'_1.startsWith("").asInstanceOf[Predicate],
classOf[Operators.Not],
Seq().map(Row(_)))
Seq("2", "2s", "2st", "2str", "2str2").foreach { prefix =>
checkFilterPredicate(
!'_1.startsWith(prefix).asInstanceOf[Predicate],
classOf[Operators.Not],
Seq("1str1", "3str3", "4str4").map(Row(_)))
}
Seq("2S", "null", "2str22").foreach { prefix =>
checkFilterPredicate(
!'_1.startsWith(prefix).asInstanceOf[Predicate],
classOf[Operators.Not],
Seq("1str1", "2str2", "3str3", "4str4").map(Row(_)))
}
// A null prefix must not produce a pushed filter at all.
val schema = new SparkToParquetSchemaConverter(conf).convert(df.schema)
assertResult(None) {
createParquetFilters(schema).createFilter(sources.StringStartsWith("_1", null))
}
}
// SPARK-28371: make sure filter is null-safe.
withParquetDataFrame(Seq(Tuple1[String](null))) { implicit df =>
checkFilterPredicate(
'_1.startsWith("blah").asInstanceOf[Predicate],
classOf[UserDefinedByInstance[_, _]],
Seq.empty[Row])
}
import testImplicits._
// Test canDrop() has taken effect
testStringStartsWith(spark.range(1024).map(_.toString).toDF(), "value like 'a%'")
// Test inverseCanDrop() has taken effect
testStringStartsWith(spark.range(1024).map(c => "100").toDF(), "value not like '10%'")
}
// IN predicates are pushed down as a chain of OR'ed equality predicates, but only
// up to the configured threshold; beyond it no filter is created. Also verifies
// end-to-end row-group pruning and null semantics of IN/equality filters.
test("SPARK-17091: Convert IN predicate to Parquet filter push-down") {
val schema = StructType(Seq(
StructField("a", IntegerType, nullable = false)
))
val parquetSchema = new SparkToParquetSchemaConverter(conf).convert(schema)
val parquetFilters = createParquetFilters(parquetSchema)
// A lone null element becomes an equality against null (i.e. an IS NULL check).
assertResult(Some(FilterApi.eq(intColumn("a"), null: Integer))) {
parquetFilters.createFilter(sources.In("a", Array(null)))
}
assertResult(Some(FilterApi.eq(intColumn("a"), 10: Integer))) {
parquetFilters.createFilter(sources.In("a", Array(10)))
}
// Remove duplicates
assertResult(Some(FilterApi.eq(intColumn("a"), 10: Integer))) {
parquetFilters.createFilter(sources.In("a", Array(10, 10)))
}
// Multiple values become a left-nested OR chain.
assertResult(Some(or(or(
FilterApi.eq(intColumn("a"), 10: Integer),
FilterApi.eq(intColumn("a"), 20: Integer)),
FilterApi.eq(intColumn("a"), 30: Integer)))
) {
parquetFilters.createFilter(sources.In("a", Array(10, 20, 30)))
}
// Exactly at the threshold a filter is still produced; one past it, none is.
assert(parquetFilters.createFilter(sources.In("a",
Range(0, conf.parquetFilterPushDownInFilterThreshold).toArray)).isDefined)
assert(parquetFilters.createFilter(sources.In("a",
Range(0, conf.parquetFilterPushDownInFilterThreshold + 1).toArray)).isEmpty)
import testImplicits._
withTempPath { path =>
val data = 0 to 1024
data.toDF("a").selectExpr("if (a = 1024, null, a) AS a") // convert 1024 to null
.coalesce(1).write.option("parquet.block.size", 512)
.parquet(path.getAbsolutePath)
val df = spark.read.parquet(path.getAbsolutePath)
Seq(true, false).foreach { pushEnabled =>
withSQLConf(
SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> pushEnabled.toString) {
Seq(1, 5, 10, 11).foreach { count =>
val filter = s"a in(${Range(0, count).mkString(",")})"
assert(df.where(filter).count() === count)
val actual = stripSparkFilter(df.where(filter)).collect().length
// With pushdown under the threshold, row-group pruning drops some (not all) rows.
if (pushEnabled && count <= conf.parquetFilterPushDownInFilterThreshold) {
assert(actual > 1 && actual < data.length)
} else {
assert(actual === data.length)
}
}
// SQL null semantics: IN(null) and "= null" match nothing; IS NULL matches the one null row.
assert(df.where("a in(null)").count() === 0)
assert(df.where("a = null").count() === 0)
assert(df.where("a is null").count() === 1)
}
}
}
}
// With case-insensitive resolution, a filter on "CINT" must resolve to the physical
// column "cint" for every comparison type; with case-sensitive resolution the same
// filter must not be pushed at all. When two physical fields differ only by case,
// case-insensitive pushdown becomes ambiguous and must be skipped.
test("SPARK-25207: Case-insensitive field resolution for pushdown when reading parquet") {
// Asserts the same filter converts under case-insensitive mode and is rejected
// under case-sensitive mode.
def testCaseInsensitiveResolution(
schema: StructType,
expected: FilterPredicate,
filter: sources.Filter): Unit = {
val parquetSchema = new SparkToParquetSchemaConverter(conf).convert(schema)
val caseSensitiveParquetFilters =
createParquetFilters(parquetSchema, caseSensitive = Some(true))
val caseInsensitiveParquetFilters =
createParquetFilters(parquetSchema, caseSensitive = Some(false))
assertResult(Some(expected)) {
caseInsensitiveParquetFilters.createFilter(filter)
}
assertResult(None) {
caseSensitiveParquetFilters.createFilter(filter)
}
}
val schema = StructType(Seq(StructField("cint", IntegerType)))
testCaseInsensitiveResolution(
schema, FilterApi.eq(intColumn("cint"), null.asInstanceOf[Integer]), sources.IsNull("CINT"))
testCaseInsensitiveResolution(
schema,
FilterApi.notEq(intColumn("cint"), null.asInstanceOf[Integer]),
sources.IsNotNull("CINT"))
testCaseInsensitiveResolution(
schema, FilterApi.eq(intColumn("cint"), 1000: Integer), sources.EqualTo("CINT", 1000))
testCaseInsensitiveResolution(
schema,
FilterApi.notEq(intColumn("cint"), 1000: Integer),
sources.Not(sources.EqualTo("CINT", 1000)))
testCaseInsensitiveResolution(
schema, FilterApi.eq(intColumn("cint"), 1000: Integer), sources.EqualNullSafe("CINT", 1000))
testCaseInsensitiveResolution(
schema,
FilterApi.notEq(intColumn("cint"), 1000: Integer),
sources.Not(sources.EqualNullSafe("CINT", 1000)))
testCaseInsensitiveResolution(
schema,
FilterApi.lt(intColumn("cint"), 1000: Integer), sources.LessThan("CINT", 1000))
testCaseInsensitiveResolution(
schema,
FilterApi.ltEq(intColumn("cint"), 1000: Integer),
sources.LessThanOrEqual("CINT", 1000))
testCaseInsensitiveResolution(
schema, FilterApi.gt(intColumn("cint"), 1000: Integer), sources.GreaterThan("CINT", 1000))
testCaseInsensitiveResolution(
schema,
FilterApi.gtEq(intColumn("cint"), 1000: Integer),
sources.GreaterThanOrEqual("CINT", 1000))
testCaseInsensitiveResolution(
schema,
FilterApi.or(
FilterApi.eq(intColumn("cint"), 10: Integer),
FilterApi.eq(intColumn("cint"), 20: Integer)),
sources.In("CINT", Array(10, 20)))
// Ambiguous case: "cint" and "cINT" both match "CINT" case-insensitively.
val dupFieldSchema = StructType(
Seq(StructField("cint", IntegerType), StructField("cINT", IntegerType)))
val dupParquetSchema = new SparkToParquetSchemaConverter(conf).convert(dupFieldSchema)
val dupCaseInsensitiveParquetFilters =
createParquetFilters(dupParquetSchema, caseSensitive = Some(false))
assertResult(None) {
dupCaseInsensitiveParquetFilters.createFilter(sources.EqualTo("CINT", 1000))
}
}
// End-to-end counterpart of the ambiguity check above: files written with duplicate
// case-variant columns ("B" and "b") must raise a descriptive error when queried
// case-insensitively, but remain queryable case-sensitively.
test("SPARK-25207: exception when duplicate fields in case-insensitive mode") {
withTempPath { dir =>
val count = 10
val tableName = "spark_25207"
val tableDir = dir.getAbsoluteFile + "/table"
withTable(tableName) {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
spark.range(count).selectExpr("id as A", "id as B", "id as b")
.write.mode("overwrite").parquet(tableDir)
}
sql(
s"""
|CREATE TABLE $tableName (A LONG, B LONG) USING PARQUET LOCATION '$tableDir'
""".stripMargin)
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
val e = intercept[SparkException] {
sql(s"select a from $tableName where b > 0").collect()
}
assert(e.getCause.isInstanceOf[RuntimeException] && e.getCause.getMessage.contains(
"""Found duplicate field(s) "B": [B, b] in case-insensitive mode"""))
}
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
// Row with id 0 fails "B > 0", so ids 1 until count remain.
checkAnswer(sql(s"select A from $tableName where B > 0"), (1 until count).map(Row(_)))
}
}
}
}
// The StringStartsWith pushdown must resolve the attribute case-insensitively:
// the table declares "col" but the file was written with "COL".
test("SPARK-30826: case insensitivity of StringStartsWith attribute") {
import testImplicits._
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
withTable("t1") {
withTempPath { dir =>
val path = dir.toURI.toString
Seq("42").toDF("COL").write.parquet(path)
spark.sql(
s"""
|CREATE TABLE t1 (col STRING)
|USING parquet
|OPTIONS (path '$path')
""".stripMargin)
checkAnswer(
spark.sql("SELECT * FROM t1 WHERE col LIKE '4%'"),
Row("42"))
}
}
}
}
}
/**
 * Runs the shared filter-pushdown tests against the DataSource V1 Parquet path.
 * The override below extracts the pushed filters from the optimized V1 plan
 * (HadoopFsRelation) and verifies the expected Parquet predicate class was produced.
 */
class ParquetV1FilterSuite extends ParquetFilterSuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_LIST, "parquet")
override def checkFilterPredicate(
df: DataFrame,
predicate: Predicate,
filterClass: Class[_ <: FilterPredicate],
checker: (DataFrame, Seq[Row]) => Unit,
expected: Seq[Row]): Unit = {
// Select exactly the attributes the predicate references so the pushed filter
// is determined by `predicate` alone.
val output = predicate.collect { case a: Attribute => a }.distinct
withSQLConf(
SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true",
SQLConf.PARQUET_FILTER_PUSHDOWN_DATE_ENABLED.key -> "true",
SQLConf.PARQUET_FILTER_PUSHDOWN_TIMESTAMP_ENABLED.key -> "true",
SQLConf.PARQUET_FILTER_PUSHDOWN_DECIMAL_ENABLED.key -> "true",
SQLConf.PARQUET_FILTER_PUSHDOWN_STRING_STARTSWITH_ENABLED.key -> "true",
// Disable adding filters from constraints because it adds, for instance,
// is-not-null to pushed filters, which makes it hard to test if the pushed
// filter is expected or not (this had to be fixed with SPARK-13495).
SQLConf.OPTIMIZER_EXCLUDED_RULES.key -> InferFiltersFromConstraints.ruleName,
SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
val query = df
.select(output.map(e => Column(e)): _*)
.where(Column(predicate))
// Recover the analyzed filter expressions and the scanned relation from the
// optimized plan, then re-run Spark's filter selection on them.
var maybeRelation: Option[HadoopFsRelation] = None
val maybeAnalyzedPredicate = query.queryExecution.optimizedPlan.collect {
case PhysicalOperation(_, filters,
LogicalRelation(relation: HadoopFsRelation, _, _, _)) =>
maybeRelation = Some(relation)
filters
}.flatten.reduceLeftOption(_ && _)
assert(maybeAnalyzedPredicate.isDefined, "No filter is analyzed from the given query")
val (_, selectedFilters, _) =
DataSourceStrategy.selectFilters(maybeRelation.get, maybeAnalyzedPredicate.toSeq)
assert(selectedFilters.nonEmpty, "No filter is pushed down")
val schema = new SparkToParquetSchemaConverter(conf).convert(df.schema)
val parquetFilters = createParquetFilters(schema)
// In this test suite, all the simple predicates are convertible here.
assert(parquetFilters.convertibleFilters(selectedFilters) === selectedFilters)
val pushedParquetFilters = selectedFilters.map { pred =>
val maybeFilter = parquetFilters.createFilter(pred)
assert(maybeFilter.isDefined, s"Couldn't generate filter predicate for $pred")
maybeFilter.get
}
// Doesn't bother checking type parameters here (e.g. `Eq[Integer]`)
assert(pushedParquetFilters.exists(_.getClass === filterClass),
s"${pushedParquetFilters.map(_.getClass).toList} did not contain ${filterClass}.")
checker(stripSparkFilter(query), expected)
}
}
}
/**
 * Runs the shared filter-pushdown tests against the DataSource V2 Parquet path.
 * Unlike the V1 variant, the pushed filters are read directly off the ParquetScan
 * node; a plan that does not contain a ParquetScan is a test failure.
 */
class ParquetV2FilterSuite extends ParquetFilterSuite {
// TODO: enable Parquet V2 write path after file source V2 writers are workable.
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_LIST, "")
override def checkFilterPredicate(
df: DataFrame,
predicate: Predicate,
filterClass: Class[_ <: FilterPredicate],
checker: (DataFrame, Seq[Row]) => Unit,
expected: Seq[Row]): Unit = {
// Select exactly the attributes the predicate references so the pushed filter
// is determined by `predicate` alone.
val output = predicate.collect { case a: Attribute => a }.distinct
withSQLConf(
SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true",
SQLConf.PARQUET_FILTER_PUSHDOWN_DATE_ENABLED.key -> "true",
SQLConf.PARQUET_FILTER_PUSHDOWN_TIMESTAMP_ENABLED.key -> "true",
SQLConf.PARQUET_FILTER_PUSHDOWN_DECIMAL_ENABLED.key -> "true",
SQLConf.PARQUET_FILTER_PUSHDOWN_STRING_STARTSWITH_ENABLED.key -> "true",
// Disable adding filters from constraints because it adds, for instance,
// is-not-null to pushed filters, which makes it hard to test if the pushed
// filter is expected or not (this had to be fixed with SPARK-13495).
SQLConf.OPTIMIZER_EXCLUDED_RULES.key -> InferFiltersFromConstraints.ruleName,
SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
val query = df
.select(output.map(e => Column(e)): _*)
.where(Column(predicate))
query.queryExecution.optimizedPlan.collectFirst {
case PhysicalOperation(_, filters,
DataSourceV2ScanRelation(_, scan: ParquetScan, _)) =>
assert(filters.nonEmpty, "No filter is analyzed from the given query")
val sourceFilters = filters.flatMap(DataSourceStrategy.translateFilter).toArray
val pushedFilters = scan.pushedFilters
assert(pushedFilters.nonEmpty, "No filter is pushed down")
val schema = new SparkToParquetSchemaConverter(conf).convert(df.schema)
val parquetFilters = createParquetFilters(schema)
// In this test suite, all the simple predicates are convertible here.
assert(parquetFilters.convertibleFilters(sourceFilters) === pushedFilters)
val pushedParquetFilters = pushedFilters.map { pred =>
val maybeFilter = parquetFilters.createFilter(pred)
assert(maybeFilter.isDefined, s"Couldn't generate filter predicate for $pred")
maybeFilter.get
}
// Doesn't bother checking type parameters here (e.g. `Eq[Integer]`)
assert(pushedParquetFilters.exists(_.getClass === filterClass),
s"${pushedParquetFilters.map(_.getClass).toList} did not contain ${filterClass}.")
checker(stripSparkFilter(query), expected)
case _ =>
throw new AnalysisException("Can not match ParquetTable in the query.")
}
}
}
}
/**
 * Integer-summing accumulator used by the row-group pushdown tests above to count
 * how many rows survive Parquet-side filtering. Merging with any other accumulator
 * type is rejected.
 */
class NumRowGroupsAcc extends AccumulatorV2[Integer, Integer] {
  // Running total; only mutated through add/merge/reset.
  private var total = 0

  override def isZero: Boolean = total == 0

  override def copy(): AccumulatorV2[Integer, Integer] = {
    val duplicate = new NumRowGroupsAcc()
    duplicate.total = total
    duplicate
  }

  override def reset(): Unit = {
    total = 0
  }

  override def add(v: Integer): Unit = {
    total += v
  }

  override def merge(other: AccumulatorV2[Integer, Integer]): Unit = {
    other match {
      case that: NumRowGroupsAcc =>
        total += that.total
      case _ =>
        throw new UnsupportedOperationException(
          s"Cannot merge ${this.getClass.getName} with ${other.getClass.getName}")
    }
  }

  override def value: Integer = total
}
| goldmedal/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala | Scala | apache-2.0 | 63,790 |
/**
* @author ven
*/
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package toplevel
package typedef
import com.intellij.openapi.progress.ProgressManager
import com.intellij.openapi.project.Project
import com.intellij.psi.search.GlobalSearchScope
import com.intellij.psi.{PsiClass, PsiClassType, PsiElement}
import org.jetbrains.plugins.scala.caches.CachesUtil
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScTypeAliasDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTemplateDefinition, ScTrait, ScTypeDefinition}
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.ScSyntheticClass
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.result.{Success, TypingContext}
import org.jetbrains.plugins.scala.lang.refactoring.util.ScTypeUtil.AliasType
import org.jetbrains.plugins.scala.util.ScEquivalenceUtil
import scala.annotation.tailrec
import scala.collection.mutable
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
/**
 * Collects the members ("nodes") of a class and of every class in its
 * linearization into per-name maps, merging a member with the members it
 * overrides. Subclasses fix the member type `T` (e.g. signatures or type
 * aliases) and supply equivalence/hashing/classification for it.
 */
abstract class MixinNodes {
  type T
  /** Equivalence used when merging members across the hierarchy. */
  def equiv(t1: T, t2: T): Boolean
  /** Stricter identity than [[equiv]]; used for private-member lookup. */
  def same(t1: T, t2: T): Boolean
  def computeHashCode(t: T): Int
  def elemName(t: T): String
  def isAbstract(t: T): Boolean
  def isImplicit(t: T): Boolean
  def isPrivate(t: T): Boolean

  /** A member plus the substitutor mapping it into the inheritor's context. */
  class Node(val info: T, val substitutor: ScSubstitutor) {
    var supers: Seq[Node] = Seq.empty
    var primarySuper: Option[Node] = None
  }

  /**
   * Per-class member map keyed by converted member name. Private members are
   * kept in a separate map since they never participate in override merging.
   * Lookups via [[forName]] are computed lazily and cached (guarded by
   * `synchronized` on this instance).
   */
  class Map extends mutable.HashMap[String, ArrayBuffer[(T, Node)]] {
    private[Map] val implicitNames: mutable.HashSet[String] = new mutable.HashSet[String]
    private val privatesMap: mutable.HashMap[String, ArrayBuffer[(T, Node)]] = mutable.HashMap.empty

    def addToMap(key: T, node: Node) {
      val name = ScalaPsiUtil.convertMemberName(elemName(key))
      // Private members go to privatesMap; everything else to this map.
      (if (!isPrivate(key)) this else privatesMap).
        getOrElseUpdate(name, new ArrayBuffer) += ((key, node))
      if (isImplicit(key)) implicitNames.add(name)
    }

    @volatile
    private var supersList: List[Map] = List.empty
    def setSupersMap(list: List[Map]) {
      for (m <- list) {
        implicitNames ++= m.implicitNames
      }
      supersList = list
    }

    private val calculatedNames: mutable.HashSet[String] = new mutable.HashSet
    private val calculated: mutable.HashMap[String, AllNodes] = new mutable.HashMap
    private val calculatedSupers: mutable.HashMap[String, AllNodes] = new mutable.HashMap

    /**
     * Returns `(members of this class, members of supers)` for `name`,
     * computing and caching the merged node maps on first access.
     */
    def forName(name: String): (AllNodes, AllNodes) = {
      val convertedName = ScalaPsiUtil.convertMemberName(name)
      synchronized {
        if (calculatedNames.contains(convertedName)) {
          return (calculated(convertedName), calculatedSupers(convertedName))
        }
      }
      val thisMap: NodesMap = toNodesMap(getOrElse(convertedName, new ArrayBuffer))
      val maps: List[NodesMap] = supersList.map(sup => toNodesMap(sup.getOrElse(convertedName, new ArrayBuffer)))
      val supers = mergeWithSupers(thisMap, mergeSupers(maps))
      val list = supersList.map(_.privatesMap.getOrElse(convertedName, new ArrayBuffer[(T, Node)])).flatten
      val supersPrivates = toNodesSeq(list)
      val thisPrivates = toNodesSeq(privatesMap.getOrElse(convertedName, new ArrayBuffer[(T, Node)]).toList ::: list)
      val thisAllNodes = new AllNodes(thisMap, thisPrivates)
      val supersAllNodes = new AllNodes(supers, supersPrivates)
      synchronized {
        calculatedNames.add(convertedName)
        calculated.+=((convertedName, thisAllNodes))
        calculatedSupers.+=((convertedName, supersAllNodes))
      }
      (thisAllNodes, supersAllNodes)
    }

    @volatile
    private var forImplicitsCache: List[(T, Node)] = null
    /** All implicit members reachable from this class (cached after first call). */
    def forImplicits(): List[(T, Node)] = {
      if (forImplicitsCache != null) return forImplicitsCache
      val res = new ArrayBuffer[(T, Node)]()
      for (name <- implicitNames) {
        val map = forName(name)._1
        for (elem <- map) {
          if (isImplicit(elem._1)) res += elem
        }
      }
      forImplicitsCache = res.toList
      forImplicitsCache
    }

    /** Names of all members declared here or in any super, including privates. */
    def allNames(): mutable.Set[String] = {
      val names = new mutable.HashSet[String]
      names ++= keySet
      names ++= privatesMap.keySet
      for (sup <- supersList) {
        names ++= sup.keySet
        names ++= sup.privatesMap.keySet
      }
      names
    }

    // Forces computation of every name, then returns the two caches.
    private def forAll(): (mutable.HashMap[String, AllNodes], mutable.HashMap[String, AllNodes]) = {
      for (name <- allNames()) forName(name)
      synchronized {
        (calculated, calculatedSupers)
      }
    }

    def allFirstSeq(): Seq[AllNodes] = {
      forAll()._1.toSeq.map(_._2)
    }

    def allSecondSeq(): Seq[AllNodes] = {
      // BUG FIX: previously returned forAll()._1 (a copy-paste of allFirstSeq),
      // so callers never saw the supers' node maps. The second tuple element
      // (calculatedSupers) is the intended result here.
      forAll()._2.toSeq.map(_._2)
    }

    // Buckets a private-member list by hash code for fast `same`-based lookup.
    private def toNodesSeq(seq: List[(T, Node)]): NodesSeq = {
      val map = new mutable.HashMap[Int, List[(T, Node)]]
      for (elem <- seq) {
        val key = computeHashCode(elem._1)
        val prev = map.getOrElse(key, List.empty)
        map.update(key, elem :: prev)
      }
      new NodesSeq(map)
    }

    private def toNodesMap(buf: ArrayBuffer[(T, Node)]): NodesMap = {
      val res = new NodesMap
      res ++= buf
      res
    }

    // Multi-map keyed with the custom member equivalence; insertion order of
    // nodes is preserved via LinkedHashSet.
    private class MultiMap extends mutable.HashMap[T, mutable.Set[Node]] with collection.mutable.MultiMap[T, Node] {
      override def elemHashCode(t : T) = computeHashCode(t)
      override def elemEquals(t1 : T, t2 : T) = equiv(t1, t2)
      override def makeSet = new mutable.LinkedHashSet[Node]
    }

    private object MultiMap {def empty = new MultiMap}

    // Groups equivalent members coming from the different super maps.
    private def mergeSupers(maps: List[NodesMap]) : MultiMap = {
      val res = MultiMap.empty
      val mapsIterator = maps.iterator
      while (mapsIterator.hasNext) {
        val currentIterator = mapsIterator.next().iterator
        while (currentIterator.hasNext) {
          val (k, node) = currentIterator.next()
          res.addBinding(k, node)
        }
      }
      res
    }

    //Return primary selected from supersMerged
    private def mergeWithSupers(thisMap: NodesMap, supersMerged: MultiMap): NodesMap = {
      val primarySupers = new NodesMap
      for ((key, nodes) <- supersMerged) {
        // Prefer a concrete member as the primary super; fall back to the first.
        val primarySuper = nodes.find {n => !isAbstract(n.info)} match {
          case None => nodes.toList(0)
          case Some(concrete) => concrete
        }
        primarySupers += ((key, primarySuper))
        thisMap.get(key) match {
          case Some(node) =>
            node.primarySuper = Some(primarySuper)
            node.supers = nodes.toSeq
          case None =>
            nodes -= primarySuper
            primarySuper.supers = nodes.toSeq
            thisMap += ((key, primarySuper))
        }
      }
      primarySupers
    }
  }

  /** Combined view over public (NodesMap) and private (NodesSeq) members. */
  class AllNodes(publics: NodesMap, privates: NodesSeq) {
    def get(s: T): Option[Node] = {
      publics.get(s) match {
        case res: Some[Node] => res
        case _ => privates.get(s)
      }
    }

    def foreach(p: ((T, Node)) => Unit) {
      publics.foreach(p)
      privates.map.values.flatten.foreach(p)
    }

    def map[R](p: ((T, Node)) => R): Seq[R] = {
      publics.map(p).toSeq ++ privates.map.values.flatten.map(p)
    }

    def filter(p: ((T, Node)) => Boolean): Seq[(T, Node)] = {
      publics.filter(p).toSeq ++ privates.map.values.flatten.filter(p)
    }

    def withFilter(p: ((T, Node)) => Boolean) = {
      (publics.toSeq ++ privates.map.values.flatten).withFilter(p)
    }

    def flatMap[R](p: ((T, Node)) => Traversable[R]): Seq[R] = {
      publics.flatMap(p).toSeq ++ privates.map.values.flatten.flatMap(p)
    }

    def iterator: Iterator[(T, Node)] = {
      new Iterator[(T, Node)] {
        private val iter1 = publics.iterator
        private val iter2 = privates.map.values.flatten.iterator
        def hasNext: Boolean = iter1.hasNext || iter2.hasNext
        def next(): (T, Node) = if (iter1.hasNext) iter1.next() else iter2.next()
      }
    }

    def fastPhysicalSignatureGet(key: T): Option[Node] = {
      publics.fastPhysicalSignatureGet(key) match {
        case res: Some[Node] => res
        case _ => privates.get(key)
      }
    }

    def isEmpty: Boolean = publics.isEmpty && privates.map.values.forall(_.isEmpty)
  }

  /** Private members bucketed by [[computeHashCode]]; lookup uses [[same]]. */
  class NodesSeq(private[MixinNodes] val map: mutable.HashMap[Int, List[(T, Node)]]) {
    def get(s: T): Option[Node] = {
      val list = map.getOrElse(computeHashCode(s), Nil)
      val iterator = list.iterator
      while (iterator.hasNext) {
        val next = iterator.next()
        if (same(s, next._1)) return Some(next._2)
      }
      None
    }

    def fastPhysicalSignatureGet(key: T): Option[Node] = {
      val list = map.getOrElse(computeHashCode(key), List.empty)
      list match {
        case Nil => None
        // Single-element bucket: skip the `same` check for speed.
        case x :: Nil => Some(x._2)
        case e =>
          val iterator = e.iterator
          while (iterator.hasNext) {
            val next = iterator.next()
            if (same(key, next._1)) return Some(next._2)
          }
          None
      }
    }
  }

  /** Hash map over members using the custom equivalence/hashing. */
  class NodesMap extends mutable.HashMap[T, Node] {
    override def elemHashCode(t: T) = computeHashCode(t)
    override def elemEquals(t1 : T, t2 : T) = equiv(t1, t2)

    /**
     * Use this method if you are sure, that map contains key
     */
    def fastGet(key: T): Option[Node] = {
      //todo: possible optimization to filter without types first then if only one variant left, get it.
      val h = index(elemHashCode(key))
      var e = table(h).asInstanceOf[Entry]
      // Single-entry bucket: return it without an equality check (precondition:
      // the key is known to be present).
      if (e != null && e.next == null) return Some(e.value)
      while (e != null) {
        if (elemEquals(e.key, key)) return Some(e.value)
        e = e.next
        if (e.next == null) return Some(e.value)
      }
      None
    }

    def fastPhysicalSignatureGet(key: T): Option[Node] = {
      key match {
        case p: PhysicalSignature =>
          // For physical signatures compare the underlying PSI method directly,
          // which is cheaper than full signature equivalence.
          val h = index(elemHashCode(key))
          var e = table(h).asInstanceOf[Entry]
          if (e != null && e.next == null) {
            e.value.info match {
              case p2: PhysicalSignature =>
                if (p.method == p2.method) return Some(e.value)
                else return None
              case _ => return None
            }
          }
          while (e != null) {
            e.value.info match {
              case p2: PhysicalSignature =>
                if (p.method == p2.method) return Some(e.value)
              case _ =>
            }
            e = e.next
          }
          fastGet(key)
        case _ => fastGet(key)
      }
    }
  }

  def build(clazz: PsiClass): Map = build(ScType.designator(clazz))

  /**
   * Builds the member map for `tp`: processes the class's own members, then
   * walks the linearization of its supers (each into its own map) and links
   * them via [[Map.setSupersMap]].
   */
  def build(tp: ScType, compoundThisType: Option[ScType] = None): Map = {
    var isPredef = false
    var place: Option[PsiElement] = None
    val map = new Map
    val superTypesBuff = new ListBuffer[Map]
    val (superTypes, subst, thisTypeSubst): (Seq[ScType], ScSubstitutor, ScSubstitutor) = tp match {
      case cp: ScCompoundType =>
        processRefinement(cp, map, place)
        val thisTypeSubst = compoundThisType match {
          case Some(_) => new ScSubstitutor(Map.empty, Map.empty, compoundThisType)
          case _ => new ScSubstitutor(Predef.Map.empty, Predef.Map.empty, Some(tp))
        }
        (MixinNodes.linearization(cp), ScSubstitutor.empty, thisTypeSubst)
      case _ =>
        val clazz = tp match {
          case ScDesignatorType(clazz: PsiClass) => clazz
          case ScProjectionType(_, clazz: PsiClass, _) => clazz
          case _ => null
        }
        if (clazz == null) (Seq.empty, ScSubstitutor.empty, ScSubstitutor.empty)
        else
          clazz match {
            case template: ScTypeDefinition =>
              if (template.qualifiedName == "scala.Predef") isPredef = true
              place = Option(template.extendsBlock)
              processScala(template, ScSubstitutor.empty, map, place, base = true)
              val lin = MixinNodes.linearization(template)
              // Accumulate ScThisType substitutions for every enclosing template.
              var zSubst = new ScSubstitutor(Map.empty, Map.empty, Some(ScThisType(template)))
              var placer = template.getContext
              while (placer != null) {
                placer match {
                  case t: ScTemplateDefinition => zSubst = zSubst.followed(
                    new ScSubstitutor(Map.empty, Map.empty, Some(ScThisType(t)))
                  )
                  case _ =>
                }
                placer = placer.getContext
              }
              // Drop the head of the linearization: that is the class itself.
              (if (!lin.isEmpty) lin.tail else lin, Bounds.putAliases(template, ScSubstitutor.empty), zSubst)
            case template: ScTemplateDefinition =>
              place = Option(template.asInstanceOf[ScalaStubBasedElementImpl[_]].getLastChildStub)
              processScala(template, ScSubstitutor.empty, map, place, base = true)
              var zSubst = new ScSubstitutor(Map.empty, Map.empty, Some(ScThisType(template)))
              var placer = template.getContext
              while (placer != null) {
                placer match {
                  case t: ScTemplateDefinition => zSubst = zSubst.followed(
                    new ScSubstitutor(Map.empty, Map.empty, Some(ScThisType(t)))
                  )
                  case _ =>
                }
                placer = placer.getContext
              }
              (MixinNodes.linearization(template),
                Bounds.putAliases(template, ScSubstitutor.empty), zSubst)
            case syn: ScSyntheticClass =>
              (syn.getSuperTypes.map{psiType => ScType.create(psiType, syn.getProject)} : Seq[ScType],
                ScSubstitutor.empty, ScSubstitutor.empty)
            case clazz: PsiClass =>
              place = Option(clazz.getLastChild)
              processJava(clazz, ScSubstitutor.empty, map, place)
              val lin = MixinNodes.linearization(clazz)
              (if (!lin.isEmpty) lin.tail else lin,
                ScSubstitutor.empty, ScSubstitutor.empty)
            case _ =>
              (Seq.empty, ScSubstitutor.empty, ScSubstitutor.empty)
          }
    }
    val iter = superTypes.iterator
    while (iter.hasNext) {
      val superType = iter.next()
      ScType.extractClassType(superType, place.map(_.getProject)) match {
        case Some((superClass, s)) =>
          // Do not include scala.ScalaObject to Predef's base types to prevent SOE
          if (!(superClass.qualifiedName == "scala.ScalaObject" && isPredef)) {
            val dependentSubst = superType match {
              case p@ScProjectionType(proj, _, _) => new ScSubstitutor(proj).followed(p.actualSubst)
              case ScParameterizedType(p@ScProjectionType(proj, _, _), _) => new ScSubstitutor(proj).followed(p.actualSubst)
              case _ => ScSubstitutor.empty
            }
            val newSubst = combine(s, subst, superClass).followed(thisTypeSubst).followed(dependentSubst)
            val newMap = new Map
            superClass match {
              case template: ScTemplateDefinition => processScala(template, newSubst, newMap, place, base = false)
              case syn: ScSyntheticClass =>
                //it's required to do like this to have possibility mix Synthetic types
                val clazz = ScalaPsiManager.instance(syn.getProject).getCachedClass(syn.getQualifiedName,
                  GlobalSearchScope.allScope(syn.getProject), ScalaPsiManager.ClassCategory.TYPE
                )
                clazz match {
                  case template: ScTemplateDefinition => processScala(template, newSubst, newMap, place, base = false)
                  case _ => //do nothing
                }
              case _ => processJava(superClass, newSubst, newMap, place)
            }
            superTypesBuff += newMap
          }
        case _ =>
      }
      // Aliased super types may hide a compound type behind the alias's lower bound.
      (superType.isAliasType match {
        case Some(AliasType(td: ScTypeAliasDefinition, lower, _)) => lower.getOrElse(superType)
        case _ => superType
      }) match {
        case c: ScCompoundType =>
          processRefinement(c, map, place)
        case _ =>
      }
    }
    map.setSupersMap(superTypesBuff.toList)
    map
  }

  /**
   * Composes the substitutor of a super class with the one of the derived
   * class, binding each of the super's type parameters and carrying over
   * matching alias substitutions.
   */
  def combine(superSubst : ScSubstitutor, derived : ScSubstitutor, superClass : PsiClass) = {
    var res : ScSubstitutor = ScSubstitutor.empty
    for (tp <- superClass.getTypeParameters) {
      res = res bindT ((tp.name, ScalaPsiUtil.getPsiElementId(tp)),
        derived.subst(superSubst.subst(ScalaPsiManager.typeVariable(tp))))
    }
    superClass match {
      case td : ScTypeDefinition =>
        var aliasesMap = res.aliasesMap
        for (alias <- td.aliases) {
          derived.aliasesMap.get(alias.name) match {
            case Some(t) => aliasesMap = aliasesMap + ((alias.name, t))
            case None =>
          }
        }
        res = new ScSubstitutor(res.tvMap, aliasesMap, None)
      case _ => ()
    }
    res
  }

  def processJava(clazz: PsiClass, subst: ScSubstitutor, map: Map, place: Option[PsiElement])
  def processScala(template: ScTemplateDefinition, subst: ScSubstitutor, map: Map,
                   place: Option[PsiElement], base: Boolean)
  def processRefinement(cp: ScCompoundType, map: Map, place: Option[PsiElement])
}
object MixinNodes {
  /**
   * Linearization of `clazz` as a sequence of types, most derived first.
   * The result is cached per class with recursion prevention (cyclic
   * hierarchies roll back to `Seq.empty`). The `scala` package object is
   * special-cased to just itself.
   */
  def linearization(clazz: PsiClass): Seq[ScType] = {
    clazz match {
      case obj: ScObject if obj.isPackageObject && obj.qualifiedName == "scala" =>
        return Seq(ScType.designator(obj))
      case _ =>
    }

    CachesUtil.getWithRecursionPreventingWithRollback(clazz, CachesUtil.LINEARIZATION_KEY,
      new CachesUtil.MyOptionalProvider(clazz, (clazz: PsiClass) => linearizationInner(clazz))
        (ScalaPsiUtil.getDependentItem(clazz)), Seq.empty)
  }

  /** Linearization of a compound (refinement) type over its components. */
  def linearization(compound: ScCompoundType, addTp: Boolean = false): Seq[ScType] = {
    val comps = compound.components

    generalLinearization(None, compound, addTp = addTp, supers = comps)
  }

  // Uncached worker for the class overload: builds the class's own type
  // (parameterized with its type variables when it has type parameters),
  // collects its direct super types and delegates to generalLinearization.
  private def linearizationInner(clazz: PsiClass): Seq[ScType] = {
    ProgressManager.checkCanceled()
    val tp = {
      def default =
        if (clazz.getTypeParameters.length == 0) ScType.designator(clazz)
        else ScParameterizedType(ScType.designator(clazz), clazz.
          getTypeParameters.map(tp => ScalaPsiManager.instance(clazz.getProject).typeVariable(tp)))
      clazz match {
        case td: ScTypeDefinition => td.getType(TypingContext.empty).getOrElse(default)
        case _ => default
      }
    }
    val supers: Seq[ScType] = {
      clazz match {
        case td: ScTemplateDefinition => td.superTypes
        case clazz: PsiClass => clazz.getSuperTypes.map {
          case ctp: PsiClassType =>
            // java.lang.Object is represented as a plain designator type.
            val cl = ctp.resolve()
            if (cl != null && cl.qualifiedName == "java.lang.Object") ScDesignatorType(cl)
            else ScType.create(ctp, clazz.getProject)
          case ctp => ScType.create(ctp, clazz.getProject)
        }.toSeq
      }
    }

    generalLinearization(Some(clazz.getProject), tp, addTp = true, supers = supers)
  }

  /**
   * Core linearization: walks `supers`, recursively linearizes each, and
   * prepends classes in reverse order while de-duplicating by qualified name,
   * yielding the usual Scala linearization order (most derived first).
   * When `addTp` is set, `tp` itself is prepended at the end.
   */
  private def generalLinearization(project: Option[Project], tp: ScType, addTp: Boolean, supers: Seq[ScType]): Seq[ScType] = {
    val buffer = new ListBuffer[ScType]
    val set: mutable.HashSet[String] = new mutable.HashSet //to add here qualified names of classes
    def classString(clazz: PsiClass): String = {
      // Objects, traits and classes with the same qualified name must not
      // collapse into one entry, so the kind is encoded into the key.
      clazz match {
        case obj: ScObject => "Object: " + obj.qualifiedName
        case tra: ScTrait => "Trait: " + tra.qualifiedName
        case _ => "Class: " + clazz.qualifiedName
      }
    }
    // Prepends tp to the buffer unless its class was already seen. For a
    // generic class seen twice, keeps the more specific (conforming) type.
    def add(tp: ScType) {
      ScType.extractClass(tp, project) match {
        case Some(clazz) if clazz.qualifiedName != null && !set.contains(classString(clazz)) =>
          tp +=: buffer
          set += classString(clazz)
        case Some(clazz) if clazz.getTypeParameters.length != 0 =>
          val i = buffer.indexWhere(newTp => {
            ScType.extractClass(newTp, Some(clazz.getProject)) match {
              case Some(newClazz) if ScEquivalenceUtil.areClassesEquivalent(newClazz, clazz) => true
              case _ => false
            }
          })
          if (i != -1) {
            val newTp = buffer.apply(i)
            if (tp.conforms(newTp)) buffer.update(i, tp)
          }
        case _ =>
          // Not a class type: unwrap an alias and keep compound types.
          (tp.isAliasType match {
            case Some(AliasType(td: ScTypeAliasDefinition, lower, _)) => lower.getOrElse(tp)
            case _ => tp
          }) match {
            case c: ScCompoundType => c +=: buffer
            case _ =>
          }
      }
    }
    val iterator = supers.iterator
    while (iterator.hasNext) {
      var tp = iterator.next()
      @tailrec
      def updateTp(tp: ScType): ScType = {
        // Follow alias upper bounds and skolemize existentials before
        // extracting the class.
        tp.isAliasType match {
          case Some(AliasType(_, _, Success(upper, _))) => updateTp(upper)
          case _ =>
            tp match {
              case ex: ScExistentialType => ex.skolem
              case _ => tp
            }
        }
      }
      tp = updateTp(tp)
      ScType.extractClassType(tp) match {
        case Some((clazz, subst)) =>
          val lin = linearization(clazz)
          // Add in reverse so that the most derived ends up at the front.
          val newIterator = lin.reverseIterator
          while (newIterator.hasNext) {
            val tp = newIterator.next()
            add(subst.subst(tp))
          }
        case _ =>
          (tp.isAliasType match {
            case Some(AliasType(td: ScTypeAliasDefinition, lower, _)) => lower.getOrElse(tp)
            case _ => tp
          }) match {
            case c: ScCompoundType =>
              val lin = linearization(c, addTp = true)
              val newIterator = lin.reverseIterator
              while (newIterator.hasNext) {
                val tp = newIterator.next()
                add(tp)
              }
            case _ =>
          }
      }
    }
    if (addTp) add(tp)
    buffer.toSeq
  }
}
| triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/toplevel/typedef/MixinNodes.scala | Scala | apache-2.0 | 21,842 |
package thesis.orderings
import thesis.matrixTypes._
import thesis.utils._
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.PriorityQueue
import breeze.linalg._
import breeze.numerics._
/**
* Ordering that finds k rows in the matrix with maximal error such that
* there are separated by half the size of the kernel.
* Then use those k rows to delimit k+1 blocks.
* Reorder the rows with a tsp
* @param k the number of rows with maximal error to find
* @param equalBlocks if false apply the procedure described above to find the blocks
* Otherwise, use k blocks of equal size
*/
/**
 * Ordering that finds k rows in the matrix with maximal error such that
 * there are separated by half the size of the kernel.
 * Then use those k rows to delimit k+1 blocks.
 * Reorder the rows with a tsp
 * @param k the number of rows with maximal error to find
 * @param equalBlocks if false apply the procedure described above to find the blocks
 *                    Otherwise, use k blocks of equal size
 */
class KMaxOrdering(k: Int = 10, equalBlocks: Boolean = false) extends MatrixOrdering with UsesKernelOptimization{
  override def toString:String = "KMax"

  /**
   * @param mat The matrix on which the errors are computed
   * @return An array containing the error for every row
   */
  private def getErrorRows(mat: MatrixMoves): Array[Double] = {
    Array.tabulate(mat.rows){ r =>
      mat.getFullConvolutionErrorSingleRow(r)
    }
  }

  /**
   * @param mat The matrix on which the maximal error rows are computed
   * @param filteredRows The rows that were filtered such that they are far enough away from the max row
   * @param errors The errors corresponding to the filtered rows
   * @return The row in filteredRows with maximal error
   */
  private def getMax(mat: MatrixMoves, filteredRows: Array[Int], errors: Array[Double]) = {
    // Plain linear scan; filteredRows is assumed non-empty by the caller
    // (guarded by `filteredRows.size > 0` in changeOrdering).
    var max = errors(filteredRows(0))
    var best_row = filteredRows(0)
    var i = 0
    while(i < filteredRows.size){
      if(errors(filteredRows(i)) > max){
        max = errors(filteredRows(i))
        best_row = filteredRows(i)
      }
      i += 1
    }
    best_row
  }

  /**
   * @param mat the matrix on which to compute the hamming distance
   * @param i First row index
   * @param j Second row index
   * @return the hamming distance between i and j in mat
   */
  private def hamming(i: Int, j: Int, mat: MatrixMoves) = {
    var count = 0
    var c = 0
    while(c < mat.cols){
      if(mat(i,c) != mat(j,c)) count += 1
      c += 1
    }
    count
  }

  /**
   * @param rows: rows with maximal error
   * @param mat: matrix being reordered
   * @return an array containing the different blocks in the format
   *         upper row block i, lower row block i
   * @note consumes `rows` by dequeueing; assumes it was reversed so the
   *       smallest row index comes out first (see changeOrdering).
   */
  private def getBlocks(rows: PriorityQueue[Int], mat: MatrixMoves): Array[Array[Int]] = {
    val size = rows.size - 1
    var next = rows.dequeue
    val blocks = Array.tabulate[Int](size,2){ case(i,j) =>
      if(j==0) next
      else if(i == size-1) {
        rows.dequeue
      }
      else {
        val temp = rows.dequeue
        // we cut the block between the rows with the greatest hamming distance
        // to keep similar rows together
        if(hamming(temp,temp+1, mat) > hamming(temp,temp-1, mat)) next = temp + 1
        else next = temp
        next - 1
      }
    }

    assert(blocks.forall{array =>
      array.forall{i =>
        mat.isDefinedAtRow(i)
      }
    })

    blocks
  }

  /**
   * @param up Last line of the upper block
   * @param down First line of the lower block
   * @param i Index that must be converted
   * @return if i is in the upper block, returns i
   *         otherwise return the line in the lower block
   */
  private def convertUpToDown(up: Int, down: Int)(i: Int): Int = {
    if(i <= up) i
    else down + (i - up) - 1
  }

  /**
   * @param up last line of the upper block
   * @param down first line of the lower block
   * @param i index that must be converted
   * @return if i is in the lower block, returns i
   *         otherwise return the line in the upper block
   */
  private def convertDownToUp(up: Int, down: Int)(i: Int): Int = {
    if(down <= i) i
    else up + (i - down) + 1
  }

  /**
   * @param up upper block
   * @param down lower block
   * @param mat matrix we are reordering
   * @return the error obtained if block `up` is put directly above block `down`,
   *         measured on the n/2 boundary rows on each side of the seam
   *         (n = kernel height)
   */
  private def distanceBlocks(up: Array[Int], down: Array[Int], mat: MatrixMoves) = {
    var sum = 0.0
    var n = mat.getConvolutionSettings.kernel.rows

    sum += mat.partialErrorBlocks(Math.max(up(1) - n/2 + 1, 0), up(1), convertUpToDown(up(1), down(0)))
    sum + mat.partialErrorBlocks(down(0), Math.min(down(0) + n/2 - 1, mat.rows-1), convertDownToUp(up(1), down(0)))
  }

  /**
   * @return a matrix containing the distances between the blocks
   * @note BEWARE! it is not always symmetric
   */
  private def getAsymmetricDistances(mat: MatrixMoves, blocks: Array[Array[Int]]): DenseMatrix[Double] = {
    DenseMatrix.tabulate[Double](blocks.size,blocks.size){ case(i,j) =>
      distanceBlocks(blocks(i),blocks(j), mat)
    }
  }

  /**
   * @return Convert the asymmetric matrix dists used for the tsp
   *         into a symmetric distance matrix by introducing ghost rows
   *         (node i and its ghost i+n are tied with -inf cost, the standard
   *         asymmetric-to-symmetric TSP transformation)
   */
  private def makeDistsSym(dists: DenseMatrix[Double]): DenseMatrix[Double] = {
    DenseMatrix.tabulate[Double](2*dists.rows,2*dists.rows){ case(i,j) =>
      if((i < dists.rows && j < dists.rows) || (dists.rows <= i && dists.rows <= j)) Double.MaxValue
      else if(i == (j - dists.rows) || j == (i - dists.rows)) Double.MinValue
      else if(j > i) dists(j - dists.rows,i)
      else dists(i - dists.rows,j)
    }
  }

  /**
   * @return Remove the ghost rows from order.
   */
  private def filterDouble(order: Array[Int], mat: MatrixMoves): Array[Int] = {
    Array.tabulate(order.size/2){ i =>
      // A real node and its ghost must be adjacent in the tour.
      assert(Math.abs(order(2*i) - order(2*i+1)) == mat.rows)
      Math.min(order(2*i), order(2*i+1))
    }
  }

  /**
   * @param orderBlocks order in which the blocks should be reordered
   * @return the corresponding order of the rows
   */
  private def getPermutation(orderBlocks: Array[Int], blocks: Array[Array[Int]]): Array[Int] = {
    orderBlocks.flatMap{ b =>
      (blocks(b)(0) to blocks(b)(1))
    }
  }

  /**
   * @inheritdoc
   * Repeats until the error improvement drops below 0.01; each iteration
   * transposes the matrix (`mat.t`) so rows and columns are reordered
   * alternately.
   */
  override def changeOrdering(mat:MatrixMoves) = {
    var previousError = Double.MaxValue

    while(Math.abs(previousError - mat.getError) > 0.01){
      previousError = mat.getError
      println(mat.getError)
      val blocks = if(!equalBlocks){
        val errors = getErrorRows(mat)

        // get k rows with highest error separated by a distance at least n
        // where n is the number or rows in the kernel
        var n = mat.getConvolutionSettings.kernel.rows
        var b = PriorityQueue[Int]()
        var filteredRows = (0 until mat.rows).toArray
        while(b.size < k && filteredRows.size > 0){
          var maxRow = getMax(mat, filteredRows, errors)
          // filter rows that are too close to maxRow
          filteredRows = filteredRows.filter{ x => Math.abs(x - maxRow) > n}
          b += maxRow
        }

        // add the first and last rows if there are not there yet
        if(b.min != 0) b += 0
        if(b.max != mat.rows-1) b += mat.rows-1

        b = b.reverse // we want the first row first
        getBlocks(b, mat)
      }
      else{
        val blockSize = Math.ceil(mat.rows.toDouble/k).toInt
        Array.tabulate[Array[Int]](k){ x =>
          Array(x*blockSize, Math.min((x+1)*blockSize - 1, mat.rows-1))
        }
      }

      // get the distance matrix and make it symmetric
      var aSymDists = getAsymmetricDistances(mat, blocks)
      val symDists = makeDistsSym(aSymDists)

      val orderBlocks = filterDouble(TSPSolver.solve_tsp(symDists), mat)
      mat.permute(getPermutation(orderBlocks, blocks))
      mat.t
    }

    if(mat.isTranspose) mat.t
    else mat
  }
}
| GLeurquin/Faithful-visualization-of-categorical-datasets | src/main/scala/Orderings/KMaxOrdering.scala | Scala | mit | 8,447 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.livy.server.batch
import javax.servlet.http.HttpServletRequest
import org.apache.livy.LivyConf
import org.apache.livy.server.{AccessManager, SessionServlet}
import org.apache.livy.server.recovery.SessionStore
import org.apache.livy.sessions.BatchSessionManager
import org.apache.livy.utils.AppInfo
/**
 * Serializable snapshot of a batch session returned to REST clients.
 * `log` carries only the tail of the session log (see
 * BatchSessionServlet.clientSessionView); it is empty when the requester
 * lacks view access.
 */
case class BatchSessionView(
  id: Long,
  name: Option[String],
  owner: String,
  proxyUser: Option[String],
  state: String,
  appId: Option[String],
  appInfo: AppInfo,
  log: Seq[String])
/**
 * REST servlet for batch sessions: creates new sessions from POSTed
 * [[CreateBatchRequest]]s and renders sessions as [[BatchSessionView]]s,
 * exposing the log tail only to users with view access.
 */
class BatchSessionServlet(
    sessionManager: BatchSessionManager,
    sessionStore: SessionStore,
    livyConf: LivyConf,
    accessManager: AccessManager)
  extends SessionServlet(sessionManager, livyConf, accessManager)
{

  override protected def createSession(req: HttpServletRequest): BatchSession = {
    // Deserialize the request body first, then allocate a fresh session id.
    val request = bodyAs[CreateBatchRequest](req)
    val newId = sessionManager.nextId()
    BatchSession.create(
      newId,
      request.name,
      request,
      livyConf,
      accessManager,
      remoteUser(req),
      proxyUser(req, request.proxyUser),
      sessionStore)
  }

  override protected[batch] def clientSessionView(
      session: BatchSession,
      req: HttpServletRequest): Any = {
    val logs =
      if (!accessManager.hasViewAccess(session.owner,
                                       effectiveUser(req),
                                       session.proxyUser.getOrElse(""))) {
        // No view access: never leak log contents.
        Nil
      } else {
        // Expose only the last 10 lines of the session log.
        val allLines = session.logLines()
        val tailSize = 10
        val start = math.max(0, allLines.length - tailSize)
        allLines.view(start, start + tailSize).toSeq
      }
    BatchSessionView(session.id, session.name, session.owner, session.proxyUser,
      session.state.toString, session.appId, session.appInfo, logs)
  }

}
| ajbozarth/incubator-livy | server/src/main/scala/org/apache/livy/server/batch/BatchSessionServlet.scala | Scala | apache-2.0 | 2,676 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.json
import java.io._
import java.nio.charset.{Charset, StandardCharsets, UnsupportedCharsetException}
import java.nio.file.Files
import java.sql.{Date, Timestamp}
import java.util.Locale
import com.fasterxml.jackson.core.JsonFactory
import org.apache.hadoop.fs.{Path, PathFilter}
import org.apache.hadoop.io.SequenceFile.CompressionType
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.spark.{SparkException, TestUtils}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{functions => F, _}
import org.apache.spark.sql.catalyst.json._
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.ExternalRDD
import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
import org.apache.spark.sql.types.StructType.fromDDL
import org.apache.spark.util.Utils
/** Path filter that rejects every file living under a directory named "p=2". */
class TestFileFilter extends PathFilter {
  override def accept(path: Path): Boolean = {
    val parentName = path.getParent.getName
    parentName != "p=2"
  }
}
class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
import testImplicits._
  test("Type promotion") {
    // Asserts that a parsed value matches `expected` in both runtime class
    // and value — promotion must land on exactly the requested type.
    def checkTypePromotion(expected: Any, actual: Any) {
      assert(expected.getClass == actual.getClass,
        s"Failed to promote ${actual.getClass} to ${expected.getClass}.")
      assert(expected == actual,
        s"Promoted value ${actual}(${actual.getClass}) does not equal the expected value " +
        s"${expected}(${expected.getClass}).")
    }

    val factory = new JsonFactory()
    // Round-trips `value` through Jackson (write as JSON, parse back) and
    // converts the parsed token to `dataType` with JacksonParser.
    def enforceCorrectType(
        value: Any,
        dataType: DataType,
        options: Map[String, String] = Map.empty): Any = {
      val writer = new StringWriter()
      Utils.tryWithResource(factory.createGenerator(writer)) { generator =>
        generator.writeObject(value)
        generator.flush()
      }

      val dummyOption = new JSONOptions(options, SQLConf.get.sessionLocalTimeZone)
      val dummySchema = StructType(Seq.empty)
      val parser = new JacksonParser(dummySchema, dummyOption, allowArrayAsStructs = true)

      Utils.tryWithResource(factory.createParser(writer.toString)) { jsonParser =>
        jsonParser.nextToken()
        val converter = parser.makeConverter(dataType)
        converter.apply(jsonParser)
      }
    }

    // An int promotes to int, long, double and decimal.
    val intNumber: Int = 2147483647
    checkTypePromotion(intNumber, enforceCorrectType(intNumber, IntegerType))
    checkTypePromotion(intNumber.toLong, enforceCorrectType(intNumber, LongType))
    checkTypePromotion(intNumber.toDouble, enforceCorrectType(intNumber, DoubleType))
    checkTypePromotion(
      Decimal(intNumber), enforceCorrectType(intNumber, DecimalType.SYSTEM_DEFAULT))

    val longNumber: Long = 9223372036854775807L
    checkTypePromotion(longNumber, enforceCorrectType(longNumber, LongType))
    checkTypePromotion(longNumber.toDouble, enforceCorrectType(longNumber, DoubleType))
    checkTypePromotion(
      Decimal(longNumber), enforceCorrectType(longNumber, DecimalType.SYSTEM_DEFAULT))

    val doubleNumber: Double = 1.7976931348623157E308d
    checkTypePromotion(doubleNumber.toDouble, enforceCorrectType(doubleNumber, DoubleType))

    // Numeric JSON values are read as epoch seconds when targeting TimestampType.
    checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(intNumber * 1000L)),
      enforceCorrectType(intNumber, TimestampType))
    checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(intNumber.toLong * 1000L)),
      enforceCorrectType(intNumber.toLong, TimestampType))
    // String timestamps honor a user-provided timestampFormat.
    val strTime = "2014-09-30 12:34:56"
    checkTypePromotion(
      expected = DateTimeUtils.fromJavaTimestamp(Timestamp.valueOf(strTime)),
      enforceCorrectType(strTime, TimestampType,
        Map("timestampFormat" -> "yyyy-MM-dd HH:mm:ss")))

    val strDate = "2014-10-15"
    checkTypePromotion(
      DateTimeUtils.fromJavaDate(Date.valueOf(strDate)), enforceCorrectType(strDate, DateType))

    // ISO-8601 timestamps with explicit zone offsets.
    val ISO8601Time1 = "1970-01-01T01:00:01.0Z"
    checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(3601000)),
      enforceCorrectType(
        ISO8601Time1,
        TimestampType,
        Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ss.SX")))
    val ISO8601Time2 = "1970-01-01T02:00:01-01:00"
    checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(10801000)),
      enforceCorrectType(
        ISO8601Time2,
        TimestampType,
        Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ssXXX")))

    val ISO8601Date = "1970-01-01"
    checkTypePromotion(DateTimeUtils.millisToDays(32400000),
      enforceCorrectType(ISO8601Date, DateType))
  }
test("Get compatible type") {
  // Verifies that JsonInferSchema.compatibleType returns the expected
  // least-upper-bound type for every pair of JSON-inferable types, and that
  // the operation is commutative (same result for both argument orders).
  //
  // Fix: the helper previously used deprecated Scala procedure syntax
  // (`def f(...) { ... }`); an explicit `: Unit =` is the modern form.
  def checkDataType(t1: DataType, t2: DataType, expected: DataType): Unit = {
    var actual = JsonInferSchema.compatibleType(t1, t2)
    assert(actual == expected,
      s"Expected $expected as the most general data type for $t1 and $t2, found $actual")
    // compatibleType must be symmetric: check the reversed argument order too.
    actual = JsonInferSchema.compatibleType(t2, t1)
    assert(actual == expected,
      s"Expected $expected as the most general data type for $t1 and $t2, found $actual")
  }

  // NullType: compatible with everything; the other type wins.
  checkDataType(NullType, BooleanType, BooleanType)
  checkDataType(NullType, IntegerType, IntegerType)
  checkDataType(NullType, LongType, LongType)
  checkDataType(NullType, DoubleType, DoubleType)
  checkDataType(NullType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT)
  checkDataType(NullType, StringType, StringType)
  checkDataType(NullType, ArrayType(IntegerType), ArrayType(IntegerType))
  checkDataType(NullType, StructType(Nil), StructType(Nil))
  checkDataType(NullType, NullType, NullType)

  // BooleanType: incompatible with any non-boolean type -> falls back to String.
  checkDataType(BooleanType, BooleanType, BooleanType)
  checkDataType(BooleanType, IntegerType, StringType)
  checkDataType(BooleanType, LongType, StringType)
  checkDataType(BooleanType, DoubleType, StringType)
  checkDataType(BooleanType, DecimalType.SYSTEM_DEFAULT, StringType)
  checkDataType(BooleanType, StringType, StringType)
  checkDataType(BooleanType, ArrayType(IntegerType), StringType)
  checkDataType(BooleanType, StructType(Nil), StringType)

  // IntegerType: widens along the numeric hierarchy, otherwise String.
  checkDataType(IntegerType, IntegerType, IntegerType)
  checkDataType(IntegerType, LongType, LongType)
  checkDataType(IntegerType, DoubleType, DoubleType)
  checkDataType(IntegerType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT)
  checkDataType(IntegerType, StringType, StringType)
  checkDataType(IntegerType, ArrayType(IntegerType), StringType)
  checkDataType(IntegerType, StructType(Nil), StringType)

  // LongType
  checkDataType(LongType, LongType, LongType)
  checkDataType(LongType, DoubleType, DoubleType)
  checkDataType(LongType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT)
  checkDataType(LongType, StringType, StringType)
  checkDataType(LongType, ArrayType(IntegerType), StringType)
  checkDataType(LongType, StructType(Nil), StringType)

  // DoubleType: note Double wins over the system-default decimal.
  checkDataType(DoubleType, DoubleType, DoubleType)
  checkDataType(DoubleType, DecimalType.SYSTEM_DEFAULT, DoubleType)
  checkDataType(DoubleType, StringType, StringType)
  checkDataType(DoubleType, ArrayType(IntegerType), StringType)
  checkDataType(DoubleType, StructType(Nil), StringType)

  // DecimalType
  checkDataType(DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT,
    DecimalType.SYSTEM_DEFAULT)
  checkDataType(DecimalType.SYSTEM_DEFAULT, StringType, StringType)
  checkDataType(DecimalType.SYSTEM_DEFAULT, ArrayType(IntegerType), StringType)
  checkDataType(DecimalType.SYSTEM_DEFAULT, StructType(Nil), StringType)

  // StringType: absorbs everything.
  checkDataType(StringType, StringType, StringType)
  checkDataType(StringType, ArrayType(IntegerType), StringType)
  checkDataType(StringType, StructType(Nil), StringType)

  // ArrayType: element types merge recursively; containsNull is OR-ed.
  checkDataType(ArrayType(IntegerType), ArrayType(IntegerType), ArrayType(IntegerType))
  checkDataType(ArrayType(IntegerType), ArrayType(LongType), ArrayType(LongType))
  checkDataType(ArrayType(IntegerType), ArrayType(StringType), ArrayType(StringType))
  checkDataType(ArrayType(IntegerType), StructType(Nil), StringType)
  checkDataType(
    ArrayType(IntegerType, true), ArrayType(IntegerType), ArrayType(IntegerType, true))
  checkDataType(
    ArrayType(IntegerType, true), ArrayType(IntegerType, false), ArrayType(IntegerType, true))
  checkDataType(
    ArrayType(IntegerType, true), ArrayType(IntegerType, true), ArrayType(IntegerType, true))
  checkDataType(
    ArrayType(IntegerType, false), ArrayType(IntegerType), ArrayType(IntegerType, true))
  checkDataType(
    ArrayType(IntegerType, false), ArrayType(IntegerType, false), ArrayType(IntegerType, false))
  checkDataType(
    ArrayType(IntegerType, false), ArrayType(IntegerType, true), ArrayType(IntegerType, true))

  // StructType: fields are merged by name; shared fields merge their types,
  // fields present on only one side are kept.
  checkDataType(StructType(Nil), StructType(Nil), StructType(Nil))
  checkDataType(
    StructType(StructField("f1", IntegerType, true) :: Nil),
    StructType(StructField("f1", IntegerType, true) :: Nil),
    StructType(StructField("f1", IntegerType, true) :: Nil))
  checkDataType(
    StructType(StructField("f1", IntegerType, true) :: Nil),
    StructType(Nil),
    StructType(StructField("f1", IntegerType, true) :: Nil))
  checkDataType(
    StructType(
      StructField("f1", IntegerType, true) ::
      StructField("f2", IntegerType, true) :: Nil),
    StructType(StructField("f1", LongType, true) :: Nil),
    StructType(
      StructField("f1", LongType, true) ::
      StructField("f2", IntegerType, true) :: Nil))
  checkDataType(
    StructType(
      StructField("f1", IntegerType, true) :: Nil),
    StructType(
      StructField("f2", IntegerType, true) :: Nil),
    StructType(
      StructField("f1", IntegerType, true) ::
      StructField("f2", IntegerType, true) :: Nil))
  // Struct vs. non-struct is irreconcilable -> String.
  checkDataType(
    StructType(
      StructField("f1", IntegerType, true) :: Nil),
    DecimalType.SYSTEM_DEFAULT,
    StringType)
}
test("Complex field and type inferring with null in sampling") {
  // Schema inference must keep fields that are null in some of the sampled
  // records, inferring them as nullable StringType.
  val df = spark.read.json(jsonNullStruct)
  val expectedSchema = StructType(Seq(
    StructField("headers", StructType(Seq(
      StructField("Charset", StringType, true),
      StructField("Host", StringType, true))), true),
    StructField("ip", StringType, true),
    StructField("nullstr", StringType, true)))
  assert(expectedSchema === df.schema)

  df.createOrReplaceTempView("jsonTable")
  checkAnswer(
    sql("select nullstr, headers.Host from jsonTable"),
    Seq(Row("", "1.abc.com"), Row("", null), Row("", null), Row(null, null))
  )
}
test("Primitive field and type inferring") {
  // End-to-end schema inference for primitive JSON values: integers widen to
  // LongType, an over-Long big integer becomes DecimalType(20, 0), and a JSON
  // null column is inferred as nullable StringType.
  val jsonDF = spark.read.json(primitiveFieldAndType)

  val expectedSchema = StructType(
    StructField("bigInteger", DecimalType(20, 0), true) ::
    StructField("boolean", BooleanType, true) ::
    StructField("double", DoubleType, true) ::
    StructField("integer", LongType, true) ::
    StructField("long", LongType, true) ::
    StructField("null", StringType, true) ::
    StructField("string", StringType, true) :: Nil)

  assert(expectedSchema === jsonDF.schema)

  jsonDF.createOrReplaceTempView("jsonTable")

  checkAnswer(
    sql("select * from jsonTable"),
    Row(new java.math.BigDecimal("92233720368547758070"),
      true,
      1.7976931348623157E308,
      10,
      21474836470L,
      null,
      "this is a simple string.")
  )
}
test("Complex field and type inferring") {
  // Schema inference for nested structures: arrays of primitives, arrays of
  // arrays, arrays of structs, and structs containing arrays. Also checks
  // element access via SQL on each of those shapes.
  val jsonDF = spark.read.json(complexFieldAndType1)

  val expectedSchema = StructType(
    StructField("arrayOfArray1", ArrayType(ArrayType(StringType, true), true), true) ::
    StructField("arrayOfArray2", ArrayType(ArrayType(DoubleType, true), true), true) ::
    StructField("arrayOfBigInteger", ArrayType(DecimalType(21, 0), true), true) ::
    StructField("arrayOfBoolean", ArrayType(BooleanType, true), true) ::
    StructField("arrayOfDouble", ArrayType(DoubleType, true), true) ::
    StructField("arrayOfInteger", ArrayType(LongType, true), true) ::
    StructField("arrayOfLong", ArrayType(LongType, true), true) ::
    StructField("arrayOfNull", ArrayType(StringType, true), true) ::
    StructField("arrayOfString", ArrayType(StringType, true), true) ::
    StructField("arrayOfStruct", ArrayType(
      StructType(
        StructField("field1", BooleanType, true) ::
        StructField("field2", StringType, true) ::
        StructField("field3", StringType, true) :: Nil), true), true) ::
    StructField("struct", StructType(
      StructField("field1", BooleanType, true) ::
      StructField("field2", DecimalType(20, 0), true) :: Nil), true) ::
    StructField("structWithArrayFields", StructType(
      StructField("field1", ArrayType(LongType, true), true) ::
      StructField("field2", ArrayType(StringType, true), true) :: Nil), true) :: Nil)

  assert(expectedSchema === jsonDF.schema)

  jsonDF.createOrReplaceTempView("jsonTable")

  // Access elements of a primitive array; out-of-range index yields null.
  checkAnswer(
    sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from jsonTable"),
    Row("str1", "str2", null)
  )

  // Access an array of null values.
  checkAnswer(
    sql("select arrayOfNull from jsonTable"),
    Row(Seq(null, null, null, null))
  )

  // Access elements of a BigInteger array (we use DecimalType internally).
  checkAnswer(
    sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] from jsonTable"),
    Row(new java.math.BigDecimal("922337203685477580700"),
      new java.math.BigDecimal("-922337203685477580800"), null)
  )

  // Access elements of an array of arrays.
  checkAnswer(
    sql("select arrayOfArray1[0], arrayOfArray1[1] from jsonTable"),
    Row(Seq("1", "2", "3"), Seq("str1", "str2"))
  )

  // Access elements of an array of arrays.
  checkAnswer(
    sql("select arrayOfArray2[0], arrayOfArray2[1] from jsonTable"),
    Row(Seq(1.0, 2.0, 3.0), Seq(1.1, 2.1, 3.1))
  )

  // Access elements of an array inside a field with the type of ArrayType(ArrayType).
  checkAnswer(
    sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from jsonTable"),
    Row("str2", 2.1)
  )

  // Access elements of an array of structs; missing struct fields are null,
  // and an out-of-range struct index is null.
  checkAnswer(
    sql("select arrayOfStruct[0], arrayOfStruct[1], arrayOfStruct[2], arrayOfStruct[3] " +
      "from jsonTable"),
    Row(
      Row(true, "str1", null),
      Row(false, null, null),
      Row(null, null, null),
      null)
  )

  // Access a struct and fields inside of it.
  checkAnswer(
    sql("select struct, struct.field1, struct.field2 from jsonTable"),
    Row(
      Row(true, new java.math.BigDecimal("92233720368547758070")),
      true,
      new java.math.BigDecimal("92233720368547758070")) :: Nil
  )

  // Access an array field of a struct.
  checkAnswer(
    sql("select structWithArrayFields.field1, structWithArrayFields.field2 from jsonTable"),
    Row(Seq(4, 5, 6), Seq("str1", "str2"))
  )

  // Access elements of an array field of a struct.
  checkAnswer(
    sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] from jsonTable"),
    Row(5, null)
  )
}
test("GetField operation on complex data type") {
  // Field extraction from an array-of-structs column, both for a single
  // element (by index) and across the whole array at once.
  val df = spark.read.json(complexFieldAndType1)
  df.createOrReplaceTempView("jsonTable")

  // arrayOfStruct[0].fieldN yields that element's field values.
  checkAnswer(
    sql("select arrayOfStruct[0].field1, arrayOfStruct[0].field2 from jsonTable"),
    Row(true, "str1")
  )

  // arrayOfStruct.fieldN collects the field value from every element.
  checkAnswer(
    sql("select arrayOfStruct.field1, arrayOfStruct.field2 from jsonTable"),
    Row(Seq(true, false, null), Seq("str1", null, null))
  )
}
test("Type conflict in primitive field values") {
  // When records disagree on a field's type, inference resolves to the most
  // general compatible type: number/boolean and number/string conflicts fall
  // back to StringType; numeric conflicts widen (Long -> Double).
  val jsonDF = spark.read.json(primitiveFieldValueTypeConflict)

  val expectedSchema = StructType(
    StructField("num_bool", StringType, true) ::
    StructField("num_num_1", LongType, true) ::
    StructField("num_num_2", DoubleType, true) ::
    StructField("num_num_3", DoubleType, true) ::
    StructField("num_str", StringType, true) ::
    StructField("str_bool", StringType, true) :: Nil)

  assert(expectedSchema === jsonDF.schema)

  jsonDF.createOrReplaceTempView("jsonTable")

  checkAnswer(
    sql("select * from jsonTable"),
    Row("true", 11L, null, 1.1, "13.1", "str1") ::
      Row("12", null, 21474836470.9, null, null, "true") ::
      Row("false", 21474836470L, 92233720368547758070d, 100, "str1", "false") ::
      Row(null, 21474836570L, 1.1, 21474836470L, "92233720368547758070", null) :: Nil
  )

  // Number and Boolean conflict: resolve the type as number in this query.
  checkAnswer(
    sql("select num_bool - 10 from jsonTable where num_bool > 11"),
    Row(2)
  )

  // Widening to LongType
  checkAnswer(
    sql("select num_num_1 - 100 from jsonTable where num_num_1 > 11"),
    Row(21474836370L) :: Row(21474836470L) :: Nil
  )

  checkAnswer(
    sql("select num_num_1 - 100 from jsonTable where num_num_1 > 10"),
    Row(-89) :: Row(21474836370L) :: Row(21474836470L) :: Nil
  )

  // Widening to DecimalType
  checkAnswer(
    sql("select num_num_2 + 1.3 from jsonTable where num_num_2 > 1.1"),
    Row(21474836472.2) ::
      Row(92233720368547758071.3) :: Nil
  )

  // Widening to Double
  checkAnswer(
    sql("select num_num_3 + 1.2 from jsonTable where num_num_3 > 1.1"),
    Row(101.2) :: Row(21474836471.2) :: Nil
  )

  // Number and String conflict: resolve the type as number in this query.
  checkAnswer(
    sql("select num_str + 1.2 from jsonTable where num_str > 14d"),
    Row(92233720368547758071.2)
  )

  // Number and String conflict: resolve the type as number in this query.
  checkAnswer(
    sql("select num_str + 1.2 from jsonTable where num_str >= 92233720368547758060"),
    Row(new java.math.BigDecimal("92233720368547758071.2").doubleValue)
  )

  // String and Boolean conflict: resolve the type as string.
  checkAnswer(
    sql("select * from jsonTable where str_bool = 'str1'"),
    Row("true", 11L, null, 1.1, "13.1", "str1")
  )
}
// Disabled: documents desired-but-unimplemented analyzer behavior (promoting
// strings/numbers to booleans in boolean expressions, and exact decimal
// arithmetic on string-typed numeric fields). See inline comments.
ignore("Type conflict in primitive field values (Ignored)") {
  val jsonDF = spark.read.json(primitiveFieldValueTypeConflict)
  jsonDF.createOrReplaceTempView("jsonTable")

  // Right now, the analyzer does not promote strings in a boolean expression.
  // Number and Boolean conflict: resolve the type as boolean in this query.
  checkAnswer(
    sql("select num_bool from jsonTable where NOT num_bool"),
    Row(false)
  )

  checkAnswer(
    sql("select str_bool from jsonTable where NOT str_bool"),
    Row(false)
  )

  // Right now, the analyzer does not know that num_bool should be treated as a boolean.
  // Number and Boolean conflict: resolve the type as boolean in this query.
  checkAnswer(
    sql("select num_bool from jsonTable where num_bool"),
    Row(true)
  )

  checkAnswer(
    sql("select str_bool from jsonTable where str_bool"),
    Row(false)
  )

  // The plan of the following DSL is
  // Project [(CAST(num_str#65:4, DoubleType) + 1.2) AS num#78]
  //  Filter (CAST(CAST(num_str#65:4, DoubleType), DecimalType) > 92233720368547758060)
  //    ExistingRdd [num_bool#61,num_num_1#62L,num_num_2#63,num_num_3#64,num_str#65,str_bool#66]
  // We should directly cast num_str to DecimalType and also need to do the right type promotion
  // in the Project.
  checkAnswer(
    jsonDF.
      where('num_str >= BigDecimal("92233720368547758060")).
      select(('num_str + 1.2).as("num")),
    Row(new java.math.BigDecimal("92233720368547758071.2").doubleValue())
  )

  // The following test will fail. The type of num_str is StringType.
  // So, to evaluate num_str + 1.2, we first need to use Cast to convert the type.
  // In our test data, one value of num_str is 13.1.
  // The result of (CAST(num_str#65:4, DoubleType) + 1.2) for this value is 14.299999999999999,
  // which is not 14.3.
  // Number and String conflict: resolve the type as number in this query.
  checkAnswer(
    sql("select num_str + 1.2 from jsonTable where num_str > 13"),
    Row(BigDecimal("14.3")) :: Row(BigDecimal("92233720368547758071.2")) :: Nil
  )
}
test("Type conflict in complex field values") {
  // When a field mixes complex and primitive values across records (e.g. a
  // struct in one record, a number in another), inference falls back to
  // StringType for that field and the raw JSON text is kept as the value.
  val jsonDF = spark.read.json(complexFieldValueTypeConflict)

  val expectedSchema = StructType(
    StructField("array", ArrayType(LongType, true), true) ::
    StructField("num_struct", StringType, true) ::
    StructField("str_array", StringType, true) ::
    StructField("struct", StructType(
      StructField("field", StringType, true) :: Nil), true) ::
    StructField("struct_array", StringType, true) :: Nil)

  assert(expectedSchema === jsonDF.schema)

  jsonDF.createOrReplaceTempView("jsonTable")

  checkAnswer(
    sql("select * from jsonTable"),
    Row(Seq(), "11", "[1,2,3]", Row(null), "[]") ::
      Row(null, """{"field":false}""", null, null, "{}") ::
      Row(Seq(4, 5, 6), null, "str", Row(null), "[7,8,9]") ::
      Row(Seq(7), "{}", """["str1","str2",33]""", Row("str"), """{"field":true}""") :: Nil
  )
}
test("Type conflict in array elements") {
  // Type conflicts WITHIN a single array: mixed element types degrade the
  // element type to StringType (array1/array3), while structurally compatible
  // structs merge their field types (array2).
  val jsonDF = spark.read.json(arrayElementTypeConflict)

  val expectedSchema = StructType(
    StructField("array1", ArrayType(StringType, true), true) ::
    StructField("array2", ArrayType(StructType(
      StructField("field", LongType, true) :: Nil), true), true) ::
    StructField("array3", ArrayType(StringType, true), true) :: Nil)

  assert(expectedSchema === jsonDF.schema)

  jsonDF.createOrReplaceTempView("jsonTable")

  checkAnswer(
    sql("select * from jsonTable"),
    Row(Seq("1", "1.1", "true", null, "[]", "{}", "[2,3,4]",
      """{"field":"str"}"""), Seq(Row(214748364700L), Row(1)), null) ::
      Row(null, null, Seq("""{"field":"str"}""", """{"field":1}""")) ::
      Row(null, null, Seq("1", "2", "3")) :: Nil
  )

  // Treat an element as a number.
  checkAnswer(
    sql("select array1[0] + 1 from jsonTable where array1 is not null"),
    Row(2)
  )
}
test("Handling missing fields") {
  // Fields that are absent from some records must still be inferred, each as
  // a nullable column of its observed type.
  val df = spark.read.json(missingFields)
  val expectedSchema = StructType(Seq(
    StructField("a", BooleanType, true),
    StructField("b", LongType, true),
    StructField("c", ArrayType(LongType, true), true),
    StructField("d", StructType(Seq(StructField("field", BooleanType, true))), true),
    StructField("e", StringType, true)))
  assert(expectedSchema === df.schema)

  df.createOrReplaceTempView("jsonTable")
}
test("Loading a JSON dataset from a text file") {
  // Round-trip through the filesystem: write the records as a text file
  // (newlines collapsed so each record stays on one line), then read it back
  // with the JSON reader and verify schema and contents.
  val dir = Utils.createTempDir()
  dir.delete()
  val path = dir.getCanonicalPath
  primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
  val jsonDF = spark.read.json(path)

  val expectedSchema = StructType(
    StructField("bigInteger", DecimalType(20, 0), true) ::
    StructField("boolean", BooleanType, true) ::
    StructField("double", DoubleType, true) ::
    StructField("integer", LongType, true) ::
    StructField("long", LongType, true) ::
    StructField("null", StringType, true) ::
    StructField("string", StringType, true) :: Nil)

  assert(expectedSchema === jsonDF.schema)

  jsonDF.createOrReplaceTempView("jsonTable")

  checkAnswer(
    sql("select * from jsonTable"),
    Row(new java.math.BigDecimal("92233720368547758070"),
      true,
      1.7976931348623157E308,
      10,
      21474836470L,
      null,
      "this is a simple string.")
  )
}
test("Loading a JSON dataset primitivesAsString returns schema with primitive types as strings") {
  // With primitivesAsString=true, every primitive column is inferred as
  // StringType (JSON nulls still read back as null).
  val dir = Utils.createTempDir()
  dir.delete()
  val path = dir.getCanonicalPath
  primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
  val jsonDF = spark.read.option("primitivesAsString", "true").json(path)

  val expectedSchema = StructType(
    StructField("bigInteger", StringType, true) ::
    StructField("boolean", StringType, true) ::
    StructField("double", StringType, true) ::
    StructField("integer", StringType, true) ::
    StructField("long", StringType, true) ::
    StructField("null", StringType, true) ::
    StructField("string", StringType, true) :: Nil)

  assert(expectedSchema === jsonDF.schema)

  jsonDF.createOrReplaceTempView("jsonTable")

  checkAnswer(
    sql("select * from jsonTable"),
    Row("92233720368547758070",
      "true",
      "1.7976931348623157E308",
      "10",
      "21474836470",
      null,
      "this is a simple string.")
  )
}
test("Loading a JSON dataset primitivesAsString returns complex fields as strings") {
  // primitivesAsString affects only leaf values: arrays and structs keep
  // their structure, but every primitive inside them becomes StringType.
  val jsonDF = spark.read.option("primitivesAsString", "true").json(complexFieldAndType1)

  val expectedSchema = StructType(
    StructField("arrayOfArray1", ArrayType(ArrayType(StringType, true), true), true) ::
    StructField("arrayOfArray2", ArrayType(ArrayType(StringType, true), true), true) ::
    StructField("arrayOfBigInteger", ArrayType(StringType, true), true) ::
    StructField("arrayOfBoolean", ArrayType(StringType, true), true) ::
    StructField("arrayOfDouble", ArrayType(StringType, true), true) ::
    StructField("arrayOfInteger", ArrayType(StringType, true), true) ::
    StructField("arrayOfLong", ArrayType(StringType, true), true) ::
    StructField("arrayOfNull", ArrayType(StringType, true), true) ::
    StructField("arrayOfString", ArrayType(StringType, true), true) ::
    StructField("arrayOfStruct", ArrayType(
      StructType(
        StructField("field1", StringType, true) ::
        StructField("field2", StringType, true) ::
        StructField("field3", StringType, true) :: Nil), true), true) ::
    StructField("struct", StructType(
      StructField("field1", StringType, true) ::
      StructField("field2", StringType, true) :: Nil), true) ::
    StructField("structWithArrayFields", StructType(
      StructField("field1", ArrayType(StringType, true), true) ::
      StructField("field2", ArrayType(StringType, true), true) :: Nil), true) :: Nil)

  assert(expectedSchema === jsonDF.schema)

  jsonDF.createOrReplaceTempView("jsonTable")

  // Access elements of a primitive array.
  checkAnswer(
    sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from jsonTable"),
    Row("str1", "str2", null)
  )

  // Access an array of null values.
  checkAnswer(
    sql("select arrayOfNull from jsonTable"),
    Row(Seq(null, null, null, null))
  )

  // Access elements of a BigInteger array (we use DecimalType internally).
  checkAnswer(
    sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] from jsonTable"),
    Row("922337203685477580700", "-922337203685477580800", null)
  )

  // Access elements of an array of arrays.
  checkAnswer(
    sql("select arrayOfArray1[0], arrayOfArray1[1] from jsonTable"),
    Row(Seq("1", "2", "3"), Seq("str1", "str2"))
  )

  // Access elements of an array of arrays.
  checkAnswer(
    sql("select arrayOfArray2[0], arrayOfArray2[1] from jsonTable"),
    Row(Seq("1", "2", "3"), Seq("1.1", "2.1", "3.1"))
  )

  // Access elements of an array inside a field with the type of ArrayType(ArrayType).
  checkAnswer(
    sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from jsonTable"),
    Row("str2", "2.1")
  )

  // Access elements of an array of structs.
  checkAnswer(
    sql("select arrayOfStruct[0], arrayOfStruct[1], arrayOfStruct[2], arrayOfStruct[3] " +
      "from jsonTable"),
    Row(
      Row("true", "str1", null),
      Row("false", null, null),
      Row(null, null, null),
      null)
  )

  // Access a struct and fields inside of it.
  checkAnswer(
    sql("select struct, struct.field1, struct.field2 from jsonTable"),
    Row(
      Row("true", "92233720368547758070"),
      "true",
      "92233720368547758070") :: Nil
  )

  // Access an array field of a struct.
  checkAnswer(
    sql("select structWithArrayFields.field1, structWithArrayFields.field2 from jsonTable"),
    Row(Seq("4", "5", "6"), Seq("str1", "str2"))
  )

  // Access elements of an array field of a struct.
  checkAnswer(
    sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] from jsonTable"),
    Row("5", null)
  )
}
test("Loading a JSON dataset prefersDecimal returns schema with float types as BigDecimal") {
  // With prefersDecimal=true, floating-point values are inferred as
  // DecimalType when they fit; 1.7976931348623157E308 yields the unusual
  // DecimalType(17, -292) (17 significant digits, negative scale).
  val jsonDF = spark.read.option("prefersDecimal", "true").json(primitiveFieldAndType)

  val expectedSchema = StructType(
    StructField("bigInteger", DecimalType(20, 0), true) ::
    StructField("boolean", BooleanType, true) ::
    StructField("double", DecimalType(17, -292), true) ::
    StructField("integer", LongType, true) ::
    StructField("long", LongType, true) ::
    StructField("null", StringType, true) ::
    StructField("string", StringType, true) :: Nil)

  assert(expectedSchema === jsonDF.schema)

  jsonDF.createOrReplaceTempView("jsonTable")

  checkAnswer(
    sql("select * from jsonTable"),
    Row(BigDecimal("92233720368547758070"),
      true,
      BigDecimal("1.7976931348623157E308"),
      10,
      21474836470L,
      null,
      "this is a simple string.")
  )
}
test("Find compatible types even if inferred DecimalType is not capable of other IntegralType") {
  // Merging an integral type with a decimal that cannot represent it must
  // still find a compatible type rather than fail.
  val mixedIntegerAndDoubleRecords = Seq(
    """{"a": 3, "b": 1.1}""",
    s"""{"a": 3.1, "b": 0.${"0" * 38}1}""").toDS()
  val jsonDF = spark.read
    .option("prefersDecimal", "true")
    .json(mixedIntegerAndDoubleRecords)

  // The values in `a` field will be decimals as they fit in decimal. For `b` field,
  // they will be doubles as `1.0E-39D` does not fit.
  val expectedSchema = StructType(
    StructField("a", DecimalType(21, 1), true) ::
    StructField("b", DoubleType, true) :: Nil)

  assert(expectedSchema === jsonDF.schema)
  checkAnswer(
    jsonDF,
    Row(BigDecimal("3"), 1.1D) ::
      Row(BigDecimal("3.1"), 1.0E-39D) :: Nil
  )
}
test("Infer big integers correctly even when it does not fit in decimal") {
  val jsonDF = spark.read
    .json(bigIntegerRecords)

  // The value in the `a` field (1.0E38) does not fit in a decimal, so it is
  // inferred as a double. The value in the `b` field (92233720368547758070)
  // does fit, so it is inferred as DecimalType(20, 0).
  val expectedSchema = StructType(
    StructField("a", DoubleType, true) ::
    StructField("b", DecimalType(20, 0), true) :: Nil)

  assert(expectedSchema === jsonDF.schema)
  checkAnswer(jsonDF, Row(1.0E38D, BigDecimal("92233720368547758070")))
}
test("Infer floating-point values correctly even when it does not fit in decimal") {
  val jsonDF = spark.read
    .option("prefersDecimal", "true")
    .json(floatingValueRecords)

  // The value in `a` field will be a double as it does not fit in decimal. For `b` field,
  // it will be a decimal as `0.01` by having a precision equal to the scale.
  val expectedSchema = StructType(
    StructField("a", DoubleType, true) ::
    StructField("b", DecimalType(2, 2), true):: Nil)

  assert(expectedSchema === jsonDF.schema)
  checkAnswer(jsonDF, Row(1.0E-39D, BigDecimal("0.01")))

  // Merging the two datasets must widen `b` to a decimal that can hold both
  // 0.01 and 92233720368547758070: precision 22, scale 2.
  val mergedJsonDF = spark.read
    .option("prefersDecimal", "true")
    .json(floatingValueRecords.union(bigIntegerRecords))

  val expectedMergedSchema = StructType(
    StructField("a", DoubleType, true) ::
    StructField("b", DecimalType(22, 2), true):: Nil)

  assert(expectedMergedSchema === mergedJsonDF.schema)
  checkAnswer(
    mergedJsonDF,
    Row(1.0E-39D, BigDecimal("0.01")) ::
      Row(1.0E38D, BigDecimal("92233720368547758070")) :: Nil
  )
}
test("Loading a JSON dataset from a text file with SQL") {
  // Same round-trip as the DataFrameReader test, but registering the file as
  // a temporary view through pure SQL (CREATE TEMPORARY VIEW ... USING json).
  val dir = Utils.createTempDir()
  dir.delete()
  val path = dir.toURI.toString
  primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)

  sql(
    s"""
      |CREATE TEMPORARY VIEW jsonTableSQL
      |USING org.apache.spark.sql.json
      |OPTIONS (
      |  path '$path'
      |)
    """.stripMargin)

  checkAnswer(
    sql("select * from jsonTableSQL"),
    Row(new java.math.BigDecimal("92233720368547758070"),
      true,
      1.7976931348623157E308,
      10,
      21474836470L,
      null,
      "this is a simple string.")
  )
}
test("Applying schemas") {
  // A user-provided schema must override inference, both when reading from a
  // file path and when reading from an in-memory dataset. Note `integer` is
  // forced to IntegerType here, where inference would produce LongType.
  val dir = Utils.createTempDir()
  dir.delete()
  val path = dir.getCanonicalPath
  primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)

  val schema = StructType(
    StructField("bigInteger", DecimalType.SYSTEM_DEFAULT, true) ::
    StructField("boolean", BooleanType, true) ::
    StructField("double", DoubleType, true) ::
    StructField("integer", IntegerType, true) ::
    StructField("long", LongType, true) ::
    StructField("null", StringType, true) ::
    StructField("string", StringType, true) :: Nil)

  val jsonDF1 = spark.read.schema(schema).json(path)

  assert(schema === jsonDF1.schema)

  jsonDF1.createOrReplaceTempView("jsonTable1")

  checkAnswer(
    sql("select * from jsonTable1"),
    Row(new java.math.BigDecimal("92233720368547758070"),
      true,
      1.7976931348623157E308,
      10,
      21474836470L,
      null,
      "this is a simple string.")
  )

  val jsonDF2 = spark.read.schema(schema).json(primitiveFieldAndType)

  assert(schema === jsonDF2.schema)

  jsonDF2.createOrReplaceTempView("jsonTable2")

  checkAnswer(
    sql("select * from jsonTable2"),
    Row(new java.math.BigDecimal("92233720368547758070"),
      true,
      1.7976931348623157E308,
      10,
      21474836470L,
      null,
      "this is a simple string.")
  )
}
test("Applying schemas with MapType") {
  // MapType cannot be inferred from JSON, but a user-supplied schema can read
  // JSON objects as maps — both with primitive values and with struct values.
  val schemaWithSimpleMap = StructType(
    StructField("map", MapType(StringType, IntegerType, true), false) :: Nil)
  val jsonWithSimpleMap = spark.read.schema(schemaWithSimpleMap).json(mapType1)

  jsonWithSimpleMap.createOrReplaceTempView("jsonWithSimpleMap")

  checkAnswer(
    sql("select `map` from jsonWithSimpleMap"),
    Row(Map("a" -> 1)) ::
      Row(Map("b" -> 2)) ::
      Row(Map("c" -> 3)) ::
      Row(Map("c" -> 1, "d" -> 4)) ::
      Row(Map("e" -> null)) :: Nil
  )

  // Disable quoted-regex column names so `map`['c'] parses as a map lookup.
  withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
    checkAnswer(
      sql("select `map`['c'] from jsonWithSimpleMap"),
      Row(null) ::
        Row(null) ::
        Row(3) ::
        Row(1) ::
        Row(null) :: Nil
    )
  }

  val innerStruct = StructType(
    StructField("field1", ArrayType(IntegerType, true), true) ::
    StructField("field2", IntegerType, true) :: Nil)
  val schemaWithComplexMap = StructType(
    StructField("map", MapType(StringType, innerStruct, true), false) :: Nil)

  val jsonWithComplexMap = spark.read.schema(schemaWithComplexMap).json(mapType2)

  jsonWithComplexMap.createOrReplaceTempView("jsonWithComplexMap")

  checkAnswer(
    sql("select `map` from jsonWithComplexMap"),
    Row(Map("a" -> Row(Seq(1, 2, 3, null), null))) ::
      Row(Map("b" -> Row(null, 2))) ::
      Row(Map("c" -> Row(Seq(), 4))) ::
      Row(Map("c" -> Row(null, 3), "d" -> Row(Seq(null), null))) ::
      Row(Map("e" -> null)) ::
      Row(Map("f" -> Row(null, null))) :: Nil
  )

  withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
    checkAnswer(
      sql("select `map`['a'].field1, `map`['c'].field2 from jsonWithComplexMap"),
      Row(Seq(1, 2, 3, null), null) ::
        Row(null, null) ::
        Row(null, 4) ::
        Row(null, 3) ::
        Row(null, null) ::
        Row(null, null) :: Nil
    )
  }
}
test("SPARK-2096 Correctly parse dot notations") {
  // Regression test: chained dot/index notation into nested arrays of structs
  // must resolve correctly (e.g. a[0].f[1].g[0]).
  val jsonDF = spark.read.json(complexFieldAndType2)
  jsonDF.createOrReplaceTempView("jsonTable")

  checkAnswer(
    sql("select arrayOfStruct[0].field1, arrayOfStruct[0].field2 from jsonTable"),
    Row(true, "str1")
  )
  checkAnswer(
    sql(
      """
        |select complexArrayOfStruct[0].field1[1].inner2[0], complexArrayOfStruct[1].field2[0][1]
        |from jsonTable
      """.stripMargin),
    Row("str2", 6)
  )
}
test("SPARK-3390 Complex arrays") {
  // Regression test: multi-dimensional array indexing, including indexing
  // into structs nested inside arrays of arrays.
  val jsonDF = spark.read.json(complexFieldAndType2)
  jsonDF.createOrReplaceTempView("jsonTable")

  checkAnswer(
    sql(
      """
        |select arrayOfArray1[0][0][0], arrayOfArray1[1][0][1], arrayOfArray1[1][1][0]
        |from jsonTable
      """.stripMargin),
    Row(5, 7, 8)
  )
  checkAnswer(
    sql(
      """
        |select arrayOfArray2[0][0][0].inner1, arrayOfArray2[1][0],
        |arrayOfArray2[1][1][1].inner2[0], arrayOfArray2[2][0][0].inner3[0][0].inner4
        |from jsonTable
      """.stripMargin),
    Row("str1", Nil, "str4", 2)
  )
}
test("SPARK-3308 Read top level JSON arrays") {
  // Regression test: a top-level JSON array is read as one row per element,
  // with missing fields inferred as nullable columns.
  val df = spark.read.json(jsonArray)
  df.createOrReplaceTempView("jsonTable")

  val expected = Seq(
    Row("str_a_1", null, null),
    Row("str_a_2", null, null),
    Row(null, "str_b_3", null),
    Row("str_a_4", "str_b_4", "str_c_4"))

  checkAnswer(
    sql(
      """
        |select a, b, c
        |from jsonTable
      """.stripMargin),
    expected
  )
}
test("Corrupt records: FAILFAST mode") {
  // `FAILFAST` mode should throw an exception for corrupt records — once
  // during schema inference, and once during record parsing when an explicit
  // schema skips inference.
  val exceptionOne = intercept[SparkException] {
    spark.read
      .option("mode", "FAILFAST")
      .json(corruptRecords)
  }.getMessage
  assert(exceptionOne.contains(
    "Malformed records are detected in schema inference. Parse Mode: FAILFAST."))

  val exceptionTwo = intercept[SparkException] {
    spark.read
      .option("mode", "FAILFAST")
      .schema("a string")
      .json(corruptRecords)
      .collect()
  }.getMessage
  assert(exceptionTwo.contains(
    "Malformed records are detected in record parsing. Parse Mode: FAILFAST."))
}
test("Corrupt records: DROPMALFORMED mode") {
  // `DROPMALFORMED` mode should skip corrupt records, both with an inferred
  // schema and with a user-supplied one; only the single valid record remains.
  val schemaOne = StructType(
    StructField("a", StringType, true) ::
    StructField("b", StringType, true) ::
    StructField("c", StringType, true) :: Nil)
  val schemaTwo = StructType(
    StructField("a", StringType, true) :: Nil)

  val jsonDFOne = spark.read
    .option("mode", "DROPMALFORMED")
    .json(corruptRecords)
  checkAnswer(
    jsonDFOne,
    Row("str_a_4", "str_b_4", "str_c_4") :: Nil
  )
  assert(jsonDFOne.schema === schemaOne)

  val jsonDFTwo = spark.read
    .option("mode", "DROPMALFORMED")
    .schema(schemaTwo)
    .json(corruptRecords)
  checkAnswer(
    jsonDFTwo,
    Row("str_a_4") :: Nil)
  assert(jsonDFTwo.schema === schemaTwo)
}
test("SPARK-19641: Additional corrupt records: DROPMALFORMED mode") {
  // Regression test: `DROPMALFORMED` must also drop this additional class of
  // corrupt records, leaving only the parsable one.
  val expectedSchema = new StructType().add("dummy", StringType)
  val df = spark.read
    .option("mode", "DROPMALFORMED")
    .json(additionalCorruptRecords)
  checkAnswer(df, Row("test"))
  assert(df.schema === expectedSchema)
}
test("Corrupt records: PERMISSIVE mode, without designated column for malformed records") {
  // In PERMISSIVE mode with a schema that has no corrupt-record column,
  // malformed records become all-null rows instead of being dropped.
  val schema = StructType(
    StructField("a", StringType, true) ::
    StructField("b", StringType, true) ::
    StructField("c", StringType, true) :: Nil)

  val jsonDF = spark.read.schema(schema).json(corruptRecords)

  checkAnswer(
    jsonDF.select($"a", $"b", $"c"),
    Seq(
      // Corrupted records are replaced with null
      Row(null, null, null),
      Row(null, null, null),
      Row(null, null, null),
      Row("str_a_4", "str_b_4", "str_c_4"),
      Row(null, null, null))
  )
}
test("Corrupt records: PERMISSIVE mode, with designated column for malformed records") {
  // With the corrupt-record column name configured ("_unparsed"), PERMISSIVE
  // mode stores each malformed record's raw text there; parsed records get
  // null in that column. Test if we can query corrupt records.
  withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
    val jsonDF = spark.read.json(corruptRecords)

    val schema = StructType(
      StructField("_unparsed", StringType, true) ::
      StructField("a", StringType, true) ::
      StructField("b", StringType, true) ::
      StructField("c", StringType, true) :: Nil)

    assert(schema === jsonDF.schema)

    // In HiveContext, backticks should be used to access columns starting with a underscore.
    checkAnswer(
      jsonDF.select($"a", $"b", $"c", $"_unparsed"),
      Row(null, null, null, "{") ::
        Row(null, null, null, """{"a":1, b:2}""") ::
        Row(null, null, null, """{"a":{, b:3}""") ::
        Row("str_a_4", "str_b_4", "str_c_4", null) ::
        Row(null, null, null, "]") :: Nil
    )

    checkAnswer(
      jsonDF.filter($"_unparsed".isNull).select($"a", $"b", $"c"),
      Row("str_a_4", "str_b_4", "str_c_4")
    )

    checkAnswer(
      jsonDF.filter($"_unparsed".isNotNull).select($"_unparsed"),
      Row("{") ::
        Row("""{"a":1, b:2}""") ::
        Row("""{"a":{, b:3}""") ::
        Row("]") :: Nil
    )
  }
}
test("SPARK-13953 Rename the corrupt record field via option") {
val jsonDF = spark.read
.option("columnNameOfCorruptRecord", "_malformed")
.json(corruptRecords)
val schema = StructType(
StructField("_malformed", StringType, true) ::
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
assert(schema === jsonDF.schema)
checkAnswer(
jsonDF.selectExpr("a", "b", "c", "_malformed"),
Row(null, null, null, "{") ::
Row(null, null, null, """{"a":1, b:2}""") ::
Row(null, null, null, """{"a":{, b:3}""") ::
Row("str_a_4", "str_b_4", "str_c_4", null) ::
Row(null, null, null, "]") :: Nil
)
}
test("SPARK-4068: nulls in arrays") {
val jsonDF = spark.read.json(nullsInArrays)
jsonDF.createOrReplaceTempView("jsonTable")
val schema = StructType(
StructField("field1",
ArrayType(ArrayType(ArrayType(ArrayType(StringType, true), true), true), true), true) ::
StructField("field2",
ArrayType(ArrayType(
StructType(StructField("Test", LongType, true) :: Nil), true), true), true) ::
StructField("field3",
ArrayType(ArrayType(
StructType(StructField("Test", StringType, true) :: Nil), true), true), true) ::
StructField("field4",
ArrayType(ArrayType(ArrayType(LongType, true), true), true), true) :: Nil)
assert(schema === jsonDF.schema)
checkAnswer(
sql(
"""
|SELECT field1, field2, field3, field4
|FROM jsonTable
""".stripMargin),
Row(Seq(Seq(null), Seq(Seq(Seq("Test")))), null, null, null) ::
Row(null, Seq(null, Seq(Row(1))), null, null) ::
Row(null, null, Seq(Seq(null), Seq(Row("2"))), null) ::
Row(null, null, null, Seq(Seq(null, Seq(1, 2, 3)))) :: Nil
)
}
test("SPARK-4228 DataFrame to JSON") {
  // End-to-end check of Dataset.toJSON: flat schema, nested struct/map schema,
  // and round-tripping JSON -> DataFrame -> JSON -> DataFrame.
  val schema1 = StructType(
    StructField("f1", IntegerType, false) ::
    StructField("f2", StringType, false) ::
    StructField("f3", BooleanType, false) ::
    StructField("f4", ArrayType(StringType), nullable = true) ::
    StructField("f5", IntegerType, true) :: Nil)

  val rowRDD1 = unparsedStrings.map { r =>
    val values = r.split(",").map(_.trim)
    // values(3) is not always numeric; the nullable f5 column gets null then.
    val v5 = try values(3).toInt catch {
      case _: NumberFormatException => null
    }
    Row(values(0).toInt, values(1), values(2).toBoolean, r.split(",").toList, v5)
  }

  val df1 = spark.createDataFrame(rowRDD1, schema1)
  df1.createOrReplaceTempView("applySchema1")
  val df2 = df1.toDF
  val result = df2.toJSON.collect()
  // scalastyle:off
  // Fixed garbled escapes: the expected JSON uses \" inside the literal, not \\".
  assert(result(0) === "{\"f1\":1,\"f2\":\"A1\",\"f3\":true,\"f4\":[\"1\",\" A1\",\" true\",\" null\"]}")
  assert(result(3) === "{\"f1\":4,\"f2\":\"D4\",\"f3\":true,\"f4\":[\"4\",\" D4\",\" true\",\" 2147483644\"],\"f5\":2147483644}")
  // scalastyle:on

  val schema2 = StructType(
    StructField("f1", StructType(
      StructField("f11", IntegerType, false) ::
      StructField("f12", BooleanType, false) :: Nil), false) ::
    StructField("f2", MapType(StringType, IntegerType, true), false) :: Nil)

  val rowRDD2 = unparsedStrings.map { r =>
    val values = r.split(",").map(_.trim)
    val v4 = try values(3).toInt catch {
      case _: NumberFormatException => null
    }
    Row(Row(values(0).toInt, values(2).toBoolean), Map(values(1) -> v4))
  }

  val df3 = spark.createDataFrame(rowRDD2, schema2)
  df3.createOrReplaceTempView("applySchema2")
  val df4 = df3.toDF
  val result2 = df4.toJSON.collect()

  // Nested struct and map values serialize with proper JSON nesting; null map
  // values are emitted as JSON null.
  assert(result2(1) === "{\"f1\":{\"f11\":2,\"f12\":false},\"f2\":{\"B2\":null}}")
  assert(result2(3) === "{\"f1\":{\"f11\":4,\"f12\":true},\"f2\":{\"D4\":2147483644}}")

  // Round-trip: parse, serialize with toJSON, and parse again.
  val jsonDF = spark.read.json(primitiveFieldAndType)
  val primTable = spark.read.json(jsonDF.toJSON)
  primTable.createOrReplaceTempView("primitiveTable")
  checkAnswer(
    sql("select * from primitiveTable"),
    Row(new java.math.BigDecimal("92233720368547758070"),
      true,
      1.7976931348623157E308,
      10,
      21474836470L,
      "this is a simple string.")
  )

  val complexJsonDF = spark.read.json(complexFieldAndType1)
  val compTable = spark.read.json(complexJsonDF.toJSON)
  compTable.createOrReplaceTempView("complexTable")
  // Access elements of a primitive array.
  checkAnswer(
    sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from complexTable"),
    Row("str1", "str2", null)
  )
  // Access an array of null values.
  checkAnswer(
    sql("select arrayOfNull from complexTable"),
    Row(Seq(null, null, null, null))
  )
  // Access elements of a BigInteger array (we use DecimalType internally).
  checkAnswer(
    sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] " +
      " from complexTable"),
    Row(new java.math.BigDecimal("922337203685477580700"),
      new java.math.BigDecimal("-922337203685477580800"), null)
  )
  // Access elements of an array of arrays.
  checkAnswer(
    sql("select arrayOfArray1[0], arrayOfArray1[1] from complexTable"),
    Row(Seq("1", "2", "3"), Seq("str1", "str2"))
  )
  // Access elements of an array of arrays.
  checkAnswer(
    sql("select arrayOfArray2[0], arrayOfArray2[1] from complexTable"),
    Row(Seq(1.0, 2.0, 3.0), Seq(1.1, 2.1, 3.1))
  )
  // Access elements of an array inside a filed with the type of ArrayType(ArrayType).
  checkAnswer(
    sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from complexTable"),
    Row("str2", 2.1)
  )
  // Access a struct and fields inside of it.
  checkAnswer(
    sql("select struct, struct.field1, struct.field2 from complexTable"),
    Row(
      Row(true, new java.math.BigDecimal("92233720368547758070")),
      true,
      new java.math.BigDecimal("92233720368547758070")) :: Nil
  )
  // Access an array field of a struct.
  checkAnswer(
    sql("select structWithArrayFields.field1, structWithArrayFields.field2 from complexTable"),
    Row(Seq(4, 5, 6), Seq("str1", "str2"))
  )
  // Access elements of an array field of a struct.
  checkAnswer(
    sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] " +
      "from complexTable"),
    Row(5, null)
  )
}
test("Dataset toJSON doesn't construct rdd") {
  // toJSON should stay in the logical-plan world; no ExternalRDD node may appear.
  val containsRDD = spark.emptyDataFrame.toJSON.queryExecution.logical.find {
    case ExternalRDD(_, _) => true
    case _ => false
  }
  assert(containsRDD.isEmpty, "Expected logical plan of toJSON to not contain an RDD")
}

test("JSONRelation equality test") {
  withTempPath(dir => {
    val path = dir.getCanonicalFile.toURI.toString
    sparkContext.parallelize(1 to 100)
      .map(i => s"""{"a": 1, "b": "str$i"}""").saveAsTextFile(path)
    // Two relations resolved from identical options must compare equal
    // (needed e.g. for plan caching / self-joins).
    val d1 = DataSource(
      spark,
      userSpecifiedSchema = None,
      partitionColumns = Array.empty[String],
      bucketSpec = None,
      className = classOf[JsonFileFormat].getCanonicalName,
      options = Map("path" -> path)).resolveRelation()
    val d2 = DataSource(
      spark,
      userSpecifiedSchema = None,
      partitionColumns = Array.empty[String],
      bucketSpec = None,
      className = classOf[JsonFileFormat].getCanonicalName,
      options = Map("path" -> path)).resolveRelation()
    assert(d1 === d2)
  })
}

test("SPARK-6245 JsonInferSchema.infer on empty RDD") {
  // This is really a test that it doesn't throw an exception
  val options = new JSONOptions(Map.empty[String, String], "GMT")
  val emptySchema = new JsonInferSchema(options).infer(
    empty.rdd,
    CreateJacksonParser.string)
  assert(StructType(Seq()) === emptySchema)
}

test("SPARK-7565 MapType in JsonRDD") {
  withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
    withTempDir { dir =>
      // A non-nullable MapType column must survive a JSON read -> parquet write.
      val schemaWithSimpleMap = StructType(
        StructField("map", MapType(StringType, IntegerType, true), false) :: Nil)
      val df = spark.read.schema(schemaWithSimpleMap).json(mapType1)
      val path = dir.getAbsolutePath
      df.write.mode("overwrite").parquet(path)
      // order of MapType is not defined
      assert(spark.read.parquet(path).count() == 5)
      // Corrupt-record output (with the _unparsed column) must also round-trip.
      val df2 = spark.read.json(corruptRecords)
      df2.write.mode("overwrite").parquet(path)
      checkAnswer(spark.read.parquet(path), df2.collect())
    }
  }
}

test("SPARK-8093 Erase empty structs") {
  // Records containing only empty structs should infer to an empty schema.
  val options = new JSONOptions(Map.empty[String, String], "GMT")
  val emptySchema = new JsonInferSchema(options).infer(
    emptyRecords.rdd,
    CreateJacksonParser.string)
  assert(StructType(Seq()) === emptySchema)
}

test("JSON with Partition") {
  // Writes a Hive-style partition directory (name=value) under `parent`.
  def makePartition(rdd: RDD[String], parent: File, partName: String, partValue: Any): File = {
    val p = new File(parent, s"$partName=${partValue.toString}")
    rdd.saveAsTextFile(p.getCanonicalPath)
    p
  }

  withTempPath(root => {
    val d1 = new File(root, "d1=1")
    // root/dt=1/col1=abc
    val p1_col1 = makePartition(
      sparkContext.parallelize(2 to 5).map(i => s"""{"a": 1, "b": "str$i"}"""),
      d1,
      "col1",
      "abc")

    // root/dt=1/col1=abd
    val p2 = makePartition(
      sparkContext.parallelize(6 to 10).map(i => s"""{"a": 1, "b": "str$i"}"""),
      d1,
      "col1",
      "abd")

    spark.read.json(root.getAbsolutePath).createOrReplaceTempView("test_myjson_with_part")
    // Partition columns d1/col1 are discovered from the directory layout.
    checkAnswer(sql(
      "SELECT count(a) FROM test_myjson_with_part where d1 = 1 and col1='abc'"), Row(4))
    checkAnswer(sql(
      "SELECT count(a) FROM test_myjson_with_part where d1 = 1 and col1='abd'"), Row(5))
    checkAnswer(sql(
      "SELECT count(a) FROM test_myjson_with_part where d1 = 1"), Row(9))
  })
}

test("SPARK-11544 test pathfilter") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    val df = spark.range(2)
    df.write.json(path + "/p=1")
    df.write.json(path + "/p=2")
    assert(spark.read.json(path).count() === 4)

    // A configured Hadoop PathFilter must be honored when listing input files.
    val extraOptions = Map(
      "mapred.input.pathFilter.class" -> classOf[TestFileFilter].getName,
      "mapreduce.input.pathFilter.class" -> classOf[TestFileFilter].getName
    )
    assert(spark.read.options(extraOptions).json(path).count() === 2)
  }
}

test("SPARK-12057 additional corrupt records do not throw exceptions") {
  // Test if we can query corrupt records.
  withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
    withTempView("jsonTable") {
      val schema = StructType(
        StructField("_unparsed", StringType, true) ::
        StructField("dummy", StringType, true) :: Nil)

      {
        // We need to make sure we can infer the schema.
        val jsonDF = spark.read.json(additionalCorruptRecords)
        assert(jsonDF.schema === schema)
      }

      {
        val jsonDF = spark.read.schema(schema).json(additionalCorruptRecords)
        jsonDF.createOrReplaceTempView("jsonTable")

        // In HiveContext, backticks should be used to access columns starting with a underscore.
        checkAnswer(
          sql(
            """
              |SELECT dummy, _unparsed
              |FROM jsonTable
            """.stripMargin),
          Row("test", null) ::
          Row(null, """[1,2,3]""") ::
          Row(null, """":"test", "a":1}""") ::
          Row(null, """42""") ::
          Row(null, """     ","ian":"test"}""") :: Nil
        )
      }
    }
  }
}
test("Parse JSON rows having an array type and a struct type in the same field.") {
  // withTempPath hands us a non-existent path and deletes it afterwards. The
  // previous code shadowed withTempDir's managed directory with a second
  // Utils.createTempDir() that was never cleaned up.
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    arrayAndStructRecords.map(record => record.replaceAll("\\n", " ")).write.text(path)

    // With an explicit struct schema, rows whose field is actually an array
    // must still be counted (parsed permissively), not dropped.
    val schema =
      StructType(
        StructField("a", StructType(
          StructField("b", StringType) :: Nil
        )) :: Nil)
    val jsonDF = spark.read.schema(schema).json(path)
    assert(jsonDF.count() == 2)
  }
}
test("SPARK-12872 Support to specify the option for compression codec") {
  // withTempPath hands us a non-existent path and deletes it afterwards. The
  // previous code shadowed withTempDir's managed directory with a second
  // Utils.createTempDir() that was never cleaned up.
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)

    val jsonDF = spark.read.json(path)
    val jsonDir = new File(dir, "json").getCanonicalPath
    jsonDF.coalesce(1).write
      .format("json")
      .option("compression", "gZiP") // codec name is case-insensitive
      .save(jsonDir)

    // Gzip codec should produce .json.gz part files.
    val compressedFiles = new File(jsonDir).listFiles()
    assert(compressedFiles.exists(_.getName.endsWith(".json.gz")))

    // The compressed output must read back losslessly.
    val jsonCopy = spark.read
      .format("json")
      .load(jsonDir)

    assert(jsonCopy.count == jsonDF.count)
    val jsonCopySome = jsonCopy.selectExpr("string", "long", "boolean")
    val jsonDFSome = jsonDF.selectExpr("string", "long", "boolean")
    checkAnswer(jsonCopySome, jsonDFSome)
  }
}
test("SPARK-13543 Write the output as uncompressed via option()") {
  // Hadoop-level settings force gzip; the DataFrameWriter "compression" option
  // must take precedence and disable it.
  val extraOptions = Map[String, String](
    "mapreduce.output.fileoutputformat.compress" -> "true",
    "mapreduce.output.fileoutputformat.compress.type" -> CompressionType.BLOCK.toString,
    "mapreduce.output.fileoutputformat.compress.codec" -> classOf[GzipCodec].getName,
    "mapreduce.map.output.compress" -> "true",
    "mapreduce.map.output.compress.codec" -> classOf[GzipCodec].getName
  )
  // withTempPath hands us a non-existent path and deletes it afterwards. The
  // previous code shadowed withTempDir's managed directory with a second
  // Utils.createTempDir() that was never cleaned up.
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)

    val jsonDF = spark.read.json(path)
    val jsonDir = new File(dir, "json").getCanonicalPath
    jsonDF.coalesce(1).write
      .format("json")
      .option("compression", "none")
      .options(extraOptions)
      .save(jsonDir)

    // No part file should carry the gzip suffix.
    val compressedFiles = new File(jsonDir).listFiles()
    assert(compressedFiles.exists(!_.getName.endsWith(".json.gz")))

    val jsonCopy = spark.read
      .format("json")
      .options(extraOptions)
      .load(jsonDir)

    assert(jsonCopy.count == jsonDF.count)
    val jsonCopySome = jsonCopy.selectExpr("string", "long", "boolean")
    val jsonDFSome = jsonDF.selectExpr("string", "long", "boolean")
    checkAnswer(jsonCopySome, jsonDFSome)
  }
}
test("Casting long as timestamp") {
  withTempView("jsonTable") {
    // A JSON long is interpreted as epoch seconds when read into TimestampType.
    val schema = (new StructType).add("ts", TimestampType)
    val jsonDF = spark.read.schema(schema).json(timestampAsLong)

    jsonDF.createOrReplaceTempView("jsonTable")

    checkAnswer(
      sql("select ts from jsonTable"),
      Row(java.sql.Timestamp.valueOf("2016-01-02 03:04:05"))
    )
  }
}

test("wide nested json table") {
  // 100 fields per nested object, nested under two array columns: exercises
  // schema inference and parsing on wide schemas.
  val nested = (1 to 100).map { i =>
    s"""
       |"c$i": $i
     """.stripMargin
  }.mkString(", ")
  val json = s"""
     |{"a": [{$nested}], "b": [{$nested}]}
   """.stripMargin
  val df = spark.read.json(Seq(json).toDS())
  assert(df.schema.size === 2)
  df.collect()
}

test("Write dates correctly with dateFormat option") {
  val customSchema = new StructType(Array(StructField("date", DateType, true)))
  withTempDir { dir =>
    // With dateFormat option.
    val datesWithFormatPath = s"${dir.getCanonicalPath}/datesWithFormat.json"
    val datesWithFormat = spark.read
      .schema(customSchema)
      .option("dateFormat", "dd/MM/yyyy HH:mm")
      .json(datesRecords)

    datesWithFormat.write
      .format("json")
      .option("dateFormat", "yyyy/MM/dd")
      .save(datesWithFormatPath)

    // This will load back the dates as string.
    val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
    val stringDatesWithFormat = spark.read
      .schema(stringSchema)
      .json(datesWithFormatPath)
    val expectedStringDatesWithFormat = Seq(
      Row("2015/08/26"),
      Row("2014/10/27"),
      Row("2016/01/28"))

    checkAnswer(stringDatesWithFormat, expectedStringDatesWithFormat)
  }
}

test("Write timestamps correctly with timestampFormat option") {
  val customSchema = new StructType(Array(StructField("date", TimestampType, true)))
  withTempDir { dir =>
    // With dateFormat option.
    val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.json"
    val timestampsWithFormat = spark.read
      .schema(customSchema)
      .option("timestampFormat", "dd/MM/yyyy HH:mm")
      .json(datesRecords)
    timestampsWithFormat.write
      .format("json")
      .option("timestampFormat", "yyyy/MM/dd HH:mm")
      .save(timestampsWithFormatPath)

    // This will load back the timestamps as string.
    val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
    val stringTimestampsWithFormat = spark.read
      .schema(stringSchema)
      .json(timestampsWithFormatPath)
    val expectedStringDatesWithFormat = Seq(
      Row("2015/08/26 18:00"),
      Row("2014/10/27 18:30"),
      Row("2016/01/28 20:00"))

    checkAnswer(stringTimestampsWithFormat, expectedStringDatesWithFormat)
  }
}

test("Write timestamps correctly with timestampFormat option and timeZone option") {
  val customSchema = new StructType(Array(StructField("date", TimestampType, true)))
  withTempDir { dir =>
    // With dateFormat option and timeZone option.
    val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.json"
    val timestampsWithFormat = spark.read
      .schema(customSchema)
      .option("timestampFormat", "dd/MM/yyyy HH:mm")
      .json(datesRecords)

    timestampsWithFormat.write
      .format("json")
      .option("timestampFormat", "yyyy/MM/dd HH:mm")
      .option(DateTimeUtils.TIMEZONE_OPTION, "GMT")
      .save(timestampsWithFormatPath)

    // This will load back the timestamps as string.
    // Written strings are shifted to GMT (expected values differ from the
    // session-local ones in the previous test).
    val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
    val stringTimestampsWithFormat = spark.read
      .schema(stringSchema)
      .json(timestampsWithFormatPath)
    val expectedStringDatesWithFormat = Seq(
      Row("2015/08/27 01:00"),
      Row("2014/10/28 01:30"),
      Row("2016/01/29 04:00"))

    checkAnswer(stringTimestampsWithFormat, expectedStringDatesWithFormat)

    // Reading back with the same format and time zone restores the original rows.
    val readBack = spark.read
      .schema(customSchema)
      .option("timestampFormat", "yyyy/MM/dd HH:mm")
      .option(DateTimeUtils.TIMEZONE_OPTION, "GMT")
      .json(timestampsWithFormatPath)

    checkAnswer(readBack, timestampsWithFormat)
  }
}

test("SPARK-18433: Improve DataSource option keys to be more case-insensitive") {
  val records = Seq("""{"a": 3, "b": 1.1}""", """{"a": 3.1, "b": 0.000001}""").toDS()

  val schema = StructType(
    StructField("a", DecimalType(21, 1), true) ::
    StructField("b", DecimalType(7, 6), true) :: Nil)

  // Both the canonical spelling and a mixed-case spelling of the option key
  // must be recognized.
  val df1 = spark.read.option("prefersDecimal", "true").json(records)
  assert(df1.schema == schema)
  val df2 = spark.read.option("PREfersdecimaL", "true").json(records)
  assert(df2.schema == schema)
}

test("SPARK-18352: Parse normal multi-line JSON files (compressed)") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    primitiveFieldAndType
      .toDF("value")
      .write
      .option("compression", "GzIp")
      .text(path)
    assert(new File(path).listFiles().exists(_.getName.endsWith(".gz")))

    val jsonDF = spark.read.option("multiLine", true).json(path)
    val jsonDir = new File(dir, "json").getCanonicalPath
    jsonDF.coalesce(1).write
      .option("compression", "gZiP")
      .json(jsonDir)

    assert(new File(jsonDir).listFiles().exists(_.getName.endsWith(".json.gz")))

    val originalData = spark.read.json(primitiveFieldAndType)
    checkAnswer(jsonDF, originalData)
    checkAnswer(spark.read.schema(originalData.schema).json(jsonDir), originalData)
  }
}

test("SPARK-18352: Parse normal multi-line JSON files (uncompressed)") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    primitiveFieldAndType
      .toDF("value")
      .write
      .text(path)

    val jsonDF = spark.read.option("multiLine", true).json(path)
    val jsonDir = new File(dir, "json").getCanonicalPath
    jsonDF.coalesce(1).write.json(jsonDir)

    val compressedFiles = new File(jsonDir).listFiles()
    assert(compressedFiles.exists(_.getName.endsWith(".json")))

    val originalData = spark.read.json(primitiveFieldAndType)
    checkAnswer(jsonDF, originalData)
    checkAnswer(spark.read.schema(originalData.schema).json(jsonDir), originalData)
  }
}

test("SPARK-18352: Expect one JSON document per file") {
  // the json parser terminates as soon as it sees a matching END_OBJECT or END_ARRAY token.
  // this might not be the optimal behavior but this test verifies that only the first value
  // is parsed and the rest are discarded.

  // alternatively the parser could continue parsing following objects, which may further reduce
  // allocations by skipping the line reader entirely

  withTempPath { dir =>
    val path = dir.getCanonicalPath
    spark
      .createDataFrame(Seq(Tuple1("{}{invalid}")))
      .coalesce(1)
      .write
      .text(path)

    val jsonDF = spark.read.option("multiLine", true).json(path)
    // no corrupt record column should be created
    assert(jsonDF.schema === StructType(Seq()))
    // only the first object should be read
    assert(jsonDF.count() === 1)
  }
}
test("SPARK-18352: Handle multi-line corrupt documents (PERMISSIVE)") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    val corruptRecordCount = additionalCorruptRecords.count().toInt
    assert(corruptRecordCount === 5)

    additionalCorruptRecords
      .toDF("value")
      // this is the minimum partition count that avoids hash collisions
      .repartition(corruptRecordCount * 4, F.hash($"value"))
      .write
      .text(path)

    val jsonDF = spark.read.option("multiLine", true).option("mode", "PERMISSIVE").json(path)
    assert(jsonDF.count() === corruptRecordCount)
    assert(jsonDF.schema === new StructType()
      .add("_corrupt_record", StringType)
      .add("dummy", StringType))
    // Join parsed output back to the raw records, stripping leading/trailing
    // whitespace from the corrupt-record column to match F.trim on the input.
    // (Regex literal fixed: a Java regex for whitespace is \s, i.e. "\\s" in
    // a Scala string; the previous "\\\\s" matched a literal backslash + 's'.)
    val counts = jsonDF
      .join(
        additionalCorruptRecords.toDF("value"),
        F.regexp_replace($"_corrupt_record", "(^\\s+|\\s+$)", "") === F.trim($"value"),
        "outer")
      .agg(
        F.count($"dummy").as("valid"),
        F.count($"_corrupt_record").as("corrupt"),
        F.count("*").as("count"))
    checkAnswer(counts, Row(1, 4, 6))
  }
}
test("SPARK-19641: Handle multi-line corrupt documents (DROPMALFORMED)") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    val corruptRecordCount = additionalCorruptRecords.count().toInt
    assert(corruptRecordCount === 5)

    additionalCorruptRecords
      .toDF("value")
      // this is the minimum partition count that avoids hash collisions
      .repartition(corruptRecordCount * 4, F.hash($"value"))
      .write
      .text(path)

    // Only the single parsable document survives DROPMALFORMED.
    val jsonDF = spark.read.option("multiLine", true).option("mode", "DROPMALFORMED").json(path)
    checkAnswer(jsonDF, Seq(Row("test")))
  }
}

test("SPARK-18352: Handle multi-line corrupt documents (FAILFAST)") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    val corruptRecordCount = additionalCorruptRecords.count().toInt
    assert(corruptRecordCount === 5)

    additionalCorruptRecords
      .toDF("value")
      // this is the minimum partition count that avoids hash collisions
      .repartition(corruptRecordCount * 4, F.hash($"value"))
      .write
      .text(path)

    val schema = new StructType().add("dummy", StringType)

    // `FAILFAST` mode should throw an exception for corrupt records.
    // Failure surfaces both during schema inference ...
    val exceptionOne = intercept[SparkException] {
      spark.read
        .option("multiLine", true)
        .option("mode", "FAILFAST")
        .json(path)
    }
    assert(exceptionOne.getMessage.contains("Malformed records are detected in schema " +
      "inference. Parse Mode: FAILFAST."))

    // ... and during record parsing with a user-supplied schema.
    val exceptionTwo = intercept[SparkException] {
      spark.read
        .option("multiLine", true)
        .option("mode", "FAILFAST")
        .schema(schema)
        .json(path)
        .collect()
    }
    assert(exceptionTwo.getMessage.contains("Malformed records are detected in record " +
      "parsing. Parse Mode: FAILFAST."))
  }
}

test("Throw an exception if a `columnNameOfCorruptRecord` field violates requirements") {
  // The corrupt-record column must be a nullable StringType; IntegerType here
  // is invalid and must be rejected at analysis time.
  val columnNameOfCorruptRecord = "_unparsed"
  val schema = StructType(
    StructField(columnNameOfCorruptRecord, IntegerType, true) ::
    StructField("a", StringType, true) ::
    StructField("b", StringType, true) ::
    StructField("c", StringType, true) :: Nil)
  val errMsg = intercept[AnalysisException] {
    spark.read
      .option("mode", "Permissive")
      .option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
      .schema(schema)
      .json(corruptRecords)
  }.getMessage
  assert(errMsg.startsWith("The field for corrupt records must be string type and nullable"))

  // We use `PERMISSIVE` mode by default if invalid string is given.
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    corruptRecords.toDF("value").write.text(path)
    val errMsg = intercept[AnalysisException] {
      spark.read
        .option("mode", "permm")
        .option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
        .schema(schema)
        .json(path)
        .collect
    }.getMessage
    assert(errMsg.startsWith("The field for corrupt records must be string type and nullable"))
  }
}

test("SPARK-18772: Parse special floats correctly") {
  val jsons = Seq(
    """{"a": "NaN"}""",
    """{"a": "Infinity"}""",
    """{"a": "-Infinity"}""")

  // positive cases
  val checks: Seq[Double => Boolean] = Seq(
    _.isNaN,
    _.isPosInfinity,
    _.isNegInfinity)

  // Quoted "NaN"/"Infinity"/"-Infinity" parse into both FloatType and DoubleType.
  Seq(FloatType, DoubleType).foreach { dt =>
    jsons.zip(checks).foreach { case (json, check) =>
      val ds = spark.read
        .schema(StructType(Seq(StructField("a", dt))))
        .json(Seq(json).toDS())
        .select($"a".cast(DoubleType)).as[Double]
      assert(check(ds.first()))
    }
  }

  // negative cases
  Seq(FloatType, DoubleType).foreach { dt =>
    val lowerCasedJsons = jsons.map(_.toLowerCase(Locale.ROOT))
    // The special floats are case-sensitive so these cases below throw exceptions.
    lowerCasedJsons.foreach { lowerCasedJson =>
      val e = intercept[SparkException] {
        spark.read
          .option("mode", "FAILFAST")
          .schema(StructType(Seq(StructField("a", dt))))
          .json(Seq(lowerCasedJson).toDS())
          .collect()
      }
      assert(e.getMessage.contains("Cannot parse"))
    }
  }
}
test("SPARK-21610: Corrupt records are not handled properly when creating a dataframe " +
  "from a file") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    val data =
      """{"field": 1}
        |{"field": 2}
        |{"field": "3"}""".stripMargin
    Seq(data).toDF().repartition(1).write.text(path)
    val schema = new StructType().add("field", ByteType).add("_corrupt_record", StringType)

    // negative cases
    // Selecting ONLY the internal corrupt record column is disallowed.
    val msg = intercept[AnalysisException] {
      spark.read.schema(schema).json(path).select("_corrupt_record").collect()
    }.getMessage
    assert(msg.contains("only include the internal corrupt record column"))

    // workaround: caching the parsed DataFrame first makes the column queryable.
    val df = spark.read.schema(schema).json(path).cache()
    assert(df.filter($"_corrupt_record".isNotNull).count() == 1)
    assert(df.filter($"_corrupt_record".isNull).count() == 2)
    // Fixed garbled escapes: the expected raw record uses \" inside the literal.
    checkAnswer(
      df.select("_corrupt_record"),
      Row(null) :: Row(null) :: Row("{\"field\": \"3\"}") :: Nil
    )
  }
}
// Registers a read/write/round-trip test for a custom record separator.
// Called below once per candidate separator string.
def testLineSeparator(lineSep: String): Unit = {
  test(s"SPARK-21289: Support line separator - lineSep: '$lineSep'") {
    // Read
    // Documents themselves contain embedded newlines; only `lineSep` splits records.
    val data =
      s"""
        |  {"f":
        |"a", "f0": 1}$lineSep{"f":
        |
        |"c", "f0": 2}$lineSep{"f": "d", "f0": 3}
      """.stripMargin
    val dataWithTrailingLineSep = s"$data$lineSep"

    // A trailing separator must not produce an extra empty record.
    Seq(data, dataWithTrailingLineSep).foreach { lines =>
      withTempPath { path =>
        Files.write(path.toPath, lines.getBytes(StandardCharsets.UTF_8))
        val df = spark.read.option("lineSep", lineSep).json(path.getAbsolutePath)
        val expectedSchema =
          StructType(StructField("f", StringType) :: StructField("f0", LongType) :: Nil)
        checkAnswer(df, Seq(("a", 1), ("c", 2), ("d", 3)).toDF())
        assert(df.schema === expectedSchema)
      }
    }

    // Write
    withTempPath { path =>
      Seq("a", "b", "c").toDF("value").coalesce(1)
        .write.option("lineSep", lineSep).json(path.getAbsolutePath)
      val partFile = TestUtils.recursiveList(path).filter(f => f.getName.startsWith("part-")).head
      val readBack = new String(Files.readAllBytes(partFile.toPath), StandardCharsets.UTF_8)
      assert(
        readBack === s"""{"value":"a"}$lineSep{"value":"b"}$lineSep{"value":"c"}$lineSep""")
    }

    // Roundtrip
    withTempPath { path =>
      val df = Seq("a", "b", "c").toDF()
      df.write.option("lineSep", lineSep).json(path.getAbsolutePath)
      val readBack = spark.read.option("lineSep", lineSep).json(path.getAbsolutePath)
      checkAnswer(df, readBack)
    }
  }
}
// scalastyle:off nonascii
// Exercise single-char, multi-char, control-char (0x1E) and non-ASCII separators.
for (sep <- Seq("|", "^", "::", "!!!@3", 0x1E.toChar.toString, "μ")) {
  testLineSeparator(sep)
}
// scalastyle:on nonascii
// Fixed garbled escapes: the input data must contain real \r, \r\n and \n
// separators (the previous literal contained backslash characters instead).
test("""SPARK-21289: Support line separator - default value \r, \r\n and \n""") {
  val data =
    "{\"f\": \"a\", \"f0\": 1}\r{\"f\": \"c\", \"f0\": 2}\r\n{\"f\": \"d\", \"f0\": 3}\n"

  withTempPath { path =>
    Files.write(path.toPath, data.getBytes(StandardCharsets.UTF_8))
    val df = spark.read.json(path.getAbsolutePath)

    // Without an explicit lineSep, all three line-ending styles delimit records.
    val expectedSchema =
      StructType(StructField("f", StringType) :: StructField("f0", LongType) :: Nil)
    checkAnswer(df, Seq(("a", 1), ("c", 2), ("d", 3)).toDF())
    assert(df.schema === expectedSchema)
  }
}
test("SPARK-23849: schema inferring touches less data if samplingRatio < 1.0") {
  // Set default values for the DataSource parameters to make sure
  // that whole test file is mapped to only one partition. This will guarantee
  // reliable sampling of the input file.
  withSQLConf(
    SQLConf.FILES_MAX_PARTITION_BYTES.key -> (128 * 1024 * 1024).toString,
    SQLConf.FILES_OPEN_COST_IN_BYTES.key -> (4 * 1024 * 1024).toString
  )(withTempPath { path =>
    val ds = sampledTestData.coalesce(1)
    ds.write.text(path.getAbsolutePath)
    // With a 10% sample, inference should settle on the f1:Long shape.
    val readback = spark.read.option("samplingRatio", 0.1).json(path.getCanonicalPath)

    assert(readback.schema == new StructType().add("f1", LongType))
  })
}

test("SPARK-23849: usage of samplingRatio while parsing a dataset of strings") {
  // Same sampling behavior when the source is an in-memory Dataset[String].
  val ds = sampledTestData.coalesce(1)
  val readback = spark.read.option("samplingRatio", 0.1).json(ds)

  assert(readback.schema == new StructType().add("f1", LongType))
}

test("SPARK-23849: samplingRatio is out of the range (0, 1.0]") {
  val ds = spark.range(0, 100, 1, 1).map(_.toString)

  // Non-positive ratios are rejected with a descriptive message.
  val errorMsg0 = intercept[IllegalArgumentException] {
    spark.read.option("samplingRatio", -1).json(ds)
  }.getMessage
  assert(errorMsg0.contains("samplingRatio (-1.0) should be greater than 0"))

  val errorMsg1 = intercept[IllegalArgumentException] {
    spark.read.option("samplingRatio", 0).json(ds)
  }.getMessage
  assert(errorMsg1.contains("samplingRatio (0.0) should be greater than 0"))

  // Ratio 1.0 means no sampling: every record is read.
  val sampled = spark.read.option("samplingRatio", 1.0).json(ds)
  assert(sampled.count() == ds.count())
}

test("SPARK-23723: json in UTF-16 with BOM") {
  // Generic "UTF-16" relies on the byte-order mark to pick endianness.
  val fileName = "test-data/utf16WithBOM.json"
  val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
  val jsonDF = spark.read.schema(schema)
    .option("multiline", "true")
    .option("encoding", "UTF-16")
    .json(testFile(fileName))

  checkAnswer(jsonDF, Seq(Row("Chris", "Baird"), Row("Doug", "Rood")))
}

test("SPARK-23723: multi-line json in UTF-32BE with BOM") {
  // No explicit encoding option: it must be auto-detected from the BOM.
  val fileName = "test-data/utf32BEWithBOM.json"
  val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
  val jsonDF = spark.read.schema(schema)
    .option("multiline", "true")
    .json(testFile(fileName))

  checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
}

test("SPARK-23723: Use user's encoding in reading of multi-line json in UTF-16LE") {
  // BOM-less file: the user-provided encoding must be honored.
  val fileName = "test-data/utf16LE.json"
  val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
  val jsonDF = spark.read.schema(schema)
    .option("multiline", "true")
    .options(Map("encoding" -> "UTF-16LE"))
    .json(testFile(fileName))

  checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
}
test("SPARK-23723: Unsupported encoding name") {
  // A bogus charset must surface as UnsupportedCharsetException naming it.
  val invalidCharset = "UTF-128"
  val exception = intercept[UnsupportedCharsetException] {
    spark.read
      // Fixed garbled escape: lineSep is a real newline, "\n", not the two
      // characters backslash + 'n'.
      .options(Map("encoding" -> invalidCharset, "lineSep" -> "\n"))
      .json(testFile("test-data/utf16LE.json"))
      .count()
  }

  assert(exception.getMessage.contains(invalidCharset))
}
test("SPARK-23723: checking that the encoding option is case agnostic") {
  // Charset names are matched case-insensitively ("uTf-16lE" == "UTF-16LE").
  val fileName = "test-data/utf16LE.json"
  val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
  val jsonDF = spark.read.schema(schema)
    .option("multiline", "true")
    .options(Map("encoding" -> "uTf-16lE"))
    .json(testFile(fileName))

  checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
}

test("SPARK-23723: specified encoding is not matched to actual encoding") {
  // Declaring UTF-16BE for a UTF-16LE file yields garbage bytes, which in
  // FAILFAST mode must fail record parsing.
  val fileName = "test-data/utf16LE.json"
  val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
  val exception = intercept[SparkException] {
    spark.read.schema(schema)
      .option("mode", "FAILFAST")
      .option("multiline", "true")
      .options(Map("encoding" -> "UTF-16BE"))
      .json(testFile(fileName))
      .count()
  }
  val errMsg = exception.getMessage

  assert(errMsg.contains("Malformed records are detected in record parsing"))
}
/**
 * Decodes every "*json" part file found directly under `pathToJsonFiles`
 * using `expectedEncoding`, concatenates the decoded text, and asserts that
 * the trimmed result equals `expectedContent`.
 */
def checkEncoding(expectedEncoding: String, pathToJsonFiles: String,
expectedContent: String): Unit = {
val partFiles = new File(pathToJsonFiles)
.listFiles()
.filter(candidate => candidate.isFile && candidate.getName.endsWith("json"))
val decoded = partFiles.map { partFile =>
val rawBytes = Files.readAllBytes(partFile.toPath)
new String(rawBytes, expectedEncoding)
}
assert(decoded.mkString.trim == expectedContent)
}
// Writing with an explicit encoding must produce part files readable back
// with that same charset (verified byte-level via checkEncoding).
test("SPARK-23723: save json in UTF-32BE") {
val encoding = "UTF-32BE"
withTempPath { path =>
val df = spark.createDataset(Seq(("Dog", 42)))
df.write
.options(Map("encoding" -> encoding))
.json(path.getCanonicalPath)
checkEncoding(
expectedEncoding = encoding,
pathToJsonFiles = path.getCanonicalPath,
expectedContent = """{"_1":"Dog","_2":42}""")
}
}
// When no encoding option is given, output defaults to UTF-8.
test("SPARK-23723: save json in default encoding - UTF-8") {
withTempPath { path =>
val df = spark.createDataset(Seq(("Dog", 42)))
df.write.json(path.getCanonicalPath)
checkEncoding(
expectedEncoding = "UTF-8",
pathToJsonFiles = path.getCanonicalPath,
expectedContent = """{"_1":"Dog","_2":42}""")
}
}
// An unknown output charset fails the write job; the root cause
// (UnsupportedCharsetException) is checked via the printed stack trace because
// it is nested inside the SparkException.
test("SPARK-23723: wrong output encoding") {
val encoding = "UTF-128"
val exception = intercept[SparkException] {
withTempPath { path =>
val df = spark.createDataset(Seq((0)))
df.write
.options(Map("encoding" -> encoding))
.json(path.getCanonicalPath)
}
}
val baos = new ByteArrayOutputStream()
val ps = new PrintStream(baos, true, "UTF-8")
exception.printStackTrace(ps)
ps.flush()
assert(baos.toString.contains(
"java.nio.charset.UnsupportedCharsetException: UTF-128"))
}
// Round trip: write and read with identical encoding/lineSep options must
// reproduce the original dataset (2 partitions exercise multiple part files).
test("SPARK-23723: read back json in UTF-16LE") {
val options = Map("encoding" -> "UTF-16LE", "lineSep" -> "\\n")
withTempPath { path =>
val ds = spark.createDataset(Seq(("a", 1), ("b", 2), ("c", 3))).repartition(2)
ds.write.options(options).json(path.getCanonicalPath)
val readBack = spark
.read
.options(options)
.json(path.getCanonicalPath)
checkAnswer(readBack.toDF(), ds.toDF())
}
}
// Writing UTF-16/UTF-32 with multiline off is allowed; the produced bytes must
// match the record plus line separator encoded in the requested charset.
test("SPARK-23723: write json in UTF-16/32 with multiline off") {
Seq("UTF-16", "UTF-32").foreach { encoding =>
withTempPath { path =>
val ds = spark.createDataset(Seq(("a", 1))).repartition(1)
ds.write
.option("encoding", encoding)
.option("multiline", false)
.json(path.getCanonicalPath)
val jsonFiles = path.listFiles().filter(_.getName.endsWith("json"))
jsonFiles.foreach { jsonFile =>
val readback = Files.readAllBytes(jsonFile.toPath)
val expected = ("""{"_1":"a","_2":1}""" + "\\n").getBytes(Charset.forName(encoding))
assert(readback === expected)
}
}
}
}
/**
 * Registers one parameterized test that writes two JSON records separated by
 * `lineSep`, encoded with `encoding`, then reads them back with matching
 * options and checks the rows. When `inferSchema` is false a fixed schema
 * (f1 string, f2 int) is supplied instead of inference.
 * `id` only disambiguates the generated test name.
 */
def checkReadJson(lineSep: String, encoding: String, inferSchema: Boolean, id: Int): Unit = {
test(s"SPARK-23724: checks reading json in ${encoding} #${id}") {
val schema = new StructType().add("f1", StringType).add("f2", IntegerType)
withTempPath { path =>
val records = List(("a", 1), ("b", 2))
// Encode each record, then join the encoded byte arrays with the encoded separator.
val data = records
.map(rec => s"""{"f1":"${rec._1}", "f2":${rec._2}}""".getBytes(encoding))
.reduce((a1, a2) => a1 ++ lineSep.getBytes(encoding) ++ a2)
// NOTE(review): the stream is not closed in a finally block; a write failure
// would leak the handle. Acceptable for a test, but worth confirming.
val os = new FileOutputStream(path)
os.write(data)
os.close()
val reader = if (inferSchema) {
spark.read
} else {
spark.read.schema(schema)
}
val readBack = reader
.option("encoding", encoding)
.option("lineSep", lineSep)
.json(path.getCanonicalPath)
checkAnswer(readBack, records.map(rec => Row(rec._1, rec._2)))
}
}
}
// scalastyle:off nonascii
// Test matrix for checkReadJson: (test id, separator, encoding, infer schema).
// Separators deliberately cover single/multi character, control characters
// (0x1E), non-ASCII text, CR/LF combinations, and mixed-case charset names.
List(
(0, "|", "UTF-8", false),
(1, "^", "UTF-16BE", true),
(2, "::", "ISO-8859-1", true),
(3, "!!!@3", "UTF-32LE", false),
(4, 0x1E.toChar.toString, "UTF-8", true),
(5, "μ", "UTF-32BE", false),
(6, "ΠΊΡΠΊΡ", "CP1251", true),
(7, "sep", "utf-8", false),
(8, "\\r\\n", "UTF-16LE", false),
(9, "\\r\\n", "utf-16be", true),
(10, "\\u000d\\u000a", "UTF-32BE", false),
(11, "\\u000a\\u000d", "UTF-8", true),
(12, "===", "US-ASCII", false),
(13, "$^+", "utf-32le", true)
).foreach {
case (testNum, sep, encoding, inferSchema) => checkReadJson(sep, encoding, inferSchema, testNum)
}
// scalastyle:on nonascii
// Non-UTF-8 encodings cannot auto-detect record boundaries, so lineSep is
// mandatory and its absence must raise IllegalArgumentException.
test("SPARK-23724: lineSep should be set if encoding if different from UTF-8") {
val encoding = "UTF-16LE"
val exception = intercept[IllegalArgumentException] {
spark.read
.options(Map("encoding" -> encoding))
.json(testFile("test-data/utf16LE.json"))
.count()
}
assert(exception.getMessage.contains(
s"""The lineSep option must be specified for the $encoding encoding"""))
}
// A record with leading NUL bytes and a control character, used to exercise
// PERMISSIVE mode's corrupt-record handling.
private val badJson = "\\u0000\\u0000\\u0000A\\u0001AAA"
// Multi-line mode: the whole (corrupt) file lands in _corrupt_record with a null "a".
test("SPARK-23094: permissively read JSON file with leading nulls when multiLine is enabled") {
withTempPath { tempDir =>
val path = tempDir.getAbsolutePath
Seq(badJson + """{"a":1}""").toDS().write.text(path)
val expected = s"""${badJson}{"a":1}\\n"""
val schema = new StructType().add("a", IntegerType).add("_corrupt_record", StringType)
val df = spark.read.format("json")
.option("mode", "PERMISSIVE")
.option("multiLine", true)
.option("encoding", "UTF-8")
.schema(schema).load(path)
checkAnswer(df, Row(null, expected))
}
}
// Line mode: only the bad line becomes a corrupt record; the valid line parses.
test("SPARK-23094: permissively read JSON file with leading nulls when multiLine is disabled") {
withTempPath { tempDir =>
val path = tempDir.getAbsolutePath
Seq(badJson, """{"a":1}""").toDS().write.text(path)
val schema = new StructType().add("a", IntegerType).add("_corrupt_record", StringType)
val df = spark.read.format("json")
.option("mode", "PERMISSIVE")
.option("multiLine", false)
.option("encoding", "UTF-8")
.schema(schema).load(path)
checkAnswer(df, Seq(Row(1, null), Row(null, badJson)))
}
}
// Same corrupt input supplied via an in-memory Dataset[String] instead of files.
test("SPARK-23094: permissively parse a dataset contains JSON with leading nulls") {
checkAnswer(
spark.read.option("mode", "PERMISSIVE").option("encoding", "UTF-8").json(Seq(badJson).toDS()),
Row(badJson))
}
// With dropFieldIfAllNull=true, schema inference must drop columns whose
// values are all null (primitives), all null/empty (arrays), or reduce structs
// to the subset of non-all-null fields.
test("SPARK-23772 ignore column of all null values or empty array during schema inference") {
withTempPath { tempDir =>
val path = tempDir.getAbsolutePath
// primitive types
Seq(
"""{"a":null, "b":1, "c":3.0}""",
"""{"a":null, "b":null, "c":"string"}""",
"""{"a":null, "b":null, "c":null}""")
.toDS().write.text(path)
var df = spark.read.format("json")
.option("dropFieldIfAllNull", true)
.load(path)
// "a" is all-null and dropped; "c" widens to string across double/string/null.
var expectedSchema = new StructType()
.add("b", LongType).add("c", StringType)
assert(df.schema === expectedSchema)
checkAnswer(df, Row(1, "3.0") :: Row(null, "string") :: Row(null, null) :: Nil)
// arrays
Seq(
"""{"a":[2, 1], "b":[null, null], "c":null, "d":[[], [null]], "e":[[], null, [[]]]}""",
"""{"a":[null], "b":[null], "c":[], "d":[null, []], "e":null}""",
"""{"a":null, "b":null, "c":[], "d":null, "e":[null, [], null]}""")
.toDS().write.mode("overwrite").text(path)
df = spark.read.format("json")
.option("dropFieldIfAllNull", true)
.load(path)
// Only "a" carries real elements; b/c/d/e contain nothing but nulls/empties.
expectedSchema = new StructType()
.add("a", ArrayType(LongType))
assert(df.schema === expectedSchema)
checkAnswer(df, Row(Array(2, 1)) :: Row(Array(null)) :: Row(null) :: Nil)
// structs
Seq(
"""{"a":{"a1": 1, "a2":"string"}, "b":{}}""",
"""{"a":{"a1": 2, "a2":null}, "b":{"b1":[null]}}""",
"""{"a":null, "b":null}""")
.toDS().write.mode("overwrite").text(path)
df = spark.read.format("json")
.option("dropFieldIfAllNull", true)
.load(path)
// "b" never contains a usable field and is dropped entirely.
expectedSchema = new StructType()
.add("a", StructType(StructField("a1", LongType) :: StructField("a2", StringType)
:: Nil))
assert(df.schema === expectedSchema)
checkAnswer(df, Row(Row(1, "string")) :: Row(Row(2, null)) :: Row(null) :: Nil)
}
}
// UTF-16/UTF-32 without an explicit BOM-free setup are blacklisted for
// per-line (multiLine=false) reads and must be rejected.
test("SPARK-24190: restrictions for JSONOptions in read") {
for (encoding <- Set("UTF-16", "UTF-32")) {
val exception = intercept[IllegalArgumentException] {
spark.read
.option("encoding", encoding)
.option("multiLine", false)
.json(testFile("test-data/utf16LE.json"))
.count()
}
assert(exception.getMessage.contains("encoding must not be included in the blacklist"))
}
}
// count() must reflect only parseable records: each input mixes one valid
// record with one malformed/therefore-dropped record, so the count stays 2
// over the combined inputs checked by checkCount.
test("count() for malformed input") {
def countForMalformedJSON(expected: Long, input: Seq[String]): Unit = {
val schema = new StructType().add("a", StringType)
val strings = spark.createDataset(input)
val df = spark.read.schema(schema).json(strings)
assert(df.count() == expected)
}
def checkCount(expected: Long): Unit = {
val validRec = """{"a":"b"}"""
val inputs = Seq(
Seq("{-}", validRec),
Seq(validRec, "?"),
Seq("}", validRec),
Seq(validRec, """{"a": [1, 2, 3]}"""),
Seq("""{"a": {"a": "b"}}""", validRec)
)
inputs.foreach { input =>
countForMalformedJSON(expected, input)
}
}
checkCount(2)
// An empty string alone yields zero rows.
countForMalformedJSON(0, Seq(""))
}
// Blank and whitespace-only lines must be skipped by count() in line mode.
test("SPARK-26745: count() for non-multiline input with empty lines") {
withTempPath { tempPath =>
val path = tempPath.getCanonicalPath
Seq("""{ "a" : 1 }""", "", """ { "a" : 2 }""", " \\t ")
.toDS()
.repartition(1)
.write
.text(path)
assert(spark.read.json(path).count() === 2)
}
}
// Empty string values: every non-string/binary type must fail in FAILFAST
// mode; only StringType and BinaryType accept "".
test("SPARK-25040: empty strings should be disallowed") {
// Asserts that parsing {"a":""} as `dataType` raises a parse failure.
def failedOnEmptyString(dataType: DataType): Unit = {
val df = spark.read.schema(s"a ${dataType.catalogString}")
.option("mode", "FAILFAST").json(Seq("""{"a":""}""").toDS)
val errMessage = intercept[SparkException] {
df.collect()
}.getMessage
assert(errMessage.contains(
s"Failed to parse an empty string for data type ${dataType.catalogString}"))
}
// Asserts that parsing {"a":""} as `dataType` yields `expected`.
def emptyString(dataType: DataType, expected: Any): Unit = {
val df = spark.read.schema(s"a ${dataType.catalogString}")
.option("mode", "FAILFAST").json(Seq("""{"a":""}""").toDS)
checkAnswer(df, Row(expected) :: Nil)
}
failedOnEmptyString(BooleanType)
failedOnEmptyString(ByteType)
failedOnEmptyString(ShortType)
failedOnEmptyString(IntegerType)
failedOnEmptyString(LongType)
failedOnEmptyString(FloatType)
failedOnEmptyString(DoubleType)
failedOnEmptyString(DecimalType.SYSTEM_DEFAULT)
failedOnEmptyString(TimestampType)
failedOnEmptyString(DateType)
failedOnEmptyString(ArrayType(IntegerType))
failedOnEmptyString(MapType(StringType, IntegerType, true))
failedOnEmptyString(StructType(StructField("f1", IntegerType, true) :: Nil))
emptyString(StringType, "")
emptyString(BinaryType, "".getBytes(StandardCharsets.UTF_8))
}
// An empty dataset must not leave zero-byte json part files behind.
test("do not produce empty files for empty partitions") {
withTempPath { dir =>
val path = dir.getCanonicalPath
spark.emptyDataset[String].write.json(path)
val files = new File(path).listFiles()
assert(!files.exists(_.getName.endsWith("json")))
}
}
// PERMISSIVE parsing keeps the fields that did parse and records the raw
// input in _corrupt_record for the field that did not.
test("return partial result for bad records") {
val schema = "a double, b array<int>, c string, _corrupt_record string"
val badRecords = Seq(
"""{"a":"-","b":[0, 1, 2],"c":"abc"}""",
"""{"a":0.1,"b":{},"c":"def"}""").toDS()
val df = spark.read.schema(schema).json(badRecords)
checkAnswer(
df,
Row(null, Array(0, 1, 2), "abc", """{"a":"-","b":[0, 1, 2],"c":"abc"}""") ::
Row(0.1, null, "def", """{"a":0.1,"b":{},"c":"def"}""") :: Nil)
}
// Inference promotes ISO-8601 strings to timestamp only when every non-null
// value parses as a timestamp; any non-conforming value falls back to string.
test("inferring timestamp type") {
def schemaOf(jsons: String*): StructType = spark.read.json(jsons.toDS).schema
assert(schemaOf(
"""{"a":"2018-12-17T10:11:12.123-01:00"}""",
"""{"a":"2018-12-16T22:23:24.123-02:00"}""") === fromDDL("a timestamp"))
assert(schemaOf("""{"a":"2018-12-17T10:11:12.123-01:00"}""", """{"a":1}""")
=== fromDDL("a string"))
assert(schemaOf("""{"a":"2018-12-17T10:11:12.123-01:00"}""", """{"a":"123"}""")
=== fromDDL("a string"))
assert(schemaOf("""{"a":"2018-12-17T10:11:12.123-01:00"}""", """{"a":null}""")
=== fromDDL("a timestamp"))
assert(schemaOf("""{"a":null}""", """{"a":"2018-12-17T10:11:12.123-01:00"}""")
=== fromDDL("a timestamp"))
}
// Timestamps inferred on read must survive a write/read round trip when the
// same timestampFormat and UTC timezone options are used on both sides.
test("roundtrip for timestamp type inferring") {
val customSchema = new StructType().add("date", TimestampType)
withTempDir { dir =>
val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.json"
val timestampsWithFormat = spark.read
.option("timestampFormat", "dd/MM/yyyy HH:mm")
.json(datesRecords)
assert(timestampsWithFormat.schema === customSchema)
timestampsWithFormat.write
.format("json")
.option("timestampFormat", "yyyy-MM-dd HH:mm:ss")
.option(DateTimeUtils.TIMEZONE_OPTION, "UTC")
.save(timestampsWithFormatPath)
val readBack = spark.read
.option("timestampFormat", "yyyy-MM-dd HH:mm:ss")
.option(DateTimeUtils.TIMEZONE_OPTION, "UTC")
.json(timestampsWithFormatPath)
assert(readBack.schema === customSchema)
checkAnswer(readBack, timestampsWithFormat)
}
}
}
| actuaryzhang/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala | Scala | apache-2.0 | 91,787 |
package glint.models.client.async
import akka.actor.ActorRef
import com.typesafe.config.Config
import glint.messages.server.request.PushVectorDouble
import glint.messages.server.response.ResponseDouble
import glint.partitioning.Partitioner
/**
* Asynchronous implementation of a BigVector for doubles
*/
/**
 * Double-valued specialization of [[AsyncBigVector]].
 *
 * Supplies the two type-specific operations the generic base class needs:
 * building a typed push message and reading a single value out of a typed
 * pull response.
 */
private[glint] class AsyncBigVectorDouble(partitioner: Partitioner,
models: Array[ActorRef],
config: Config,
keys: Long)
extends AsyncBigVector[Double, ResponseDouble, PushVectorDouble](partitioner, models, config, keys) {
/**
 * Packs a request identifier plus parallel key/value arrays into a
 * [[PushVectorDouble]] message.
 *
 * @param id The identifier
 * @param keys The keys
 * @param values The values
 * @return The typed push message
 */
@inline
override protected def toPushMessage(id: Int, keys: Array[Long], values: Array[Double]): PushVectorDouble =
PushVectorDouble(id, keys, values)
/**
 * Reads the value at position `index` out of a [[ResponseDouble]].
 *
 * @param response The response
 * @param index The index
 * @return The value at that index
 */
@inline
override protected def toValue(response: ResponseDouble, index: Int): Double = {
response.values(index)
}
}
| rjagerman/glint | src/main/scala/glint/models/client/async/AsyncBigVectorDouble.scala | Scala | mit | 1,340 |
package library
import eu.ace_design.island.dsl.DiSLand
import eu.ace_design.island.map.IslandMap
import eu.ace_design.island.map.processes.AssignPitch
import eu.ace_design.island.stdlib.WhittakerDiagrams
/**
 * Library of weekly island maps built with the island DSL. Every `weekNN`
 * value is lazy and derives from a fixed 64-bit seed (`sNN`), so each map is
 * deterministic and only generated on first access.
 */
object Islands extends DiSLand {
val s12 = 0xFA3CC51778C0EDA6L
lazy val week12: IslandMap = {
createIsland shapedAs radial(factor = 0.42) withSize 1600 having 2000.faces builtWith Seq(
plateau(40), flowing(rivers = 15, distance = 0.75), withMoisture(soils.dry, distance = 1200),
AssignPitch, usingBiomes(WhittakerDiagrams.caribbean)) usingSeed s12
}
val s11 = 0x695D6D5C49335A7FL
lazy val week11: IslandMap = {
createIsland shapedAs radial(factor = 0.01) withSize 1600 having 2000.faces builtWith Seq(
plateau(30), flowing(rivers = 30, distance = 0.20), withMoisture(soils.wet, distance = 900),
AssignPitch, usingBiomes(WhittakerDiagrams.caribbean)) usingSeed s11
}
val s10 = 0x14380CD23C8EB97BL
lazy val week10: IslandMap = {
createIsland shapedAs radial(factor = 0.4) withSize 1600 having 2000.faces builtWith Seq(
plateau(55), flowing(rivers = 20, distance = 0.70), withMoisture(soils.wet, distance = 400),
AssignPitch, usingBiomes(WhittakerDiagrams.nordic)) usingSeed s10
}
val s09 = 0xEEC15736EA27AC82L
lazy val week09: IslandMap = {
createIsland shapedAs radial(factor = 1.65) withSize 1600 having 2000.faces builtWith Seq(
plateau(15), flowing(rivers = 15, distance = 0.70), withMoisture(soils.normal, distance = 500),
AssignPitch, usingBiomes(WhittakerDiagrams.caribbean)) usingSeed s09
}
val s08 = 0x31FE3642E86C572DL
lazy val week08: IslandMap = {
createIsland shapedAs radial(factor = 1.65) withSize 1600 having 2000.faces builtWith Seq(
plateau(20), flowing(rivers = 10, distance = 0.40), withMoisture(soils.wet, distance = 500),
AssignPitch, usingBiomes(WhittakerDiagrams.caribbean)) usingSeed s08
}
val s07 = 0xDFBF50881D18A3D1L
lazy val week07: IslandMap = {
createIsland shapedAs radial(factor = 1.45) withSize 1600 having 2000.faces builtWith Seq(
plateau(40), flowing(rivers = 40, distance = 0.40), withMoisture(soils.wet, distance = 300),
AssignPitch, usingBiomes(WhittakerDiagrams.caribbean)) usingSeed s07
}
// NOTE(review): s06 reuses the exact seed literal of s07 (0xDFBF50881D18A3D1L).
// The maps still differ through their other parameters, but confirm the
// duplication is intentional.
val s06 = 0xDFBF50881D18A3D1L
lazy val week06: IslandMap = {
createIsland shapedAs radial(factor = 1.55) withSize 1600 having 2000.faces builtWith Seq(
plateau(45), flowing(rivers = 45, distance = 0.90), withMoisture(soils.wet, distance = 500),
AssignPitch, usingBiomes(WhittakerDiagrams.caribbean)) usingSeed s06
}
// Large island
val s05 = 0xF5C79AC1683D63C4L
lazy val week05: IslandMap = {
createIsland shapedAs radial(factor = 1.05) withSize 1600 having 2000.faces builtWith Seq(
plateau(30), flowing(rivers = 30, distance = 0.90), withMoisture(soils.wet, distance = 500),
AssignPitch, usingBiomes(WhittakerDiagrams.caribbean)) usingSeed s05
}
// Large island
val s02 = 0xA022872CE09E2B9CL
lazy val week02: IslandMap = {
createIsland shapedAs radial(factor = 1.01) withSize 1600 having 2000.faces builtWith Seq(
plateau(25), flowing(rivers = 20, distance = 0.95), withMoisture(soils.wet, distance = 550),
AssignPitch, usingBiomes(WhittakerDiagrams.caribbean)) usingSeed s02
}
// Very small island
val s01 = 0xC212B31BDF5A67C9L
lazy val week01: IslandMap = {
createIsland shapedAs donut(35.percent, 10.percent) withSize 1600 having 2000.faces builtWith Seq(
plateau(15), flowing(rivers = 5, distance = 0.50), withMoisture(soils.dry, distance = 550),
AssignPitch, usingBiomes(WhittakerDiagrams.caribbean)) usingSeed s01
}
// Large crater lake with flowing rivers
val s52 = 0x1E4DDC10E2F381CL
lazy val week52: IslandMap = {
createIsland shapedAs donut(80.percent, 30.percent) withSize 1600 having 2000.faces builtWith Seq(
plateau(20), flowing(rivers = 15, distance = 0.6), withMoisture(soils.normal, distance = 950),
AssignPitch, usingBiomes(WhittakerDiagrams.nordic)) usingSeed s52
}
// Small island, easy to explore with the drone.
val s50 = 0xFD4800CB733BB8FBL
lazy val week50: IslandMap = {
createIsland shapedAs radial(0.95) withSize 1600 having 2000.faces builtWith Seq(
plateau(35), flowing(rivers = 30, distance = 0.5), withMoisture(soils.wet, distance = 350),
AssignPitch, usingBiomes(WhittakerDiagrams.caribbean)) usingSeed s50
}
// Small island, easy to explore with the drone.
val s49 = 0xA43264158C840E1CL
lazy val week49: IslandMap = {
createIsland shapedAs ellipsis(50.percent, 33.percent, 75) withSize 1600 having 2000.faces builtWith Seq(
plateau(15), flowing(rivers = 20, distance = 0.9), withMoisture(soils.wet, distance = 150),
AssignPitch, usingBiomes(WhittakerDiagrams.caribbean)) usingSeed s49
}
// Small island, easy to explore with the drone.
val s48 = 0xC3033B04FFBDB180L
lazy val week48: IslandMap = {
createIsland shapedAs donut(30.percent, 8.percent) withSize 1600 having 2000.faces builtWith Seq(
plateau(10), flowing(rivers = 10, distance = 0.4), withMoisture(soils.normal, distance = 200),
AssignPitch, usingBiomes(WhittakerDiagrams.caribbean)) usingSeed s48
}
// Small island, easy to explore with the drone.
val s47 = 0x72CBC0A8BEB5F77BL
lazy val week47: IslandMap = {
createIsland shapedAs donut(40.percent, 3.percent) withSize 1600 having 2000.faces builtWith Seq(
plateau(15), flowing(rivers = 5, distance = 0.8), withMoisture(soils.wet, distance = 100),
AssignPitch, usingBiomes(WhittakerDiagrams.nordic)) usingSeed s47
}
// Big island, easy to find.
val s46 = 0xEA1353A8F444831L
lazy val week46: IslandMap = {
createIsland shapedAs ellipsis(75.percent, 44.percent, 42) withSize 1600 having 2000.faces builtWith Seq(
plateau(10), flowing(rivers = 15, distance = 0.8), withMoisture(soils.dry, distance = 500),
AssignPitch, usingBiomes(WhittakerDiagrams.nordic)) usingSeed s46
}
// Big island, easy to find.
val s45 = 0xB03CA1A997813D02L
lazy val week45: IslandMap = {
createIsland shapedAs ellipsis(75.percent, 33.percent, 291) withSize 1600 having 2000.faces builtWith Seq(
plateau(30), flowing(rivers = 5, distance = 0.4), withMoisture(soils.wet, distance = 100),
AssignPitch, usingBiomes(WhittakerDiagrams.caribbean)) usingSeed s45
}
}
} | mosser/QGL-16-17 | arena/src/main/scala/library/Islands.scala | Scala | lgpl-3.0 | 6,430 |
package launchers
import java.io.{BufferedWriter, File, FileWriter}
import java.text.SimpleDateFormat
import java.util.{Date, TimerTask}
import org.joda.time.{DateTime, Period}
import com.typesafe.config.{Config, ConfigFactory}
import scala.io.Source
/**
 * Timer task that, on each scheduled run, reprocesses yesterday's data folders
 * for every configured project by handing each folder to the orchestrator
 * launcher, and appends progress/failure lines to the log file at
 * `dailyProcessorLogPath`.
 */
class DailyProcessLauncher(orchestratorLauncher: MEOrchestratorLauncher, baseFolder: String, projectIds: List[String],
dailyProcessorLogPath: String) extends TimerTask {
/** Entry point invoked by java.util.Timer: collect yesterday's folders and process them. */
def run(): Unit = {
println("Going to reprocess stuff")
val foldersToProcess : List[String] = getFoldersToProcess()
processFolders(foldersToProcess)
}
/** Yesterday's date rendered as "yyyy-MM-dd". */
def getYesterday() : String = {
val sdf = new SimpleDateFormat("yyyy-MM-dd")
val today = new DateTime()
val yesterday = today.minusDays(1)
sdf.format(yesterday.toDate)
}
/** One candidate folder per project id: <baseFolder>/<projectId>/<yesterday>/... */
def getFoldersToProcess(): List[String] ={
val yesterdayStr : String = getYesterday()
projectIds.flatMap(x => getSubfolders(s"${baseFolder}/${x}/${yesterdayStr}"))
}
/** Subfolders to process under a project/date folder; currently only "twitter". */
def getSubfolders(folderPath: String) : List[String] = {
List(folderPath + "/twitter")
}
/**
 * Runs the orchestrator for each folder, logging duration and exit status.
 * Fix: the log writer is now closed in a finally block, so the file handle
 * is no longer leaked when writing or processing throws.
 */
def processFolders(foldersToProcess: List[String]): Unit = {
val file = new File(dailyProcessorLogPath)
val bw = new BufferedWriter(new FileWriter(file))
try {
bw.write("Starting\\n")
for (folder <- foldersToProcess) {
val start = DateTime.now()
println(s"${start.toString} - Going to process folder ${folder}")
bw.write(s"${start.toString} - Going to process folder ${folder}\\n")
try {
// Blocks until the orchestrator finishes; 0 means success.
val launchedStatus : Int = orchestratorLauncher.launchOrchestratorAndWait(folder)
val spent = new Period(start, DateTime.now)
if(launchedStatus==0) {
println(s"Success! Status ${launchedStatus} Took ${spent.getHours}:${spent.getMinutes}:${spent.getSeconds} with folder ${folder}")
bw.write(s"Success! Status ${launchedStatus} Took ${spent.getHours}:${spent.getMinutes}:${spent.getSeconds} with folder ${folder}\\n")
}else{
println(s"-->Error!!! Status ${launchedStatus} Took ${spent.getHours}:${spent.getMinutes}:${spent.getSeconds} with folder ${folder}")
bw.write(s"-->Error!!! Status ${launchedStatus} Took ${spent.getHours}:${spent.getMinutes}:${spent.getSeconds} with folder ${folder}\\n")
}
println("----------\\n")
bw.write("----------\\n\\n")
} catch {
// One failing folder must not abort the remaining folders.
case e: Exception => {
println(s"Error executing folder: ${folder}")
println(e)
println(e.getMessage)
bw.write(s"Error executing folder ${folder}\\n")
bw.write(e.toString + "\\n")
bw.write(e.getMessage + "\\n")
}
}
}
val end = DateTime.now()
println(s"${end.toString} - Finished processing")
bw.write(s"${end.toString} - Finished processing\\n")
} finally {
bw.close()
}
}
}
| canademar/me_extractors | BRMProjectManager/src/main/scala/launchers/DailyProcessLauncher.scala | Scala | gpl-2.0 | 2,838 |
/*
* Copyright 2014 - 2015 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package slamdata
import scala.{Predef => P}
import scala.{collection => C}
import scala.collection.{immutable => I}
import scala.inline
/**
 * Restricted replacement for scala.Predef: re-exports only the types, values
 * and implicits the project wants available by default, forwarding each to
 * its scala/java definition. Aliases marked "remove or replace" are kept for
 * migration purposes only.
 */
object Predef extends LowPriorityImplicits {
// Core type and companion aliases forwarded to scala / java.lang.
type SuppressWarnings = java.lang.SuppressWarnings
type Array[T] = scala.Array[T]
val Array = scala.Array
type Boolean = scala.Boolean
type Byte = scala.Byte
type Char = scala.Char
type Double = scala.Double
val Function = scala.Function
type Int = scala.Int
val Int = scala.Int
type Long = scala.Long
val Long = scala.Long
type PartialFunction[-A, +B] = scala.PartialFunction[A, B]
val PartialFunction = scala.PartialFunction
type String = P.String
val StringContext = scala.StringContext
type Symbol = scala.Symbol
val Symbol = scala.Symbol
type Unit = scala.Unit
type Vector[+A] = scala.Vector[A]
val Vector = scala.Vector
type BigDecimal = scala.math.BigDecimal
val BigDecimal = scala.math.BigDecimal
type BigInt = scala.math.BigInt
val BigInt = scala.math.BigInt
// Collection aliases pinned to the immutable variants (I = scala.collection.immutable).
type Iterable[+A] = C.Iterable[A]
type IndexedSeq[+A] = C.IndexedSeq[A]
type ListMap[A, +B] = I.ListMap[A, B]
val ListMap = I.ListMap
type Map[A, +B] = I.Map[A, B]
val Map = I.Map
type Set[A] = I.Set[A]
val Set = I.Set
type Seq[+A] = I.Seq[A]
type Stream[+A] = I.Stream[A]
val Stream = I.Stream
val #:: = Stream.#::
// Selected helpers and implicit conversions forwarded from scala.Predef (P).
def ??? = P.???
def implicitly[T](implicit e: T) = P.implicitly[T](e)
implicit def $conforms[A] = P.$conforms[A]
implicit def ArrowAssoc[A] = P.ArrowAssoc[A] _
implicit def augmentString(x: String) = P.augmentString(x)
implicit def genericArrayOps[T] = P.genericArrayOps[T] _
implicit val wrapString = P.wrapString _
implicit val unwrapString = P.unwrapString _
@inline implicit val booleanWrapper = P.booleanWrapper _
@inline implicit val charWrapper = P.charWrapper _
@inline implicit val intWrapper = P.intWrapper _
// would rather not have these, but ...
def print(x: scala.Any) = scala.Console.print(x)
def println() = scala.Console.println()
def println(x: scala.Any) = scala.Console.println(x)
// remove or replace these
type List[+A] = I.List[A] // use scalaz.IList instead
val List = I.List
val Nil = I.Nil
val :: = I.::
type Option[A] = scala.Option[A] // use scalaz.Maybe instead
val Option = scala.Option
val None = scala.None
val Some = scala.Some
type Nothing = scala.Nothing // make functors invariant
type Throwable = java.lang.Throwable
type RuntimeException = java.lang.RuntimeException
}
/**
 * Lower-priority implicit scope for the custom Predef: keeps the generic
 * array-wrapping conversion from shadowing the more specific implicits
 * declared on the Predef object itself.
 */
abstract class LowPriorityImplicits {
implicit def genericWrapArray[T] = P.genericWrapArray[T] _
}
| wemrysi/quasar | core/src/main/scala/slamdata/Predef.scala | Scala | apache-2.0 | 3,321 |
/***********************************************************************
* Copyright (c) 2017-2019 IBM
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.cassandra.index
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import com.github.benmanes.caffeine.cache.{CacheLoader, Caffeine}
import org.locationtech.geomesa.cassandra.{ColumnSelect, NamedColumn, RowSelect}
import org.locationtech.geomesa.index.api._
import org.locationtech.geomesa.index.index.attribute.AttributeIndexKey
/**
 * Companion cache: builds and shares one AttributeColumnMapper per shard
 * count. Construction is deterministic (one inclusive [i, i] select on the
 * shard column per shard), so instances are safe to reuse.
 */
object AttributeColumnMapper {
private val cache = Caffeine.newBuilder().build(
new CacheLoader[Integer, AttributeColumnMapper]() {
override def load(shards: Integer): AttributeColumnMapper = {
val mappers = Seq.tabulate(shards) { i =>
ColumnSelect(CassandraColumnMapper.ShardColumn, i, i, startInclusive = true, endInclusive = true)
}
new AttributeColumnMapper(mappers)
}
}
)
// Returns the cached mapper for this shard count, creating it on first use.
def apply(shards: Int): AttributeColumnMapper = cache.get(shards)
}
/**
 * Maps attribute-index row key/values onto Cassandra columns:
 * (shard tinyint, attribute value text, tiered secondary key blob,
 * feature id text, serialized feature blob).
 */
class AttributeColumnMapper(shards: Seq[ColumnSelect]) extends CassandraColumnMapper {
private val Shard = CassandraColumnMapper.ShardColumn
private val Value = NamedColumn("attrVal", 1, "text", classOf[String])
private val Secondary = NamedColumn("secondary", 2, "blob", classOf[ByteBuffer])
private val FeatureId = CassandraColumnMapper.featureIdColumn(3)
private val Feature = CassandraColumnMapper.featureColumn(4)
override val columns: Seq[NamedColumn] = Seq(Shard, Value, Secondary, FeatureId, Feature)
// Binds one insert row in declared column order. A missing shard byte
// defaults to 0; the single serialized value becomes the feature blob.
override def bind(value: SingleRowKeyValue[_]): Seq[AnyRef] = {
val shard = Byte.box(if (value.shard.isEmpty) { 0 } else { value.shard.head })
val AttributeIndexKey(_, v, _) = value.key
val secondary = ByteBuffer.wrap(value.tier)
val fid = new String(value.id, StandardCharsets.UTF_8)
val Seq(feature) = value.values.map(v => ByteBuffer.wrap(v.value))
Seq(shard, v, secondary, fid, feature)
}
// Binds the primary-key columns only (no feature blob) for a delete.
override def bindDelete(value: SingleRowKeyValue[_]): Seq[AnyRef] = {
val shard = Byte.box(if (value.shard.isEmpty) { 0 } else { value.shard.head })
val AttributeIndexKey(_, v, _) = value.key
val secondary = ByteBuffer.wrap(value.tier)
val fid = new String(value.id, StandardCharsets.UTF_8)
Seq(shard, v, secondary, fid)
}
// Translates a scan range over AttributeIndexKey into per-shard column
// selects, optionally tiered by a min/max bound on the secondary column.
override def select(range: ScanRange[_], tieredKeyRanges: Seq[ByteRange]): Seq[RowSelect] = {
val primary = range.asInstanceOf[ScanRange[AttributeIndexKey]] match {
case SingleRowRange(row) => Seq(ColumnSelect(Value, row.value, row.value, startInclusive = true, endInclusive = true))
case BoundedRange(lo, hi) => Seq(ColumnSelect(Value, lo.value, hi.value, lo.inclusive, hi.inclusive))
case LowerBoundedRange(lo) => Seq(ColumnSelect(Value, lo.value, null, lo.inclusive, endInclusive = false))
case UpperBoundedRange(hi) => Seq(ColumnSelect(Value, null, hi.value, startInclusive = false, hi.inclusive))
// NOTE(review): the "zzzz" suffix is an approximate textual upper bound for a
// prefix scan (see the original TODO) - values sorting above prefix+"zzzz"
// would be missed; confirm this is acceptable for the expected key alphabet.
case PrefixRange(prefix) => Seq(ColumnSelect(Value, prefix.value, prefix.value + "zzzz", prefix.inclusive, endInclusive = false)) // TODO ?
case UnboundedRange(empty) => Seq.empty
case _ => throw new IllegalArgumentException(s"Unexpected range type $range")
}
// Tier bounds collapse to a single inclusive [min, max] select on the secondary column.
val clause = if (tieredKeyRanges.isEmpty) { primary } else {
val minTier = ByteRange.min(tieredKeyRanges)
val maxTier = ByteRange.max(tieredKeyRanges)
primary :+ ColumnSelect(Secondary, ByteBuffer.wrap(minTier), ByteBuffer.wrap(maxTier), startInclusive = true, endInclusive = true)
}
// Unbounded scans return a single unfiltered select; otherwise fan out per shard.
if (clause.isEmpty) { Seq(RowSelect(clause)) } else {
shards.map(s => RowSelect(clause.+:(s)))
}
}
}
| elahrvivaz/geomesa | geomesa-cassandra/geomesa-cassandra-datastore/src/main/scala/org/locationtech/geomesa/cassandra/index/AttributeColumnMapper.scala | Scala | apache-2.0 | 4,047 |
package org.bowlerframework.squeryl
import dao.{StringKeyedDao, LongKeyedDao}
import org.scalatest.FunSuite
import org.squeryl.PrimitiveTypeMode._
import org.bowlerframework.persistence._
/**
* Created by IntelliJ IDEA.
* User: wfaler
* Date: Oct 28, 2010
* Time: 11:21:43 PM
* To change this template use File | Settings | File Templates.
*/
/**
 * Integration tests for the Squeryl-backed DAOs (long-keyed and string-keyed)
 * against the in-memory test database. Each test wraps its work in
 * startTx/commitTx plus a Squeryl transaction block.
 */
class SquerylDaoTest extends FunSuite with InMemoryDbTest{
import Library._
val dao = new LongKeyedDao[Author](authors)
val personDao = new StringKeyedDao[Person](people)
// Full create / read / update / delete cycle for a long-keyed entity.
test("CRUD"){
startTx
transaction{
val author = new Author(0, "John","Doe", Some("johndoe@gmail.com"))
dao.create(author)
val id = author.id
val auth = dao.findById(id)
assert(auth != None)
assert(auth.get.id == id)
assert(auth.get.firstName == "John")
assert(auth.get.lastName == "Doe")
assert(auth.get.email.get == "johndoe@gmail.com")
val update = Author(id, "Wille", "Faler", None)
dao.update(update)
val updated = dao.findById(id)
assert(updated != None)
assert(updated.get.id == id)
assert(updated.get.firstName == "Wille")
assert(updated.get.lastName == "Faler")
assert(updated.get.email == None)
dao.delete(author)
assert(dao.findById(id) == None)
}
commitTx
}
// Verifies findAll() ordering plus offset/limit pagination over 15 rows.
test("findAll"){
startTx
transaction{
dao.create(new Author(0, "1","Doe", Some("johndoe@gmail.com")))
dao.create(new Author(0, "2","Doe", Some("johndoe@gmail.com")))
dao.create(new Author(0, "3","Doe", Some("johndoe@gmail.com")))
dao.create(new Author(0, "4","Doe", Some("johndoe@gmail.com")))
dao.create(new Author(0, "5","Doe", Some("johndoe@gmail.com")))
dao.create(new Author(0, "6","Doe", Some("johndoe@gmail.com")))
dao.create(new Author(0, "7","Doe", Some("johndoe@gmail.com")))
dao.create(new Author(0, "8","Doe", Some("johndoe@gmail.com")))
dao.create(new Author(0, "9","Doe", Some("johndoe@gmail.com")))
dao.create(new Author(0, "10","Doe", Some("johndoe@gmail.com")))
dao.create(new Author(0, "11","Doe", Some("johndoe@gmail.com")))
dao.create(new Author(0, "12","Doe", Some("johndoe@gmail.com")))
dao.create(new Author(0, "13","Doe", Some("johndoe@gmail.com")))
dao.create(new Author(0, "14","Doe", Some("johndoe@gmail.com")))
dao.create(new Author(0, "15","Doe", Some("johndoe@gmail.com")))
val all = dao.findAll()
assert(all.size == 15)
assert(all.last.firstName == "15")
assert(all.head.firstName == "1")
val firstTen = dao.findAll(0, 10)
println(firstTen.size)
assert(firstTen.size == 10)
assert(firstTen.last.firstName == "10")
assert(firstTen.head.firstName == "1")
// Second page: offset 10, limit 10 yields the remaining 5 rows.
val lastFive = dao.findAll(10, 10)
assert(lastFive.size == 5)
assert(lastFive.last.firstName == "15")
assert(lastFive.head.firstName == "11")
all.foreach(f => dao.delete(f))
}
commitTx
}
// DAO exposes the entity class it manages.
test("entity type"){
assert(dao.entityType == classOf[Author])
}
// DAO exposes the key class it manages.
test("key type"){
assert(dao.keyType == classOf[Long])
}
// Create/find/delete round trip for a string-keyed entity.
test("string keyed dao"){
startTx
transaction{
val person = new Person("wille", "faler")
personDao.create(person)
val p = personDao.findById("wille")
assert(p != None)
assert(p.get.id == "wille")
assert(p.get.name == "faler")
personDao.delete(p.get)
assert(None == personDao.findById("wille"))
}
commitTx
}
// Validator reports valid only while no entity with the same id exists.
test("test uniqueValidator"){
startTx
val person = new Person("wille", "faler")
val validator = new UniqueEntityValidator[Person, String]("id", personDao, {person.id})
transaction{
assert(validator.isValid)
personDao.create(person)
assert(!validator.isValid)
personDao.delete(person)
}
commitTx
}
// Transformer resolves a string id to the entity, or None for an unknown id.
test("test SquerylTransformer"){
startTx
transaction{
val author = new Author(0, "Jane","Doe", Some("janedoe@gmail.com"))
dao.create(author)
val id = author.id
val stringId = "" + id
val transformer = new EntityTransformer[Author, Long](dao)
val res = transformer.toValue(stringId)
assert(res != None)
assert(res.get.firstName == "Jane")
assert(res.get.lastName == "Doe")
assert(None == transformer.toValue("99999999999999999"))
dao.delete(author)
}
commitTx
}
}
| rkpandey/Bowler | squeryl-mapper/src/test/scala/org/bowlerframework/squeryl/SquerylDaoTest.scala | Scala | bsd-3-clause | 4,454 |
/* Copyright 2009-2016 EPFL, Lausanne */
// Leon verification benchmark: exercises a nested function (rec1) that
// captures outer locals b and c. Code must stay byte-stable for the
// regression suite, comments only.
object Nested3 {
def foo(a: BigInt): BigInt = {
require(a >= 0 && a <= 50)
val b = a + 2
val c = a + b
// rec1 closes over b and c; for d in [0, 50] it returns d + b + c.
def rec1(d: BigInt): BigInt = {
require(d >= 0 && d <= 50)
val e = d + b + c
e
}
// rec1(2) = 2 + (a + 2) + (2a + 2) = 3a + 6, which is > 0 for all a >= 0,
// discharging the postcondition.
rec1(2)
} ensuring(_ > 0)
}
| regb/leon | src/test/resources/regression/verification/purescala/valid/Nested3.scala | Scala | gpl-3.0 | 302 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.stats
import java.util.Date
import com.vividsolutions.jts.geom.Geometry
import org.junit.runner.RunWith
import org.locationtech.geomesa.curve.TimePeriod
import org.locationtech.geomesa.utils.geotools.GeoToolsDateFormat
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class Z3HistogramTest extends Specification with StatTestHelper {

  /** Builds a weekly Z3 histogram over (geom, dtg), optionally observing the shared test features. */
  def createStat(length: Int, observe: Boolean): Z3Histogram = {
    val histogram = Stat(sft, Stat.Z3Histogram("geom", "dtg", TimePeriod.Week, length))
    if (observe) {
      features.foreach(histogram.observe)
    }
    histogram.asInstanceOf[Z3Histogram]
  }

  def createStat(observe: Boolean = true): Z3Histogram = createStat(1024, observe)

  /** Parses an ISO date string (interpreted as UTC) into a java.util.Date. */
  def toDate(string: String): Date =
    Date.from(java.time.LocalDateTime.parse(string, GeoToolsDateFormat).toInstant(java.time.ZoneOffset.UTC))

  /** Parses a WKT string into a JTS geometry. */
  def toGeom(string: String): Geometry = WKTUtils.read(string)

  "HistogramZ3 stat" should {
    "work with geometries and dates" >> {
      "be empty initially" >> {
        val stat = createStat(observe = false)
        stat.isEmpty must beTrue
      }
      "correctly bin values" >> {
        val stat = createStat()
        stat.isEmpty must beFalse
        forall(0 until 100) { i =>
          val (week, offset) = stat.indexOf(toGeom(s"POINT(-$i ${i / 2})"), toDate(f"2012-01-01T${i%24}%02d:00:00.000Z"))
          stat.count(week, offset) must beBetween(1L, 21L)
        }
      }
      "serialize and deserialize" >> {
        val stat = createStat()
        val serializer = StatSerializer(sft)
        val roundTripped = serializer.deserialize(serializer.serialize(stat))
        roundTripped must beAnInstanceOf[Z3Histogram]
        val histogram = roundTripped.asInstanceOf[Z3Histogram]
        histogram.geom mustEqual stat.geom
        histogram.dtg mustEqual stat.dtg
        histogram.length mustEqual stat.length
        histogram.toJson mustEqual stat.toJson
      }
      "serialize and deserialize empty stats" >> {
        val stat = createStat(observe = false)
        val serializer = StatSerializer(sft)
        val roundTripped = serializer.deserialize(serializer.serialize(stat))
        roundTripped must beAnInstanceOf[Z3Histogram]
        val histogram = roundTripped.asInstanceOf[Z3Histogram]
        histogram.geom mustEqual stat.geom
        histogram.dtg mustEqual stat.dtg
        histogram.length mustEqual stat.length
        histogram.toJson mustEqual stat.toJson
      }
      "deserialize as immutable value" >> {
        val stat = createStat()
        val serializer = StatSerializer(sft)
        val roundTripped = serializer.deserialize(serializer.serialize(stat), immutable = true)
        roundTripped must beAnInstanceOf[Z3Histogram]
        val histogram = roundTripped.asInstanceOf[Z3Histogram]
        histogram.geom mustEqual stat.geom
        histogram.dtg mustEqual stat.dtg
        histogram.length mustEqual stat.length
        histogram.toJson mustEqual stat.toJson
        // Mutating an immutable deserialization must be rejected.
        roundTripped.clear must throwAn[Exception]
        roundTripped.+=(stat) must throwAn[Exception]
        roundTripped.observe(features.head) must throwAn[Exception]
        roundTripped.unobserve(features.head) must throwAn[Exception]
      }
      "clear" >> {
        val stat = createStat()
        stat.clear()
        stat.isEmpty must beTrue
        // Every previously-observed bin must be back to zero.
        forall(0 until 100) { i =>
          val (week, offset) = stat.indexOf(toGeom(s"POINT(-$i ${i / 2})"), toDate(f"2012-01-01T${i%24}%02d:00:00.000Z"))
          stat.count(week, offset) mustEqual 0
        }
        val (week, offset) = stat.indexOf(toGeom("POINT(-180 -90)"), toDate("2012-01-01T00:00:00.000Z"))
        stat.count(week, offset) mustEqual 0
      }
    }
  }
}
| ddseapy/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/stats/Z3HistogramTest.scala | Scala | apache-2.0 | 4,342 |
// Advent of Code 2015, day 3: moves on an infinite 2D grid, one step per
// input character ('^' up, 'v' down, '>' right, '<' left).
val path = scala.io.Source.fromFile("input.txt").toList

// Axis affected by each move character.
val coord = Map('>' -> 'x',
                '<' -> 'x',
                '^' -> 'y',
                'v' -> 'y')

// Signed offset applied along that axis.
val dir = Map('>' -> 1,
              '<' -> -1,
              '^' -> 1,
              'v' -> -1)

// Returns the set of grid positions occupied after each step. Uses a typed
// immutable Set[(Int, Int)] instead of accumulating cloned mutable Maps into
// a Set[Any]; the visited-count semantics are unchanged (the start position
// is included only if some step returns to it, as before). Unknown characters
// still raise NoSuchElementException via the `coord` lookup.
def locationsVisited(steps: List[Char]): Set[(Int, Int)] = {
  var x = 0
  var y = 0
  val visited = Set.newBuilder[(Int, Int)]
  for (step <- steps) {
    if (coord(step) == 'x') x += dir(step) else y += dir(step)
    visited += ((x, y))
  }
  visited.result()
}

// Part 1: houses visited by Santa alone.
val visited = locationsVisited(path)
println(s"part 1: ${visited.size}")

// Part 2: Santa and Robo-Santa alternate moves (even/odd indices).
val grouped = path.grouped(2).toList
val santaSteps = grouped.map(_(0))
val robotSteps = grouped.map(_(1))
val allVisited = locationsVisited(santaSteps) | locationsVisited(robotSteps)
println(s"part 2: ${allVisited.size}")
| dbjohnson/advent-of-code | solutions/day03/solution.scala | Scala | mit | 869 |
package chapter11
trait Functor[F[_]] {

  /** Lifts the function `a` into the context `F`, turning an `F[A]` into an `F[B]`. */
  def map[A, B](fa: F[A])(a: A => B): F[B]

  /** Splits a functor of pairs into a pair of functors (an "unzip"), using two maps. */
  def distribute[A, B](fab: F[(A, B)]): (F[A], F[B]) = {
    val firsts = map(fab)(_._1)
    val seconds = map(fab)(_._2)
    (firsts, seconds)
  }

  /** Pushes an `Either` of functor values inside the functor, tagging each element. */
  def codistribute[A, B](e: Either[F[A], F[B]]): F[Either[A, B]] =
    e.fold(l => map(l)(Left(_)), r => map(r)(Right(_)))
}
/**
* Copyright (C) 2013 Carnegie Mellon University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tdb.ddg
import akka.pattern.ask
import java.io._
import scala.collection.mutable
import scala.concurrent.{Await, ExecutionContext}
import tdb.Context
import tdb.Constants._
import tdb.messages._
/**
 * Renders a context's dynamic dependency graph (DDG) as graphviz-dot style
 * text, following ParNode branches into their remote tasks via actor messages.
 *
 * @param c         context whose DDG (rooted at `c.ddg.root`) is printed
 * @param _nextName first integer id to assign to graph nodes
 * @param output    writer receiving the dot fragments
 */
class DDGPrinter
    (c: Context,
     _nextName: Int,
     output: BufferedWriter)
    (implicit ec: ExecutionContext) {

  // Next unused integer id for labeling nodes in the dot output.
  var nextName = _nextName
  // Ids already assigned, so each node keeps a single id across visits.
  val names = mutable.Map[Node, Int]()

  /**
   * Writes the whole DDG starting from the root and returns the next unused
   * id, so printing can continue in another printer (e.g. a remote task).
   */
  def print(): Int = {
    dotsHelper(c.ddg.root, output)
    nextName
  }

  // Emits the shape/label line for `time`'s node, then its outgoing edges.
  // ParNode children live in other tasks: we send each task a
  // PrintDDGDotsMessage carrying the current id counter and block on the
  // returned counter so ids stay globally unique.
  private def dotsHelper(time: Timestamp, output: BufferedWriter) {
    val name = getName(time.node)

    time.node match {
      case readNode: ReadNode =>
        writeShape(name, "box", "read")
      case parNode: ParNode =>
        writeShape(name, "triangle", "par")
      case getNode: GetNode =>
        writeShape(name, "square", "get")
      case putNod: PutNode =>
        writeShape(name, "circle", "put")
      case putAllNode: PutAllNode =>
        writeShape(name, "circle", "putAll")
      case memoNode: MemoNode =>
        writeShape(name, "diamond", "memo")
      case rootNode: RootNode =>
        writeShape(name, "invtriangle", "root")
      case _ =>
        println("didn't match " + time.node.getClass)
    }

    time.node match {
      case parNode: ParNode =>
        // Both branches of the par run in remote tasks; each round-trip
        // returns the updated id counter.
        output.write(name + " -> " + nextName + "\\n")
        val f1 = c.resolver.send(
          parNode.taskId1, PrintDDGDotsMessage(nextName, output))
        nextName = Await.result(f1.mapTo[Int], DURATION)

        output.write(name + " -> " + nextName + "\\n")
        val f2 = c.resolver.send(
          parNode.taskId2, PrintDDGDotsMessage(nextName, output))
        nextName = Await.result(f2.mapTo[Int], DURATION)
      case _ =>
        // Local children: emit an edge per child and recurse.
        for (child <- c.ddg.ordering.getChildren(time, time.end)) {
          val childName = getName(child.node)
          output.write(name + " -> " + childName + "\\n")
          dotsHelper(child, output)
        }
    }
  }

  // Returns the id for `node`, assigning and recording a fresh one on first use.
  private def getName(node: Node) =
    if (names.contains(node)) {
      names(node)
    } else {
      names(node) = nextName
      nextName += 1
      nextName - 1
    }

  // Emits a single dot node declaration, e.g. `3 [shape=box label=read]`.
  private def writeShape(name: Int, shape: String, label: String) {
    output.write(name + s" [shape=$shape label=$label]\\n")
  }
}
| twmarshall/tdb | core/src/main/scala/tdb/ddg/DDGPrinter.scala | Scala | apache-2.0 | 2,856 |
package controllers.feedback
import com.fasterxml.jackson.databind.ObjectMapper
import controllers.ClaimScenarioFactory
import models.view.cache.EncryptedCacheHandling
import org.joda.time.DateTime
import org.specs2.mutable._
import utils.WithJsBrowser
import utils.pageobjects._
import utils.pageobjects.feedback.GFeedbackPage
// Browser-driven integration spec for the feedback page: validation errors,
// successful submission/redirect, and the cached-feedback bookkeeping that a
// submission leaves behind. Tests run sequentially because they share the
// feedback cache.
class GFeedbackIntegrationSpec extends Specification {
  sequential

  // Form value posted for the "Very Satisfied" radio option.
  val SatisfiedVSScore = "5"

  section("integration", models.domain.Feedback.id)
  "Feedback page" should {
    "be presented" in new WithJsBrowser with PageObjects {
      val page = GFeedbackPage(context)
      page goToThePage()
    }

    "contain errors on invalid submission" in new WithJsBrowser with PageObjects {
      // Submitting without selecting a satisfaction score must re-render with
      // the mandatory-field error.
      val page = GFeedbackPage(context)
      page goToThePage()

      val nextPage = page submitPage()
      nextPage must beAnInstanceOf[GFeedbackPage]
      nextPage.source must contain("How satisfied were you with the service today? - You must complete this section")
    }

    "navigate to next page on valid submission" in new WithJsBrowser with PageObjects {
      val page = GFeedbackPage(context)
      page goToThePage()
      page fillPageWith ClaimScenarioFactory.feedbackSatisfiedVS

      val nextPage = page submitPage()
      // default post submit url for GB is redirect to gov page
      nextPage.getUrl must contain("/anonymous-feedback/thankyou")
    }

    "add feedback item to memcache list and set json formatted string" in new WithJsBrowser with PageObjects {
      clearFBCache
      val page = GFeedbackPage(context)
      page goToThePage()
      page fillPageWith ClaimScenarioFactory.feedbackSatisfiedVS
      val nextPage = page submitPage()

      // NOTE(review): assumes the test session's cache key is "1234" — the
      // same key is used by all cache helpers below; confirm against the
      // test harness session setup.
      val encryptedCacheHandling = new EncryptedCacheHandling() {
        val cacheKey = "1234"
      }
      val fbkeylist = encryptedCacheHandling.getFeedbackList()
      // Exactly one entry: non-empty and no comma separator.
      fbkeylist mustNotEqual ("")
      fbkeylist must not contain (",")

      val jsonString = encryptedCacheHandling.getFeedbackFromCache(fbkeylist)
      jsonString must startWith("{")
      jsonString must endWith("}")
    }

    "add feedback item to memcache with correct core data values" in new WithJsBrowser with PageObjects {
      clearFBCache
      val page = GFeedbackPage(context)
      page goToThePage()
      page fillPageWith ClaimScenarioFactory.feedbackSatisfiedVS
      val nextPage = page submitPage()

      val mapFromCache = getFeedbackFromCache
      mapFromCache.get("satisfiedScore") mustEqual (SatisfiedVSScore)
      mapFromCache.get("origin") mustEqual ("GB")

      // The cached timestamp (epoch seconds) must fall within the last minute.
      val secsOneMinuteAgo = new DateTime().minusMinutes(1).getMillis / 1000
      val secsNow = new DateTime().getMillis / 1000
      val dateSecs=mapFromCache.get("datesecs")
      dateSecs.toLong must between(secsOneMinuteAgo, secsNow)

      mapFromCache.get("useragent").toString must contain("Mozilla")
    }

    "add feedback item to memcache claimOrCircs set correctly for Claim" in new WithJsBrowser with PageObjects {
      // Drives the raw /feedback/feedback route directly via the browser.
      clearFBCache
      browser.goTo("/feedback/feedback")
      browser.pageSource() must contain("id=\"satisfiedAnswer_VS\"")
      browser.click("#satisfiedAnswer_VS")
      browser.pageSource() must contain("id=\"send\"")
      browser.click("#send")
      val mapFromCache = getFeedbackFromCache
      mapFromCache.get("claimOrCircs") mustEqual ("Claim")
    }

    "add feedback item to memcache claimOrCircs set correctly for Circs" in new WithJsBrowser with PageObjects {
      // Same flow via the change-of-circumstances route.
      clearFBCache
      browser.goTo("/circumstances/feedback")
      browser.pageSource() must contain("id=\"satisfiedAnswer_VS\"")
      browser.click("#satisfiedAnswer_VS")
      browser.pageSource() must contain("id=\"send\"")
      browser.click("#send")
      val mapFromCache = getFeedbackFromCache
      mapFromCache.get("claimOrCircs") mustEqual ("Circs")
    }
  }
  section("integration", models.domain.ThirdParty.id)

  // Removes any previously cached feedback entries for the test session key.
  def clearFBCache() = {
    val encryptedCacheHandling = new EncryptedCacheHandling() {
      val cacheKey = "1234"
    }
    encryptedCacheHandling.removeFeedbackList()
  }

  // Reads the (single) cached feedback entry for the test session key and
  // parses its JSON payload into a mutable java HashMap of string fields.
  def getFeedbackFromCache() = {
    val encryptedCacheHandling = new EncryptedCacheHandling() {
      val cacheKey = "1234"
    }
    val fbkeylist = encryptedCacheHandling.getFeedbackList()
    val jsonString = encryptedCacheHandling.getFeedbackFromCache(fbkeylist)
    val objectMapper: ObjectMapper = new ObjectMapper
    val cacheObject = objectMapper.readValue(jsonString, classOf[java.util.HashMap[String,String]])
    cacheObject
  }
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/test/controllers/feedback/GFeedbackIntegrationSpec.scala | Scala | mit | 4,517 |
package com.wix.mysql.io
import java.io.{InputStream, OutputStream, StringReader}
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger
import org.specs2.mutable.SpecWithJUnit
class TimingOutProcessExecutorTest extends SpecWithJUnit {

  "TimingOutProcessExecutor" should {

    "throw an exception if command does not complete within provided timeout" in {
      // A process that never reports an exit code must trip the 1-second limit.
      val executor = new TimingOutProcessExecutor("cmd")
      val oneSecondInNanos = TimeUnit.MILLISECONDS.toNanos(1000)
      executor.waitFor(new FakeProcess(Integer.MAX_VALUE), oneSecondInNanos) must
        throwA[InterruptedException].like { case e => e.getMessage must contain("Timeout of 1 sec exceeded while waiting for 'cmd'")}
    }

    "return process exit code if command does complete within execution bounds" in {
      // The fake exits after three polls, well inside the 2-second budget.
      val executor = new TimingOutProcessExecutor("")
      executor.waitFor(new FakeProcess(3), TimeUnit.MILLISECONDS.toNanos(2000)) mustEqual 0
    }
  }
}
// Test double for java.lang.Process: throws IllegalThreadStateException (the
// JDK's "still running" signal) until exitValue() has been polled the
// configured number of times, then reports exit code 0.
class FakeProcess(val completeAfterNumberOfCalls: Int) extends Process {
  // Counts down once per exitValue() poll; zero means the process has "exited".
  val exitValueInvoctionCounter = new AtomicInteger(completeAfterNumberOfCalls)

  override def exitValue(): Int =
    if (exitValueInvoctionCounter.decrementAndGet() == 0) 0
    else throw new IllegalThreadStateException()

  override def destroy(): Unit = {}
  override def waitFor(): Int = ???
  override def getOutputStream: OutputStream = ???
  override def getErrorStream: InputStream = new FakeInputStream("err")
  override def getInputStream: InputStream = new FakeInputStream("err")
}

// InputStream backed by a fixed string, one character per read().
class FakeInputStream(collectedOutput: String) extends InputStream {
  val output = new StringReader(collectedOutput)
  override def read(): Int = output.read()
}
| wix/wix-embedded-mysql | wix-embedded-mysql/src/test/scala/com/wix/mysql/io/TimingOutProcessExecutorTest.scala | Scala | bsd-3-clause | 1,644 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.integrationtest
import scala.collection.JavaConverters._
import io.fabric8.kubernetes.api.model._
import io.fabric8.kubernetes.api.model.storage.StorageClassBuilder
import org.scalatest.concurrent.{Eventually, PatienceConfiguration}
import org.scalatest.time.{Milliseconds, Span}
import org.apache.spark.deploy.k8s.integrationtest.KubernetesSuite._
// Mixin for KubernetesSuite: verifies Spark driver/executor pods can mount a
// local persistent volume and that a DFS read/write job sees the same file
// contents through the PVC on both pods.
private[spark] trait PVTestsSuite { k8sSuite: KubernetesSuite =>
  import PVTestsSuite._

  // Creates the fixture storage objects: a no-provisioner StorageClass bound
  // on first consumer, a 1Gi local PV pinned to the single-node test host
  // (minikube / docker-desktop hostnames), and a matching PVC.
  private def setupLocalStorage(): Unit = {
    val scBuilder = new StorageClassBuilder()
      .withKind("StorageClass")
      .withApiVersion("storage.k8s.io/v1")
      .withNewMetadata()
        .withName(STORAGE_NAME)
      .endMetadata()
      .withProvisioner("kubernetes.io/no-provisioner")
      .withVolumeBindingMode("WaitForFirstConsumer")

    val pvBuilder = new PersistentVolumeBuilder()
      .withKind("PersistentVolume")
      .withApiVersion("v1")
      .withNewMetadata()
        .withName("test-local-pv")
      .endMetadata()
      .withNewSpec()
        .withCapacity(Map("storage" -> new Quantity("1Gi")).asJava)
        .withAccessModes("ReadWriteOnce")
        .withPersistentVolumeReclaimPolicy("Retain")
        .withStorageClassName("test-local-storage")
        .withLocal(new LocalVolumeSourceBuilder().withPath(VM_PATH).build())
        .withNewNodeAffinity()
          .withNewRequired()
            .withNodeSelectorTerms(new NodeSelectorTermBuilder()
              .withMatchExpressions(new NodeSelectorRequirementBuilder()
                .withKey("kubernetes.io/hostname")
                .withOperator("In")
                .withValues("minikube", "m01", "docker-for-desktop", "docker-desktop")
                .build()).build())
          .endRequired()
        .endNodeAffinity()
      .endSpec()

    val pvcBuilder = new PersistentVolumeClaimBuilder()
      .withKind("PersistentVolumeClaim")
      .withApiVersion("v1")
      .withNewMetadata()
        .withName(PVC_NAME)
      .endMetadata()
      .withNewSpec()
        .withAccessModes("ReadWriteOnce")
        .withStorageClassName("test-local-storage")
        .withResources(new ResourceRequirementsBuilder()
          .withRequests(Map("storage" -> new Quantity("1Gi")).asJava).build())
      .endSpec()

    kubernetesTestComponents
      .kubernetesClient
      .storage()
      .storageClasses()
      .create(scBuilder.build())

    kubernetesTestComponents
      .kubernetesClient
      .persistentVolumes()
      .create(pvBuilder.build())

    kubernetesTestComponents
      .kubernetesClient
      .persistentVolumeClaims()
      .create(pvcBuilder.build())
  }

  // Tears down the PVC, PV and StorageClass created by setupLocalStorage.
  private def deleteLocalStorage(): Unit = {
    kubernetesTestComponents
      .kubernetesClient
      .persistentVolumeClaims()
      .withName(PVC_NAME)
      .delete()

    kubernetesTestComponents
      .kubernetesClient
      .persistentVolumes()
      .withName(PV_NAME)
      .delete()

    kubernetesTestComponents
      .kubernetesClient
      .storage()
      .storageClasses()
      .withName(STORAGE_NAME)
      .delete()
  }

  // Polls (via `cat` exec'd inside the pod) until the mounted file shows the
  // expected contents, proving the PV is attached and readable in that pod.
  private def checkPVs(pod: Pod, file: String) = {
    Eventually.eventually(TIMEOUT, INTERVAL) {
      implicit val podName: String = pod.getMetadata.getName
      implicit val components: KubernetesTestComponents = kubernetesTestComponents
      val contents = Utils.executeCommand("cat", s"$CONTAINER_MOUNT_PATH/$file")
      assert(contents.toString.trim.equals(FILE_CONTENTS))
    }
  }

  // End-to-end: mount the PVC on driver and executors, run the DFS
  // read/write example against the mounted file, and verify both pod types
  // can see it. Restricted to minikube-style single-node clusters.
  test("PVs with local storage", pvTestTag, MinikubeTag) {
    sparkAppConf
      .set(s"spark.kubernetes.driver.volumes.persistentVolumeClaim.data.mount.path",
        CONTAINER_MOUNT_PATH)
      .set(s"spark.kubernetes.driver.volumes.persistentVolumeClaim.data.options.claimName",
        PVC_NAME)
      .set(s"spark.kubernetes.executor.volumes.persistentVolumeClaim.data.mount.path",
        CONTAINER_MOUNT_PATH)
      .set(s"spark.kubernetes.executor.volumes.persistentVolumeClaim.data.options.claimName",
        PVC_NAME)
    val file = Utils.createTempFile(FILE_CONTENTS, HOST_PATH)
    try {
      setupLocalStorage()
      runDFSReadWriteAndVerifyCompletion(
        FILE_CONTENTS.split(" ").length,
        driverPodChecker = (driverPod: Pod) => {
          doBasicDriverPodCheck(driverPod)
          checkPVs(driverPod, file)
        },
        executorPodChecker = (executorPod: Pod) => {
          doBasicExecutorPodCheck(executorPod)
          checkPVs(executorPod, file)
        },
        appArgs = Array(s"$CONTAINER_MOUNT_PATH/$file", s"$CONTAINER_MOUNT_PATH"),
        interval = Some(PV_TESTS_INTERVAL)
      )
    } finally {
      // make sure this always runs
      deleteLocalStorage()
    }
  }
}
// Fixture names, paths and timing constants shared by the PV tests.
private[spark] object PVTestsSuite {
  val STORAGE_NAME = "test-local-storage"
  val PV_NAME = "test-local-pv"
  val PVC_NAME = "test-local-pvc"
  // Where the PVC is mounted inside driver/executor containers.
  val CONTAINER_MOUNT_PATH = "/opt/spark/pv-tests"
  // Host-side directory for the temp file; overridable for CI environments.
  val HOST_PATH = sys.env.getOrElse("PVC_TESTS_HOST_PATH", "/tmp")
  // Path on the cluster VM backing the local PersistentVolume.
  val VM_PATH = sys.env.getOrElse("PVC_TESTS_VM_PATH", "/tmp")
  val FILE_CONTENTS = "test PVs"
  // Polling interval used when waiting for the PV-backed job to complete.
  val PV_TESTS_INTERVAL = PatienceConfiguration.Interval(Span(10, Milliseconds))
}
| wangmiao1981/spark | resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/PVTestsSuite.scala | Scala | apache-2.0 | 5,971 |
package definiti.core.validation.controls
import definiti.common.ast._
import definiti.common.control.{Control, ControlLevel, ControlResult}
import definiti.core.validation.helpers.TopLevelParameterControlHelper
private[core] object VerificationParameterUsableControl extends Control[Root] with TopLevelParameterControlHelper {
override val description: String = "Check if parameter types in verification are valid"
override val defaultLevel: ControlLevel.Value = ControlLevel.error
override def control(root: Root, library: Library): ControlResult = {
ControlResult.squash {
extractParameters(library)
.map(controlParameter(_, library))
}
}
private def extractParameters(library: Library): Seq[ParameterInfo] = {
library.verifications.flatMap { verification =>
verification.parameters.map(ParameterInfo(verification.fullName, _))
}
}
}
| definiti/definiti-core | src/main/scala/definiti/core/validation/controls/VerificationParameterUsableControl.scala | Scala | mit | 890 |
package edu.emory.mathcs.ir.liveqa.scoring.features
import edu.emory.mathcs.ir.liveqa.base.AnswerCandidate._
import edu.emory.mathcs.ir.liveqa.base.{AnswerCandidate, Question}
/**
* Created by dsavenk on 5/29/16.
*/
class SourceFeatures extends FeatureCalculation {
/**
* Compute a set of features for the given answer candidate.
*
* @param question Current question, for which the candidate was generated.
* @param answer Answer candidate to compute features for.
* @return A map from feature names to the corresponding values.
*/
override def computeFeatures(question: Question,
answer: AnswerCandidate): Map[String, Float] = {
Map[String, Float](answer.answerType match {
case WEB => "SourceWeb" -> 1f
case YAHOO_ANSWERS => "SourceYahooAnswers" -> 1f
case ANSWERS_COM => "SourceAnswers" -> 1f
case EHOW => "SourceEhow" -> 1f
case WEBMD => "SourceWebmd" -> 1f
case WIKIHOW => "SourceWikihow" -> 1f
case _ => "Other" -> 1f
})
}
}
| emory-irlab/liveqa | src/main/scala/edu/emory/mathcs/ir/liveqa/scoring/features/SourceFeatures.scala | Scala | mit | 1,050 |
/*
* Copyright 2001-2015 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.prop
import org.scalactic.anyvals._
import org.scalatest.FunSpec
import org.scalatest.Matchers
import org.scalatest.exceptions.TestFailedException
class PrettyFunction0Spec extends FunSpec with Matchers with GeneratorDrivenPropertyChecks {
describe("A PrettyFunction0") {
it("should return the constant passed to its constructor") {
forAll { (i: Int) =>
val f = new PrettyFunction0(i)
f() shouldBe i
}
}
it("should have a pretty toString") {
forAll { (i: Int) =>
val f = new PrettyFunction0(i)
f.toString shouldBe s"() => $i"
}
}
it("should offer an apply method in its companion object") {
forAll { (i: Int) =>
val f = PrettyFunction0(i)
f() shouldBe i
f.toString shouldBe s"() => $i"
}
}
it("should offer an equals method that compares the constant result") {
forAll { (i: Int) =>
val f = PrettyFunction0(i)
val g = PrettyFunction0(i)
f shouldEqual g
}
}
it("should offer a hashCode method that returns the hashCode of the constant result") {
forAll { (i: Int) =>
val f = PrettyFunction0(i)
val g = PrettyFunction0(i)
f.hashCode shouldEqual g.hashCode
}
}
}
}
| dotty-staging/scalatest | scalatest-test/src/test/scala/org/scalatest/prop/PrettyFunction0Spec.scala | Scala | apache-2.0 | 1,895 |
import stainless.lang._
object Aliasing {
case class Box(var value: BigInt) extends AnyHeapRef
def aliased(b1: Box, b2: Box): (BigInt, BigInt) = {
require(!(b1 refEq b2))
reads(Set(b1, b2))
modifies(Set(b2))
val b1old = b1.value
b2.value += 1
val b1new = b1.value
(b1old, b1new)
} ensuring(res =>
res._1 == res._2
)
}
| epfl-lara/stainless | frontends/benchmarks/full-imperative/valid/Aliasing.scala | Scala | apache-2.0 | 361 |
package com.landoop.streamreactor.connect.hive.sink.mapper
import org.apache.kafka.connect.data.{SchemaBuilder, Struct}
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
import scala.collection.JavaConverters._
class MetastoreSchemaAlignMapperTest extends AnyFunSuite with Matchers {

  // Fresh builder results per call; Connect schemas are built identically to
  // the inlined chains they replace.
  private def requiredString = SchemaBuilder.string().required().build()
  private def optionalString = SchemaBuilder.string().optional().build()

  test("pad optional missing fields with null") {
    val recordSchema = SchemaBuilder.struct()
      .field("a", requiredString)
      .field("b", requiredString)
      .field("c", requiredString)
      .build()
    val record = new Struct(recordSchema).put("a", "a").put("b", "b").put("c", "c")

    // Metastore knows one extra optional field "z" that the record lacks.
    val metastoreSchema = SchemaBuilder.struct()
      .field("a", requiredString)
      .field("b", requiredString)
      .field("c", requiredString)
      .field("z", optionalString)
      .build()

    val mapped = new MetastoreSchemaAlignMapper(metastoreSchema).map(record)
    mapped.schema().fields().asScala.map(_.name) shouldBe Seq("a", "b", "c", "z")
  }

  test("drop fields not specified in metastore") {
    val recordSchema = SchemaBuilder.struct()
      .field("a", requiredString)
      .field("b", requiredString)
      .field("c", requiredString)
      .build()
    val record = new Struct(recordSchema).put("a", "a").put("b", "b").put("c", "c")

    // Metastore only declares "a" and "b"; the record's "c" must be dropped.
    val metastoreSchema = SchemaBuilder.struct()
      .field("a", requiredString)
      .field("b", requiredString)
      .build()

    val mapped = new MetastoreSchemaAlignMapper(metastoreSchema).map(record)
    mapped.schema().fields().asScala.map(_.name) shouldBe Seq("a", "b")
  }
}
| datamountaineer/stream-reactor | kafka-connect-hive/src/test/scala/com/landoop/streamreactor/connect/hive/sink/mapper/MetastoreSchemaAlignMapperTest.scala | Scala | apache-2.0 | 1,904 |
package scalax.hash
package benchmark
object Random {

  /**
   * Returns a freshly allocated array of `n` random bytes.
   *
   * @param n number of bytes to generate (array size)
   */
  def bytes(n: Int): Array[Byte] = {
    // `val` instead of `var`: the array reference is never reassigned,
    // only its contents are filled in place.
    val a = new Array[Byte](n)
    util.Random.nextBytes(a)
    a
  }

  /** Returns a fresh 1 MiB (1048576-byte) array of random bytes per call. */
  def MB: Array[Byte] = bytes(1048576)
}
| wookietreiber/scala-hash | benchmarks/main/scala/Random.scala | Scala | bsd-2-clause | 178 |
package com.twitter.zk.coordination
import com.twitter.concurrent.Permit
import com.twitter.util.{Future, Promise, Return, Throw}
import com.twitter.zk.{StateEvent, ZkClient, ZNode}
import java.nio.charset.Charset
import java.util.concurrent.{ConcurrentLinkedQueue, RejectedExecutionException}
import org.apache.zookeeper.KeeperException.NoNodeException
import org.apache.zookeeper.{CreateMode, KeeperException}
/**
* ZkAsyncSemaphore is a distributed semaphore with asynchronous execution.
* Grabbing a permit constitutes a vote on the number of permits the semaphore
* can permit and returns a Future[Permit]. If consensus on the number of permits
* is lost, an exception is raised when acquiring a permit (so expect it).
*
* Care must be taken to handle zookeeper client session expiry. A ZkAsyncSemaphore cannot
* be used after the zookeeper session has expired. Likewise, any permits acquired
* via the session must be considered invalid. Additionally, it is the client's responsibility
* to determine if a permit is still valid in the case that the zookeeper client becomes
* disconnected.
*
* Attempts to clone AsyncSemaphore
*
* Ex.
* {{{
* implicit val timer = new JavaTimer(true)
* val connector = NativeConnector("localhost:2181", 5.seconds, 10.minutes)
* val zk = ZkClient(connector).withRetryPolicy(RetryPolicy.Basic(3))
* val path = "/testing/twitter/service/charm/shards"
* val sem = new ZkAsyncSemaphore(zk, path, 4)
*
* sem.acquire flatMap { permit =>
* Future { ... } ensure { permit.release }
* } // handle { ... }
* }}}
*/
class ZkAsyncSemaphore(
zk: ZkClient,
path: String,
numPermits: Int,
maxWaiters: Option[Int] = None) {
import ZkAsyncSemaphore._
require(numPermits > 0)
require(maxWaiters.getOrElse(0) >= 0)
private[this] val pathSeparator = "/"
private[this] val permitPrefix = "permit-"
private[this] val permitNodePathPrefix = Seq(path, permitPrefix).mkString(pathSeparator)
private[this] val futureSemaphoreNode = createSemaphoreNode()
private[this] val waitq = new ConcurrentLinkedQueue[(Promise[ZkSemaphorePermit], ZNode)]
private[this] class ZkSemaphorePermit(node: ZNode) extends Permit {
val zkPath: String = node.path
val sequenceNumber: Int = sequenceNumberOf(zkPath)
override def release(): Unit = {
node.delete()
}
}
@volatile
var numWaiters: Int = 0
@volatile
var numPermitsAvailable: Int = numPermits
def acquire(): Future[Permit] = synchronized {
val futurePermit = futureSemaphoreNode flatMap { semaphoreNode =>
zk(permitNodePathPrefix).create(
data = numPermits.toString.getBytes(Charset.forName("UTF8")),
mode = CreateMode.EPHEMERAL_SEQUENTIAL
)
}
futurePermit flatMap { permitNode =>
val mySequenceNumber = sequenceNumberOf(permitNode.path)
permitNodes() flatMap { permits =>
val sequenceNumbers = permits map { child => sequenceNumberOf(child.path) }
getConsensusNumPermits(permits) flatMap { consensusNumPermits =>
if (consensusNumPermits != numPermits) {
throw ZkAsyncSemaphore.PermitMismatchException(
"Attempted to create semaphore of %d permits when consensus is %d"
.format(numPermits, consensusNumPermits)
)
}
if (permits.size < numPermits) {
Future.value(new ZkSemaphorePermit(permitNode))
} else if (mySequenceNumber <= sequenceNumbers(numPermits - 1)) {
Future.value(new ZkSemaphorePermit(permitNode))
} else {
maxWaiters match {
case Some(max) if (waitq.size >= max) => {
MaxWaitersExceededException
}
case _ => {
val promise = new Promise[ZkSemaphorePermit]
waitq.add((promise, permitNode))
promise
}
}
}
} onFailure {
case err => {
permitNode.delete()
Future.exception(err)
}
}
}
}
}
/**
* Create the zookeeper path for this semaphore and set up handlers for all client events:
* - Monitor the tree for changes (for handling waiters, updating accounting)
* - Reject all current waiters if our session expires.
* - Check waiters if the client has reconnected and our session is still valid.
*/
private[this] def createSemaphoreNode(): Future[ZNode] = {
safeCreate(path) map { semaphoreNode =>
zk() map { client =>
// client is always connected here.
monitorSemaphore(semaphoreNode)
zk onSessionEvent {
case StateEvent.Expired => rejectWaitQueue()
case StateEvent.Connected => {
permitNodes() map { nodes => checkWaiters(nodes) }
monitorSemaphore(semaphoreNode)
}
}
}
semaphoreNode
}
}
/**
* Create intermediate zookeeper nodes as required so that the specified path exists.
*
* @param path The zookeeper node path to create.
* @return A Future ZNode that is satisfied when the full path exists.
*/
private[this] def safeCreate(path: String): Future[ZNode] = {
val nodes = path.split(pathSeparator) filter { !_.isEmpty }
val head = Future.value(zk(pathSeparator + nodes.head))
nodes.tail.foldLeft(head) { (futureParent, child) =>
futureParent flatMap { parent =>
val newParent = parent(child)
newParent.create() rescue {
case err: KeeperException.NodeExistsException => Future.value(newParent)
}
}
}
}
/**
* Return ''all'' permit requests for this semaphore.
*
* @return A Future sequence of ZNodes that exist for this semaphore (each a request for a Permit)
* The sequence includes both nodes that have entered the semaphore as well as waiters.
*/
private[this] def permitNodes(): Future[Seq[ZNode]] = {
futureSemaphoreNode.flatMap { semaphoreNode =>
semaphoreNode.getChildren() map { zop =>
zop.children.toSeq filter { child =>
child.path.startsWith(permitNodePathPrefix)
} sortBy (child => sequenceNumberOf(child.path))
}
}
}
/**
* Continuously monitor the semaphore node for changes. If there are permit promises in the waitq,
* check if the earliest can be fulfilled. Assumes the monitor cycles once per change (does not
* coalesce) and therefore needs only check head of queue.
*
* @param node The semaphore node parent of the permit children to monitor.
*/
private[this] def monitorSemaphore(node: ZNode) = {
val monitor = node.getChildren.monitor()
monitor foreach { tryChildren => tryChildren map { zop => checkWaiters(zop.children.toSeq) } }
}
/**
* Check the wait queue for waiters the can be satisfied with a Permit.
*
* @param nodes A sequence of ZNodes to check the wait queue against.
*/
private[this] def checkWaiters(nodes: Seq[ZNode]) = {
nodes.size match {
case length if length <= numPermits => {
numPermitsAvailable = numPermits - length
numWaiters = 0
}
case length => {
numPermitsAvailable = 0
numWaiters = length - numPermits
}
}
val permits = nodes filter { child =>
child.path.startsWith(permitNodePathPrefix)
} sortBy (child => sequenceNumberOf(child.path))
val ids = permits map { child => sequenceNumberOf(child.path) }
val waitqIterator = waitq.iterator()
while (waitqIterator.hasNext) {
val (promise, permitNode) = waitqIterator.next()
val id = sequenceNumberOf(permitNode.path)
if (!permits.contains(permitNode)) {
promise.setException(
PermitNodeException(
"Node for this permit has been deleted (client released, session expired, or tree was clobbered)."
)
)
} else if (permits.size < numPermits) {
promise.setValue(new ZkSemaphorePermit(permitNode))
waitqIterator.remove()
} else if (id <= ids(numPermits - 1)) {
promise.setValue(new ZkSemaphorePermit(permitNode))
waitqIterator.remove()
}
}
}
/**
* Reject all waiting requests for permits. This must be done when the zookeeper client session
* has expired.
*/
private[this] def rejectWaitQueue() = {
val waitqIterator = waitq.iterator()
while (waitqIterator.hasNext) {
val (promise, _) = waitqIterator.next()
waitqIterator.remove()
promise.setException(PermitNodeException("ZooKeeper client session expired."))
}
}
/**
 * Determine the numPermits value agreed upon by existing clients. The '''data''' section of
 * each node in {{{permits}}} must hold a UTF-8 string representation of an integer: the
 * numPermits the owning client's semaphore instance was created with. These act as votes.
 * A single largest voting bloc wins; a tie between leading blocs means no consensus.
 *
 * @param permits A sequence of ZNodes used to vote on the number of Permits that this semaphore
 * will provide.
 * @return A Future Int representing the number of Permits that may be provided by this semaphore.
 * @throws LackOfConsensusException When there is no consensus.
 */
private[this] def getConsensusNumPermits(permits: Seq[ZNode]): Future[Int] = {
  Future.collect(permits map numPermitsOf) map { votes =>
    // Discard non-positive votes (numPermitsOf yields -1 for unreadable/deleted nodes).
    val votesByValue = votes.filter(_ > 0).groupBy(identity)
    val believerCounts = votesByValue map { case (value, voters) => (value, voters.size) }
    val (winningValue, winningCount) = believerCounts.maxBy { case (_, count) => count }
    // How many distinct values share the winning count; more than one means a tie.
    val leadingGroups = believerCounts.values.count(_ == winningCount)
    if (leadingGroups == 1) {
      // Consensus reached
      winningValue
    } else {
      // No consensus or this vote breaks consensus (two votes in discord)
      throw LackOfConsensusException(
        "Cannot create semaphore with %d permits. Loss of consensus on %d permits."
          .format(numPermits, winningValue)
      )
    }
  }
}
/**
 * Extract the numeric sequence suffix from a permit node path.
 * Throws when the path is not under the expected permit prefix.
 */
private[this] def sequenceNumberOf(path: String): Int =
  if (path.startsWith(permitNodePathPrefix))
    path.drop(permitNodePathPrefix.length).toInt
  else
    throw new Exception("Path does not match the permit node prefix")
/**
 * Read the number of permits recorded in a permit node's data.
 *
 * The node data is expected to be a UTF-8 string holding an integer. Returns -1 when
 * the data is not a parseable integer, or when the node no longer exists (i.e. the
 * permit was released after the permit list was fetched); any other failure is
 * propagated through the Future.
 */
private[coordination] def numPermitsOf(node: ZNode): Future[Int] = {
  // StandardCharsets.UTF_8 avoids the per-call registry lookup done by
  // Charset.forName("UTF8") ("UTF8" is an alias of the same charset).
  import java.nio.charset.StandardCharsets
  node.getData().transform {
    case Return(data: ZNode.Data) =>
      try {
        Future.value(new String(data.bytes, StandardCharsets.UTF_8).toInt)
      } catch {
        case _: NumberFormatException => Future.value(-1)
      }
    case Throw(t: NoNodeException) =>
      // This permit was released (i.e. after we got the list of permits).
      Future.value(-1)
    case Throw(t) => Future.exception(t)
  }
}
}
object ZkAsyncSemaphore {
  /** Raised when the permit nodes cannot agree on a single numPermits value. */
  case class LackOfConsensusException(msg: String) extends Exception(msg)
  /** Raised on conflicting permit counts (thrown elsewhere in the semaphore implementation). */
  case class PermitMismatchException(msg: String) extends Exception(msg)
  /** Raised when a permit's backing znode is gone or the ZooKeeper session expired. */
  case class PermitNodeException(msg: String) extends Exception(msg)
  // Shared, pre-constructed failure result for when the max-waiters limit is hit.
  private val MaxWaitersExceededException =
    Future.exception(new RejectedExecutionException("Max waiters exceeded"))
}
| twitter/util | util-zk/src/main/scala/com/twitter/zk/coordination/ZkAsyncSemaphore.scala | Scala | apache-2.0 | 11,447 |
package com.twitter.finagle.util
import java.net.{InetAddress, InetSocketAddress, SocketAddress, UnknownHostException}
import scala.collection.breakOut
object InetSocketAddressUtil {
  /** A (hostname, port) pair. */
  type HostPort = (String, Int)
  // Placeholder address for transports that have no real peer address.
  private[finagle] val unconnected =
    new SocketAddress { override def toString = "unconnected" }
  /** converts 0.0.0.0 -> public ip in bound ip */
  def toPublic(bound: SocketAddress): SocketAddress = {
    bound match {
      case addr: InetSocketAddress if addr.getAddress().isAnyLocalAddress() =>
        // Bound to the wildcard address: substitute the local host's address,
        // falling back to loopback when the local hostname cannot be resolved.
        val host = try InetAddress.getLocalHost() catch {
          case _: UnknownHostException => InetAddress.getLoopbackAddress
        }
        new InetSocketAddress(host, addr.getPort())
      case _ => bound
    }
  }
  /**
   * Parses a comma or space-delimited string of hostname and port pairs into scala pairs.
   * For example,
   *
   * InetSocketAddressUtil.parseHostPorts("127.0.0.1:11211") => Seq(("127.0.0.1", 11211))
   *
   * A port of "*" is parsed as 0 (ephemeral port).
   *
   * @param hosts a comma or space-delimited string of hostname and port pairs.
   * @throws IllegalArgumentException if host and port are not both present
   *
   */
  def parseHostPorts(hosts: String): Seq[HostPort] =
    hosts split Array(' ', ',') filter (_.nonEmpty) map (_.split(":")) map { hp =>
      // NOTE(review): this require already rejects anything that is not exactly
      // host:port, which appears to make the `case _` branch below unreachable --
      // confirm before relying on its error message.
      require(hp.length == 2, "You must specify host and port")
      hp match {
        case Array(host, "*") => (host, 0)
        case Array(host, portStr) => (host, portStr.toInt)
        case _ => throw new IllegalArgumentException("Malformed host/port specification: " + hosts)
      }
    }
  /**
   * Resolves a sequence of host port pairs into a set of socket addresses. For example,
   *
   * InetSocketAddressUtil.resolveHostPorts(Seq(("127.0.0.1", 11211))) = Set(new InetSocketAddress("127.0.0.1", 11211))
   *
   * @param hostPorts a sequence of host port pairs
   * @throws java.net.UnknownHostException if some host cannot be resolved
   */
  def resolveHostPorts(hostPorts: Seq[HostPort]): Set[SocketAddress] =
    resolveHostPortsSeq(hostPorts).flatten.toSet
  // Resolves each host to all of its addresses, preserving the input grouping.
  // Performs blocking DNS lookups via InetAddress.getAllByName.
  private[finagle] def resolveHostPortsSeq(hostPorts: Seq[HostPort]): Seq[Seq[SocketAddress]] =
    hostPorts map { case (host, port) =>
      InetAddress.getAllByName(host).map { addr =>
        new InetSocketAddress(addr, port)
      }(breakOut)
    }
  /**
   * Parses a comma or space-delimited string of hostname and port pairs. For example,
   *
   * InetSocketAddressUtil.parseHosts("127.0.0.1:11211") => Seq(new InetSocketAddress("127.0.0.1", 11211))
   *
   * @param hosts a comma or space-delimited string of hostname and port pairs. Or, if it is
   * ":*" then a single InetSocketAddress using an ephemeral port will be returned.
   *
   * @throws IllegalArgumentException if host and port are not both present
   */
  def parseHosts(hosts: String): Seq[InetSocketAddress] = {
    // Special case: ":*" means "any interface, ephemeral port".
    if (hosts == ":*") return Seq(new InetSocketAddress(0))
    parseHostPorts(hosts).map[InetSocketAddress, List[InetSocketAddress]] { case (host, port) =>
      // An empty host means bind to the wildcard address.
      if (host == "")
        new InetSocketAddress(port)
      else
        new InetSocketAddress(host, port)
    }(breakOut)
  }
}
| koshelev/finagle | finagle-core/src/main/scala/com/twitter/finagle/util/InetSocketAddressUtil.scala | Scala | apache-2.0 | 3,150 |
// scalac: -Xplugin:. -Xplugin-list
package sample
// just a sample that is compiled with the sample plugin enabled
object Sample extends App {
  // Intentionally empty: this file exists only to be compiled with the sample
  // compiler plugin enabled (see the scalac flags at the top of the file).
}
| lrytz/scala | test/files/neg/t6446-list/sample_2.scala | Scala | apache-2.0 | 147 |
object Test {
  def main(args: Array[String]): Unit = {
    import collection.mutable._
    // Build two independent single-element LinkedLists and print them, then
    // check that structural equality holds between them.
    val x4 = LinkedList[Int](1)
    println(x4)
    val y4 = LinkedList[Int](1)
    println(y4)
    println(x4 equals y4) // or (y4 equals x4)
  }
}
| yusuke2255/dotty | tests/pending/run/t2212.scala | Scala | bsd-3-clause | 237 |
object WithStat {
  // IDE completion-order test fixture; <caret> marks where completion is invoked.
  val a = new A
  a.<caret>
  class A {
    def fboo = ???
    def fbar = ???
  }
}
| whorbowicz/intellij-scala | testdata/completion3/order/WithStat.scala | Scala | apache-2.0 | 103 |
/*
* Copyright (c) 2013 Christos KK Loverdos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ckkloverdos.thrift3r.codec
package collection
import com.ckkloverdos.thrift3r.BinReprType
import com.ckkloverdos.thrift3r.collection.builder.CollectionBuilderFactory
import com.ckkloverdos.thrift3r.protocol.Protocol
import com.ckkloverdos.thrift3r.protocol.helper.ProtocolHelpers
import com.google.common.reflect.TypeToken
import scala.collection.GenMap
/**
* Codec for Scala Maps.
*
* @author Christos KK Loverdos <loverdos@gmail.com>
*/
case class ScalaMapCodec[A, B, M](
  typeToken: TypeToken[GenMap[A, B]],
  keyCodec: Codec[A],
  valueCodec: Codec[B],
  meta: M,
  builderFactory: CollectionBuilderFactory[(A, B), GenMap[A, B], M]
) extends Codec[GenMap[A, B]] with UnsupportedDirectStringTransformations[GenMap[A, B]] {
  // Maps use the Thrift MAP binary representation.
  def binReprType = BinReprType.MAP
  /** Writes all key/value pairs of `map` to the protocol using the element codecs. */
  def encode(protocol: Protocol, map: GenMap[A, B]) {
    ProtocolHelpers.writeMap(protocol, keyCodec, valueCodec, map)
  }
  /** Reads key/value pairs from the protocol and rebuilds a map via the builder factory. */
  def decode(protocol: Protocol) = {
    val builder = builderFactory.newBuilder(meta)
    ProtocolHelpers.readMap[A, B](protocol, keyCodec, valueCodec, builder.add(_))
    builder.build
  }
}
| loverdos/thrift3r | src/main/scala/com/ckkloverdos/thrift3r/codec/collection/ScalaMapCodec.scala | Scala | apache-2.0 | 1,711 |
/*
* Copyright 2015 β 2016 Paul Horn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.knutwalker.akka.typed
import akka.actor.ActorSystem
import akka.util.Timeout
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag
object Shutdown {
  /** Terminates the actor system and blocks until shutdown has completed. */
  def apply(system: ActorSystem): Unit = {
    // Block without a timeout; discard the Terminated value.
    Await.result(system.terminate(), Duration.Inf)
    ()
  }
}
object TimeoutMessage {
  /**
   * Builds the ask-timeout failure text for `ref` and message type `A`.
   * NOTE(review): presumably mirrors Akka's AskTimeoutException wording so
   * tests can compare against it -- confirm against the Akka version in use.
   */
  def apply[A](ref: ActorRef[A])(implicit ct: ClassTag[A], timeout: Timeout): String = {
    s"""Ask timed out on [$ref] after [${timeout.duration.toMillis} ms]. Sender[null] sent message of type "${ct.runtimeClass.getName}"."""
  }
}
| knutwalker/typed-actors | tests/src/test/scala-akka-2.4.x/de/knutwalker/akka/typed/Shutdown.scala | Scala | apache-2.0 | 1,181 |
import java.io.File
import org.slf4j.LoggerFactory
import play.api._
object Global extends GlobalSettings {
  /**
   * Resolves the application data directory before the configuration is used.
   *
   * Precedence: an explicit "appDir" config entry, else the "user.dir" config
   * entry, else the configuration file's own path; the fallback is suffixed
   * with "/migthycrawler". The directory is created if missing and the resolved
   * value is exposed back to the application under the "appDir" key.
   */
  override def onLoadConfig(config: Configuration, path: File, classloader: ClassLoader, mode: Mode.Mode): Configuration = {
    LoggerFactory.getLogger(getClass).info("Application configuration loading")
    val applicationDir = config.getString("appDir") match {
      case Some(pathString) => pathString
      // Was `getOrElse(path)`, which widened the expression to Any and relied on
      // any2stringadd; File.getPath yields the same string as File.toString, so
      // behavior is unchanged while keeping this a plain String concatenation.
      // NOTE(review): "migthycrawler" looks like a typo for "mightycrawler" --
      // kept as-is because existing installations may already use this directory.
      case None => config.getString("user.dir").getOrElse(path.getPath) + "/migthycrawler"
    }
    val file = new File(applicationDir)
    file.mkdirs()
    val from: Configuration = Configuration.from(Map("appDir" -> applicationDir))
    super.onLoadConfig(config ++ from, path, classloader, mode)
  }
}
| computerlove/Mightycrawler-Launcer | app/Global.scala | Scala | apache-2.0 | 717 |
package aia.next
import akka.actor._
object Shopper {
  /** Base type for Shopper messages; every command names the shopper it targets. */
  trait Command {
    def shopperId: Long
  }
}
| RayRoestenburg/akka-in-action | chapter-looking-ahead/src/main/scala/aia/next/Shopper.scala | Scala | mit | 107 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.sort
import org.mockito.Mockito._
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.scalatest.Matchers
import org.apache.spark._
import org.apache.spark.serializer.{JavaSerializer, KryoSerializer, Serializer}
/**
* Tests for the fallback logic in UnsafeShuffleManager. Actual tests of shuffling data are
* performed in other suites.
*/
class SortShuffleManagerSuite extends SparkFunSuite with Matchers {
  import SortShuffleManager.canUseSerializedShuffle
  // Default mock answer: fail loudly if a test touches any method that the
  // mock did not explicitly stub.
  private class RuntimeExceptionAnswer extends Answer[Object] {
    override def answer(invocation: InvocationOnMock): Object = {
      throw new RuntimeException("Called non-stubbed method, " + invocation.getMethod.getName)
    }
  }
  // Builds a mock ShuffleDependency exposing exactly the given fields; any
  // other method call trips RuntimeExceptionAnswer above.
  private def shuffleDep(
      partitioner: Partitioner,
      serializer: Serializer,
      keyOrdering: Option[Ordering[Any]],
      aggregator: Option[Aggregator[Any, Any, Any]],
      mapSideCombine: Boolean): ShuffleDependency[Any, Any, Any] = {
    val dep = mock(classOf[ShuffleDependency[Any, Any, Any]], new RuntimeExceptionAnswer())
    doReturn(0).when(dep).shuffleId
    doReturn(partitioner).when(dep).partitioner
    doReturn(serializer).when(dep).serializer
    doReturn(keyOrdering).when(dep).keyOrdering
    doReturn(aggregator).when(dep).aggregator
    doReturn(mapSideCombine).when(dep).mapSideCombine
    dep
  }
  test("supported shuffle dependencies for serialized shuffle") {
    // Kryo supports object relocation, so it is eligible for serialized shuffle.
    val kryo = new KryoSerializer(new SparkConf())
    assert(canUseSerializedShuffle(shuffleDep(
      partitioner = new HashPartitioner(2),
      serializer = kryo,
      keyOrdering = None,
      aggregator = None,
      mapSideCombine = false
    )))
    val rangePartitioner = mock(classOf[RangePartitioner[Any, Any]])
    when(rangePartitioner.numPartitions).thenReturn(2)
    assert(canUseSerializedShuffle(shuffleDep(
      partitioner = rangePartitioner,
      serializer = kryo,
      keyOrdering = None,
      aggregator = None,
      mapSideCombine = false
    )))
    // Shuffles with key orderings are supported as long as no aggregator is specified
    assert(canUseSerializedShuffle(shuffleDep(
      partitioner = new HashPartitioner(2),
      serializer = kryo,
      keyOrdering = Some(mock(classOf[Ordering[Any]])),
      aggregator = None,
      mapSideCombine = false
    )))
  }
  test("unsupported shuffle dependencies for serialized shuffle") {
    val kryo = new KryoSerializer(new SparkConf())
    val java = new JavaSerializer(new SparkConf())
    // We only support serializers that support object relocation
    assert(!canUseSerializedShuffle(shuffleDep(
      partitioner = new HashPartitioner(2),
      serializer = java,
      keyOrdering = None,
      aggregator = None,
      mapSideCombine = false
    )))
    // The serialized shuffle path do not support shuffles with more than 16 million output
    // partitions, due to a limitation in its sorter implementation.
    assert(!canUseSerializedShuffle(shuffleDep(
      partitioner = new HashPartitioner(
        SortShuffleManager.MAX_SHUFFLE_OUTPUT_PARTITIONS_FOR_SERIALIZED_MODE + 1),
      serializer = kryo,
      keyOrdering = None,
      aggregator = None,
      mapSideCombine = false
    )))
    // We do not support shuffles that perform aggregation
    assert(!canUseSerializedShuffle(shuffleDep(
      partitioner = new HashPartitioner(2),
      serializer = kryo,
      keyOrdering = None,
      aggregator = Some(mock(classOf[Aggregator[Any, Any, Any]])),
      mapSideCombine = false
    )))
    assert(!canUseSerializedShuffle(shuffleDep(
      partitioner = new HashPartitioner(2),
      serializer = kryo,
      keyOrdering = Some(mock(classOf[Ordering[Any]])),
      aggregator = Some(mock(classOf[Aggregator[Any, Any, Any]])),
      mapSideCombine = true
    )))
  }
}
| akopich/spark | core/src/test/scala/org/apache/spark/shuffle/sort/SortShuffleManagerSuite.scala | Scala | apache-2.0 | 4,665 |
package fly.play.s3
import java.util.Date
import scala.collection.JavaConversions.asScalaBuffer
import scala.collection.JavaConversions.mapAsScalaMap
import scala.concurrent.Future
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.libs.ws.Response
import fly.play.s3.acl.ACLList
import scala.xml.Elem
import play.api.libs.json.JsValue
import play.api.libs.json.JsObject
import fly.play.s3.upload.PolicyBuilder
import fly.play.s3.upload.Condition
/**
* Representation of a bucket
*
* @param bucketName The name of the bucket needed to create a Bucket representation
* @param delimiter A delimiter to use for this Bucket instance, default is a / (slash)
*
*/
case class Bucket(
  name: String,
  delimiter: Option[String] = Some("/"),
  s3: S3) {
  /**
   * Creates an authenticated url for an item with the given name
   *
   * @param itemName The item for which the url should be created
   * @param expires The expiration in seconds from now
   */
  def url(itemName: String, expires: Long, method: String = "GET", contentMD5: String = "", contentType: String = ""): String =
    s3.url(name, itemName, ((new Date).getTime / 1000) + expires, method, contentMD5, contentType)
  /**
   * Creates an unsigned url for the given item name
   *
   * @param itemName The item for which the url should be created
   */
  def url(itemName: String): String =
    s3.url(name, itemName)
  /**
   * Utility method to create a policy builder for this bucket
   *
   * @param expiration The date this policy expires
   */
  def uploadPolicy(expiration: Date): PolicyBuilder =
    PolicyBuilder(name, expiration)(s3.s3Signer)
  /**
   * Retrieves a single item with the given name
   *
   * @param itemName The name of the item you want to retrieve
   */
  def get(itemName: String): Future[BucketFile] =
    s3.get(name, Some(itemName), None, None) map S3Response { (status, response) =>
      val headers = extractHeaders(response)
      BucketFile(itemName,
        headers("Content-Type"),
        response.ahcResponse.getResponseBodyAsBytes,
        None,
        Some(headers))
    }
  /**
   * Lists the contents of the bucket
   */
  def list: Future[Iterable[BucketItem]] =
    s3.get(name, None, None, delimiter) map listResponse
  /**
   * Lists the contents of a 'directory' in the bucket
   */
  def list(prefix: String): Future[Iterable[BucketItem]] =
    s3.get(name, None, Some(prefix), delimiter) map listResponse
  /**
   * @see add
   */
  def + = add _
  /**
   * Adds a file to this bucket
   *
   * @param bucketFile A representation of the file
   */
  def add(bucketFile: BucketFile): Future[Unit] =
    s3.put(name, bucketFile) map unitResponse
  /**
   * @see remove
   */
  def - = remove _
  /**
   * Removes a file from this bucket
   *
   * @param itemName The name of the file that needs to be removed
   */
  def remove(itemName: String): Future[Unit] =
    s3.delete(name, itemName) map unitResponse
  /**
   * Creates a new instance of the Bucket with another delimiter
   */
  def withDelimiter(delimiter: String): Bucket = copy(delimiter = Some(delimiter))
  /**
   * Creates a new instance of the Bucket with another delimiter
   */
  def withDelimiter(delimiter: Option[String]): Bucket = copy(delimiter = delimiter)
  /**
   * Allows you to rename a file within this bucket. It will actually do a copy and
   * a remove.
   *
   * @param sourceItemName The old name of the item
   * @param destinationItemName The new name of the item
   * @param acl The ACL for the new item, default is PUBLIC_READ
   */
  def rename(sourceItemName: String, destinationItemName: String, acl: ACL = PUBLIC_READ): Future[Unit] = {
    val copyResult = s3.putCopy(name, sourceItemName, name, destinationItemName, acl) map unitResponse
    // Only remove the source once the copy has completed successfully.
    copyResult.flatMap { response =>
      remove(sourceItemName)
    }
  }
  /**
   * Initiates a multipart upload
   *
   * @param bucketFile A representation of the file
   *
   * @return The upload id
   */
  def initiateMultipartUpload(bucketFile: BucketFile): Future[BucketFileUploadTicket] = {
    val multipartUpload = s3.initiateMultipartUpload(name, bucketFile)
    multipartUpload map S3Response { (status, response) =>
      val uploadId = (response.xml \\ "UploadId").text
      BucketFileUploadTicket(bucketFile.name, uploadId)
    }
  }
  /**
   * Aborts a multipart upload
   *
   * @param uploadTicket The ticket acquired from initiateMultipartUpload
   *
   */
  def abortMultipartUpload(uploadTicket: BucketFileUploadTicket): Future[Unit] =
    s3.abortMultipartUpload(name, uploadTicket) map unitResponse
  /**
   * Uploads a part in the multipart upload
   *
   * @param uploadTicket The ticket acquired from initiateMultipartUpload
   * @param bucketFilePart The part that you want to upload
   */
  def uploadPart(uploadTicket: BucketFileUploadTicket, bucketFilePart: BucketFilePart): Future[BucketFilePartUploadTicket] = {
    val uploadPart = s3.uploadPart(name, uploadTicket, bucketFilePart)
    uploadPart map S3Response { (status, response) =>
      // The returned ETag identifies the uploaded part when completing the upload.
      val headers = extractHeaders(response)
      BucketFilePartUploadTicket(bucketFilePart.partNumber, headers("ETag"))
    }
  }
  /**
   * Completes a multipart upload
   *
   * @param uploadTicket The ticket acquired from initiateMultipartUpload
   * @param partUploadTickets The tickets acquired from uploadPart
   */
  def completeMultipartUpload(uploadTicket: BucketFileUploadTicket, partUploadTickets: Seq[BucketFilePartUploadTicket]): Future[Unit] =
    s3.completeMultipartUpload(name, uploadTicket, partUploadTickets) map unitResponse
  /**
   * Updates the ACL of given item
   *
   * @param itemName The name of file that needs to be updated
   * @param acl The ACL
   */
  def updateAcl(itemName: String, acl: ACL): Future[Unit] =
    s3.putAcl(name, itemName, acl) map unitResponse
  /**
   * Retrieves the ACL
   *
   * @param itemName The name of the file that you want to retrieve the ACL for
   */
  def getAcl(itemName: String): Future[ACLList] =
    s3.getAcl(name, itemName) map aclListResponse
  // Collects single-valued response headers: the first value of each header
  // that has at least one value.
  private def extractHeaders(response: Response) = {
    for {
      (key, value) <- response.ahcResponse.getHeaders.toMap
      if (value.size > 0)
    } yield key -> value.head
  }
  // Parses a bucket listing: <Contents> entries are files, <CommonPrefixes>
  // entries are virtual 'directories'.
  private def listResponse =
    S3Response { (status, response) =>
      val xml = response.xml
      /* files */ (xml \\ "Contents").map(n => BucketItem((n \\ "Key").text, false)) ++
      /* folders */ (xml \\ "CommonPrefixes").map(n => BucketItem((n \\ "Prefix").text, true))
    } _
  // Parses the <AccessControlList> element out of an ACL response.
  private def aclListResponse =
    S3Response { (status, response) =>
      val xml = response.xml
      ACLList((xml \\ "AccessControlList").head.asInstanceOf[Elem])
    } _
  // Discards the response body; used where only success/failure matters.
  private def unitResponse = S3Response { (status, response) => } _
}
/**
 * Representation of an element in a bucket as the result of a call to the list method
 */
case class BucketItem(name: String, isVirtual: Boolean)
/**
 * Representation of a file, used in get and add methods of the bucket
 */
case class BucketFile(name: String, contentType: String, content: Array[Byte] = Array.empty, acl: Option[ACL] = None, headers: Option[Map[String, String]] = None)
/** Identifies an in-progress multipart upload (returned by initiateMultipartUpload). */
case class BucketFileUploadTicket(name: String, uploadId: String)
/** A single chunk of a multipart upload. */
case class BucketFilePart(partNumber: Int, content: Array[Byte])
/** Receipt for an uploaded part; needed to complete the multipart upload. */
case class BucketFilePartUploadTicket(partNumber: Int, eTag: String) {
  // XML fragment used in the CompleteMultipartUpload request body.
  def toXml = <Part><PartNumber>{ partNumber }</PartNumber><ETag>{ eTag }</ETag></Part>
}
// Canned S3 access control lists.
case object PRIVATE extends ACL("private")
case object PUBLIC_READ extends ACL("public-read")
case object PUBLIC_READ_WRITE extends ACL("public-read-write")
case object AUTHENTICATED_READ extends ACL("authenticated-read")
case object BUCKET_OWNER_READ extends ACL("bucket-owner-read")
case object BUCKET_OWNER_FULL_CONTROL extends ACL("bucket-owner-full-control")
| judu/play-s3 | app/fly/play/s3/Bucket.scala | Scala | mit | 7,932 |
object Test extends App {
  // Checks a lambda compiled to an anonymous function CLASS: its superclass must
  // be the specialized variant iff `sp` is set (delambdafy:method lambdas are
  // exempt), and its serialVersionUID field must be 0.
  def testAnonFunClass(o: AnyRef, sp: Boolean = false) = {
    val isSpecialized = o.getClass.getSuperclass.getName contains "$sp"
    val isDelambdafyMethod = o.getClass.getName contains "$lambda$"
    assert(
      // delambdafy:method doesn't currently emit specialized anonymous function classes
      if (sp) (isSpecialized || isDelambdafyMethod) else !isSpecialized,
      o.getClass.getName)
    val Some(f) = o.getClass.getDeclaredFields.find(_.getName == "serialVersionUID")
    f.setAccessible(true)
    assert(f.getLong(null) == 0L)
  }
  // Checks an indy (LambdaMetafactory) lambda: it must implement the specialized
  // function interface iff `sp` is set.
  def testIndyLambda(o: AnyRef, sp: Boolean = false) = {
    val isSpecialized = o.getClass.getInterfaces.exists(_.getName contains "$sp")
    assert(sp == isSpecialized, o.getClass.getName)
  }
  // Function0/Function1/Function2 are specialized only for certain type combinations.
  testIndyLambda(() => (), sp = true)
  testIndyLambda(() => 1, sp = true)
  testIndyLambda(() => "")
  testIndyLambda((x: Int) => x, sp = true)
  testIndyLambda((x: Boolean) => x)
  testIndyLambda((x: Int) => "")
  testIndyLambda((x1: Int, x2: Int) => 0d, sp = true)
  testIndyLambda((x1: Int, x2: AnyRef) => 0d)
  testIndyLambda((x1: Any, x2: Any) => x1)
  // Function3 .. Function22 have no specialized variants.
  // scala> println((for (i <- 3 to 22) yield (for (j <- 1 to i) yield s"x$j: Int").mkString(" testIndyLambda((", ", ", ") => x1)")).mkString("\\n"))
  testIndyLambda((x1: Int, x2: Int, x3: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int, x11: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int, x11: Int, x12: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int, x11: Int, x12: Int, x13: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int, x11: Int, x12: Int, x13: Int, x14: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int, x11: Int, x12: Int, x13: Int, x14: Int, x15: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int, x11: Int, x12: Int, x13: Int, x14: Int, x15: Int, x16: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int, x11: Int, x12: Int, x13: Int, x14: Int, x15: Int, x16: Int, x17: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int, x11: Int, x12: Int, x13: Int, x14: Int, x15: Int, x16: Int, x17: Int, x18: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int, x11: Int, x12: Int, x13: Int, x14: Int, x15: Int, x16: Int, x17: Int, x18: Int, x19: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int, x11: Int, x12: Int, x13: Int, x14: Int, x15: Int, x16: Int, x17: Int, x18: Int, x19: Int, x20: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int, x11: Int, x12: Int, x13: Int, x14: Int, x15: Int, x16: Int, x17: Int, x18: Int, x19: Int, x20: Int, x21: Int) => x1)
  testIndyLambda((x1: Int, x2: Int, x3: Int, x4: Int, x5: Int, x6: Int, x7: Int, x8: Int, x9: Int, x10: Int, x11: Int, x12: Int, x13: Int, x14: Int, x15: Int, x16: Int, x17: Int, x18: Int, x19: Int, x20: Int, x21: Int, x22: Int) => x1)
  // PartialFunction literals still compile to anonymous function classes.
  testAnonFunClass({
    case x: Int => x
  }: PartialFunction[Int, Int], sp = true)
  testAnonFunClass({
    case x: Int => x
  }: PartialFunction[Any, Any])
  testAnonFunClass({
    case x: Int => ()
  }: PartialFunction[Int, Unit], sp = true)
  testAnonFunClass({
    case x: String => 1
  }: PartialFunction[String, Int])
  testAnonFunClass({
    case x: String => ()
  }: PartialFunction[String, Unit])
  testAnonFunClass({
    case x: String => x
  }: PartialFunction[String, String])
}
| scala/scala | test/files/run/t8960.scala | Scala | apache-2.0 | 4,630 |
// GENERATED CODE: DO NOT EDIT
package org.usagram.clarify
// NOTE: generated file (see the header above) -- change the generator, not this file.
case class Validity2[+V1, +V2](_1: Definite[V1], _2: Definite[V2])
  extends Validity with Product2[Definite[V1], Definite[V2]] {
  // The element validities in positional order.
  val values = Seq(_1, _2)
  /**
   * Applies `resolve` to the two underlying values when everything is valid;
   * otherwise throws InvalidValueException carrying the invalid values.
   */
  def resolve[R](resolve: (V1, V2) => R): R =
    if (isValid) {
      resolve(_1.value, _2.value)
    }
    else {
      throw new InvalidValueException(invalidValues)
    }
}
| takkkun/clarify | core/src/main/scala/org/usagram/clarify/Validity2.scala | Scala | mit | 398 |
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package swave.core.text
import scala.concurrent.duration._
import scodec.bits.ByteVector
import swave.compat.scodec._
import swave.core._
import swave.core.util._
class TextTransformationSpec extends SwaveSpec {
  implicit val env = StreamEnv()
  import env.defaultDispatcher
  // Exercise text containing plenty of non-ASCII characters; CRLFs are
  // normalized to LF so the expected values are platform-independent.
  val largeText =
    """Es war einmal, zur Zeit t=t0, ein armer rechtschaffener Vierpol namens Eddy Wirbelstrom. Er bewohnte einen
      |bescheiden mΓΆbilierten Hohlraum im Dielektrikum mit flieΓend kalten und warmen SΓ€ttigungsstrom. Leider muΓte er
      |wΓ€hrend der kalten Jahreszeit fΓΌr die ErwΓ€rmung der Sperrschicht noch extra bezahlen. Seinen Lebensunterhalt
      |bestritt er mit einer VerstΓ€rkerzucht auf Transistorbasis.
      |
      |Eddy Wirbelstrom liebte mit der ganzen Kraft seiner Γbertragungsfunktion - Ionchen!
      |
      |Die induktivste Spule mit dem kleinsten Verlustwinkel im ganzen Kreise und Tochter der einfluΓreichen EMKs.
      |Ihr remanenter FerritkΓΆrper, ihre symmetrischen Netzintegrale, ihre ΓΌberaus harmonischen Oberwellen - besonders
      |der Sinus - beeindruckten selbst die SuszeptibilitΓ€t ausgedienter Leidener Flaschen, was viel heiΓen will.
      |Die jungfrΓ€ulichen Kurven Ionchens waren auch wirklich sehr steil.
      |
      |Ionchens Vater, Cosinus Phi, ein bekannter industrieller Leistungsfaktor, hatte allerdings bereits konkrete
      |SchaltplΓ€ne fΓΌr die Zukunft t >> t0 seiner Tochter. Sie sollte nur einer anerkannten KapazitΓ€t mit
      |ausgeprΓ€gtem Nennwert angeschlossen werden, aber wie so oft wΓ€hrend der Lebensdauer L hatte auch diese Masche
      |einen Knoten, denn der Zufallstrieb wollte es anders.
      |
      |Als Ionchen eines Tages, zur Zeit t=t1, auf ihrem Picofarad vom Frisiersalon nach Hause fuhr (sie hatte sich eine
      |neue Stehwelle legen lassen), da geriet ihr ein SΓ€gezahn in die Siebkette. Aber Eddy Wirbelstrom, der die Gegend
      |periodisch frequentierte, eilte mit minimaler Laufzeit hinzu, und es gelang ihm, Ionchens Kippschwingung noch
      |vor dem Maximum der Amplitude abzufangen, gleichzurichten und so die Resonanzkatastrophe zu verhindern.
      |
      |Es ist sicherlich nicht dem Zufall z1 zuzuschreiben, daΓ sie sich schon zur Zeit t = t1 + dt wiedersahen.
      |Eddy lud Ionchen zum Abendessen ins "Goldene Integral" ein. Aber das Integral war wie immer geschlossen.
      |"Macht nichts", sagte Ionchen, "ich habe zu Mittag gut gegessen und die SΓ€ttigungsinduktion hat bis jetzt
      |angehalten. AuΓerdem muΓ ich auf meine Feldlinie achten." Unter irgendeinem Vorwand lud Eddy sie dann zu einer
      |Rundfahrt im Rotor ein. Aber Ionchen lehnte ab: "Mir wird bei der zweiten Ableitung immer so leicht ΓΌbel."
      |So unternahmen sie, ganz entgegen den SchaltplΓ€nen von Vater Cosinus Phi, einen kleinen Frequenzgang entlang dem
      |nahegelegenen StreufluΓ.
      |
      |Der Abend senkte sich ΓΌber die komplexe Ebene und im imaginΓ€ren Raum erglΓ€nzten die Sternschaltungen.
      |Eddy und Ionchen genossen die Isolierung vom lauten Getriebe der Welt und lieΓen ihre Blicke gegen 0 laufen.
      |Ein einsamer Modulationsbrummer flog vorbei, sanft plΓ€tscherten die elektromagnetischen Wellen und leise sang
      |eine EntstΓΆrdrossel.
      |
      |Als sie an der Wheatston-BrΓΌcke angelangt waren, dort, wo der Blindstrom in den StreufluΓ mΓΌndet, lehnten sie
      |sich ans Gitter. Da nahm Eddy Wirbelstrom seinen ganzen Durchgriff zusammen und emittierte: "Bei GauΓ!",
      |worauf Ionchen hauchte: "Deine lose RΓΌckkopplung hat es mir angetan." Ihr Kilohertz schlug heftig.
      |
      |Der Informationsgehalt dieser Nachricht durchflutete Eddy. Die Summe ΓΌber alle Theta, von Theta = 0 bis zu diesem
      |Ereignis war zu konvergent und beide entglitten der Kontrolle ihrer Zeitkonstanten.
      |Im Γberschwange des jungen GlΓΌcks erreichten sie vollausgesteuert die Endstufen.
      |Und wenn sie nicht gedΓ€mpft wurden, so schwingen sie heute noch.""".stripMargin.replace("\\r\\n", "\\n")
  "TextTransformations" - {
    // Each test re-chunks the input into randomly sized pieces (up to 32 elements)
    // to exercise the transformation across arbitrary chunk boundaries.
    "utf8decode" in {
      for (_ <- 1 to 10) {
        val random = XorShiftRandom()
        Spout(largeText.getBytes(UTF8).iterator)
          .injectSequential()
          .flatMap(_.take(random.nextLong(32)).drainToVector(32).map(ByteVector(_)))
          .utf8Decode
          .async()
          .drainToMkString(1000)
          .await(3.seconds) shouldEqual largeText
      }
    }
    "utf8encode" in {
      for (_ <- 1 to 10) {
        val random = XorShiftRandom()
        Spout(largeText.iterator)
          .injectSequential()
          .flatMap(_.take(random.nextLong(32)).drainToMkString(32))
          .utf8Encode
          .async()
          .drainFolding(ByteVector.empty)(_ ++ _)
          .await() shouldEqual ByteVector(largeText getBytes UTF8)
      }
    }
    "lines" in {
      for (_ <- 1 to 1) {
        val random = XorShiftRandom()
        Spout(largeText.iterator)
          .injectSequential()
          .flatMap(_.take(random.nextLong(32)).drainToMkString(32))
          .lines
          .async()
          .drainToVector(1000)
          .await() shouldEqual largeText.split('\\n')
      }
    }
  }
}
| sirthias/swave | core-tests/src/test/scala/swave/core/text/TextTransformationSpec.scala | Scala | mpl-2.0 | 5,412 |
package org.scalawiki.dto
import java.time.ZonedDateTime
import org.apache.commons.codec.digest.DigestUtils
case class Revision(
  revId: Option[Long] = None,
  pageId: Option[Long] = None,
  parentId: Option[Long] = None,
  user: Option[Contributor] = None,
  timestamp: Option[ZonedDateTime] = None,
  comment: Option[String] = None,
  content: Option[String] = None,
  size: Option[Long] = None,
  sha1: Option[String] = None,
  textId: Option[Long] = None
) {
  // def this(revId: Int, parentId: Option[Int] = None, user: Option[Contributor] = None, timestamp: Option[DateTime] = None,
  //          comment: Option[String] = None, content: Option[String] = None, size: Option[Int] = None, sha1: Option[String] = None) = {
  //   this(revId.toLong, parentId.map(_.toLong), user, timestamp, comment, content, size, sha1)
  // }
  /** Alias for revId. */
  def id = revId
  // A parent id of 0 marks the first revision of a page.
  def isNewPage = parentId.contains(0)
  /** Returns a copy whose content is the given lines joined with newlines. */
  def withContent(content: String*) = copy(content = Some(content.mkString("\\n")))
  /** Same as withContent. */
  def withText(text: String*) = copy(content = Some(text.mkString("\\n")))
  def withIds(revId: Long, parentId: Long = 0) = copy(revId = Some(revId), parentId = Some(parentId))
  def withUser(userId: Long, login: String) = copy(user = Some(new User(Some(userId), Some(login))))
  def withComment(comment: String) = copy(comment = Some(comment))
  def withTimeStamp(timestamp: ZonedDateTime = ZonedDateTime.now) = copy(timestamp = Some(timestamp))
  /** Drops the revision text, keeping only the metadata. */
  def withoutContent = copy(content = None)
}
/** Factory helpers for building [[Revision]] instances. */
object Revision {

  /**
   * Builds a revision history from newest to oldest: the first text gets the
   * highest revision id and each revision's parentId points at the next one.
   */
  def many(texts: String*) = {
    val newest = texts.size
    texts.zipWithIndex.map { case (text, i) =>
      val revId = newest - i
      new Revision(
        revId = Some(revId),
        pageId = Some(1L),
        parentId = Some(revId - 1),
        content = Some(text),
        size = Some(text.length),
        sha1 = Some(DigestUtils.sha1Hex(text))
      )
    }
  }

  /** Wraps a single text in a revision carrying its size and SHA-1 digest. */
  def one(text: String) =
    new Revision(
      content = Some(text),
      size = Some(text.length),
      sha1 = Some(DigestUtils.sha1Hex(text))
    )

  /** Convenience constructor from bare revision and page ids. */
  def apply(revId: Long, pageId: Long) = new Revision(revId = Some(revId), pageId = Some(pageId))
}
| intracer/scalawiki | scalawiki-core/src/main/scala/org/scalawiki/dto/Revision.scala | Scala | apache-2.0 | 2,333 |
/**
* Copyright (c) 2002-2012 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.pipes
import org.neo4j.cypher.internal.symbols._
import org.neo4j.cypher.internal.commands.expressions.Expression
class ExtractPipe(source: Pipe, val expressions: Map[String, Expression]) extends PipeWithSource(source) {
  // Symbol table of the source extended with one typed identifier per expression.
  val symbols: SymbolTable = {
    val extracted = for ((name, expression) <- expressions)
      yield name -> expression.getType(source.symbols)
    source.symbols.add(extracted)
  }

  // For every row produced by the source, evaluate each expression and store
  // the result under its name before passing the row on.
  def createResults(state: QueryState) = source.createResults(state).map { row =>
    for ((name, expression) <- expressions)
      row += name -> expression(row)
    row
  }

  override def executionPlan(): String =
    source.executionPlan() + "\\r\\nExtract([" + source.symbols.keys.mkString(",") + "] => [" + expressions.keys.mkString(", ") + "])"

  // Verify every expression type-checks against the given symbol table.
  def assertTypes(symbols: SymbolTable) {
    expressions.values.foreach(_.assertTypes(symbols))
  }
}
| dksaputra/community | cypher/src/main/scala/org/neo4j/cypher/internal/pipes/ExtractPipe.scala | Scala | gpl-3.0 | 1,746 |
package controllers
import play.api.mvc._
import play.twirl.api.Html
import lila.api.Context
import lila.app._
import lila.game.{ GameRepo, Game => GameModel, Pov }
import views._
/** Controller for lichess TV: featured games per channel, game grids, embeds and streamer pages. */
object Tv extends LilaController {

  // Default TV page: the "Best" channel
  def index = onChannel(lila.tv.Tv.Channel.Best.key)

  // TV page for a specific channel key; 404 when the key is unknown
  def onChannel(chanKey: String) = Open { implicit ctx =>
    (lila.tv.Tv.Channel.byKey get chanKey).fold(notFound)(lichessTv)
  }

  // Side panel (champions + crosstable) for a game shown on a channel
  def sides(chanKey: String, gameId: String, color: String) = Open { implicit ctx =>
    lila.tv.Tv.Channel.byKey get chanKey match {
      case None => notFound
      case Some(channel) =>
        OptionFuResult(GameRepo.pov(gameId, color)) { pov =>
          Env.tv.tv.getChampions zip
            Env.game.crosstableApi(pov.game) map {
              case (champions, crosstable) => Ok(html.tv.sides(channel, champions, pov, crosstable, streams = Nil))
            }
        }
    }
  }

  // Fetches the channel's current game and renders the TV page,
  // negotiating between HTML and the mobile API representation
  private def lichessTv(channel: lila.tv.Tv.Channel)(implicit ctx: Context) =
    OptionFuResult(Env.tv.tv getGame channel) { game =>
      val flip = getBool("flip") // ?flip=1 shows the board from the second player's side
      val pov = flip.fold(Pov second game, Pov first game)
      val onTv = lila.round.OnTv(channel.key, flip)
      negotiate(
        html = {
          Env.api.roundApi.watcher(pov, lila.api.Mobile.Api.currentVersion, tv = onTv.some) zip
            Env.game.crosstableApi(game) zip
            Env.tv.tv.getChampions map {
              case ((data, cross), champions) =>
                Ok(html.tv.index(channel, champions, pov, data, cross, flip))
            }
        },
        api = apiVersion => Env.api.roundApi.watcher(pov, apiVersion, tv = onTv.some) map { Ok(_) }
      )
    }

  // Grid of current games for the "Best" channel
  def games = gamesChannel(lila.tv.Tv.Channel.Best.key)

  def gamesChannel(chanKey: String) = Open { implicit ctx =>
    (lila.tv.Tv.Channel.byKey get chanKey).fold(notFound)(lichessGames)
  }

  // Renders up to 9 current games of the channel
  private def lichessGames(channel: lila.tv.Tv.Channel)(implicit ctx: Context) =
    Env.tv.tv.getChampions zip
      Env.tv.tv.getGames(channel, 9) map {
        case (champs, games) =>
          Ok(html.tv.games(channel, games map lila.game.Pov.first, champs))
      }

  // Page for a single live stream; 404 when no stream with this id is on air
  def streamIn(id: String) = Open { implicit ctx =>
    Env.tv.streamsOnAir flatMap { streams =>
      streams find (_.id == id) match {
        case None => notFound
        case Some(s) => fuccess(Ok(html.tv.stream(s, streams filterNot (_.id == id))))
      }
    }
  }

  // Server-sent events stream of TV updates (text/event-stream)
  def streamOut = Action.async {
    import makeTimeout.short
    import akka.pattern.ask
    import lila.round.TvBroadcast
    import play.api.libs.EventSource
    implicit val encoder = play.api.libs.Comet.CometMessage.jsonMessages
    Env.round.tvBroadcast ? TvBroadcast.GetEnumerator mapTo
      manifest[TvBroadcast.EnumeratorType] map { enum =>
        Ok.chunked(enum &> EventSource()).as("text/event-stream")
      }
  }

  // Shows the streamer list edit form (authenticated users)
  def streamConfig = Auth { implicit ctx =>
    me =>
      Env.tv.streamerList.store.get.map { text =>
        Ok(html.tv.streamConfig(Env.tv.streamerList.form.fill(text)))
      }
  }

  // Saves the streamer list; requires the StreamConfig permission and logs the change
  def streamConfigSave = SecureBody(_.StreamConfig) { implicit ctx =>
    me =>
      implicit val req = ctx.body
      FormFuResult(Env.tv.streamerList.form) { err =>
        fuccess(html.tv.streamConfig(err))
      } { text =>
        Env.tv.streamerList.store.set(text) >>
          Env.mod.logApi.streamConfig(me.username) inject Redirect(routes.Tv.streamConfig)
      }
  }

  // Javascript snippet third-party sites include to embed the TV iframe
  def embed = Action { req =>
    Ok {
      val bg = get("bg", req) | "light"
      val theme = get("theme", req) | "brown"
      val url = s"""${req.domain + routes.Tv.frame}?bg=$bg&theme=$theme"""
      s"""document.write("<iframe src='http://$url&embed=" + document.domain + "' class='lichess-tv-iframe' allowtransparency='true' frameBorder='0' style='width: 224px; height: 264px;' title='Lichess free online chess'></iframe>");"""
    } as JAVASCRIPT withHeaders (CACHE_CONTROL -> "max-age=86400")
  }

  // Content of the embedded iframe: the current best game, or 404 when none
  def frame = Action.async { req =>
    Env.tv.tv.getBest map {
      case None => NotFound
      case Some(game) => Ok(views.html.tv.embed(
        Pov first game,
        get("bg", req) | "light",
        lila.pref.Theme(~get("theme", req)).cssClass
      ))
    }
  }
}
| TangentialAlan/lila | app/controllers/Tv.scala | Scala | mit | 4,177 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api.test
import java.util.Properties
import java.lang.{Integer, IllegalArgumentException}
import org.apache.kafka.clients.producer._
import org.scalatest.junit.JUnit3Suite
import org.junit.Test
import org.junit.Assert._
import kafka.server.{KafkaConfig, KafkaServer}
import kafka.utils.{Utils, TestUtils}
import kafka.zk.ZooKeeperTestHarness
import kafka.consumer.SimpleConsumer
import kafka.api.FetchRequestBuilder
import kafka.message.Message
/**
 * Integration tests for the new producer's send() API, run against a local
 * two-broker cluster configured with four partitions per topic.
 */
class ProducerSendTest extends JUnit3Suite with ZooKeeperTestHarness {
  private val brokerId1 = 0
  private val brokerId2 = 1
  // two free ports, one per broker
  private val ports = TestUtils.choosePorts(2)
  private val (port1, port2) = (ports(0), ports(1))
  private var server1: KafkaServer = null
  private var server2: KafkaServer = null
  private var servers = List.empty[KafkaServer]

  private var consumer1: SimpleConsumer = null
  private var consumer2: SimpleConsumer = null

  private val props1 = TestUtils.createBrokerConfig(brokerId1, port1)
  private val props2 = TestUtils.createBrokerConfig(brokerId2, port2)
  props1.put("num.partitions", "4")
  props2.put("num.partitions", "4")
  private val config1 = new KafkaConfig(props1)
  private val config2 = new KafkaConfig(props2)

  private val topic = "topic"
  private val numRecords = 100

  override def setUp() {
    super.setUp()

    // set up 2 brokers with 4 partitions each
    server1 = TestUtils.createServer(config1)
    server2 = TestUtils.createServer(config2)
    servers = List(server1,server2)

    // TODO: we need to migrate to new consumers when 0.9 is final
    consumer1 = new SimpleConsumer("localhost", port1, 100, 1024*1024, "")
    consumer2 = new SimpleConsumer("localhost", port2, 100, 1024*1024, "")
  }

  override def tearDown() {
    server1.shutdown
    server2.shutdown
    // remove broker log directories so each test starts from a clean slate
    Utils.rm(server1.config.logDirs)
    Utils.rm(server2.config.logDirs)
    super.tearDown()
  }

  // Callback that fails the enclosing test if any send completed with an exception
  class CheckErrorCallback extends Callback {
    def onCompletion(metadata: RecordMetadata, exception: Exception) {
      if (exception != null)
        fail("Send callback returns the following exception", exception)
    }
  }

  /**
   * testSendOffset checks the basic send API behavior
   *
   * 1. Send with null key/value/partition-id should be accepted; send with null topic should be rejected.
   * 2. Last message of the non-blocking send should return the correct offset metadata
   */
  @Test
  def testSendOffset() {
    var producer = TestUtils.createNewProducer(TestUtils.getBrokerListStrFromConfigs(Seq(config1, config2)))
    val callback = new CheckErrorCallback

    try {
      // create topic
      TestUtils.createTopic(zkClient, topic, 1, 2, servers)

      // send a normal record
      val record0 = new ProducerRecord(topic, new Integer(0), "key".getBytes, "value".getBytes)
      assertEquals("Should have offset 0", 0L, producer.send(record0, callback).get.offset)

      // send a record with null value should be ok
      val record1 = new ProducerRecord(topic, new Integer(0), "key".getBytes, null)
      assertEquals("Should have offset 1", 1L, producer.send(record1, callback).get.offset)

      // send a record with null key should be ok
      val record2 = new ProducerRecord(topic, new Integer(0), null, "value".getBytes)
      assertEquals("Should have offset 2", 2L, producer.send(record2, callback).get.offset)

      // send a record with null part id should be ok
      val record3 = new ProducerRecord(topic, null, "key".getBytes, "value".getBytes)
      assertEquals("Should have offset 3", 3L, producer.send(record3, callback).get.offset)

      // send a record with null topic should fail
      try {
        val record4 = new ProducerRecord(null, new Integer(0), "key".getBytes, "value".getBytes)
        producer.send(record4, callback)
        fail("Should not allow sending a record without topic")
      } catch {
        case iae: IllegalArgumentException => // this is ok
        case e: Throwable => fail("Only expecting IllegalArgumentException", e)
      }

      // non-blocking send a list of records
      for (i <- 1 to numRecords)
        producer.send(record0)

      // check that all messages have been acked via offset
      assertEquals("Should have offset " + (numRecords + 4), numRecords + 4L, producer.send(record0, callback).get.offset)

    } finally {
      if (producer != null) {
        producer.close()
        producer = null
      }
    }
  }

  /**
   * testClose checks the closing behavior
   *
   * After close() returns, all messages should be sent with correct returned offset metadata
   */
  @Test
  def testClose() {
    var producer = TestUtils.createNewProducer(TestUtils.getBrokerListStrFromConfigs(Seq(config1, config2)))

    try {
      // create topic
      TestUtils.createTopic(zkClient, topic, 1, 2, servers)

      // non-blocking send a list of records
      val record0 = new ProducerRecord(topic, null, "key".getBytes, "value".getBytes)
      for (i <- 1 to numRecords)
        producer.send(record0)
      val response0 = producer.send(record0)

      // close the producer
      producer.close()
      producer = null

      // check that all messages have been acked via offset,
      // this also checks that messages with same key go to the same partition
      assertTrue("The last message should be acked before producer is shutdown", response0.isDone)
      assertEquals("Should have offset " + numRecords, numRecords.toLong, response0.get.offset)

    } finally {
      if (producer != null) {
        producer.close()
        producer = null
      }
    }
  }

  /**
   * testSendToPartition checks the partitioning behavior
   *
   * The specified partition-id should be respected
   */
  @Test
  def testSendToPartition() {
    var producer = TestUtils.createNewProducer(TestUtils.getBrokerListStrFromConfigs(Seq(config1, config2)))

    try {
      // create topic
      val leaders = TestUtils.createTopic(zkClient, topic, 2, 2, servers)
      val partition = 1

      // make sure leaders exist
      val leader1 = leaders(partition)
      assertTrue("Leader for topic \\"topic\\" partition 1 should exist", leader1.isDefined)

      val responses =
        for (i <- 1 to numRecords)
        yield producer.send(new ProducerRecord(topic, partition, null, ("value" + i).getBytes))
      val futures = responses.toList
      futures.map(_.get)
      for (future <- futures)
        assertTrue("Request should have completed", future.isDone)

      // make sure all of them end up in the same partition with increasing offset values
      for ((future, offset) <- futures zip (0 until numRecords)) {
        assertEquals(offset.toLong, future.get.offset)
        assertEquals(topic, future.get.topic)
        assertEquals(partition, future.get.partition)
      }

      // make sure the fetched messages also respect the partitioning and ordering
      val fetchResponse1 = if(leader1.get == server1.config.brokerId) {
        consumer1.fetch(new FetchRequestBuilder().addFetch(topic, partition, 0, Int.MaxValue).build())
      } else {
        consumer2.fetch(new FetchRequestBuilder().addFetch(topic, partition, 0, Int.MaxValue).build())
      }
      val messageSet1 = fetchResponse1.messageSet(topic, partition).iterator.toBuffer
      assertEquals("Should have fetched " + numRecords + " messages", numRecords, messageSet1.size)

      // TODO: also check topic and partition after they are added in the return messageSet
      for (i <- 0 to numRecords - 1) {
        assertEquals(new Message(bytes = ("value" + (i + 1)).getBytes), messageSet1(i).message)
        assertEquals(i.toLong, messageSet1(i).offset)
      }

    } finally {
      if (producer != null) {
        producer.close()
        producer = null
      }
    }
  }

  /**
   * testAutoCreateTopic
   *
   * The topic should be created upon sending the first message
   */
  @Test
  def testAutoCreateTopic() {
    var producer = TestUtils.createNewProducer(TestUtils.getBrokerListStrFromConfigs(Seq(config1, config2)),
                                               retries = 5)

    try {
      // Send a message to auto-create the topic
      val record = new ProducerRecord(topic, null, "key".getBytes, "value".getBytes)
      assertEquals("Should have offset 0", 0L, producer.send(record).get.offset)

      // double check that the topic is created with leader elected
      TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, 0)

    } finally {
      if (producer != null) {
        producer.close()
        producer = null
      }
    }
  }
} | stealthly/kafka | core/src/test/scala/integration/kafka/api/ProducerSendTest.scala | Scala | apache-2.0 | 9,345 |
package com.containant.heuristics
import scala.reflect.ClassTag
import scala.collection.mutable
import com.containant.LBNF
/**
 * Heuristic that samples random syntax trees from the grammar and keeps
 * the fittest tree encountered.
 */
trait RandomHeuristic extends Heuristic {

  // number of random trees to sample
  val _iterations: Int = 100
  // depth beyond which only rules with at most one child may be chosen
  val _recursionDepth: Int = 10

  // fixed seed keeps runs reproducible
  val RNG: java.util.Random = new java.util.Random(0xDEADBEEF)

  def apply(tgrammar: LBNF)(
    tfitness: tgrammar.SyntaxTree => Double,
    target: tgrammar.Sort
  ): Option[tgrammar.SyntaxTree] = {
    // the casts bridge the path-dependent types tgrammar.* and
    // process.grammar.*, which refer to the same object at runtime
    val process = new Process {
      override val grammar = tgrammar
      override val fitness = tfitness.asInstanceOf[grammar.SyntaxTree => Double]
    }
    process.iterate(target.asInstanceOf[process.grammar.Sort]).asInstanceOf[Option[tgrammar.SyntaxTree]]
  }

  trait Process {
    val grammar: LBNF
    val fitness: grammar.SyntaxTree => Double

    private type Sort = grammar.Sort
    private type Label = grammar.Label

    // picks a uniformly random production for the given sort
    def choose(sort: Sort): Label = {
      val labels = grammar.labels(sort)
      // println( s"Choose sort: $sort, labels: ${labels}" )
      labels(RNG.nextInt(labels.size))
    }

    // picks a random production with at most one child; used past the
    // recursion-depth limit to steer generation toward termination
    def chooseDeep(sort: Sort): Label = {
      val labels = grammar.labels(sort).filter( l => grammar.rule(l).length <= 1 )
      if (labels.size == 0) throw new Exception("Recursion depth exceeded, cannot continue on " + sort)
      labels(RNG.nextInt(labels.size))
    }

    // recursively grows a random syntax tree rooted at the given sort
    def step(depth: Int, sort: Sort): grammar.SyntaxTree = {
      val label = if (depth < _recursionDepth) choose(sort) else chooseDeep(sort)
      val subtrees = grammar.rule(label).map { s =>
        step(depth+1,s)
      }
      grammar.SyntaxTree(label, subtrees)
    }

    // samples _iterations trees and returns the best one with strictly
    // positive fitness; None when every sample scored <= 0
    def iterate(sort: Sort): Option[grammar.SyntaxTree] = {
      var best: Option[grammar.SyntaxTree] = None
      var fbest: Double = 0
      for(i <- 1 to _iterations) {
        val current = step(0,sort)
        val fcurrent = fitness(current)
        if( fcurrent > fbest ) {
          best = Some(current)
          fbest = fcurrent
        }
      }
      best
    }
  } // end process
}
| zaklogician/ContainAnt-devel | src/main/scala/com/containant/heuristics/RandomHeuristic.scala | Scala | bsd-3-clause | 2,036 |
package lila.app
package templating
import lila.api.Context
import play.api.data._
import play.twirl.api.Html
trait FormHelper { self: I18nHelper =>

  // maps Play form error keys to their lichess translations
  private val errNames = Map(
    "error.minLength" -> trans.textIsTooShort,
    "error.maxLength" -> trans.textIsTooLong,
    "captcha.fail" -> trans.notACheckmate)

  def errMsg(form: Field)(implicit ctx: Context): Html = errMsg(form.errors)

  def errMsg(form: Form[_])(implicit ctx: Context): Html = errMsg(form.errors)

  // renders each error as a <p class="error"> element, translating the
  // message key when a translation exists
  def errMsg(errors: Seq[FormError])(implicit ctx: Context): Html = Html {
    val paragraphs = errors.map { err =>
      val translated = transKey(err.message, err.args)
      val msg =
        if (translated == err.message) errNames.get(err.message).fold(err.message)(_.str())
        else translated
      s"""<p class="error">$msg</p>"""
    }
    paragraphs.mkString
  }

  val booleanChoices = Seq("true" -> "Yes", "false" -> "No")
}
| clarkerubber/lila | app/templating/FormHelper.scala | Scala | agpl-3.0 | 879 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import org.apache.spark.sql.{CarbonEnv, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.carbondata.api.CarbonStore
/**
* delete segments by id list
*/
// scalastyle:off
object DeleteSegmentById {

  /**
   * Splits a comma-separated list of segment ids into individual ids.
   * Surrounding whitespace is trimmed and empty entries (e.g. from a
   * trailing comma) are dropped, so "1, 2," yields Seq("1", "2").
   */
  def extractSegmentIds(segmentIds: String): Seq[String] =
    segmentIds.split(",").map(_.trim).filter(_.nonEmpty).toSeq

  /**
   * Marks the given segments of the table as deleted.
   *
   * @param spark      active SparkSession
   * @param dbName     database containing the table
   * @param tableName  table whose segments are to be deleted
   * @param segmentIds ids of the segments to delete
   */
  def deleteSegmentById(spark: SparkSession, dbName: String, tableName: String,
      segmentIds: Seq[String]): Unit = {
    TableAPIUtil.validateTableExists(spark, dbName, tableName)
    val carbonTable = CarbonEnv.getCarbonTable(Some(dbName), tableName)(spark)
    CarbonStore.deleteLoadById(segmentIds, dbName, tableName, carbonTable)
  }

  /**
   * CLI entry point; expects the store path, the (optionally db-qualified)
   * table name and a comma-separated segment id list.
   */
  def main(args: Array[String]): Unit = {
    if (args.length < 3) {
      System.err.println(
        "Usage: DeleteSegmentByID <store path> <table name> <segment id list>")
      System.exit(1)
    }
    val storePath = TableAPIUtil.escape(args(0))
    val (dbName, tableName) = TableAPIUtil.parseSchemaName(TableAPIUtil.escape(args(1)))
    val segmentIds = extractSegmentIds(TableAPIUtil.escape(args(2)))
    val spark = TableAPIUtil.spark(storePath, s"DeleteSegmentById: $dbName.$tableName")
    deleteSegmentById(spark, dbName, tableName, segmentIds)
  }
}
| jackylk/incubator-carbondata | integration/spark/src/main/scala/org/apache/spark/util/DeleteSegmentById.scala | Scala | apache-2.0 | 2,077 |
// Load CMT tables from the datafiles generated
// Start the spark shell using
// ./spark-shell --master spark://localhost:7077 --packages com.databricks:spark-csv_2.11:1.2.0 --executor-memory 4g --driver-memory 1g
// sc is an existing SparkContext.
val sqlContext = new org.apache.spark.sql.SQLContext(sc)
// this is used to implicitly convert an RDD to a DataFrame.
import sqlContext.implicits._
import org.apache.spark.sql.SaveMode

val PATH = "hdfs://localhost:9000/raw"

// Register the per-drive mapmatch results table from its tab-separated dump
sqlContext.sql(s"""CREATE TEMPORARY TABLE mapmatch_history (mh_id int, mh_dataset_id string,
        mh_uploadtime string, mh_runtime string,
        mh_trip_start string, mh_data_count_minutes string,
        mh_data_count_accel_samples string, mh_data_count_netloc_samples string,
        mh_data_count_gps_samples string, mh_observed_sample_rate string,
        mh_distance_mapmatched_km string, mh_distance_gps_km string,
        mh_ground_truth_present string, mh_timing_mapmatch string,
        mh_distance_pct_path_error string, mh_build_version string,
        mh_timing_queue_wait string, mh_data_trip_length string,
        mh_battery_maximum_level string, mh_battery_minimum_level string,
        mh_battery_drain_rate_per_hour string, mh_battery_plugged_duration_hours string,
        mh_battery_delay_from_drive_end_seconds string, mh_startlat string,
        mh_startlon string, mh_endlat string,
        mh_endlon string, mh_data_count_output_gps_speeding_points string,
        mh_speeding_slow_gps_points string, mh_speeding_10kmh_gps_points string,
        mh_speeding_20kmh_gps_points string, mh_speeding_40kmh_gps_points string,
        mh_speeding_80kmh_gps_points string, mh_output_accel_valid_minutes string,
        mh_output_gps_moving_minutes string, mh_output_gps_moving_and_accel_valid_minutes string,
        mh_data_time_till_first_gps_minutes string, mh_score_di_accel string,
        mh_score_di_brake string, mh_score_di_turn string,
        mh_score_di_car_motion string, mh_score_di_phone_motion string,
        mh_score_di_speeding string, mh_score_di_night string,
        mh_star_rating string, mh_trip_end string,
        mh_score_di_car_motion_with_accel string, mh_score_di_car_motion_with_speeding string,
        mh_score_di_distance_km_with_accel string, mh_score_di_distance_km_with_speeding string,
        mh_score_accel_per_sec_ntile string, mh_score_brake_per_sec_ntile string,
        mh_score_turn_per_sec_ntile string, mh_score_speeding_per_sec_ntile string,
        mh_score_phone_motion_per_sec_ntile string, mh_score_accel_per_km_ntile string,
        mh_score_brake_per_km_ntile string, mh_score_turn_per_km_ntile string,
        mh_score_speeding_per_km_ntile string, mh_score_phone_motion_per_km_ntile string,
        mh_score string, mh_distance_prepended_km string,
        mh_recording_start string, mh_score_di_distance_km string,
        mh_recording_end string, mh_recording_startlat string,
        mh_recording_startlon string, mh_display_distance_km string,
        mh_display_trip_start string, mh_display_startlat string,
        mh_display_startlon string, mh_data_count_gyro_samples string,
        mh_star_rating_accel string, mh_star_rating_brake string,
        mh_star_rating_turn string, mh_star_rating_speeding string,
        mh_star_rating_phone_motion string, mh_is_night string,
        mh_battery_total_drain string, mh_battery_total_drain_duration_hours string,
        mh_score_smoothness string, mh_score_awareness string,
        mh_star_rating_night string, mh_star_rating_smoothness string,
        mh_star_rating_awareness string, mh_hide string,
        mh_data_count_tag_accel_samples string, mh_quat_i string,
        mh_quat_j string, mh_quat_k string,
        mh_quat_r string, mh_passenger_star_rating string,
        mh_suspension_damping_ratio string, mh_suspension_natural_frequency string,
        mh_suspension_fit_error string, mh_driving string,
        mh_trip_mode string, mh_classification_confidence string,
        mh_gk_trip_mode string, mh_gk_confidence string,
        mh_offroad_trip_mode string, mh_offroad_confidence string,
        mh_driver_confidence string, mh_timing_processing_preprocessing string,
        mh_timing_processing_gatekeeper string, mh_timing_processing_accelpipeline string,
        mh_timing_processing_offroad string, mh_timing_processing_suspension string,
        mh_timing_processing_scoring string, mh_timing_processing_hitchhiker string,
        mh_data_count_obd_samples string, mh_data_count_pressure_samples string,
        mh_raw_sampling_mode string, mh_data_count_magnetometer_samples string,
        mh_location_disabled_date string)
        USING com.databricks.spark.csv
        OPTIONS (path "$PATH/mh.tbl", header "false", delimiter "\\t")""")

// Join table linking each dataset to its most recent mapmatch run
sqlContext.sql(s"""CREATE TEMPORARY TABLE mapmatch_history_latest (mhl_dataset_id int, mhl_mapmatch_history_id int
        )
        USING com.databricks.spark.csv
        OPTIONS (path "$PATH/mhl.tbl", header "false", delimiter "\\t")""")

// Per-dataset metadata (device, OS, company, tag info, ...)
sqlContext.sql(s"""CREATE TEMPORARY TABLE sf_datasets (sf_id int, sf_uploadtime string,
        sf_deviceid string, sf_driveid string,
        sf_state string, sf_dest_server string,
        sf_companyid string, sf_hardware_manufacturer string,
        sf_hardware_model string, sf_hardware_bootloader string,
        sf_hardware_build string, sf_hardware_carrier string,
        sf_android_fw_version string, sf_android_api_version string,
        sf_android_codename string, sf_android_baseband string,
        sf_raw_hardware_string string, sf_raw_os_string string,
        sf_utc_offset_with_dst string, sf_app_version string,
        sf_file_format string, sf_start_reason string,
        sf_stop_reason string, sf_previous_driveid string,
        sf_userid string, sf_tag_mac_address string,
        sf_tag_trip_number string, sf_primary_driver_app_user_id string,
        sf_tag_last_connection_number string, sf_gps_points_lsh_key_1 string,
        sf_gps_points_lsh_key_2 string, sf_gps_points_lsh_key_3 string,
        sf_hidden_by_support string)
        USING com.databricks.spark.csv
        OPTIONS (path "$PATH/sf.tbl", header "false", delimiter "\\t")""")

// Three-way join queries; the trailing comment on each is the expected count

//INFO: Query_sf:sf|6:INT:30:EQ
val a = sqlContext.sql(s"""SELECT COUNT(*)
        FROM mapmatch_history JOIN mapmatch_history_latest ON mh_id = mhl_mapmatch_history_id JOIN sf_datasets ON sf_id = mhl_dataset_id
        WHERE sf_companyid = 30""")
// 35

//INFO: Query_MH:mh|85:STRING:f:LEQ
//INFO: Query_sf:sf|6:INT:41:EQ
val b = sqlContext.sql(s"""SELECT COUNT(*)
        FROM mapmatch_history JOIN mapmatch_history_latest ON mh_id = mhl_mapmatch_history_id JOIN sf_datasets ON sf_id = mhl_dataset_id
        WHERE sf_companyid = 41 and mh_hide <= "f" """)
// 4

//INFO: Query_MH:mh|112:STRING:f:LEQ
//INFO: Query_sf:sf|6:INT:41:EQ
val c = sqlContext.sql(s"""SELECT COUNT(*)
        FROM mapmatch_history JOIN mapmatch_history_latest ON mh_id = mhl_mapmatch_history_id JOIN sf_datasets ON sf_id = mhl_dataset_id
        WHERE sf_companyid = 41 and mh_raw_sampling_mode <= "f" """)
// 7

//INFO: Query_MH:mh|112:STRING:f:LEQ;85:STRING:f:LEQ
//INFO: Query_sf:sf|6:INT:41:EQ
val d = sqlContext.sql(s"""SELECT COUNT(*)
        FROM mapmatch_history JOIN mapmatch_history_latest ON mh_id = mhl_mapmatch_history_id JOIN sf_datasets ON sf_id = mhl_dataset_id
        WHERE sf_companyid = 41 and mh_hide <= "f" and mh_raw_sampling_mode <= "f" """)
// 2

//INFO: Query_MH:mh|85:STRING:f:LEQ
//INFO: Query_sf:sf|6:INT:39:EQ
val e = sqlContext.sql(s"""SELECT COUNT(*)
        FROM mapmatch_history JOIN mapmatch_history_latest ON mh_id = mhl_mapmatch_history_id JOIN sf_datasets ON sf_id = mhl_dataset_id
        WHERE sf_companyid = 39 and mh_hide <= "f" """)
// 4 | mitdbg/mdindex | src/test/scala/cmt_join.scala | Scala | mit | 7,192 |
package com.github.vonnagy.service.container.core
import java.util.concurrent.TimeUnit

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.sys.ShutdownHookThread
import scala.util.control.NonFatal

import akka.actor.ActorSystem

import com.github.vonnagy.service.container.log.LoggingAdapter
/**
 * This trait implements the termination handler to stop the system when the JVM exits.
 */
trait SystemShutdown extends LoggingAdapter {

  /** The actor system to terminate on shutdown. */
  def system: ActorSystem

  /**
   * Ensure that the constructed ActorSystem is shut down when the JVM shuts down.
   * Cleared (set to None) once shutdown has run, so shutdown happens only once.
   */
  var shutdownHook: Option[ShutdownHookThread] = Some(sys.addShutdownHook {
    log.info("Shutdown hook called: Shutting down the actor system")
    shutdownActorSystem(true) {}
  })

  /**
   * Shutdown the actor system and then invoke the given callback.
   *
   * @param fromHook true when invoked from the JVM shutdown hook itself, in
   *                 which case the hook must not be removed (the JVM is
   *                 already running it)
   * @param f        callback invoked after the shutdown attempt, whether it
   *                 succeeded or failed
   */
  private[container] def shutdownActorSystem(fromHook: Boolean = false)(f: => Unit): Unit = {
    try {
      // Remove the hook unless we are currently executing inside it
      if (!fromHook) shutdownHook.foreach(_.remove())
      shutdownHook = None

      log.info("Shutting down the actor system")
      system.terminate()

      // Wait for termination if it is not already complete
      Await.result(system.whenTerminated, Duration(30, TimeUnit.SECONDS))
      log.info("The actor system has terminated")
    }
    catch {
      // Only swallow recoverable failures; fatal errors (OOM etc.) propagate
      case NonFatal(t) =>
        log.error(s"The actor system could not be shutdown: ${t.getMessage}", t)
    }

    // Call the passed function
    f
  }
}
| vonnagy/service-container | service-container/src/main/scala/com/github/vonnagy/service/container/core/SystemShutdown.scala | Scala | apache-2.0 | 1,480 |
package applicant.ml.rnn
import org.deeplearning4j.nn.api.Model
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork
import org.deeplearning4j.optimize.api.IterationListener
import org.nd4j.linalg.factory.Nd4j
import org.slf4j.{Logger, LoggerFactory}
import org.apache.commons.io.FileUtils
import java.io.{DataOutputStream, File}
import java.nio.file.{Files, Paths}
/**
 * IterationListener that logs the current score and stores the current model state to file.
 *
 * @param json path the network configuration (JSON) is written to
 * @param coefficients path the network parameters (binary) are written to
 */
class PersistIterationListener(json: String, coefficients: String) extends IterationListener {
  val log: Logger = LoggerFactory.getLogger(getClass())

  // set once iterationDone has been called at least once
  var isInvoked: Boolean = false
  // number of iterations observed so far
  var iterCount: Long = 0
  // best (lowest) score persisted so far; -1 until a model has been saved
  var bestScore: Double = -1
  // only models scoring below this value are persisted
  var threshold: Int = 60

  /**
   * {@inheritDoc}
   */
  def invoked(): Boolean = isInvoked

  /**
   * {@inheritDoc}
   */
  def invoke() {
    this.isInvoked = true
  }

  /**
   * {@inheritDoc}
   */
  def iterationDone(model: Model, iteration: Int) {
    invoke()
    val score: Double = model.score()
    log.info("Score at iteration " + iterCount + " is " + score)

    // Check and persist model
    save(model, score)
    iterCount += 1
  }

  /**
   * Saves a MultiLayerNetwork to file. This method will only save the model if the current
   * score is below a threshold and it's the best score this method has seen.
   *
   * @param model NN model
   * @param score current model score
   */
  def save(model: Model, score: Double) {
    model match {
      case net: MultiLayerNetwork =>
        if (score < threshold && (score < bestScore || bestScore == -1)) {
          log.info("Persisting model parameters to: " + coefficients)
          log.info("Persisting model configuration to: " + json)
          log.info("Model score is: " + score)
          bestScore = score
          try {
            // Write the network parameters, closing the stream even on failure
            val dos: DataOutputStream = new DataOutputStream(Files.newOutputStream(Paths.get(coefficients)))
            try {
              Nd4j.write(net.params(), dos)
            } finally {
              dos.close()
            }

            // Write the network configuration:
            FileUtils.write(new File(json), net.getLayerWiseConfigurations().toJson())
          }
          catch {
            case ex: Exception =>
              log.error(ex.getMessage(), ex)
          }
        }
      case _ => // not a MultiLayerNetwork: nothing we know how to persist
    }
  }
}
| dataworks/internship-2016 | etl/src/scala/applicant/ml/rnn/PersistIterationListener.scala | Scala | apache-2.0 | 2,597 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.linalg
import java.util
import java.lang.{Double => JavaDouble, Integer => JavaInteger, Iterable => JavaIterable}
import scala.annotation.varargs
import scala.collection.JavaConverters._
import breeze.linalg.{DenseVector => BDV, SparseVector => BSV, Vector => BV}
import org.apache.spark.SparkException
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.mllib.util.NumericParser
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.expressions.GenericMutableRow
import org.apache.spark.sql.types._
/**
* Represents a numeric vector, whose index type is Int and value type is Double.
*
* Note: Users should not implement this interface.
*/
@SQLUserDefinedType(udt = classOf[VectorUDT])
sealed trait Vector extends Serializable {

  /**
   * Size of the vector.
   */
  def size: Int

  /**
   * Converts the instance to a double array.
   */
  def toArray: Array[Double]

  /**
   * Semantic equality: two vectors compare equal when they represent the same
   * values, regardless of dense/sparse storage. Explicit zeros stored in a
   * sparse vector are ignored by the `Vectors.equals` helper used below.
   */
  override def equals(other: Any): Boolean = {
    other match {
      case v2: Vector =>
        if (this.size != v2.size) return false
        (this, v2) match {
          case (s1: SparseVector, s2: SparseVector) =>
            Vectors.equals(s1.indices, s1.values, s2.indices, s2.values)
          case (s1: SparseVector, d1: DenseVector) =>
            Vectors.equals(s1.indices, s1.values, 0 until d1.size, d1.values)
          case (d1: DenseVector, s1: SparseVector) =>
            Vectors.equals(0 until d1.size, d1.values, s1.indices, s1.values)
          // Fallback for any other Vector implementation: compare materialized arrays.
          case (_, _) => util.Arrays.equals(this.toArray, v2.toArray)
        }
      case _ => false
    }
  }

  /**
   * Returns a hash code value for the vector. The hash code is based on its size and its nonzeros
   * in the first 16 entries, using a hash algorithm similar to [[java.util.Arrays.hashCode]].
   */
  override def hashCode(): Int = {
    // This is a reference implementation. It calls return in foreachActive, which is slow.
    // Subclasses should override it with optimized implementation.
    // NOTE: `return` inside the closure is a *nonlocal* return (implemented by
    // throwing NonLocalReturnControl), which is why it is slow.
    var result: Int = 31 + size
    this.foreachActive { (index, value) =>
      if (index < 16) {
        // ignore explicit 0 for comparison between sparse and dense
        if (value != 0) {
          result = 31 * result + index
          val bits = java.lang.Double.doubleToLongBits(value)
          result = 31 * result + (bits ^ (bits >>> 32)).toInt
        }
      } else {
        return result
      }
    }
    result
  }

  /**
   * Converts the instance to a breeze vector.
   */
  private[spark] def toBreeze: BV[Double]

  /**
   * Gets the value of the ith element.
   * @param i index
   */
  def apply(i: Int): Double = toBreeze(i)

  /**
   * Makes a deep copy of this vector.
   */
  def copy: Vector = {
    throw new NotImplementedError(s"copy is not implemented for ${this.getClass}.")
  }

  /**
   * Applies a function `f` to all the active elements of dense and sparse vector.
   *
   * @param f the function takes two parameters where the first parameter is the index of
   *          the vector with type `Int`, and the second parameter is the corresponding value
   *          with type `Double`.
   */
  private[spark] def foreachActive(f: (Int, Double) => Unit)

  /**
   * Number of active entries. An "active entry" is an element which is explicitly stored,
   * regardless of its value. Note that inactive entries have value 0.
   */
  def numActives: Int

  /**
   * Number of nonzero elements. This scans all active values and count nonzeros.
   */
  def numNonzeros: Int

  /**
   * Converts this vector to a sparse vector with all explicit zeros removed.
   */
  def toSparse: SparseVector

  /**
   * Converts this vector to a dense vector.
   */
  def toDense: DenseVector = new DenseVector(this.toArray)

  /**
   * Returns a vector in either dense or sparse format, whichever uses less storage.
   */
  def compressed: Vector = {
    val nnz = numNonzeros
    // A dense vector needs 8 * size + 8 bytes, while a sparse vector needs 12 * nnz + 20 bytes.
    // 12 * nnz + 20 < 8 * size + 8 simplifies to the 1.5 * (nnz + 1) < size test below.
    if (1.5 * (nnz + 1.0) < size) {
      toSparse
    } else {
      toDense
    }
  }
}
/**
* :: DeveloperApi ::
*
* User-defined type for [[Vector]] which allows easy interaction with SQL
* via [[org.apache.spark.sql.DataFrame]].
*
* NOTE: This is currently private[spark] but will be made public later once it is stabilized.
*/
@DeveloperApi
private[spark] class VectorUDT extends UserDefinedType[Vector] {

  override def sqlType: StructType = {
    // type: 0 = sparse, 1 = dense
    // We only use "values" for dense vectors, and "size", "indices", and "values" for sparse
    // vectors. The "values" field is nullable because we might want to add binary vectors later,
    // which uses "size" and "indices", but not "values".
    StructType(Seq(
      StructField("type", ByteType, nullable = false),
      StructField("size", IntegerType, nullable = true),
      StructField("indices", ArrayType(IntegerType, containsNull = false), nullable = true),
      StructField("values", ArrayType(DoubleType, containsNull = false), nullable = true)))
  }

  /** Encodes a [[Vector]] into a 4-field row matching [[sqlType]]. */
  override def serialize(obj: Any): Row = {
    obj match {
      case SparseVector(size, indices, values) =>
        val row = new GenericMutableRow(4)
        row.setByte(0, 0)
        row.setInt(1, size)
        row.update(2, indices.toSeq)
        row.update(3, values.toSeq)
        row
      case DenseVector(values) =>
        val row = new GenericMutableRow(4)
        row.setByte(0, 1)
        // "size" and "indices" are unused by the dense encoding.
        row.setNullAt(1)
        row.setNullAt(2)
        row.update(3, values.toSeq)
        row
      // TODO: There are bugs in UDT serialization because we don't have a clear separation between
      // TODO: internal SQL types and language specific types (including UDT). UDT serialize and
      // TODO: deserialize may get called twice. See SPARK-7186.
      case row: Row =>
        row
    }
  }

  /** Decodes a row produced by [[serialize]] back into a [[Vector]]. */
  override def deserialize(datum: Any): Vector = {
    datum match {
      case row: Row =>
        require(row.length == 4,
          s"VectorUDT.deserialize given row with length ${row.length} but requires length == 4")
        val tpe = row.getByte(0)
        tpe match {
          case 0 =>
            val size = row.getInt(1)
            val indices = row.getAs[Iterable[Int]](2).toArray
            val values = row.getAs[Iterable[Double]](3).toArray
            new SparseVector(size, indices, values)
          case 1 =>
            val values = row.getAs[Iterable[Double]](3).toArray
            new DenseVector(values)
        }
      // TODO: There are bugs in UDT serialization because we don't have a clear separation between
      // TODO: internal SQL types and language specific types (including UDT). UDT serialize and
      // TODO: deserialize may get called twice. See SPARK-7186.
      case v: Vector =>
        v
    }
  }

  override def pyUDT: String = "pyspark.mllib.linalg.VectorUDT"

  override def userClass: Class[Vector] = classOf[Vector]

  // All VectorUDT instances are interchangeable, so equality is by type only.
  override def equals(o: Any): Boolean = {
    o match {
      case v: VectorUDT => true
      case _ => false
    }
  }

  // Constant hash, consistent with the type-based equals above.
  override def hashCode: Int = 7919

  override def typeName: String = "vector"

  private[spark] override def asNullable: VectorUDT = this
}
/**
* Factory methods for [[org.apache.spark.mllib.linalg.Vector]].
* We don't use the name `Vector` because Scala imports
* [[scala.collection.immutable.Vector]] by default.
*/
object Vectors {

  /**
   * Creates a dense vector from its values.
   */
  @varargs
  def dense(firstValue: Double, otherValues: Double*): Vector =
    new DenseVector((firstValue +: otherValues).toArray)

  // A dummy implicit is used to avoid signature collision with the one generated by @varargs.
  /**
   * Creates a dense vector from a double array.
   */
  def dense(values: Array[Double]): Vector = new DenseVector(values)

  /**
   * Creates a sparse vector providing its index array and value array.
   *
   * @param size vector size.
   * @param indices index array, must be strictly increasing.
   * @param values value array, must have the same length as indices.
   */
  def sparse(size: Int, indices: Array[Int], values: Array[Double]): Vector =
    new SparseVector(size, indices, values)

  /**
   * Creates a sparse vector using unordered (index, value) pairs.
   *
   * @param size vector size.
   * @param elements vector elements in (index, value) pairs.
   */
  def sparse(size: Int, elements: Seq[(Int, Double)]): Vector = {
    require(size > 0, "The size of the requested sparse vector must be greater than 0.")

    // Sort by index so equal neighbors reveal duplicates, and the result
    // satisfies SparseVector's strictly-increasing-indices assumption.
    val (indices, values) = elements.sortBy(_._1).unzip
    var prev = -1
    indices.foreach { i =>
      require(prev < i, s"Found duplicate indices: $i.")
      prev = i
    }
    require(prev < size, s"You may not write an element to index $prev because the declared " +
      s"size of your vector is $size")

    new SparseVector(size, indices.toArray, values.toArray)
  }

  /**
   * Creates a sparse vector using unordered (index, value) pairs in a Java friendly way.
   *
   * @param size vector size.
   * @param elements vector elements in (index, value) pairs.
   */
  def sparse(size: Int, elements: JavaIterable[(JavaInteger, JavaDouble)]): Vector = {
    sparse(size, elements.asScala.map { case (i, x) =>
      (i.intValue(), x.doubleValue())
    }.toSeq)
  }

  /**
   * Creates a vector of all zeros.
   *
   * @param size vector size
   * @return a zero vector
   */
  def zeros(size: Int): Vector = {
    new DenseVector(new Array[Double](size))
  }

  /**
   * Parses a string resulted from [[Vector.toString]] into a [[Vector]].
   */
  def parse(s: String): Vector = {
    parseNumeric(NumericParser.parse(s))
  }

  // Dispatches on the structure produced by NumericParser: a bare array means a
  // dense vector; a (size, indices, values) triple means a sparse vector.
  private[mllib] def parseNumeric(any: Any): Vector = {
    any match {
      case values: Array[Double] =>
        Vectors.dense(values)
      case Seq(size: Double, indices: Array[Double], values: Array[Double]) =>
        Vectors.sparse(size.toInt, indices.map(_.toInt), values)
      case other =>
        throw new SparkException(s"Cannot parse $other.")
    }
  }

  /**
   * Creates a vector instance from a breeze vector.
   */
  private[spark] def fromBreeze(breezeVector: BV[Double]): Vector = {
    breezeVector match {
      case v: BDV[Double] =>
        // Only wrap the underlying array when it is exactly the vector's data.
        if (v.offset == 0 && v.stride == 1 && v.length == v.data.length) {
          new DenseVector(v.data)
        } else {
          new DenseVector(v.toArray) // Can't use underlying array directly, so make a new one
        }
      case v: BSV[Double] =>
        // Breeze may over-allocate; only the first `used` entries are valid.
        if (v.index.length == v.used) {
          new SparseVector(v.length, v.index, v.data)
        } else {
          new SparseVector(v.length, v.index.slice(0, v.used), v.data.slice(0, v.used))
        }
      case v: BV[_] =>
        sys.error("Unsupported Breeze vector type: " + v.getClass.getName)
    }
  }

  /**
   * Returns the p-norm of this vector.
   * @param vector input vector.
   * @param p norm.
   * @return norm in L^p^ space.
   */
  def norm(vector: Vector, p: Double): Double = {
    require(p >= 1.0, "To compute the p-norm of the vector, we require that you specify a p>=1. " +
      s"You specified p=$p.")
    // Only the stored values matter: zeros contribute nothing to any p-norm.
    val values = vector match {
      case DenseVector(vs) => vs
      case SparseVector(n, ids, vs) => vs
      case v => throw new IllegalArgumentException("Do not support vector type " + v.getClass)
    }
    val size = values.length

    // Specialized loops for the common norms (1, 2, infinity) avoid math.pow.
    if (p == 1) {
      var sum = 0.0
      var i = 0
      while (i < size) {
        sum += math.abs(values(i))
        i += 1
      }
      sum
    } else if (p == 2) {
      var sum = 0.0
      var i = 0
      while (i < size) {
        sum += values(i) * values(i)
        i += 1
      }
      math.sqrt(sum)
    } else if (p == Double.PositiveInfinity) {
      var max = 0.0
      var i = 0
      while (i < size) {
        val value = math.abs(values(i))
        if (value > max) max = value
        i += 1
      }
      max
    } else {
      var sum = 0.0
      var i = 0
      while (i < size) {
        sum += math.pow(math.abs(values(i)), p)
        i += 1
      }
      math.pow(sum, 1.0 / p)
    }
  }

  /**
   * Returns the squared distance between two Vectors.
   * @param v1 first Vector.
   * @param v2 second Vector.
   * @return squared distance between two Vectors.
   */
  def sqdist(v1: Vector, v2: Vector): Double = {
    require(v1.size == v2.size, s"Vector dimensions do not match: Dim(v1)=${v1.size} and Dim(v2)" +
      s"=${v2.size}.")
    var squaredDistance = 0.0
    (v1, v2) match {
      case (v1: SparseVector, v2: SparseVector) =>
        val v1Values = v1.values
        val v1Indices = v1.indices
        val v2Values = v2.values
        val v2Indices = v2.indices
        val nnzv1 = v1Indices.length
        val nnzv2 = v2Indices.length

        var kv1 = 0
        var kv2 = 0
        // Merge-walk the two (sorted) index lists; `score` is the difference
        // of the aligned components (one side is implicitly 0 when its index
        // is absent from the other vector).
        while (kv1 < nnzv1 || kv2 < nnzv2) {
          var score = 0.0

          if (kv2 >= nnzv2 || (kv1 < nnzv1 && v1Indices(kv1) < v2Indices(kv2))) {
            score = v1Values(kv1)
            kv1 += 1
          } else if (kv1 >= nnzv1 || (kv2 < nnzv2 && v2Indices(kv2) < v1Indices(kv1))) {
            score = v2Values(kv2)
            kv2 += 1
          } else {
            score = v1Values(kv1) - v2Values(kv2)
            kv1 += 1
            kv2 += 1
          }
          squaredDistance += score * score
        }
      case (v1: SparseVector, v2: DenseVector) =>
        squaredDistance = sqdist(v1, v2)
      case (v1: DenseVector, v2: SparseVector) =>
        // Symmetric metric: delegate to the sparse/dense specialization.
        squaredDistance = sqdist(v2, v1)
      case (DenseVector(vv1), DenseVector(vv2)) =>
        var kv = 0
        val sz = vv1.length
        while (kv < sz) {
          val score = vv1(kv) - vv2(kv)
          squaredDistance += score * score
          kv += 1
        }
      case _ =>
        throw new IllegalArgumentException("Do not support vector type " + v1.getClass +
          " and " + v2.getClass)
    }
    squaredDistance
  }

  /**
   * Returns the squared distance between DenseVector and SparseVector.
   */
  private[mllib] def sqdist(v1: SparseVector, v2: DenseVector): Double = {
    var kv1 = 0
    var kv2 = 0
    val indices = v1.indices
    var squaredDistance = 0.0
    val nnzv1 = indices.length
    val nnzv2 = v2.size
    // iv1 tracks the next stored index of the sparse vector (-1 when empty,
    // which never matches a dense position).
    var iv1 = if (nnzv1 > 0) indices(kv1) else -1

    while (kv2 < nnzv2) {
      var score = 0.0
      if (kv2 != iv1) {
        // Sparse side is implicitly 0 here.
        score = v2(kv2)
      } else {
        score = v1.values(kv1) - v2(kv2)
        if (kv1 < nnzv1 - 1) {
          kv1 += 1
          iv1 = indices(kv1)
        }
      }
      squaredDistance += score * score
      kv2 += 1
    }
    squaredDistance
  }

  /**
   * Check equality between sparse/dense vectors
   */
  private[mllib] def equals(
      v1Indices: IndexedSeq[Int],
      v1Values: Array[Double],
      v2Indices: IndexedSeq[Int],
      v2Values: Array[Double]): Boolean = {
    val v1Size = v1Values.length
    val v2Size = v2Values.length
    var k1 = 0
    var k2 = 0
    var allEqual = true
    while (allEqual) {
      // Skip explicit zeros on both sides so that storage differences
      // (explicit vs. implicit zeros) do not break equality.
      while (k1 < v1Size && v1Values(k1) == 0) k1 += 1
      while (k2 < v2Size && v2Values(k2) == 0) k2 += 1

      if (k1 >= v1Size || k2 >= v2Size) {
        return k1 >= v1Size && k2 >= v2Size // check end alignment
      }

      allEqual = v1Indices(k1) == v2Indices(k2) && v1Values(k1) == v2Values(k2)
      k1 += 1
      k2 += 1
    }
    allEqual
  }
}
/**
* A dense vector represented by a value array.
*/
@SQLUserDefinedType(udt = classOf[VectorUDT])
class DenseVector(val values: Array[Double]) extends Vector {

  /** Length of the backing array. */
  override def size: Int = values.length

  override def toString: String = values.mkString("[", ",", "]")

  /** Returns the backing array itself (no defensive copy). */
  override def toArray: Array[Double] = values

  private[spark] override def toBreeze: BV[Double] = new BDV[Double](values)

  override def apply(i: Int): Double = values(i)

  /** Deep copy: clones the backing array so later mutations do not propagate. */
  override def copy: DenseVector = new DenseVector(values.clone())

  private[spark] override def foreachActive(f: (Int, Double) => Unit) = {
    // Every position of a dense vector is "active".
    val data = values
    val n = data.length
    var idx = 0
    while (idx < n) {
      f(idx, data(idx))
      idx += 1
    }
  }

  override def hashCode(): Int = {
    // Optimized form of Vector.hashCode: fold size plus the nonzero entries
    // among the first 16 positions, java.util.Arrays.hashCode style.
    var h: Int = 31 + size
    val limit = math.min(values.length, 16)
    var idx = 0
    while (idx < limit) {
      val v = values(idx)
      if (v != 0.0) {
        h = 31 * h + idx
        val bits = java.lang.Double.doubleToLongBits(v)
        h = 31 * h + (bits ^ (bits >>> 32)).toInt
      }
      idx += 1
    }
    h
  }

  /** Every element of a dense vector is explicitly stored. */
  override def numActives: Int = size

  override def numNonzeros: Int = {
    // Equivalent to values.count(_ != 0.0); kept as a plain loop for speed.
    var count = 0
    var idx = 0
    val n = values.length
    while (idx < n) {
      if (values(idx) != 0.0) count += 1
      idx += 1
    }
    count
  }

  override def toSparse: SparseVector = {
    val nnz = numNonzeros
    val sparseIndices = new Array[Int](nnz)
    val sparseValues = new Array[Double](nnz)
    var pos = 0
    var idx = 0
    val n = values.length
    while (idx < n) {
      val v = values(idx)
      if (v != 0) {
        sparseIndices(pos) = idx
        sparseValues(pos) = v
        pos += 1
      }
      idx += 1
    }
    new SparseVector(size, sparseIndices, sparseValues)
  }

  /**
   * Find the index of a maximal element. Returns the first maximal element in case of a tie.
   * Returns -1 if vector has length 0.
   */
  private[spark] def argmax: Int = {
    if (size == 0) {
      -1
    } else {
      // Strict '>' keeps the earliest index on ties.
      var best = 0
      var idx = 1
      while (idx < size) {
        if (values(idx) > values(best)) {
          best = idx
        }
        idx += 1
      }
      best
    }
  }
}
/** Extractor object enabling `case DenseVector(values) => ...` pattern matches. */
object DenseVector {
  /** Extracts the value array from a dense vector. */
  def unapply(dv: DenseVector): Option[Array[Double]] = Some(dv.values)
}
/**
* A sparse vector represented by an index array and an value array.
*
* @param size size of the vector.
* @param indices index array, assume to be strictly increasing.
* @param values value array, must have the same length as the index array.
*/
@SQLUserDefinedType(udt = classOf[VectorUDT])
class SparseVector(
    override val size: Int,
    val indices: Array[Int],
    val values: Array[Double]) extends Vector {

  require(indices.length == values.length, "Sparse vectors require that the dimension of the" +
    s" indices match the dimension of the values. You provided ${indices.length} indices and " +
    s" ${values.length} values.")

  // NOTE(review): indices are assumed strictly increasing and within [0, size)
  // (see the class scaladoc); this is not validated by the constructor.

  override def toString: String =
    s"($size,${indices.mkString("[", ",", "]")},${values.mkString("[", ",", "]")})"

  /** Materializes the full dense array, leaving unset positions at 0.0. */
  override def toArray: Array[Double] = {
    val data = new Array[Double](size)
    var i = 0
    val nnz = indices.length
    while (i < nnz) {
      data(indices(i)) = values(i)
      i += 1
    }
    data
  }

  override def copy: SparseVector = {
    new SparseVector(size, indices.clone(), values.clone())
  }

  private[spark] override def toBreeze: BV[Double] = new BSV[Double](indices, values, size)

  private[spark] override def foreachActive(f: (Int, Double) => Unit) = {
    var i = 0
    val localValuesSize = values.length
    val localIndices = indices
    val localValues = values

    while (i < localValuesSize) {
      f(localIndices(i), localValues(i))
      i += 1
    }
  }

  /**
   * Optimized variant of [[Vector.hashCode]]: folds in nonzero entries whose
   * index is below 16, stopping early once an index >= 16 is seen (valid
   * because indices are increasing).
   */
  override def hashCode(): Int = {
    var result: Int = 31 + size
    val end = values.length
    var continue = true
    var k = 0
    while ((k < end) & continue) {
      val i = indices(k)
      if (i < 16) {
        val v = values(k)
        if (v != 0.0) {
          result = 31 * result + i
          val bits = java.lang.Double.doubleToLongBits(v)
          result = 31 * result + (bits ^ (bits >>> 32)).toInt
        }
      } else {
        continue = false
      }
      k += 1
    }
    result
  }

  override def numActives: Int = values.length

  override def numNonzeros: Int = {
    var nnz = 0
    values.foreach { v =>
      if (v != 0.0) {
        nnz += 1
      }
    }
    nnz
  }

  /** Returns `this` when there are no explicit zeros; otherwise drops them. */
  override def toSparse: SparseVector = {
    val nnz = numNonzeros
    if (nnz == numActives) {
      this
    } else {
      val ii = new Array[Int](nnz)
      val vv = new Array[Double](nnz)
      var k = 0
      foreachActive { (i, v) =>
        if (v != 0.0) {
          ii(k) = i
          vv(k) = v
          k += 1
        }
      }
      new SparseVector(size, ii, vv)
    }
  }
}
/** Extractor object enabling `case SparseVector(size, indices, values) => ...` matches. */
object SparseVector {
  /** Extracts the size, index array, and value array from a sparse vector. */
  def unapply(sv: SparseVector): Option[(Int, Array[Int], Array[Double])] =
    Some((sv.size, sv.indices, sv.values))
}
| andrewor14/iolap | mllib/src/main/scala/org/apache/spark/mllib/linalg/Vectors.scala | Scala | apache-2.0 | 21,167 |
/*
* Copyright (c) 2016 Tinkoff
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ru.tinkoff.aerospikeexamples.example
import com.aerospike.client.Host
import com.aerospike.client.async.{AsyncClient, AsyncClientPolicy}
import com.typesafe.config.{Config, ConfigFactory}
import ru.tinkoff.aerospike.dsl.SpikeImpl
import ru.tinkoff.aerospikemacro.domain.DBCredentials
import scala.concurrent.ExecutionContext
import scala.util.{Failure, Success, Try}
/**
* @author MarinaSigaeva
* @since 20.10.16
*/
/**
 * Example Aerospike client wiring: reads connection settings from
 * application.conf and builds an async client / SpikeImpl on top of them.
 * Each missing setting fails fast with a message naming the missing key.
 */
object AClient {

  /** Application configuration, loaded once from application.conf. */
  val config: Config = ConfigFactory.load()

  /** Aerospike host names; fails fast when the setting is absent. */
  val hosts: List[String] = Try(List(config.getString("ru-tinkoff-aerospike-dsl.example-host")))
    .getOrElse(throw new Exception("Add host for aerospike in application.conf file"))

  /** Aerospike port; fails fast when the setting is absent. */
  // Fixed copy-paste error: the message previously said "Add host" for a missing port.
  val port: Int = Try(config.getInt("ru-tinkoff-aerospike-dsl.example-port"))
    .getOrElse(throw new Exception("Add port for aerospike in application.conf file"))

  /** Namespace used when building Aerospike keys. */
  val namespace: String = Try(config.getString("ru-tinkoff-aerospike-dsl.keyWrapper-namespace"))
    .getOrElse(throw new Exception("Add namespace for aerospike in application.conf file"))

  /** Set name used when building Aerospike keys. */
  val setName: String = Try(config.getString("ru-tinkoff-aerospike-dsl.keyWrapper-setName"))
    .getOrElse(throw new Exception("Add setName for aerospike in application.conf file"))

  /** Credentials bundle derived from the configured namespace/set. */
  def dbc = DBCredentials(namespace, setName)

  // NOTE(review): `client` is a def, so every call opens a fresh AsyncClient
  // connection; callers should hold on to the returned instance.
  def client: AsyncClient = create(hosts)

  /**
   * Connects to the given hosts on [[port]]. Connection failures propagate as
   * thrown exceptions (the previous Try/match simply rethrew, so it was removed).
   */
  def create(hs: List[String]): AsyncClient =
    new AsyncClient(new AsyncClientPolicy, hs.map(new Host(_, port)): _*)

  /** DSL facade over a freshly created client. */
  def spikeImpl(implicit ex: ExecutionContext) = new SpikeImpl(client)(ex)
}
| TinkoffCreditSystems/aerospike-scala | aerospike-scala-example/src/main/scala/ru/tinkoff/aerospikeexamples/example/AClient.scala | Scala | apache-2.0 | 2,183 |
/*
* Copyright (c) 2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package common
package enrichments
package registry
// Java
import java.net.URI
import java.lang.{Byte => JByte}
// Apache Commons Codec
import org.apache.commons.codec.binary.Base64
// Scalaz
import scalaz._
import Scalaz._
// json4s
import org.json4s.jackson.JsonMethods.parse
// Iglu
import com.snowplowanalytics.iglu.client.SchemaKey
// Scala-Forex
import com.snowplowanalytics.forex.oerclient.DeveloperAccount
// Specs2
import org.specs2.mutable.Specification
import org.specs2.scalaz.ValidationMatchers
/**
* Tests enrichmentConfigs
*/
class EnrichmentConfigsSpec extends Specification with ValidationMatchers {

  // Each test feeds a JSON enrichment configuration plus its Iglu schema key
  // to the corresponding parser and checks the resulting case class.

  "Parsing a valid anon_ip enrichment JSON" should {
    "successfully construct an AnonIpEnrichment case class" in {

      val ipAnonJson = parse("""{
        "enabled": true,
        "parameters": {
          "anonOctets": 2
        }
      }""")

      val schemaKey = SchemaKey("com.snowplowanalytics.snowplow", "anon_ip", "jsonschema", "1-0-0")

      val result = AnonIpEnrichment.parse(ipAnonJson, schemaKey)
      result must beSuccessful(AnonIpEnrichment(AnonOctets(2)))

    }
  }

  "Parsing a valid ip_lookups enrichment JSON" should {
    "successfully construct a GeoIpEnrichment case class" in {

      val ipToGeoJson = parse("""{
        "enabled": true,
        "parameters": {
          "geo": {
            "database": "GeoIPCity.dat",
            "uri": "http://snowplow-hosted-assets.s3.amazonaws.com/third-party/maxmind"
          },
          "isp": {
            "database": "GeoIPISP.dat",
            "uri": "http://snowplow-hosted-assets.s3.amazonaws.com/third-party/maxmind"
          }
        }
      }""")

      val schemaKey = SchemaKey("com.snowplowanalytics.snowplow", "ip_lookups", "jsonschema", "1-0-0")

      // Expected URIs are the configured uri joined with the database filename.
      val expected = IpLookupsEnrichment(Some("geo", new URI("http://snowplow-hosted-assets.s3.amazonaws.com/third-party/maxmind/GeoIPCity.dat"), "GeoIPCity.dat"),
        Some("isp", new URI("http://snowplow-hosted-assets.s3.amazonaws.com/third-party/maxmind/GeoIPISP.dat"), "GeoIPISP.dat"),
        None, None, None, true)

      val result = IpLookupsEnrichment.parse(ipToGeoJson, schemaKey, true)
      result must beSuccessful(expected)

    }
  }

  "Parsing a valid referer_parser enrichment JSON" should {
    "successfully construct a RefererParserEnrichment case class" in {

      val refererParserJson = parse("""{
        "enabled": true,
        "parameters": {
          "internalDomains": [
            "www.subdomain1.snowplowanalytics.com",
            "www.subdomain2.snowplowanalytics.com"
          ]
        }
      }""")

      val schemaKey = SchemaKey("com.snowplowanalytics.snowplow", "referer_parser", "jsonschema", "1-0-0")

      val expected = RefererParserEnrichment(List("www.subdomain1.snowplowanalytics.com", "www.subdomain2.snowplowanalytics.com"))

      val result = RefererParserEnrichment.parse(refererParserJson, schemaKey)
      result must beSuccessful(expected)

    }
  }

  "Parsing a valid campaign_attribution enrichment JSON" should {
    "successfully construct a CampaignAttributionEnrichment case class" in {

      val campaignAttributionEnrichmentJson = parse("""{
        "enabled": true,
        "parameters": {
          "mapping": "static",
          "fields": {
            "mktMedium": ["utm_medium", "medium"],
            "mktSource": ["utm_source", "source"],
            "mktTerm": ["utm_term"],
            "mktContent": [],
            "mktCampaign": ["utm _ campaign", "CID", "legacy-campaign!?-`@#$%^&*()=\\\\][}{/.,<>~|"],
            "mktClickId": {
              "customclid": "Custom",
              "gclid": "Override"
            }
          }
        }
      }""")

      val schemaKey = SchemaKey("com.snowplowanalytics.snowplow", "campaign_attribution", "jsonschema", "1-0-0")

      // The custom clickId mapping is merged with the built-in defaults
      // (msclkid/dclid), with "gclid" overridden by the configuration.
      val expected = CampaignAttributionEnrichment(
        List("utm_medium", "medium"),
        List("utm_source", "source"),
        List("utm_term"),
        List(),
        List("utm _ campaign", "CID", "legacy-campaign!?-`@#$%^&*()=\\\\][}{/.,<>~|"),
        List(
          "gclid" -> "Override",
          "msclkid" -> "Microsoft",
          "dclid" -> "DoubleClick",
          "customclid" -> "Custom"
        )
      )

      val result = CampaignAttributionEnrichment.parse(campaignAttributionEnrichmentJson, schemaKey)
      result must beSuccessful(expected)

    }
  }

  "Parsing a valid user_agent_utils_config enrichment JSON" should {
    "successfully construct a UserAgentUtilsEnrichment case object" in {

      val userAgentUtilsEnrichmentJson = parse("""{
        "enabled": true,
        "parameters": {
        }
      }""")

      val schemaKey = SchemaKey("com.snowplowanalytics.snowplow", "user_agent_utils_config", "jsonschema", "1-0-0")

      val result = UserAgentUtilsEnrichmentConfig.parse(userAgentUtilsEnrichmentJson, schemaKey)
      result must beSuccessful(UserAgentUtilsEnrichment)

    }
  }

  "Parsing a valid ua_parser_config enrichment JSON" should {
    "successfully construct a UaParserEnrichment case object" in {

      val uaParserEnrichmentJson = parse("""{
        "enabled": true,
        "parameters": {
        }
      }""")

      val schemaKey = SchemaKey("com.snowplowanalytics.snowplow", "ua_parser_config", "jsonschema", "1-0-0")

      val result = UaParserEnrichmentConfig.parse(uaParserEnrichmentJson, schemaKey)
      result must beSuccessful(UaParserEnrichment)

    }
  }

  "Parsing a valid currency_convert_config enrichment JSON" should {
    "successfully construct a CurrencyConversionEnrichment case object" in {

      val currencyConversionEnrichmentJson = parse("""{
        "enabled": true,
        "parameters": {
          "accountType": "DEVELOPER",
          "apiKey": "---",
          "baseCurrency": "EUR",
          "rateAt": "EOD_PRIOR"
        }
      }""")

      val schemaKey = SchemaKey("com.snowplowanalytics.snowplow", "currency_conversion_config", "jsonschema", "1-0-0")

      val result = CurrencyConversionEnrichmentConfig.parse(currencyConversionEnrichmentJson, schemaKey)
      result must beSuccessful(CurrencyConversionEnrichment(DeveloperAccount, "---", "EUR", "EOD_PRIOR"))

    }
  }

  "Parsing a valid javascript_script_config enrichment JSON" should {
    "successfully construct a JavascriptScriptEnrichment case class" in {

      val script =
        s"""|function process(event) {
            |  return [];
            |}
            |""".stripMargin

      // The config carries the script base64url-encoded, as the parser expects.
      val javascriptScriptEnrichmentJson = {
        val encoder = new Base64(true)
        val encoded = new String(encoder.encode(script.getBytes)).trim // Newline being appended by some Base64 versions
        parse(s"""{
          "enabled": true,
          "parameters": {
            "script": "${encoded}"
          }
        }""")
      }

      val schemaKey = SchemaKey("com.snowplowanalytics.snowplow", "javascript_script_config", "jsonschema", "1-0-0")

      // val expected = JavascriptScriptEnrichment(JavascriptScriptEnrichmentConfig.compile(script).toOption.get)

      val result = JavascriptScriptEnrichmentConfig.parse(javascriptScriptEnrichmentJson, schemaKey)
      result must beSuccessful // TODO: check the result's contents by evaluating some JavaScript

    }
  }

}
| conversionlogic/snowplow | 3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/enrichments/registry/EnrichmentConfigsSpec.scala | Scala | apache-2.0 | 8,135 |
package io.apibuilder.rewriter
import io.apibuilder.builders.{ApiBuilderServiceBuilders, MultiServiceBuilders}
import io.apibuilder.spec.v0.models.{Operation, Resource}
import org.scalatest.matchers.must.Matchers
import org.scalatest.wordspec.AnyWordSpec
class FilterOperationsRewriterSpec extends AnyWordSpec with Matchers
  with ApiBuilderServiceBuilders
  with MultiServiceBuilders
{

  /** Builds an operation carrying one attribute when a name is supplied, none otherwise. */
  private[this] def op(attributeName: Option[String]): Operation = {
    val attrs = attributeName match {
      case Some(name) => Seq(makeAttribute(name))
      case None => Nil
    }
    makeOperation(attributes = attrs)
  }

  /**
   * Runs FilterOperationsRewriter over a single service containing `resources`,
   * keeping only operations that carry at least one attribute, and returns the
   * resources that survive.
   */
  private[this] def rewrite(resources: Seq[Resource]) = {
    val multiService = makeMultiService(
      makeService(resources = resources)
    )
    val rewriter = FilterOperationsRewriter { operation =>
      if (operation.attributes.isEmpty) None else Some(operation)
    }
    rewriter.rewrite(multiService).services().map(_.service).flatMap(_.resources)
  }

  "operations" must {

    "remove resources when all their operations are filtered" in {
      val survivors = rewrite(
        Seq(makeResource(operations = Seq(op(None))))
      )
      survivors must be(Nil)
    }

    "keeps resources when all their operations are accepted" in {
      rewrite(
        Seq(makeResource(operations = Seq(op(Some(random())))))
      ) match {
        case r :: Nil =>
          r.operations.size must be(1)
        case other => sys.error(s"Expected 1 resource but found ${other.size}")
      }
    }

    "keeps resources when at least one operation has an attribute" in {
      rewrite(
        Seq(makeResource(operations = Seq(op(None), op(Some(random())))))
      ) match {
        case r :: Nil =>
          // Only the attributed operation survives the filter.
          r.operations.size must be(1)
          r.operations.head.attributes.size must be(1)
        case other => sys.error(s"Expected 1 resource but found ${other.size}")
      }
    }
  }
}
| flowcommerce/lib-apidoc-json-validation | src/test/scala/io/apibuilder/rewriter/FilterOperationsRewriterSpec.scala | Scala | mit | 1,926 |
package com.arcusys.learn.liferay.update.version270.slide
import com.arcusys.valamis.persistence.common.DbNameUtils._
import scala.slick.driver.JdbcProfile
trait SlideTableComponent {
protected val driver: JdbcProfile
import driver.simple._
case class SlideSet(id: Option[Long] = None,
title: String,
description: String,
courseId: Long,
logo: Option[String] = None,
isTemplate: Boolean = false,
isSelectedContinuity: Boolean = false,
themeId: Option[Long] = None,
duration: Option[Long] = None,
scoreLimit: Option[Double] = None,
topDownNavigation:Boolean = false)
class SlideSetTable(tag: Tag) extends Table[SlideSet](tag, tblName("SLIDE_SET")) {
def id = column[Long]("ID", O.PrimaryKey, O.AutoInc)
def title = column[String]("TITLE")
def description = column[String]("DESCRIPTION", O.DBType(varCharMax))
def courseId = column[Long]("COURSE_ID")
def logo = column[Option[String]]("LOGO")
def isTemplate = column[Boolean]("IS_TEMPLATE")
def isSelectedContinuity = column[Boolean]("IS_SELECTED_CONTINUITY")
def themeId = column[Option[Long]]("THEME_ID")
def duration = column[Option[Long]]("DURATION")
def scoreLimit = column[Option[Double]]("SCORE_LIMIT")
def topDownNavigation = column[Boolean]("TOP_DOWN_NAVIGATION")
def * = (id.?, title, description, courseId, logo, isTemplate, isSelectedContinuity, themeId, duration, scoreLimit, topDownNavigation) <>(SlideSet.tupled, SlideSet.unapply)
}
case class Slide(id: Option[Long] = None,
title: String,
bgColor: Option[String] = None,
bgImage: Option[String] = None,
font: Option[String] = None,
questionFont: Option[String] = None,
answerFont: Option[String] = None,
answerBg: Option[String] = None,
duration: Option[String] = None,
leftSlideId: Option[Long] = None,
topSlideId: Option[Long] = None,
slideSetId: Long,
statementVerb: Option[String] = None,
statementObject: Option[String] = None,
statementCategoryId: Option[String] = None,
isTemplate: Boolean = false,
isLessonSummary: Boolean = false)
class SlideTable(tag: Tag) extends Table[Slide](tag, tblName("SLIDE")) {
def id = column[Long]("ID", O.PrimaryKey, O.AutoInc)
def title = column[String]("TITLE")
def bgColor = column[Option[String]]("BG_COLOR")
def bgImage = column[Option[String]]("BG_IMAGE")
def font = column[Option[String]]("FONT")
def questionFont = column[Option[String]]("QUESTION_FONT")
def answerFont = column[Option[String]]("ANSWER_FONT")
def answerBg = column[Option[String]]("ANSWER_BG")
def duration = column[Option[String]]("DURATION")
def leftSlideId = column[Option[Long]]("LEFT_SLIDE_ID")
def topSlideId = column[Option[Long]]("TOP_SLIDE_ID")
def slideSetId = column[Long]("SLIDE_SET_ID")
def statementVerb = column[Option[String]]("STATEMENT_VERB")
def statementObject = column[Option[String]]("STATEMENT_OBJECT")
def statementCategoryId = column[Option[String]]("STATEMENT_CATEGORY_ID")
def isTemplate = column[Boolean]("IS_TEMPLATE")
def isLessonSummary = column[Boolean]("IS_LESSON_SUMMARY")
def * = (
id.?,
title,
bgColor,
bgImage,
font,
questionFont,
answerFont,
answerBg,
duration,
leftSlideId,
topSlideId,
slideSetId,
statementVerb,
statementObject,
statementCategoryId,
isTemplate,
isLessonSummary) <>(Slide.tupled, Slide.unapply)
}
case class SlideElement(id: Option[Long] = None,
zIndex: String,
content: String,
slideEntityType: String,
slideId: Long,
correctLinkedSlideId: Option[Long] = None,
incorrectLinkedSlideId: Option[Long] = None,
notifyCorrectAnswer: Option[Boolean] = None)
class SlideElementTable(tag : Tag) extends Table[SlideElement](tag, tblName("SLIDE_ELEMENT")) {
def id = column[Long]("ID", O.PrimaryKey, O.AutoInc)
def zIndex = column[String]("Z_INDEX")
def content = column[String]("CONTENT", O.DBType(varCharMax))
def slideEntityType = column[String]("SLIDE_ENTITY_TYPE")
def slideId = column[Long]("SLIDE_ID")
def correctLinkedSlideId = column[Option[Long]]("CORRECT_LINKED_SLIDE_ID")
def incorrectLinkedSlideId = column[Option[Long]]("INCORRECT_LINKED_SLIDE_ID")
def notifyCorrectAnswer = column[Option[Boolean]]("NOTIFY_CORRECT_ANSWER")
def * = (
id.?,
zIndex,
content,
slideEntityType,
slideId,
correctLinkedSlideId,
incorrectLinkedSlideId,
notifyCorrectAnswer) <>(SlideElement.tupled, SlideElement.unapply)
}
case class SlideElementProperty(slideElementId: Long,
deviceId: Long,
key: String,
value: String)
// Slick mapping for SLIDE_ELEMENT_PROPERTY; keyed by (element, device, key).
class SlideElementPropertyTable(tag : Tag) extends Table[SlideElementProperty](tag, tblName("SLIDE_ELEMENT_PROPERTY")) {
def slideElementId = column[Long]("SLIDE_ELEMENT_ID")
def deviceId = column[Long]("DEVICE_ID")
// Key/value columns are length-capped; true = variable-length (VARCHAR).
def key = column[String]("DATA_KEY", O.Length(254, true))
def value = column[String]("DATA_VALUE", O.Length(254, true))
// Composite primary key: one value per (element, device, key) triple.
def pk = primaryKey("PK_PROPERTY", (slideElementId, deviceId, key))
def * = (slideElementId, deviceId, key, value) <> (SlideElementProperty.tupled, SlideElementProperty.unapply)
}
// Query entry points for the slide-related tables defined in this component.
val slideElements = TableQuery[SlideElementTable]
val slideSets = TableQuery[SlideSetTable]
val slides = TableQuery[SlideTable]
val slideElementProperties = TableQuery[SlideElementPropertyTable]
}
| igor-borisov/valamis | learn-portlet/src/main/scala/com/arcusys/learn/liferay/update/version270/slide/SlideTableComponent.scala | Scala | gpl-3.0 | 6,188 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.dstream
import scala.reflect.ClassTag
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Duration, Time}
import org.apache.spark.streaming.scheduler.Job
/**
* An internal DStream used to represent output operations like DStream.foreachRDD.
* @param parent Parent DStream
* @param foreachFunc Function to apply on each RDD generated by the parent DStream
* @param displayInnerRDDOps Whether the detailed callsites and scopes of the RDDs generated
* by `foreachFunc` will be displayed in the UI; only the scope and
* callsite of `DStream.foreachRDD` will be displayed.
*/
private[streaming]
class ForEachDStream[T: ClassTag] (
parent: DStream[T],
foreachFunc: (RDD[T], Time) => Unit,
displayInnerRDDOps: Boolean
) extends DStream[Unit](parent.ssc) {

/** This output stream depends solely on its parent stream. */
override def dependencies: List[DStream[_]] = List(parent)

/** Slides at the same rate as the parent stream. */
override def slideDuration: Duration = parent.slideDuration

/** Output streams never materialize RDDs of their own. */
override def compute(validTime: Time): Option[RDD[Unit]] = None

/**
 * Builds the output job for `time`: wraps `foreachFunc` applied to the
 * parent's RDD in a Job, or returns None when the parent produced no RDD
 * for that batch time.
 */
override def generateJob(time: Time): Option[Job] = {
parent.getOrCompute(time).map { rdd =>
val jobFunc = () => createRDDWithLocalProperties(time, displayInnerRDDOps) {
foreachFunc(rdd, time)
}
new Job(time, jobFunc)
}
}
}
| bravo-zhang/spark | streaming/src/main/scala/org/apache/spark/streaming/dstream/ForEachDStream.scala | Scala | apache-2.0 | 2,198 |
package io.reactors
package remote
import io.reactors.common.Cell
import io.reactors.marshal.Marshalee
import io.reactors.test._
import org.scalacheck.Prop.forAllNoShrink
import org.scalacheck.Properties
import org.scalatest.FunSuite
import scala.collection._
/**
 * Round-trip tests for `RuntimeMarshaler`: each test serializes a value into
 * a streaming `DataBuffer`, deserializes it back, and asserts that the
 * decoded object matches the input — including, for cyclic graphs, that
 * internal references are correctly re-aliased.
 *
 * Fixes: the "object with an array" tests previously asserted on the INPUT
 * object's array, and the self-referencing-array test read `.array` on an
 * `Array[AnyRef]`; both now verify the unmarshalled result.
 */
class RuntimeMarshalerTest extends FunSuite {
  test("marshal empty non-final class") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(new NonFinalEmpty, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[NonFinalEmpty](buffer)
    assert(obj.isInstanceOf[NonFinalEmpty])
  }

  test("marshal empty final class") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(new FinalEmpty, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[FinalEmpty](buffer)
    assert(obj.isInstanceOf[FinalEmpty])
  }

  test("marshal single integer field non-final class") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(new NonFinalSingleInt(15), buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[NonFinalSingleInt](buffer)
    assert(obj.x == 15)
  }

  test("marshal single integer field final class") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(new FinalSingleInt(15), buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[FinalSingleInt](buffer)
    assert(obj.x == 15)
  }

  test("marshal single long field class") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(new SingleLong(15), buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[SingleLong](buffer)
    assert(obj.x == 15)
  }

  // NOTE(review): the "buffer is small" variants use the same 128-byte buffer
  // as the normal variants — presumably they were meant to use a smaller one
  // to exercise chunk boundaries; confirm before changing.
  test("marshal single int field class, when buffer is small") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(new FinalSingleInt(15), buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[FinalSingleInt](buffer)
    assert(obj.x == 15)
  }

  test("marshal single long field class, when buffer is small") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(new SingleLong(15), buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[SingleLong](buffer)
    assert(obj.x == 15)
  }

  test("marshal single double field class") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(new SingleDouble(15.0), buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[SingleDouble](buffer)
    assert(obj.x == 15.0)
  }

  test("marshal single double field class, when buffer is small") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(new SingleDouble(15.0), buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[SingleDouble](buffer)
    assert(obj.x == 15.0)
  }

  test("marshal single float field class") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(new SingleFloat(15.0f), buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[SingleFloat](buffer)
    assert(obj.x == 15.0f)
  }

  test("marshal single float field class, when buffer is small") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(new SingleFloat(15.0f), buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[SingleFloat](buffer)
    assert(obj.x == 15.0f)
  }

  test("marshal single byte field class") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(new SingleByte(7), buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[SingleByte](buffer)
    assert(obj.x == 7)
  }

  test("marshal single boolean field class") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(new SingleBoolean(true), buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[SingleBoolean](buffer)
    assert(obj.x == true)
  }

  test("marshal single char field class") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(new SingleChar('a'), buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[SingleChar](buffer)
    assert(obj.x == 'a')
  }

  test("marshal single short field class") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(new SingleShort(17), buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[SingleShort](buffer)
    assert(obj.x == 17)
  }

  test("marshal mixed primitive field class") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(new MixedPrimitives(17, 9, 2.1, true, 8.11f, 'd'), buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[MixedPrimitives](buffer)
    assert(obj.x == 17)
    assert(obj.y == 9)
    assert(obj.z == 2.1)
    assert(obj.b == true)
    assert(obj.f == 8.11f)
    assert(obj.c == 'd')
  }

  test("marshal object with a final class object field") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(new FinalClassObject(new FinalSingleInt(17)), buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[FinalClassObject](buffer)
    assert(obj.inner.x == 17)
  }

  test("marshal recursive object") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(
      new RecursiveObject(7, new RecursiveObject(5, null)), buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[RecursiveObject](buffer)
    assert(obj.x == 7 && obj.tail.x == 5 && obj.tail.tail == null)
  }

  test("marshal null") {
    val buffer = DataBuffer.streaming(128)
    RuntimeMarshaler.marshal(null, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[AnyRef](buffer)
    assert(obj == null)
  }

  test("marshal a cyclic object") {
    val buffer = DataBuffer.streaming(128)
    val cyclic = new RecursiveObject(7, null)
    cyclic.tail = cyclic
    RuntimeMarshaler.marshal(cyclic, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[RecursiveObject](buffer)
    // The self-reference must be restored to point at the decoded object.
    assert(obj.tail eq obj)
    assert(obj.x == 7)
  }

  test("marshal a cyclic pair of objects") {
    val buffer = DataBuffer.streaming(128)
    val a = new RecursiveObject(7, null)
    val b = new RecursiveObject(11, null)
    a.tail = b
    b.tail = a
    RuntimeMarshaler.marshal(a, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[RecursiveObject](buffer)
    assert(obj.x == 7)
    assert(obj.tail.x == 11)
    assert(obj.tail.tail eq obj)
  }

  test("marshal an inherited class") {
    val buffer = DataBuffer.streaming(128)
    val obj = new InheritedClass(17, 11)
    RuntimeMarshaler.marshal(obj, buffer)
    println(buffer.input.byteString)
    val result = RuntimeMarshaler.unmarshal[InheritedClass](buffer)
    assert(result.y == 17)
    assert(result.x == 11)
  }

  test("marshal an object pair") {
    val buffer = DataBuffer.streaming(128)
    val pair = new CyclicObjectPair(7,
      new CyclicObjectPair(11, null, null),
      new CyclicObjectPair(17, null, null)
    )
    pair.o1.o1 = pair
    pair.o2.o2 = pair
    RuntimeMarshaler.marshal(pair, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[CyclicObjectPair](buffer)
    assert(obj.x == 7)
    assert(obj.o1.x == 11)
    assert(obj.o2.x == 17)
    assert(obj.o1.o1 == obj)
    assert(obj.o2.o2 == obj)
  }

  test("marshal an object with an array") {
    val buffer = DataBuffer.streaming(128)
    val input = new ArrayObject(10)
    for (i <- 0 until 10) input.array(i) = i + 11
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[ArrayObject](buffer)
    assert(obj.array != null)
    // Fix: verify the unmarshalled copy, not the input.
    for (i <- 0 until 10) assert(obj.array(i) == i + 11)
  }

  test("marshal an object with a big array") {
    val buffer = DataBuffer.streaming(128)
    val input = new ArrayObject(256)
    for (i <- 0 until 256) input.array(i) = i + 17
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[ArrayObject](buffer)
    assert(obj.array != null)
    // Fix: verify the unmarshalled copy, not the input.
    for (i <- 0 until 256) assert(obj.array(i) == i + 17)
  }

  test("marshal an object with a null array") {
    val buffer = DataBuffer.streaming(128)
    val input = new VarArrayObject(null)
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[VarArrayObject](buffer)
    assert(obj.array == null)
  }

  test("marshal an int array") {
    val buffer = DataBuffer.streaming(128)
    val input = new Array[Int](10)
    for (i <- 0 until 10) input(i) = i + 3
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[Array[Int]](buffer)
    assert(obj.length == 10)
    for (i <- 0 until 10) assert(obj(i) == i + 3)
  }

  test("marshal a big int array") {
    val buffer = DataBuffer.streaming(128)
    val input = new Array[Int](256)
    for (i <- 0 until 256) input(i) = i + 3
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[Array[Int]](buffer)
    assert(obj.length == 256)
    for (i <- 0 until 256) assert(obj(i) == i + 3)
  }

  test("marshal a long array") {
    val buffer = DataBuffer.streaming(128)
    val input = new Array[Long](256)
    for (i <- 0 until 256) input(i) = i + 3
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[Array[Long]](buffer)
    assert(obj.length == 256)
    for (i <- 0 until 256) assert(obj(i) == i + 3)
  }

  test("marshal an object with a long array") {
    val buffer = DataBuffer.streaming(128)
    val input = new LongArrayObject(256)
    for (i <- 0 until 256) input.array(i) = i + 3
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[LongArrayObject](buffer)
    assert(obj.array.length == 256)
    for (i <- 0 until 256) assert(obj.array(i) == i + 3)
  }

  test("marshal an object with a double array") {
    val buffer = DataBuffer.streaming(128)
    val input = new DoubleArrayObject(256)
    for (i <- 0 until 256) input.array(i) = i + 3.5
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[DoubleArrayObject](buffer)
    assert(obj.array.length == 256)
    for (i <- 0 until 256) assert(obj.array(i) == i + 3.5)
  }

  test("marshal an object with a float array") {
    val buffer = DataBuffer.streaming(128)
    val input = new FloatArrayObject(256)
    for (i <- 0 until 256) input.array(i) = i + 3.5f
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[FloatArrayObject](buffer)
    assert(obj.array.length == 256)
    for (i <- 0 until 256) assert(obj.array(i) == i + 3.5f)
  }

  test("marshal an object with a byte array") {
    val buffer = DataBuffer.streaming(128)
    val input = new ByteArrayObject(256)
    for (i <- 0 until 256) input.array(i) = (i + 3).toByte
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[ByteArrayObject](buffer)
    assert(obj.array.length == 256)
    for (i <- 0 until 256) assert(obj.array(i) == (i + 3).toByte)
  }

  test("marshal an object with a boolean array") {
    val buffer = DataBuffer.streaming(128)
    val input = new BooleanArrayObject(256)
    for (i <- 0 until 256) input.array(i) = i % 3 != 0
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[BooleanArrayObject](buffer)
    assert(obj.array.length == 256)
    for (i <- 0 until 256) assert(obj.array(i) == (i % 3 != 0))
  }

  test("marshal an object with a char array") {
    val buffer = DataBuffer.streaming(128)
    val input = new CharArrayObject(256)
    for (i <- 0 until 256) input.array(i) = i.toChar
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[CharArrayObject](buffer)
    assert(obj.array.length == 256)
    for (i <- 0 until 256) assert(obj.array(i) == i.toChar)
  }

  test("marshal an object with a short array") {
    val buffer = DataBuffer.streaming(128)
    val input = new ShortArrayObject(256)
    for (i <- 0 until 256) input.array(i) = i.toShort
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[ShortArrayObject](buffer)
    assert(obj.array.length == 256)
    for (i <- 0 until 256) assert(obj.array(i) == i.toShort)
  }

  test("marshal an object with a object array") {
    val buffer = DataBuffer.streaming(128)
    val input = new ObjectArrayObject(256)
    for (i <- 0 until 256) input.array(i) = new SingleLong(i)
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[ObjectArrayObject](buffer)
    assert(obj.array.length == 256)
    for (i <- 0 until 256) assert(obj.array(i).x == i, s"$i == ${obj.array(i)}")
  }

  test("marshal an object with a final object array") {
    val buffer = DataBuffer.streaming(128)
    val input = new FinalObjectArrayObject(256)
    for (i <- 0 until 256) input.array(i) = new FinalSingleInt(i)
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[FinalObjectArrayObject](buffer)
    assert(obj.array.length == 256)
    for (i <- 0 until 256) assert(obj.array(i).x == i, s"$i == ${obj.array(i)}")
  }

  test("marshal an array of repeated and null objects") {
    val buffer = DataBuffer.streaming(128)
    val input = new Array[AnyRef](256)
    // Mix of nulls, aliases of earlier slots, and two distinct classes.
    for (i <- 0 until 256) input(i) = i match {
      case i if i % 5 == 0 => null
      case i if i % 6 == 0 => input(i - 5)
      case i if i % 11 == 0 => new SingleLong(i)
      case _ => new FinalSingleInt(i)
    }
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val array = RuntimeMarshaler.unmarshal[Array[AnyRef]](buffer)
    assert(array.length == 256)
    for (i <- 0 until 256) i match {
      case i if i % 5 == 0 =>
        assert(array(i) == null)
      case i if i % 6 == 0 =>
        // Alias slots must decode to the same decoded object as their source.
        assert(array(i) eq array(i - 5))
        input(i) match {
          case null =>
            assert(array(i) == null)
          case obj: FinalSingleInt =>
            assert(array(i).asInstanceOf[FinalSingleInt].x == obj.x)
          case obj: SingleLong =>
            assert(array(i).asInstanceOf[SingleLong].x == obj.x)
        }
      case i if i % 11 == 0 =>
        assert(array(i).isInstanceOf[SingleLong])
        assert(array(i).asInstanceOf[SingleLong].x == i)
      case _ =>
        assert(array(i).asInstanceOf[FinalSingleInt].x == i)
    }
  }

  test("marshal an array pointing to itself") {
    val buffer = DataBuffer.streaming(128)
    val input = new Array[AnyRef](256)
    for (i <- 0 until 256) input(i) = input
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[Array[AnyRef]](buffer)
    // Fix: `obj` is itself the array — check its length directly.
    assert(obj.length == 256)
    for (i <- 0 until 256) assert(obj(i) eq obj)
  }

  test("marshal an array buffer") {
    val buffer = DataBuffer.streaming(128)
    val input = mutable.ArrayBuffer[Int]()
    for (i <- 0 until 128) input += i
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[mutable.ArrayBuffer[Int]](buffer)
    assert(obj.length == 128)
    for (i <- 0 until 128) assert(obj(i) == i)
  }

  test("marshal a list") {
    val buffer = DataBuffer.streaming(128)
    val input = (0 until 100).toList
    RuntimeMarshaler.marshal(input, buffer)
    println(buffer.input.byteString)
    val obj = RuntimeMarshaler.unmarshal[List[Int]](buffer)
    assert(obj.length == 100)
    for (i <- 0 until 100) assert(obj(i) == i)
  }
}
// ---------------------------------------------------------------------------
// Marshalling fixtures: minimal classes covering every primitive field type,
// final vs non-final classes, inheritance, cyclic references, and arrays of
// each element kind.
// ---------------------------------------------------------------------------
class NonFinalEmpty extends Marshalee
final class FinalEmpty extends Marshalee
class NonFinalSingleInt(val x: Int) extends Marshalee
final class FinalSingleInt(val x: Int) extends Marshalee
class SingleLong(val x: Long) extends Marshalee
class SingleDouble(val x: Double) extends Marshalee
class SingleFloat(val x: Float) extends Marshalee
class SingleByte(val x: Byte) extends Marshalee
class SingleBoolean(val x: Boolean) extends Marshalee
class SingleChar(val x: Char) extends Marshalee
class SingleShort(val x: Short) extends Marshalee
// Mix of val/var primitive fields in a single object.
class MixedPrimitives(
val x: Int, var y: Short, val z: Double, val b: Boolean, val f: Float, val c: Char
) extends Marshalee
class FinalClassObject(val inner: FinalSingleInt) extends Marshalee
// `tail` is a var so tests can build cyclic object graphs.
class RecursiveObject(val x: Int, var tail: RecursiveObject) extends Marshalee
class BaseClass(val x: Int) extends Marshalee
class InheritedClass(val y: Int, px: Int) extends BaseClass(px) with Marshalee
// Both links are vars so tests can wire mutual cycles after construction.
class CyclicObjectPair(val x: Int, var o1: CyclicObjectPair, var o2: CyclicObjectPair)
extends Marshalee
class ArrayObject(length: Int) extends Marshalee {
val array = new Array[Int](length)
}
class VarArrayObject(var array: Array[Int]) extends Marshalee
class LongArrayObject(length: Int) extends Marshalee {
val array = new Array[Long](length)
}
class DoubleArrayObject(length: Int) extends Marshalee {
val array = new Array[Double](length)
}
class FloatArrayObject(length: Int) extends Marshalee {
val array = new Array[Float](length)
}
class ByteArrayObject(length: Int) extends Marshalee {
val array = new Array[Byte](length)
}
class BooleanArrayObject(length: Int) extends Marshalee {
val array = new Array[Boolean](length)
}
class CharArrayObject(length: Int) extends Marshalee {
val array = new Array[Char](length)
}
class ShortArrayObject(length: Int) extends Marshalee {
val array = new Array[Short](length)
}
class ObjectArrayObject(length: Int) extends Marshalee {
val array = new Array[SingleLong](length)
}
class FinalObjectArrayObject(length: Int) extends Marshalee {
val array = new Array[FinalSingleInt](length)
}
// Immutable cons-style list used by the property-based linked-list check.
class LinkedList(val head: Int, val tail: LinkedList) extends Marshalee
/**
 * Property-based round-trip checks for `RuntimeMarshaler` over generated
 * sizes and depths.
 *
 * Fix: the "integer arrays", "object arrays" and "circular arrays"
 * properties previously asserted on the INPUT array instead of the
 * unmarshalled `result`, so the round-trip content was never verified.
 */
class RuntimeMarshalerCheck
extends Properties("RuntimeMarshaler") with ExtendedProperties {
  // Deterministic generators for collection sizes and tree depths.
  val sizes = detChoose(0, 1000)
  val smallSizes = detChoose(0, 100)
  val depths = detChoose(0, 12)

  property("integer arrays") = forAllNoShrink(sizes) { size =>
    stackTraced {
      val buffer = DataBuffer.streaming(128)
      val array = new Array[Int](size)
      for (i <- 0 until size) array(i) = i
      RuntimeMarshaler.marshal(array, buffer)
      val result = RuntimeMarshaler.unmarshal[Array[Int]](buffer)
      assert(result.length == size)
      // Fix: verify the decoded copy, not the input.
      for (i <- 0 until size) assert(result(i) == i)
      true
    }
  }

  property("object arrays") = forAllNoShrink(sizes) { size =>
    stackTraced {
      val buffer = DataBuffer.streaming(128)
      val array = new Array[AnyRef](size)
      for (i <- 0 until size) array(i) = i.toString
      RuntimeMarshaler.marshal(array, buffer)
      val result = RuntimeMarshaler.unmarshal[Array[AnyRef]](buffer)
      assert(result.length == size)
      // Fix: verify the decoded copy, not the input.
      for (i <- 0 until size) assert(result(i) == i.toString)
      true
    }
  }

  property("circular arrays") = forAllNoShrink(sizes) { size =>
    stackTraced {
      val buffer = DataBuffer.streaming(128)
      val array = new Array[AnyRef](size)
      for (i <- 0 until size) {
        if (i % 2 == 0) array(i) = array
        else array(i) = new Array[Int](0)
      }
      RuntimeMarshaler.marshal(array, buffer)
      val result = RuntimeMarshaler.unmarshal[Array[AnyRef]](buffer)
      assert(result.length == size)
      for (i <- 0 until size) {
        // Fix: self-references must resolve to the DECODED array, and the
        // empty arrays must be checked on the decoded side too.
        if (i % 2 == 0) assert(result(i) eq result)
        else assert(result(i).asInstanceOf[Array[Int]].length == 0)
      }
      true
    }
  }

  property("linked lists") = forAllNoShrink(smallSizes) { size =>
    stackTraced {
      val buffer = DataBuffer.streaming(128)
      var list: LinkedList = null
      for (i <- 0 until size) list = new LinkedList(i, list)
      RuntimeMarshaler.marshal(list, buffer)
      var result = RuntimeMarshaler.unmarshal[LinkedList](buffer)
      for (i <- (0 until size).reverse) {
        assert(result.head == i)
        result = result.tail
      }
      // After consuming `size` nodes the decoded list must be exhausted.
      assert(result == null, s"expected end of list, got $result")
      true
    }
  }

  property("trees") = forAllNoShrink(depths) { maxDepth =>
    stackTraced {
      val buffer = DataBuffer.streaming(128)
      // A node is an Array[AnyRef](3): (left, right, label).
      val root = new Array[AnyRef](3)
      def generate(node: Array[AnyRef], depth: Int): Unit = if (depth < maxDepth) {
        val left = new Array[AnyRef](3)
        val right = new Array[AnyRef](3)
        left(2) = depth.toString
        right(2) = depth.toString
        node(0) = left
        node(1) = right
        generate(left, depth + 1)
        generate(right, depth + 1)
      }
      generate(root, 0)
      RuntimeMarshaler.marshal(root, buffer)
      val result = RuntimeMarshaler.unmarshal[Array[AnyRef]](buffer)
      // Structural comparison of the original and decoded trees.
      def compare(before: Array[AnyRef], after: Array[AnyRef]): Unit = {
        if (before == null) assert(after == null)
        else {
          def asNode(x: AnyRef) = x.asInstanceOf[Array[AnyRef]]
          assert(before(2) == after(2))
          compare(asNode(before(0)), asNode(after(0)))
          compare(asNode(before(1)), asNode(after(1)))
        }
      }
      compare(root, result)
      true
    }
  }

  property("array buffers") = forAllNoShrink(sizes) { size =>
    stackTraced {
      val buffer = DataBuffer.streaming(128)
      val arrayBuffer = mutable.ArrayBuffer[Int]()
      for (i <- 0 until size) arrayBuffer += i
      RuntimeMarshaler.marshal(arrayBuffer, buffer)
      val result = RuntimeMarshaler.unmarshal[mutable.ArrayBuffer[Int]](buffer)
      assert(result.length == size, s"${result.length}, expected $size")
      for (i <- 0 until size) assert(result(i) == arrayBuffer(i))
      true
    }
  }

  property("hash tries") = forAllNoShrink(sizes) { size =>
    stackTraced {
      val buffer = DataBuffer.streaming(128)
      var map = immutable.HashMap[Int, String]()
      for (i <- 0 until size) map += i -> i.toString
      RuntimeMarshaler.marshal(map, buffer)
      val result = RuntimeMarshaler.unmarshal[immutable.HashMap[Int, String]](buffer)
      assert(result.size == size, s"${result.size}, expected $size")
      for (i <- 0 until size) assert(result(i) == i.toString)
      true
    }
  }
}
| storm-enroute/reactors | reactors-remote/jvm/src/test/scala/io/reactors/remote/runtime-marshaler-tests.scala | Scala | bsd-3-clause | 22,826 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.validation
import uk.gov.hmrc.ct.accounts.frs10x.retriever.Frs10xDirectorsBoxRetriever
import uk.gov.hmrc.ct.box.retriever.FilingAttributesBoxValueRetriever
trait DirectorsReportEnabledCalculator {

/**
 * Decides whether the directors' report section is enabled.
 *
 * For a Companies-House-only filing the answer to the CoHo directors'
 * report question (AC8021) decides. In every other case the report is
 * enabled unless this is a micro-entity filing whose HMRC directors'
 * report question (AC8023) was not answered "yes".
 */
def calculateDirectorsReportEnabled(boxRetriever: Frs10xDirectorsBoxRetriever with FilingAttributesBoxValueRetriever): Boolean = {
val isCoHoFiling = boxRetriever.companiesHouseFiling().value
val isHmrcFiling = boxRetriever.hmrcFiling().value
val isMicroEntityFiling = boxRetriever.microEntityFiling().value
val answeredYesToCoHoDirectorsReportQuestion = boxRetriever.ac8021().orFalse
val answeredYesToHmrcDirectorsReportQuestion = boxRetriever.ac8023().orFalse
if (isCoHoFiling && !isHmrcFiling) {
answeredYesToCoHoDirectorsReportQuestion
} else {
!isMicroEntityFiling || answeredYesToHmrcDirectorsReportQuestion
}
}
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/validation/DirectorsReportEnabledCalculator.scala | Scala | apache-2.0 | 1,532 |
package notebook.kernel.pfork
import java.io.{EOFException, File, ObjectInputStream, ObjectOutputStream}
import java.net._
import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicBoolean
import com.typesafe.config.Config
import org.apache.commons.exec._
import org.apache.commons.exec.util.StringUtils
import org.apache.log4j.PropertyConfigurator
import org.slf4j.LoggerFactory
import play.api.{Logger, Play}
import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer
import scala.concurrent._
import scala.concurrent.duration.Duration
// Contract implemented by classes hosted inside the forked child JVM.
trait ForkableProcess {
/**
 * Called in the remote VM. Can return any useful information to the server through the return
 * @param args command-line arguments forwarded from the parent (after the bookkeeping args)
 * @return a string sent back to the parent over the callback socket
 */
def init(args: Seq[String]): String

/** Blocks until the hosted process decides it is finished; the child VM then exits. */
def waitForExit()
}
/**
* I am so sick of this being a thing that gets implemented everywhere. Let's abstract.
*/
/**
 * Forks a child JVM hosting an instance of `A` and exchanges a handshake
 * with it over a loopback socket. All tunables (heap, stack, permgen, debug
 * port, extra VM args, classpath) are read from the given Typesafe `config`,
 * falling back to defaults when absent.
 */
class BetterFork[A <: ForkableProcess : reflect.ClassTag](config: Config,
executionContext: ExecutionContext, customArgs:Option[List[String]]) {
// Execution context used for the asynchronous spawn in `execute`.
private implicit val ec = executionContext
import BetterFork._
// Concrete class of the hosted process; its name is passed to the child VM.
val processClass = (implicitly[reflect.ClassTag[A]]).runtimeClass
// Working directory of the child process ("." when not configured).
def workingDirectory = new File(if (config.hasPath("wd")) config.getString("wd") else ".")
// Memory settings in bytes; a negative value means "not set" and is skipped
// when the JVM arguments are assembled below.
def heap: Long = if (config.hasPath("heap")) config.getBytes("heap") else defaultHeap
def stack: Long = if (config.hasPath("stack")) config.getBytes("stack") else -1
def permGen: Long = if (config.hasPath("permGen")) config.getBytes("permGen") else -1
def reservedCodeCache: Long = if (config.hasPath("reservedCodeCache")) config.getBytes("reservedCodeCache") else -1
def server: Boolean = true
// Optional JDWP debug port for the child VM.
def debugPort: Option[Int] = if (config.hasPath("debug.port")) Some(config.getInt("debug.port")) else None
def logLevel: String = if (config.hasPath("log.level")) config.getString("log.level") else "info"
def vmArgs: List[String] = if (config.hasPath("vmArgs")) config.getStringList("vmArgs").toList else Nil
// Classpath entries inherited from well-known environment variables.
def classPathEnv = Array(
sys.env.get("YARN_CONF_DIR"),
sys.env.get("HADOOP_CONF_DIR"),
sys.env.get("EXTRA_CLASSPATH")
).collect { case Some(x) => x }
def classPath: IndexedSeq[String] =
if (config.hasPath("classpath")) config.getStringList("classpath").toList.toVector else Vector.empty[String]
// Full child classpath: parent classloader URLs + configured + env entries.
def classPathString = (defaultClassPath ++ classPath ++ classPathEnv).mkString(File.pathSeparator)
// Assembles the child VM's command-line options from the settings above.
def jvmArgs = {
val builder = IndexedSeq.newBuilder[String]
// Appends `prefix + value` only when the setting is present (non-negative).
def ifNonNeg(value: Long, prefix: String) {
if (value >= 0) {
builder += (prefix + value)
}
}
ifNonNeg(heap, "-Xmx")
ifNonNeg(stack, "-Xss")
ifNonNeg(permGen, "-XX:MaxPermSize=")
ifNonNeg(reservedCodeCache, "-XX:ReservedCodeCacheSize=")
if (server) builder += "-server"
debugPort.foreach { p =>
builder ++= IndexedSeq("-Xdebug", "-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=" + p)
}
builder ++= vmArgs
customArgs.foreach(as => builder ++= as)
builder.result()
}
implicit protected def int2SuffixOps(i: Int) = new SuffixOps(i)
// Convenience byte-size suffixes, e.g. `512.m` for 512 MiB.
protected final class SuffixOps(i: Int) {
def k: Long = i.toLong << 10
def m: Long = i.toLong << 20
def g: Long = i.toLong << 30
}
/**
 * Spawns the child JVM running `processClass` and asynchronously returns a
 * handle exposing the child's init message, a kill switch, and a future
 * completed with the child's exit code.
 */
def execute(args: String*): Future[ProcessInfo] = {
/* DK: Bi-directional liveness can be detected via redirected System.in (child), System.out (parent), avoids need for socket... */
// Port 0 lets the OS pick a free port; the child connects back to it.
val ss = new ServerSocket(0)
val cmd = new CommandLine(javaHome + "/bin/java")
.addArguments(jvmArgs.toArray)
.addArgument(classOf[ChildProcessMain].getName)
.addArgument(processClass.getName)
.addArgument(ss.getLocalPort.toString)
.addArgument(logLevel)
.addArguments(args.toArray)
Future {
log.info("Spawning %s".format(cmd.toString))
// use environment because classpaths can be longer here than as a command line arg
val environment = System.getenv + ("CLASSPATH" -> classPathString)
val exec = new KillableExecutor
val completion = Promise[Int]()
exec.setWorkingDirectory(workingDirectory)
exec.execute(cmd, environment, new ExecuteResultHandler {
Logger.info(s"Spawning $cmd")
Logger.trace(s"With Env $environment")
Logger.info(s"In working directory $workingDirectory")
def onProcessFailed(e: ExecuteException) {
Logger.error(e.getMessage)
e.printStackTrace()
}
def onProcessComplete(exitValue: Int) {
completion.success(exitValue)
}
})
// Blocks until the child connects; the socket is retained in
// `serverSockets` so it is not garbage-collected.
val socket = ss.accept()
serverSockets += socket
try {
val ois = new ObjectInputStream(socket.getInputStream)
// The first object sent by the child is ForkableProcess.init's result.
val resp = ois.readObject().asInstanceOf[String]
new ProcessInfo(() => exec.kill(), resp, completion.future)
} catch {
case ex: SocketException => throw new ExecuteException("Failed to start process %s".format(cmd), 1, ex)
case ex: EOFException => throw new ExecuteException("Failed to start process %s".format(cmd), 1, ex)
}
}
}
}
/**
 * Handle on a spawned child process.
 * @param killer callback that forcibly terminates the child
 * @param initReturn string returned by the child's `ForkableProcess.init`
 * @param completion future completed with the child's exit code
 */
class ProcessInfo(killer: () => Unit, val initReturn: String, val completion: Future[Int]) {
// Forcibly terminates the child process.
def kill() {
killer()
}
}
object BetterFork {
// Keeps server sockets around so they are not GC'd
private val serverSockets = new ListBuffer[Socket]()
// Walks the classloader chain collecting URL classpath entries.
// NEEDED WHEN running in SBT/Play, where the system classpath alone is not
// enough to reconstruct the application classpath for the child VM.
def defaultClassPath: IndexedSeq[String] = {
def urls(cl: ClassLoader, acc: IndexedSeq[String] = IndexedSeq.empty): IndexedSeq[String] = {
if (cl != null) {
val us = if (!cl.isInstanceOf[URLClassLoader]) {
//println(" ----- ")
//println(cl.getClass.getSimpleName)
acc
} else {
acc ++ (cl.asInstanceOf[URLClassLoader].getURLs map { u =>
val f = new File(u.getFile)
URLDecoder.decode(f.getAbsolutePath, "UTF8")
})
}
urls(cl.getParent, us)
} else {
acc
}
}
val loader = Play.current.classloader
// logback-classic is excluded so the child can use its own log4j setup.
val gurls = urls(loader).distinct.filter(!_.contains("logback-classic"))
gurls
}
// Default child heap size: same maximum as the current (parent) JVM.
def defaultHeap = Runtime.getRuntime.maxMemory
/* Override to expose ability to forcibly kill the process */
private class KillableExecutor extends DefaultExecutor {
val killed = new AtomicBoolean(false)
setWatchdog(new ExecuteWatchdog(ExecuteWatchdog.INFINITE_TIMEOUT) {
// If kill() raced ahead of process start, destroy the process immediately.
override def start(p: Process) {
if (killed.get()) p.destroy()
}
})
def kill() {
if (killed.compareAndSet(false, true))
Option(getExecutorThread) foreach (_.interrupt())
}
}
private lazy val javaHome = System.getProperty("java.home")
private lazy val log = LoggerFactory.getLogger(getClass)
// Entry point executed inside the CHILD JVM (invoked via ChildProcessMain).
// args: 0 = hosted class name, 1 = parent callback port, 2 = log level,
// 3 = kernel id, 4 = notebook path; the rest is passed to the hosted class.
private[pfork] def main(args: Array[String]) {
val className = args(0)
val parentPort = args(1).toInt
val logLevel = args(2)
val kernelId = args(3)
val path = args(4)
val remainingArgs = args.drop(5).toIndexedSeq
// Configure per-session log4j output before anything else logs.
val propLog = new java.util.Properties()
propLog.load(getClass().getResourceAsStream("/log4j.subprocess.properties"))
// Sanitize the notebook path so it can be embedded in a log file name.
val cleanPath = path.replaceAll("/", "\\\\\\\\").replaceAll("\\"", "").replaceAll("'", "")
propLog.setProperty("log4j.appender.rolling.File", s"logs/sn-session-$kernelId-$cleanPath.log")
propLog.setProperty("log4j.rootLogger", s"$logLevel, rolling")
PropertyConfigurator.configure(propLog)
log.info("Remote process starting")
// Connect back to the parent and report the hosted class's init result.
val socket = new Socket("127.0.0.1", parentPort)
val hostedClass = Class.forName(className).newInstance().asInstanceOf[ForkableProcess]
val result = hostedClass.init(remainingArgs)
val oos = new ObjectOutputStream(socket.getOutputStream)
oos.writeObject(result)
oos.flush()
val executorService = Executors.newFixedThreadPool(10)
implicit val ec = ExecutionContext.fromExecutorService(executorService)
// Exit when either the parent closes its end of the socket (read returns)
// or the hosted process signals completion — whichever happens first.
val parentDone = Future {
socket.getInputStream.read()
}
val localDone = Future {
hostedClass.waitForExit()
}
val done = Future.firstCompletedOf(Seq(parentDone, localDone))
try {
Await.result(done, Duration.Inf)
} finally {
log.warn("Parent process stopped; exiting.")
sys.exit(0)
}
}
}
| radek1st/spark-notebook | modules/subprocess/src/main/scala/notebook/kernel/pfork/BetterFork.scala | Scala | apache-2.0 | 8,413 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.yarn
import java.util.concurrent.{Executor, ScheduledExecutorService}
import akka.actor.ActorRef
import org.apache.flink.configuration.Configuration
import org.apache.flink.runtime.blob.BlobServer
import org.apache.flink.runtime.checkpoint.CheckpointRecoveryFactory
import org.apache.flink.runtime.execution.librarycache.BlobLibraryCacheManager
import org.apache.flink.runtime.executiongraph.restart.RestartStrategyFactory
import org.apache.flink.runtime.instance.InstanceManager
import org.apache.flink.runtime.jobmanager.SubmittedJobGraphStore
import org.apache.flink.runtime.jobmanager.scheduler.Scheduler
import org.apache.flink.runtime.leaderelection.LeaderElectionService
import org.apache.flink.runtime.metrics.groups.JobManagerMetricGroup
import org.apache.flink.runtime.testingUtils.TestingJobManagerLike
import scala.concurrent.duration.FiniteDuration
/** [[YarnJobManager]] implementation which mixes in the [[TestingJobManagerLike]] mixin.
 *
 * This actor class is used for testing purposes on Yarn. Here we use an explicit class definition
 * instead of an anonymous class with the respective mixin to obtain a more readable logger name.
 *
 * @param flinkConfiguration Configuration object for the actor
 * @param futureExecutor Execution context which is used to execute concurrent tasks in the
 *                       [[org.apache.flink.runtime.executiongraph.ExecutionGraph]]
 * @param ioExecutor for blocking io operations
 * @param instanceManager Instance manager to manage the registered
 *                        [[org.apache.flink.runtime.taskmanager.TaskManager]]
 * @param scheduler Scheduler to schedule Flink jobs
 * @param blobServer BLOB server serving uploaded job artifacts
 * @param libraryCacheManager Manager to manage uploaded jar files
 * @param archive Archive for finished Flink jobs
 * @param restartStrategyFactory Default restart strategy for job restarts
 * @param timeout Timeout for futures
 * @param leaderElectionService LeaderElectionService to participate in the leader election
 * @param submittedJobGraphs Store of submitted job graphs, used for job recovery
 * @param checkpointRecoveryFactory Factory for the checkpoint recovery components
 * @param jobRecoveryTimeout Timeout used when recovering submitted jobs
 * @param jobManagerMetricGroup Metric group of this JobManager
 * @param optRestAddress Optional address of the REST endpoint, if one is running
 */
class TestingYarnJobManager(
    flinkConfiguration: Configuration,
    futureExecutor: ScheduledExecutorService,
    ioExecutor: Executor,
    instanceManager: InstanceManager,
    scheduler: Scheduler,
    blobServer: BlobServer,
    libraryCacheManager: BlobLibraryCacheManager,
    archive: ActorRef,
    restartStrategyFactory: RestartStrategyFactory,
    timeout: FiniteDuration,
    leaderElectionService: LeaderElectionService,
    submittedJobGraphs : SubmittedJobGraphStore,
    checkpointRecoveryFactory : CheckpointRecoveryFactory,
    jobRecoveryTimeout: FiniteDuration,
    jobManagerMetricGroup : JobManagerMetricGroup,
    optRestAddress: Option[String])
  extends YarnJobManager(
    flinkConfiguration,
    futureExecutor,
    ioExecutor,
    instanceManager,
    scheduler,
    blobServer,
    libraryCacheManager,
    archive,
    restartStrategyFactory,
    timeout,
    leaderElectionService,
    submittedJobGraphs,
    checkpointRecoveryFactory,
    jobRecoveryTimeout,
    jobManagerMetricGroup,
    optRestAddress)
  with TestingJobManagerLike {}
| zimmermatt/flink | flink-yarn-tests/src/test/scala/org/apache/flink/yarn/TestingYarnJobManager.scala | Scala | apache-2.0 | 3,897 |
/*
* Copyright 2016 Alexey Kardapoltsev
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.kardapoltsev.astparser.parser.doc
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
/** Unit tests for [[DocLexer]]: checks tokenization of inline code
  * references (back-ticked paths), special characters, and multi-line
  * documentation text.
  */
class DocLexerSpec extends AnyWordSpec with Matchers {
  import DocLexer._

  private val underTest = new DocLexer

  // Shorthand for running the lexer over a raw documentation string.
  private def scan(input: String): List[Token] = underTest.scan(input)

  "DocLexer" should {

    "parse docs" in {
      val expected = List(
        Identifier("ref"),
        Space(),
        Identifier("to"),
        Space(),
        BackTick(),
        Identifier("pkg"),
        Dot(),
        Identifier("test"),
        BackTick()
      )
      scan("ref to `pkg.test`") shouldBe expected
    }

    "parse special characters" in {
      val expected = List(
        Identifier("word"),
        Space(),
        SpecialCharacters("()"),
        Space(),
        Identifier("wordWithCharacter"),
        SpecialCharacters("[]")
      )
      scan("word () wordWithCharacter[]") shouldBe expected
    }

    "parse multiline docs" in {
      val input =
        """line one
          |line two
          |    third line with spaces at the beginning""".stripMargin
      val expected = List(
        Identifier("line"),
        Space(),
        Identifier("one"),
        Newline(),
        Identifier("line"),
        Space(),
        Identifier("two"),
        Newline(),
        Space(),
        Space(),
        Space(),
        Space(),
        Identifier("third"),
        Space(),
        Identifier("line"),
        Space(),
        Identifier("with"),
        Space(),
        Identifier("spaces"),
        Space(),
        Identifier("at"),
        Space(),
        Identifier("the"),
        Space(),
        Identifier("beginning")
      )
      scan(input) shouldBe expected
    }
  }
}
| kardapoltsev/astparser | src/test/scala/com/github/kardapoltsev/astparser/parser/doc/DocLexerSpec.scala | Scala | apache-2.0 | 2,333 |
package com.sksamuel.elastic4s.termvectors
import com.sksamuel.elastic4s.Executable
import org.elasticsearch.action.termvectors.TermVectorsRequest.FilterSettings
import org.elasticsearch.action.termvectors.{TermVectorsRequestBuilder, TermVectorsResponse}
import org.elasticsearch.client.Client
import org.elasticsearch.index.VersionType
import scala.collection.JavaConverters._
import scala.concurrent.Future
trait TermVectorsExecutables {

  /** Executes [[TermVectorsDefinition]]s against the transport client,
    * mapping the raw response into a [[TermVectorsResult]].
    */
  implicit object TermVectorExecutable
    extends Executable[TermVectorsDefinition, TermVectorsResponse, TermVectorsResult] {

    /** Translates a [[TermVectorsDefinition]] into an Elasticsearch
      * term-vectors request builder, applying only the options the
      * definition actually provides.
      */
    def builder(client: Client, t: TermVectorsDefinition): TermVectorsRequestBuilder = {
      val rb = client.prepareTermVectors(t.indexAndType.index, t.indexAndType.`type`, t.id)

      // Optional flags: each is set only when present in the definition.
      t.fieldStatistics.foreach(rb.setFieldStatistics)
      t.offsets.foreach(rb.setOffsets)
      t.parent.foreach(rb.setParent)
      t.payloads.foreach(rb.setPayloads)
      rb.setPerFieldAnalyzer(t.perFieldAnalyzer.asJava)
      t.positions.foreach(rb.setPositions)
      t.preference.foreach(rb.setPreference)
      // The Java API takes a boxed Boolean here, hence the explicit valueOf.
      t.realtime.foreach(flag => rb.setRealtime(java.lang.Boolean.valueOf(flag)))
      t.routing.foreach(rb.setRouting)
      if (t.fields.nonEmpty) {
        rb.setSelectedFields(t.fields: _*)
      }
      t.termStatistics.foreach(rb.setTermStatistics)
      t.version.foreach(rb.setVersion)
      t.versionType.map(VersionType.fromString).foreach(rb.setVersionType)

      // Term-filtering thresholds are gathered into a single FilterSettings.
      val filterSettings = new FilterSettings()
      t.maxNumTerms.foreach(filterSettings.maxNumTerms = _)
      t.minTermFreq.foreach(filterSettings.minTermFreq = _)
      t.maxTermFreq.foreach(filterSettings.maxTermFreq = _)
      t.minDocFreq.foreach(filterSettings.minDocFreq = _)
      t.maxDocFreq.foreach(filterSettings.maxDocFreq = _)
      t.minWordLength.foreach(filterSettings.minWordLength = _)
      t.maxWordLength.foreach(filterSettings.maxWordLength = _)
      rb.setFilterSettings(filterSettings)

      rb
    }

    override def apply(client: Client, t: TermVectorsDefinition): Future[TermVectorsResult] =
      injectFutureAndMap(builder(client, t).execute)(TermVectorsResult.apply)
  }
}
| aroundus-inc/elastic4s | elastic4s-tcp/src/main/scala/com/sksamuel/elastic4s/termvectors/TermVectorsExecutables.scala | Scala | apache-2.0 | 2,198 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.