code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
// Copyright 2016 Duong Dang
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.github.duongdang.wikidata
import org.json4s._
import org.json4s.native.JsonMethods._
import java.io.Serializable
/**
 * One per-language label row extracted from a Wikidata JSON entity dump.
 *
 * @param entityId    Wikidata entity id (e.g. "Q42")
 * @param entityType  entity kind as reported by the dump (e.g. "item")
 * @param language    language code of this label (e.g. "en")
 * @param label       the label text in `language`
 * @param description the description text in `language`, "" when absent
 */
case class WikidataLabel (
  entityId: String,
  entityType: String,
  language: String,
  label: String,
  description: String
) extends Serializable
object WikidataLabel {
  /** Shape of a single entry in the dump's "labels"/"descriptions" maps. */
  case class Text(language: String, value: String)

  /**
   * Parses one JSON entity document and yields a [[WikidataLabel]] for every
   * language appearing in its "labels" map. Languages that have a label but
   * no description get an empty description string.
   */
  def fromText(input: String) = {
    implicit val formats = DefaultFormats
    val json = parse(input)
    val entityId = (json \\ "id").extract[String]
    val entityType = (json \\ "type").extract[String]
    val labels = (json \\ "labels").extract[Map[String, Text]]
    val descriptions = (json \\ "descriptions").extract[Map[String, Text]]
    for ((lang, text) <- labels)
      yield WikidataLabel(
        entityId,
        entityType,
        lang,
        text.value,
        descriptions.get(lang).map(_.value).getOrElse("")
      )
  }
}
| duongdang/wiki-extraction | src/main/scala/com/github/duongdang/wikidata/WikidataLabel.scala | Scala | apache-2.0 | 1,451 |
package spoiwo.model
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
/** Verifies the Cell factory's runtime dispatch on string content. */
class CellSpec extends AnyFlatSpec with Matchers {
  // A leading '=' marks the string as a spreadsheet formula.
  "Cell" should "be created as formula cell when initialized with string starting with '='" in {
    Cell("=3+2") shouldBe a[FormulaCell]
  }
  it should "be created as a string cell normal text content" in {
    Cell("Normal text") shouldBe a[StringCell]
  }
}
| norbert-radyk/spoiwo | core/src/test/scala/spoiwo/model/CellSpec.scala | Scala | mit | 427 |
package actors
import com.typesafe.config.Config
class ZILiquidityDemanderConfig(val config: Config) extends RandomLiquidityDemanderConfig
| ScalABM/models-library | farmer-patelli-zovko/src/main/scala-2.11/actors/ZILiquidityDemanderConfig.scala | Scala | apache-2.0 | 142 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.scalatest._
import org.scalatest.testng._
import org.scalatest.jmock._
import java.io.File
import org.apache.commons.io.FileUtils
import org.jmock.Mockery
import org.jmock.Expectations
import org.scalatest.events.Ordinal
package org.scalatest.testng {
/**
 * Verifies that [[TestNGWrapperSuite]] fires the expected reporter events when
 * running legacy TestNG suites described by on-disk XML suite files.
 */
class TestNGWrapperSuiteSuite extends FunSuite with SuiteExpectations {
  val XML_SUITES_PROPERTY = "xml_suites"

  // Suite XML referencing one legacy TestNG class with a single passing test.
  val legacySuiteXml =
    <suite name="Simple Suite">
      <test verbose="10" name="org.scalatest.testng.test" annotations="JDK">
        <classes>
          <class name="org.scalatest.testng.testpackage.LegacySuite"/>
        </classes>
      </test>
    </suite>

  test("wrapper suite properly notifies reporter when tests start, and pass") {
    val xmlSuiteFile = this.createSuite( legacySuiteXml )
    // jMock mockery records the expected reporter interactions.
    val context = new Mockery
    val reporter = context.mock(classOf[Reporter])
    context.checking(
      new Expectations() {
        expectSingleTestToPass(this, reporter)
      }
    )
    val status = new ScalaTestStatefulStatus
    (new TestNGWrapperSuite(List(xmlSuiteFile))).runTestNG(reporter, new Tracker, status)
    status.setCompleted()
    // Fails the test if any expected reporter call did not happen.
    context.assertIsSatisfied()
  }

  // Suite XML referencing two classes with three passing tests in total.
  val legacySuiteWithThreeTestsXml =
    <suite name="Simple Suite">
      <test verbose="10" name="org.scalatest.testng.test" annotations="JDK">
        <classes>
          <class name="org.scalatest.testng.testpackage.LegacySuite"/>
          <class name="org.scalatest.testng.testpackage.LegacySuiteWithTwoTests"/>
        </classes>
      </test>
    </suite>

  test("wrapper suite should be notified for all tests") {
    val xmlSuiteFile = this.createSuite(legacySuiteWithThreeTestsXml)
    val context = new Mockery
    val reporter = context.mock(classOf[Reporter])
    context.checking(
      new Expectations() {
        expectNTestsToPass(this, 3, reporter)
      }
    )
    val status = new ScalaTestStatefulStatus()
    (new TestNGWrapperSuite(List(xmlSuiteFile))).runTestNG(reporter, new Tracker, status)
    status.setCompleted()
    context.assertIsSatisfied()
  }

  /** Writes the suite XML to a temp file and returns its absolute path. */
  def createSuite(suiteNode: scala.xml.Elem) : String = {
    val tmp = File.createTempFile("testng", "wrapper")
    FileUtils.writeStringToFile(tmp, suiteNode.toString)
    tmp.getAbsolutePath
  }
}
// Minimal TestNG-annotated fixture classes referenced by the XML suite files
// used in TestNGWrapperSuiteSuite.
package testpackage {
  import org.testng.annotations._
  class LegacySuite extends TestNGSuite {
    @Test def legacyTestThatPasses() {}
  }
  class LegacySuiteWithTwoTests extends TestNGSuite {
    @Test def anotherLegacyTestThatPasses() {}
    @Test def anotherLegacyTestThatPasses2() {}
  }
}
}
| travisbrown/scalatest | src/test/scala/org/scalatest/testng/TestNGWrapperSuiteSuite.scala | Scala | apache-2.0 | 3,366 |
/*
* The MIT License
*
* Copyright (c) 2016 Zhixun Tan
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package arrow_test.repr_test.draw_test
import arrow._
import shapeless._
/** Draws a simple three-stage linear chain: f |> g |> h. */
object OneToOneTest {
  def main(args: Array[String]) {
    val graph = new ArrowGraph
    import graph._
    val f = (x: Int) => x
    val g = (y: Int) => y
    val h = (z: Int) => z
    f |> g |> h
    draw()
  }
}
/** Draws a broadcast: one source f feeding a list of consumers. */
object BroadcastTest {
  def main(args: Array[String]) {
    val graph = new ArrowGraph
    import graph._
    val f = (x: Int) => x
    val g1 = (y: Int) => y
    val g2 = (y: Int) => y
    val gs = List(g1, g2)
    f |> gs
    draw()
  }
}
/** Draws a merge: a list of sources feeding one consumer g. */
object MergeTest {
  def main(args: Array[String]) {
    val graph = new ArrowGraph
    import graph._
    val f1 = (x: Int) => x
    val f2 = (x: Int) => x
    val fs = List(f1, f2)
    val g = (y: Int) => y
    fs |> g
    draw()
  }
}
/**
 * Draws a heterogeneous split: f produces Int :: Double :: HNil, distributed
 * element-wise to the HList of consumers gs.
 * NOTE(review): `f |> gs` is linked twice — presumably intentional to exercise
 * duplicate-edge drawing; confirm before removing.
 */
object HSplitTest {
  def main(args: Array[String]) {
    val graph = new ArrowGraph
    import graph._
    val f = (_: Int) => 1 :: 3.0 :: HNil
    val g0 = identity[Int] _
    val g1 = identity[Double] _
    val gs = g0 :: g1 :: HNil
    f |> gs
    f |> gs
    draw()
  }
}
/** Draws a heterogeneous join: an HList of sources feeding one HNil-consuming g. */
object HJoinTest {
  def main(args: Array[String]) {
    val graph = new ArrowGraph
    import graph._
    val f0 = (x: Int) => x
    val f1 = (x: Int) => x
    val fs = f0 :: f1 :: HNil
    val g = (_: Int :: Int :: HNil) => 0
    fs |> g
    draw()
  }
}
/** Draws a homogeneous split: f's List output distributed across three consumers. */
object SplitTest {
  def main(args: Array[String]) {
    val graph = new ArrowGraph
    import graph._
    val f = (x: Int) => List(x, x, x)
    val g1 = (x: Int) => x
    val g2 = (x: Int) => x
    val g3 = (x: Int) => x
    val gs = List(g1, g2, g3)
    f |> gs
    draw()
  }
}
/** Draws a homogeneous join: three copies of f feeding a List-consuming g. */
object JoinTest {
  def main(args: Array[String]) {
    val graph = new ArrowGraph
    import graph._
    val f = (x: Int) => x
    val fs = List(f, f, f)
    val g = (_: List[Int]) => 0
    fs |> g
    draw()
  }
} | phisiart/arrow | src/test/scala/arrow_test/repr_test/draw_test/DrawTest.scala | Scala | mit | 3,227 |
package is.hail.methods
import java.io.{FileInputStream, IOException}
import java.util.Properties
import is.hail.annotations._
import is.hail.expr.JSONAnnotationImpex
import is.hail.expr.ir.{ExecuteContext, TableValue}
import is.hail.expr.ir.functions.TableToTableFunction
import is.hail.types._
import is.hail.types.physical.{PCanonicalStruct, PStruct, PType}
import is.hail.types.virtual._
import is.hail.rvd.{RVD, RVDContext, RVDType}
import is.hail.sparkextras.ContextRDD
import is.hail.utils._
import is.hail.variant.{Locus, RegionValueVariant}
import org.apache.spark.sql.Row
import org.apache.spark.storage.StorageLevel
import org.json4s.jackson.JsonMethods
import scala.collection.JavaConverters._
import scala.collection.mutable
object Nirvana {
// Virtual-type schema of the JSON emitted by Nirvana v2.0.8, used both to
// parse the tool's output and as the type of the added "nirvana" row field.
val nirvanaSignature = TStruct(
  "chromosome" -> TString,
  "refAllele" -> TString,
  "position" -> TInt32,
  "altAlleles" -> TArray(TString),
  "cytogeneticBand" -> TString,
  "quality" -> TFloat64,
  "filters" -> TArray(TString),
  "jointSomaticNormalQuality" -> TInt32,
  "copyNumber" -> TInt32,
  "strandBias" -> TFloat64,
  "recalibratedQuality" -> TFloat64,
  // Structural-variant / CNV annotation sources.
  "clingen" -> TArray(TStruct(
    "chromosome" -> TString,
    "begin" -> TInt32,
    "end" -> TInt32,
    "variantType" -> TString,
    "id" -> TString,
    "clinicalInterpretation" -> TString,
    "observedGains" -> TInt32,
    "observedLosses" -> TInt32,
    "validated" -> TBoolean,
    "phenotypes" -> TArray(TString),
    "phenotypeIds" -> TArray(TString),
    "reciprocalOverlap" -> TFloat64
  )),
  "dgv" -> TArray(TStruct(
    "chromosome" -> TString,
    "begin" -> TInt32,
    "end" -> TInt32,
    "variantType" -> TString,
    "id" -> TString,
    "variantFreqAll" -> TFloat64,
    "sampleSize" -> TInt32,
    "observedGains" -> TInt32,
    "observedLosses" -> TInt32,
    "reciprocalOverlap" -> TFloat64
  )),
  "oneKg" -> TArray(TStruct(
    "chromosome" -> TString,
    "begin" -> TInt32,
    "end" -> TInt32,
    "variantType" -> TString,
    "id" -> TString,
    "variantFreqAll" -> TFloat64,
    "variantFreqAfr" -> TFloat64,
    "variantFreqAmr" -> TFloat64,
    "variantFreqEas" -> TFloat64,
    "variantFreqEur" -> TFloat64,
    "variantFreqSas" -> TFloat64,
    "sampleSize" -> TInt32,
    "sampleSizeAfr" -> TInt32,
    "sampleSizeAmr" -> TInt32,
    "sampleSizeEas" -> TInt32,
    "sampleSizeEur" -> TInt32,
    "sampleSizeSas" -> TInt32,
    "observedGains" -> TInt32,
    "observedLosses" -> TInt32,
    "reciprocalOverlap" -> TFloat64
  )),
  "cosmic" -> TArray(TStruct(
    "id" -> TInt32,
    "chromosome" -> TString,
    "begin" -> TInt32,
    "end" -> TInt32,
    "variantType" -> TString,
    "copyNumber" -> TInt32,
    "cancerTypes" -> TArray(TTuple(TString,TInt32)),
    "tissues" -> TArray(TTuple(TString,TInt32)),
    "reciprocalOverlap" -> TFloat64
  )),
  // Per-allele small-variant annotations.
  "variants" -> TArray(TStruct(
    "altAllele" -> TString,
    "refAllele" -> TString,
    "chromosome" -> TString,
    "begin" -> TInt32,
    "end" -> TInt32,
    "phylopScore" -> TFloat64,
    "isReferenceMinor" -> TBoolean,
    "variantType" -> TString,
    "vid" -> TString,
    "hgvsg" -> TString,
    "isRecomposedVariant" -> TBoolean,
    "isDecomposedVariant" -> TBoolean,
    "regulatoryRegions" -> TArray(TStruct(
      "id" -> TString,
      "type" -> TString,
      "consequence" -> TSet(TString)
    )),
    "clinvar" -> TArray(TStruct(
      "id" -> TString,
      "reviewStatus" -> TString,
      "isAlleleSpecific" -> TBoolean,
      "alleleOrigins" -> TArray(TString),
      "refAllele" -> TString,
      "altAllele" -> TString,
      "phenotypes" -> TArray(TString),
      "medGenIds" -> TArray(TString),
      "omimIds" -> TArray(TString),
      "orphanetIds" -> TArray(TString),
      "significance" -> TString,
      "lastUpdatedDate" -> TString,
      "pubMedIds" -> TArray(TString)
    )),
    "cosmic" -> TArray(TStruct(
      "id" -> TString,
      "isAlleleSpecific" -> TBoolean,
      "refAllele" -> TString,
      "altAllele" -> TString,
      "gene" -> TString,
      "sampleCount" -> TInt32,
      "studies" -> TArray(TStruct(
        "id" -> TInt32,
        "histology" -> TString,
        "primarySite" -> TString
      ))
    )),
    "dbsnp" -> TStruct("ids" -> TArray(TString)),
    // Population frequencies (genome).
    "gnomad" -> TStruct(
      "coverage" -> TString,
      "allAf" -> TFloat64,
      "allAc" -> TInt32,
      "allAn" -> TInt32,
      "allHc" -> TInt32,
      "afrAf" -> TFloat64,
      "afrAc" -> TInt32,
      "afrAn" -> TInt32,
      "afrHc" -> TInt32,
      "amrAf" -> TFloat64,
      "amrAc" -> TInt32,
      "amrAn" -> TInt32,
      "amrHc" -> TInt32,
      "easAf" -> TFloat64,
      "easAc" -> TInt32,
      "easAn" -> TInt32,
      "easHc" -> TInt32,
      "finAf" -> TFloat64,
      "finAc" -> TInt32,
      "finAn" -> TInt32,
      "finHc" -> TInt32,
      "nfeAf" -> TFloat64,
      "nfeAc" -> TInt32,
      "nfeAn" -> TInt32,
      "nfeHc" -> TInt32,
      "othAf" -> TFloat64,
      "othAc" -> TInt32,
      "othAn" -> TInt32,
      "othHc" -> TInt32,
      "asjAf" -> TFloat64,
      "asjAc" -> TInt32,
      "asjAn" -> TInt32,
      "asjHc" -> TInt32,
      "failedFilter" -> TBoolean
    ),
    // Population frequencies (exome; adds SAS fields).
    "gnomadExome" -> TStruct(
      "coverage" -> TString,
      "allAf" -> TFloat64,
      "allAc" -> TInt32,
      "allAn" -> TInt32,
      "allHc" -> TInt32,
      "afrAf" -> TFloat64,
      "afrAc" -> TInt32,
      "afrAn" -> TInt32,
      "afrHc" -> TInt32,
      "amrAf" -> TFloat64,
      "amrAc" -> TInt32,
      "amrAn" -> TInt32,
      "amrHc" -> TInt32,
      "easAf" -> TFloat64,
      "easAc" -> TInt32,
      "easAn" -> TInt32,
      "easHc" -> TInt32,
      "finAf" -> TFloat64,
      "finAc" -> TInt32,
      "finAn" -> TInt32,
      "finHc" -> TInt32,
      "nfeAf" -> TFloat64,
      "nfeAc" -> TInt32,
      "nfeAn" -> TInt32,
      "nfeHc" -> TInt32,
      "othAf" -> TFloat64,
      "othAc" -> TInt32,
      "othAn" -> TInt32,
      "othHc" -> TInt32,
      "asjAf" -> TFloat64,
      "asjAc" -> TInt32,
      "asjAn" -> TInt32,
      "asjHc" -> TInt32,
      "sasAf" -> TFloat64,
      "sasAc" -> TInt32,
      "sasAn" -> TInt32,
      "sasHc" -> TInt32,
      "failedFilter" -> TBoolean
    ),
    "topmed" -> TStruct(
      "failedFilter" -> TBoolean,
      "allAc" -> TInt32,
      "allAn" -> TInt32,
      "allAf" -> TFloat64,
      "allHc" -> TInt32
    ),
    "globalAllele" -> TStruct(
      "globalMinorAllele" -> TString,
      "globalMinorAlleleFrequency" -> TFloat64
    ),
    "oneKg" -> TStruct(
      "ancestralAllele" -> TString,
      "allAf" -> TFloat64,
      "allAc" -> TInt32,
      "allAn" -> TInt32,
      "afrAf" -> TFloat64,
      "afrAc" -> TInt32,
      "afrAn" -> TInt32,
      "amrAf" -> TFloat64,
      "amrAc" -> TInt32,
      "amrAn" -> TInt32,
      "easAf" -> TFloat64,
      "easAc" -> TInt32,
      "easAn" -> TInt32,
      "eurAf" -> TFloat64,
      "eurAc" -> TInt32,
      "eurAn" -> TInt32,
      "sasAf" -> TFloat64,
      "sasAc" -> TInt32,
      "sasAn" -> TInt32
    ),
    "mitomap" -> TArray(TStruct(
      "refAllele" -> TString,
      "altAllele" -> TString,
      "diseases" -> TArray(TString),
      "hasHomoplasmy" -> TBoolean,
      "hasHeteroplasmy" -> TBoolean,
      "status" -> TString,
      "clinicalSignificance" -> TString,
      "scorePercentile" -> TFloat64,
      "isAlleleSpecific" -> TBoolean,
      "chromosome" -> TString,
      "begin" -> TInt32,
      "end" -> TInt32,
      "variantType" -> TString
    )),
    // Transcript-level consequences, by annotation source.
    "transcripts" -> TStruct(
      "refSeq" -> TArray(TStruct(
        "transcript" -> TString,
        "bioType" -> TString,
        "aminoAcids" -> TString,
        "cdnaPos" -> TString,
        "codons" -> TString,
        "cdsPos" -> TString,
        "exons" -> TString,
        "introns" -> TString,
        "geneId" -> TString,
        "hgnc" -> TString,
        "consequence" -> TArray(TString),
        "hgvsc" -> TString,
        "hgvsp" -> TString,
        "isCanonical" -> TBoolean,
        "polyPhenScore" -> TFloat64,
        "polyPhenPrediction" -> TString,
        "proteinId" -> TString,
        "proteinPos" -> TString,
        "siftScore" -> TFloat64,
        "siftPrediction" -> TString
      )),
      "ensembl" -> TArray(TStruct(
        "transcript" -> TString,
        "bioType" -> TString,
        "aminoAcids" -> TString,
        "cdnaPos" -> TString,
        "codons" -> TString,
        "cdsPos" -> TString,
        "exons" -> TString,
        "introns" -> TString,
        "geneId" -> TString,
        "hgnc" -> TString,
        "consequence" -> TArray(TString),
        "hgvsc" -> TString,
        "hgvsp" -> TString,
        "isCanonical" -> TBoolean,
        "polyPhenScore" -> TFloat64,
        "polyPhenPrediction" -> TString,
        "proteinId" -> TString,
        "proteinPos" -> TString,
        "siftScore" -> TFloat64,
        "siftPrediction" -> TString
      ))
    ),
    "overlappingGenes" -> TArray(TString)
  )),
  // Gene-level annotations.
  "genes" -> TArray(TStruct(
    "name" -> TString,
    "omim" -> TArray(TStruct(
      "mimNumber" -> TInt32,
      "hgnc" -> TString,
      "description" -> TString,
      "phenotypes" -> TArray(TStruct(
        "mimNumber" -> TInt32,
        "phenotype" -> TString,
        "mapping" -> TString,
        "inheritance" -> TArray(TString),
        "comments" -> TString
      ))
    )),
    "exac" -> TStruct(
      "pLi" -> TFloat64,
      "pRec" -> TFloat64,
      "pNull" -> TFloat64
    )
  ))
)
// Writes the minimal VCF header Nirvana expects on its stdin.
def printContext(w: (String) => Unit) {
  w("##fileformat=VCFv4.1")
  w("#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT")
}
// Writes one (locus, alleles) pair as a minimal VCF data line for Nirvana's
// stdin. `vaSignature` is currently unused. Star alleles (spanning deletions)
// are dropped from ALT.
// NOTE(review): after ALT a tab is appended and then "\\t.\\t.\\tGT", which
// leaves the QUAL column empty rather than "." — confirm Nirvana accepts this.
def printElement(vaSignature: PType)(w: (String) => Unit, v: (Locus, Array[String])) {
  val (locus, alleles) = v
  val sb = new StringBuilder()
  sb.append(locus.contig)
  sb += '\\t'
  sb.append(locus.position)
  sb.append("\\t.\\t")
  sb.append(alleles(0))
  sb += '\\t'
  sb.append(alleles.tail.filter(_ != "*").mkString(","))
  sb += '\\t'
  sb.append("\\t.\\t.\\tGT")
  w(sb.result())
}
/**
 * Annotates the keyed (locus, alleles) table `tv` by streaming its rows as
 * minimal VCF through an external Nirvana process and parsing the JSON the
 * tool writes back, adding it as a top-level "nirvana" field.
 *
 * `config` names a Java properties file supplying the dotnet binary, the
 * Nirvana jar/dll location, cache, reference, and optional PATH and
 * supplementary-annotation directory. Rows are piped in groups of `blockSize`,
 * one Nirvana invocation per group.
 */
def annotate(ctx: ExecuteContext, tv: TableValue, config: String, blockSize: Int): TableValue = {
  assert(tv.typ.key == FastIndexedSeq("locus", "alleles"))
  assert(tv.typ.rowType.size == 2)
  // Load launch settings; any I/O failure is fatal.
  val properties = try {
    val p = new Properties()
    val is = new FileInputStream(config)
    p.load(is)
    is.close()
    p
  } catch {
    case e: IOException =>
      fatal(s"could not open file: ${ e.getMessage }")
  }
  val dotnet = properties.getProperty("hail.nirvana.dotnet", "dotnet")
  val nirvanaLocation = properties.getProperty("hail.nirvana.location")
  if (nirvanaLocation == null)
    // NOTE(review): message contains a stray quote; left as-is in this
    // documentation-only pass.
    fatal("property hail.nirvana.location' required")
  val path = Option(properties.getProperty("hail.nirvana.path"))
  val cache = properties.getProperty("hail.nirvana.cache")
  val supplementaryAnnotationDirectoryOpt = Option(properties.getProperty("hail.nirvana.supplementaryAnnotationDirectory"))
  val supplementaryAnnotationDirectory = if (supplementaryAnnotationDirectoryOpt.isEmpty) List[String]() else List("--sd", supplementaryAnnotationDirectoryOpt.get)
  val reference = properties.getProperty("hail.nirvana.reference")
  // Full command line; "-i -" / "-o -" make Nirvana read stdin, write stdout.
  val cmd: List[String] = List[String](dotnet, s"$nirvanaLocation") ++
    List("-c", cache) ++
    supplementaryAnnotationDirectory ++
    List("--disable-recomposition", "-r", reference,
      "-i", "-",
      "-o", "-")
  println(cmd.mkString(" "))
  // Queriers used to rebuild the (locus, alleles) key from Nirvana's JSON.
  val contigQuery: Querier = nirvanaSignature.query("chromosome")
  val startQuery = nirvanaSignature.query("position")
  val refQuery = nirvanaSignature.query("refAllele")
  val altsQuery = nirvanaSignature.query("altAlleles")
  // Capture locals so the closures below don't serialize `tv`/`this`.
  val localRowType = tv.rvd.rowPType
  val localBlockSize = blockSize
  val rowKeyOrd = tv.typ.keyType.ordering
  info("Running Nirvana")
  val prev = tv.rvd
  val annotations = prev
    .mapPartitions { (_, it) =>
      val pb = new ProcessBuilder(cmd.asJava)
      val env = pb.environment()
      if (path.orNull != null)
        env.put("PATH", path.get)
      val warnContext = new mutable.HashSet[String]
      val rvv = new RegionValueVariant(localRowType)
      it.map { ptr =>
        rvv.set(ptr)
        (rvv.locus(), rvv.alleles())
      }
        .grouped(localBlockSize)
        .flatMap { block =>
          // One external Nirvana process per block of rows.
          val (jt, err, proc) = block.iterator.pipe(pb,
            printContext,
            printElement(localRowType),
            _ => ())
          // The filter is because every other output line is a comma.
          val kt = jt.filter(_.startsWith("{\\"chromosome")).map { s =>
            val a = JSONAnnotationImpex.importAnnotation(JsonMethods.parse(s), nirvanaSignature, warnContext = warnContext)
            val locus = Locus(contigQuery(a).asInstanceOf[String],
              startQuery(a).asInstanceOf[Int])
            val alleles = refQuery(a).asInstanceOf[String] +: altsQuery(a).asInstanceOf[IndexedSeq[String]]
            (Annotation(locus, alleles), a)
          }
          // Restore key order within the block before handing rows back.
          val r = kt.toArray
            .sortBy(_._1)(rowKeyOrd.toOrdering)
          val rc = proc.waitFor()
          if (rc != 0)
            fatal(s"nirvana command failed with non-zero exit status $rc\\n\\tError:\\n${err.toString}")
          r
        }
    }
  // New row type: original row plus the "nirvana" struct.
  val nirvanaRVDType = prev.typ.copy(rowType = prev.rowPType.appendKey("nirvana", PType.canonical(nirvanaSignature)))
  val nirvanaRowType = nirvanaRVDType.rowType
  val nirvanaRVD: RVD = RVD(
    nirvanaRVDType,
    prev.partitioner,
    ContextRDD.weaken(annotations).cmapPartitions { (ctx, it) =>
      val rvb = new RegionValueBuilder(ctx.region)
      it.map { case (v, nirvana) =>
        // Rebuild each row as the struct (locus, alleles, nirvana).
        rvb.start(nirvanaRowType)
        rvb.startStruct()
        rvb.addAnnotation(nirvanaRowType.types(0).virtualType, v.asInstanceOf[Row].get(0))
        rvb.addAnnotation(nirvanaRowType.types(1).virtualType, v.asInstanceOf[Row].get(1))
        rvb.addAnnotation(nirvanaRowType.types(2).virtualType, nirvana)
        rvb.endStruct()
        rvb.end()
      }
    }).persist(ctx, StorageLevel.MEMORY_AND_DISK)
  TableValue(ctx,
    TableType(nirvanaRowType.virtualType, FastIndexedSeq("locus", "alleles"), TStruct.empty),
    BroadcastRow.empty(ctx),
    nirvanaRVD
  )
}
}
/** IR-level wrapper exposing Nirvana annotation as a TableToTableFunction. */
case class Nirvana(config: String, blockSize: Int = 500000) extends TableToTableFunction {
  /** Result type: the input row widened with a "nirvana" field; key and globals unchanged. */
  override def typ(childType: TableType): TableType = {
    assert(childType.key == FastIndexedSeq("locus", "alleles"))
    assert(childType.rowType.size == 2)
    TableType(childType.rowType ++ TStruct("nirvana" -> Nirvana.nirvanaSignature), childType.key, childType.globalType)
  }
  // Conservatively reports false: per-partition row counts are not guaranteed
  // to survive the external-process round trip.
  def preservesPartitionCounts: Boolean = false
  def execute(ctx: ExecuteContext, tv: TableValue): TableValue = {
    Nirvana.annotate(ctx, tv, config, blockSize)
  }
}
| danking/hail | hail/src/main/scala/is/hail/methods/Nirvana.scala | Scala | mit | 15,515 |
package fif.spark
import org.apache.spark.rdd.RDD
import scala.reflect.ClassTag
/**
* Methods that either wrap, or operate on wrapped, values so that
* common RDD operations are available with a natural, functional syntax.
*
* Let's look at Map as an example:
*
* {{{
* // implemented using librray that is not extendable and doesn't implement Serialzable
* val f: A => B = ...
*
* // can be anywhere, error will occur even if in local mode
* val data: RDD[A] = ...
*
* // cannot do
* data.map(f)
* // runtime exception :(
* // as f does not implement Serializable
*
* // instead do
* Map(f)(data)
* // will serialize it using Kryo and safely
* // deserialize to perform map on the data RDD
* }}}
*/
object RddSerializedOps extends Serializable {

  /** Kryo-safe `RDD.map`: the function is deserialized once per partition. */
  object Map extends Serializable {
    def apply[A, B: ClassTag](f: A => B): (RDD[A] => RDD[B]) =
      apply(KryoSerializationWrapper(f))
    def apply[A, B: ClassTag](
        fnSerialized: KryoSerializationWrapper[A => B]): (RDD[A] => RDD[B]) =
      (data: RDD[A]) =>
        data.mapPartitions(partition => {
          // Deserialize once, then reuse for every element of the partition.
          val f = fnSerialized.getValue
          partition.map(f)
        })
  }

  /** Kryo-safe `RDD.flatMap`. */
  object FlatMap extends Serializable {
    def apply[A, B: ClassTag](f: A => TraversableOnce[B]): (RDD[A] => RDD[B]) =
      apply(KryoSerializationWrapper(f))
    def apply[A, B: ClassTag](
        fnSerialized: KryoSerializationWrapper[A => TraversableOnce[B]])
      : (RDD[A] => RDD[B]) =
      (data: RDD[A]) =>
        data.mapPartitions(partition => {
          val f = fnSerialized.getValue
          partition.flatMap(f)
        })
  }

  /** Kryo-safe `RDD.foreach` (side effects only; result discarded). */
  object Foreach extends Serializable {
    def apply[A](f: A => Any): (RDD[A] => Unit) =
      apply(KryoSerializationWrapper(f))
    def apply[A](
        fnSerialized: KryoSerializationWrapper[A => Any]): (RDD[A] => Unit) =
      (data: RDD[A]) =>
        data.foreachPartition(partition => {
          val f = fnSerialized.getValue
          partition.foreach(f)
        })
  }

  /** Kryo-safe `RDD.aggregate` with separately wrapped seq and comb operators. */
  object Aggregate extends Serializable {
    def apply[A, B: ClassTag](zero: B,
                              seqOp: (B, A) => B,
                              combOp: (B, B) => B): (RDD[A] => B) =
      apply(zero,
        KryoSerializationWrapper(seqOp),
        KryoSerializationWrapper(combOp))
    def apply[A, B: ClassTag](
        zero: B,
        serSeqOp: KryoSerializationWrapper[(B, A) => B],
        serCombOp: KryoSerializationWrapper[(B, B) => B]): (RDD[A] => B) =
      (data: RDD[A]) =>
        data.aggregate(zero)(
          {
            case (b, a) =>
              val f = serSeqOp.getValue
              f(b, a)
          }, {
            case (b1, b2) =>
              val f = serCombOp.getValue
              f(b1, b2)
          }
        )
  }
}
| malcolmgreaves/abstract_data | data-tc-spark/src/main/scala/fif/spark/RddSerializedOps.scala | Scala | apache-2.0 | 2,812 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
package purescala
import Common._
import Expressions._
import Definitions._
import TypeOps._
/** Type trees of the PureScala AST, plus extractors and helpers over them. */
object Types {

  /** Anything that carries a PureScala type. */
  trait Typed extends Printable {
    def getType: TypeTree
    def isTyped : Boolean = getType != Untyped
  }

  class TypeErrorException(msg: String) extends Exception(msg)

  object TypeErrorException {
    def apply(obj: Expr, exp: List[TypeTree]): TypeErrorException = {
      new TypeErrorException("Type error: "+obj+", expected: "+exp.mkString(" or ")+", found "+obj.getType)
    }
    def apply(obj: Expr, exp: TypeTree): TypeErrorException = {
      apply(obj, List(exp))
    }
  }

  /** Base class of all type trees; a type tree is its own `getType`. */
  abstract class TypeTree extends Tree with Typed {
    val getType = this

    // Checks whether the subtypes of this type contain Untyped,
    // and if so sets this to Untyped.
    // Assumes the subtypes are correctly formed, so it does not descend
    // deep into the TypeTree.
    def unveilUntyped: TypeTree = this match {
      case NAryType(tps, _) =>
        if (tps contains Untyped) Untyped else this
    }
  }

  case object Untyped extends TypeTree
  case object BooleanType extends TypeTree
  case object UnitType extends TypeTree
  case object CharType extends TypeTree
  case object IntegerType extends TypeTree
  case object RealType extends TypeTree

  // Fixed-width machine integers.
  abstract class BitVectorType(val size: Int) extends TypeTree
  case object Int8Type extends BitVectorType(8)
  case object Int32Type extends BitVectorType(32)

  case object StringType extends TypeTree

  /** Extractor matching any bit-vector type by its width. */
  object BVType {
    def unapply(typ: TypeTree): Option[Int] = typ match {
      case bv: BitVectorType => Some(bv.size)
      case _ => None
    }
  }

  /** A named type parameter; equality is identity-based via its fresh `id`. */
  class TypeParameter private (name: String) extends TypeTree {
    val id = FreshIdentifier(name, this)

    // The mutability flag must travel with copies, so `freshen` replicates it
    // onto the new instance.
    def freshen = {
      val ntp = new TypeParameter(name)
      ntp.isMutable = isMutable
      ntp
    }
    override def equals(that: Any) = that match {
      case TypeParameter(id) => this.id == id
      case _ => false
    }
    override def hashCode = id.hashCode

    // Whether the type parameter is annotated as Mutable.
    var isMutable: Boolean = false
  }

  object TypeParameter {
    def unapply(tp: TypeParameter): Option[Identifier] = Some(tp.id)
    def fresh(name: String) = new TypeParameter(name)
  }

  /*
   * Requires at least two components. If you are not sure the requirement
   * holds, use tupleTypeWrap in purescala.Constructors instead.
   */
  case class TupleType(bases: Seq[TypeTree]) extends TypeTree {
    val dimension: Int = bases.length
    require(dimension >= 2)
  }

  case class SetType(base: TypeTree) extends TypeTree
  case class BagType(base: TypeTree) extends TypeTree
  case class MapType(from: TypeTree, to: TypeTree) extends TypeTree
  case class FunctionType(from: Seq[TypeTree], to: TypeTree) extends TypeTree
  case class ArrayType(base: TypeTree) extends TypeTree

  /** An instantiated class type: a class definition plus its type arguments. */
  sealed abstract class ClassType extends TypeTree {
    val classDef: ClassDef
    val id: Identifier = classDef.id

    override def hashCode : Int = id.hashCode + tps.hashCode
    override def equals(that : Any) : Boolean = that match {
      case t : ClassType => t.id == this.id && t.tps == this.tps
      case _ => false
    }

    val tps: Seq[TypeTree]
    assert(classDef.tparams.size == tps.size)

    // Fields with their declared types instantiated by `tps`.
    lazy val fields = {
      val tmap = (classDef.typeArgs zip tps).toMap
      if (tmap.isEmpty) {
        classDef.fields
      } else {
        // !! WARNING !!
        // vd.id changes but this should not be an issue as selector uses
        // classDef.params ids which do not change!
        classDef.fields.map { vd =>
          val newTpe = instantiateType(vd.getType, tmap)
          val newId = FreshIdentifier(vd.id.name, newTpe).copiedFrom(vd.id)
          vd.copy(id = newId).setPos(vd)
        }
      }
    }

    def invariant = classDef.invariant.map(_.typed(tps))
    def knownDescendants = classDef.knownDescendants.map( _.typed(tps) )
    def knownCCDescendants: Seq[CaseClassType] = classDef.knownCCDescendants.map( _.typed(tps) )
    lazy val fieldsTypes = fields.map(_.getType)
    // Topmost ancestor of this instantiated class type.
    lazy val root: ClassType = parent.map{ _.root }.getOrElse(this)
    lazy val parent = classDef.parent.map { pct =>
      instantiateType(pct, (classDef.typeArgs zip tps).toMap) match {
        case act: AbstractClassType => act
        case t => throw LeonFatalError("Unexpected translated parent type: "+t)
      }
    }
  }

  case class AbstractClassType(classDef: AbstractClassDef, tps: Seq[TypeTree]) extends ClassType
  case class CaseClassType(classDef: CaseClassDef, tps: Seq[TypeTree]) extends ClassType

  /** Generic extractor: decomposes any type into subtypes and a rebuilder. */
  object NAryType extends TreeExtractor[TypeTree] {
    def unapply(t: TypeTree): Option[(Seq[TypeTree], Seq[TypeTree] => TypeTree)] = t match {
      case CaseClassType(ccd, ts) => Some((ts, ts => CaseClassType(ccd, ts)))
      case AbstractClassType(acd, ts) => Some((ts, ts => AbstractClassType(acd, ts)))
      case TupleType(ts) => Some((ts, TupleType))
      case ArrayType(t) => Some((Seq(t), ts => ArrayType(ts.head)))
      case SetType(t) => Some((Seq(t), ts => SetType(ts.head)))
      case BagType(t) => Some((Seq(t), ts => BagType(ts.head)))
      case MapType(from,to) => Some((Seq(from, to), t => MapType(t(0), t(1))))
      case FunctionType(fts, tt) => Some((tt +: fts, ts => FunctionType(ts.tail.toList, ts.head)))
      /* TODO: use some extractable interface once this proved useful */
      case solvers.RawArrayType(from,to) => Some((Seq(from, to), t => solvers.RawArrayType(t(0), t(1))))
      /* nullary types */
      case t => Some(Nil, _ => t)
    }
  }

  /** Flattens curried function types into (all argument types, final result). */
  object FirstOrderFunctionType {
    def unapply(tpe: TypeTree): Option[(Seq[TypeTree], TypeTree)] = tpe match {
      case FunctionType(from, to) =>
        unapply(to).map(p => (from ++ p._1) -> p._2) orElse Some(from -> to)
      case _ => None
    }
  }

  def optionToType(tp: Option[TypeTree]) = tp getOrElse Untyped
}
| epfl-lara/leon | src/main/scala/leon/purescala/Types.scala | Scala | gpl-3.0 | 6,178 |
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package kafka.server
import java.net.Socket
import java.util.{Collections, Properties}
import kafka.utils.TestUtils
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.requests.{ListGroupsRequest, ListGroupsResponse}
import org.apache.kafka.common.metrics.MetricsReporter
import org.apache.kafka.common.metrics.KafkaMetric
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.protocol.Errors
import org.junit.Assert._
import org.junit.{Before, Test}
import org.junit.After
import java.util.concurrent.atomic.AtomicInteger
import org.apache.kafka.common.message.ListGroupsRequestData
/*
* this test checks that a reporter that throws an exception will not affect other reporters
* and will not affect the broker's message handling
*/
class KafkaMetricReporterExceptionHandlingTest extends BaseRequestTest {
  // A single broker keeps the reporter-invocation accounting simple.
  override def brokerCount: Int = 1

  // Register both the throwing and the well-behaved reporter on the broker.
  override def brokerPropertyOverrides(properties: Properties): Unit = {
    properties.put(KafkaConfig.MetricReporterClassesProp, classOf[KafkaMetricReporterExceptionHandlingTest.BadReporter].getName + "," + classOf[KafkaMetricReporterExceptionHandlingTest.GoodReporter].getName)
  }

  @Before
  override def setUp(): Unit = {
    super.setUp()
    // need a quota prop to register a "throttle-time" metrics after server startup
    val quotaProps = new Properties()
    quotaProps.put(DynamicConfig.Client.RequestPercentageOverrideProp, "0.1")
    adminZkClient.changeClientIdConfig("<default>", quotaProps)
  }

  @After
  override def tearDown(): Unit = {
    // Reset the JVM-wide counters so later tests start from zero.
    KafkaMetricReporterExceptionHandlingTest.goodReporterRegistered.set(0)
    KafkaMetricReporterExceptionHandlingTest.badReporterRegistered.set(0)
    super.tearDown()
  }

  @Test
  def testBothReportersAreInvoked(): Unit = {
    val port = anySocketServer.boundPort(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT))
    val socket = new Socket("localhost", port)
    socket.setSoTimeout(10000)
    try {
      TestUtils.retry(10000) {
        // Any broker request works; ListGroups is cheap and requires no topics.
        val listGroupsRequest = new ListGroupsRequest.Builder(new ListGroupsRequestData).build()
        val listGroupsResponse = sendAndReceive[ListGroupsResponse](listGroupsRequest, socket)
        val errors = listGroupsResponse.errorCounts()
        assertEquals(Collections.singletonMap(Errors.NONE, 1), errors)
        // Both reporters must have seen the same "Request" metrics, at least one.
        assertEquals(KafkaMetricReporterExceptionHandlingTest.goodReporterRegistered.get, KafkaMetricReporterExceptionHandlingTest.badReporterRegistered.get)
        assertTrue(KafkaMetricReporterExceptionHandlingTest.goodReporterRegistered.get > 0)
      }
    } finally {
      socket.close()
    }
  }
}
object KafkaMetricReporterExceptionHandlingTest {
  // The counters are mutated only via AtomicInteger methods (`set`,
  // `incrementAndGet`) and never reassigned, so the references should be
  // immutable `val`s rather than `var`s.
  val goodReporterRegistered = new AtomicInteger
  val badReporterRegistered = new AtomicInteger

  /** Well-behaved reporter: counts every "Request"-group metric registration. */
  class GoodReporter extends MetricsReporter {
    def configure(configs: java.util.Map[String, _]): Unit = {
    }
    def init(metrics: java.util.List[KafkaMetric]): Unit = {
    }
    def metricChange(metric: KafkaMetric): Unit = {
      if (metric.metricName.group == "Request") {
        goodReporterRegistered.incrementAndGet
      }
    }
    def metricRemoval(metric: KafkaMetric): Unit = {
    }
    def close(): Unit = {
    }
  }

  /** Faulty reporter: counts the registration, then throws, to verify the
    * broker tolerates misbehaving metric reporters. */
  class BadReporter extends GoodReporter {
    override def metricChange(metric: KafkaMetric): Unit = {
      if (metric.metricName.group == "Request") {
        badReporterRegistered.incrementAndGet
        throw new RuntimeException(metric.metricName.toString)
      }
    }
  }
}
| sslavic/kafka | core/src/test/scala/unit/kafka/server/KafkaMetricReporterExceptionHandlingTest.scala | Scala | apache-2.0 | 4,106 |
package models.user
import com.mohiva.play.silhouette.api.{ Authenticator, Authorization }
import play.api.i18n._
import play.api.mvc.Request
import scala.concurrent.Future
/**
 * Silhouette authorization that grants access when the authenticated user
 * holds the required role.
 */
case class WithRole(role: Role) extends Authorization[User, Authenticator] {
  override def isAuthorized[B](user: User, authenticator: Authenticator)(implicit request: Request[B], messages: Messages) = {
    Future.successful(user.roles match {
      // NOTE(review): the element type is erased at runtime, so this pattern
      // matches any Set. If `user.roles` is statically a Set[Role], the
      // `case _` branch is unreachable and this is equivalent to
      // `user.roles.contains(role)` — confirm against the User model before
      // simplifying.
      case list: Set[Role] => list.contains(role)
      case _ => false
    })
  }
}
/** A named access level attached to a user account. */
sealed trait Role extends Serializable {
  /** Stable string identifier used for persistence and lookup. */
  def name: String
}

object Role {
  /** Administrator role. */
  object Admin extends Role {
    val name = "admin"
  }

  /** Regular authenticated user role. */
  object User extends Role {
    val name = "user"
  }

  /** Fallback for unrecognised role strings. */
  object Unknown extends Role {
    val name = "-"
  }

  // Lookup table of the recognised roles; anything else resolves to Unknown.
  private val known: Map[String, Role] = Map(Admin.name -> Admin, User.name -> User)

  /** Resolves a persisted role name back to its Role, defaulting to Unknown. */
  def apply(role: String): Role = known.getOrElse(role, Unknown)

  /** Extractor yielding the role's string name. */
  def unapply(role: Role): Option[String] = Some(role.name)
}
| bradegler/boilerplay | app/models/user/Roles.scala | Scala | apache-2.0 | 946 |
package julienrf.forms.codecs
import org.scalacheck.Prop._
import org.scalacheck.Properties
/**
 * Property-based tests for Codec: round-trip laws for the primitive codecs,
 * the constraints, and the codec combinators.
 */
object CodecTest extends Properties("Codec") {
  // Laws shared by several of the properties below.
  object laws {
    // Round-trip law: decoding an encoded value must yield the original value.
    def encodeDecode[A, B](codec: Codec[A, B], b: B): Boolean =
      succeeds(b, codec.decode(codec.encode(b)))
  }
  // Codec.text round-trips non-empty strings; absent or empty input is "Required".
  val text = forAll { (s: String) =>
    s.nonEmpty ==> {
      val codec = Codec.text
      laws.encodeDecode(codec, s) &&
        fails(Seq(Error.Required), codec.decode(None)) &&
        fails(Seq(Error.Required), codec.decode(Some(Nil))) &&
        fails(Seq(Error.Required), codec.decode(Some(Seq("")))) // empty text is not valid
    }
  }
  // Codec.int round-trips integers; an empty field is "Required".
  val int = forAll { (n: Int) =>
    val codec = Codec.int
    laws.encodeDecode(codec, n) &&
      codec.decode(Some(Nil)) == Left(Seq(Error.Required))
  }
  // Codec.boolean round-trips booleans.
  val boolean = forAll { (b: Boolean) =>
    laws.encodeDecode(Codec.boolean, b)
  }
  // greaterOrEqual(m) accepts n >= m and reports MustBeAtLeast(m) otherwise.
  val greaterOrEqual = forAll { (n: Int, m: Int) =>
    val constraint = Constraint.greaterOrEqual(m)
    val result = constraint.decode(n)
    if (n >= m) succeeds(n, result) else result == Left(Seq(Error.MustBeAtLeast(m)))
  }
  // oneOf round-trips every key of the map; any other value is "Undefined".
  val oneOf = forAll { (map: Map[Int, String], s: String) =>
    (map.values.forall(_.nonEmpty) && s.nonEmpty && !map.values.exists(_ == s)) ==> {
      val codec = Codec.oneOf(map)
      map.keys.forall(n => laws.encodeDecode(codec, n)) &&
        fails(Seq(Error.Undefined), codec.decode(Some(Seq(s))))
    }
  }
  // severalOf round-trips sub-collections of keys; one unknown value fails all.
  val severalOf = forAll { (map: Map[Int, String], s: String) =>
    (map.values.forall(_.nonEmpty) && s.nonEmpty && !map.values.exists(_ == s)) ==> {
      val codec = Codec.severalOf(map)
      // All contiguous slices of the key set.
      val nss = for {
        i <- 0 until map.keys.size
        j <- 0 until i
      } yield map.keys.slice(j, i)
      nss.forall(ns => laws.encodeDecode(codec, ns.to[Seq])) &&
        fails(Seq(Error.Undefined), codec.decode(Some(Seq(s)))) &&
        map.values.forall(s2 => fails(Seq(Error.Undefined), codec.decode(Some(Seq(s, s2))))) // One wrong key makes the whole thing fail
    }
  }
  property("codecs") = text && int && boolean && greaterOrEqual && oneOf && severalOf
  // A constraint's decode must agree with validate; its encode is the identity.
  property("constraints") = {
    def laws[A](constraint: Constraint[A], a: A): Boolean = {
      val decode = constraint.decode(a) == (constraint.validate(a) match {
        case Some(es) => Left(es)
        case None => Right(a)
      })
      val encode = constraint.encode(a) == a
      decode && encode
    }
    forAll { (n: Int, m: Int) => laws(Constraint.greaterOrEqual(n), m) }
  }
  // Kleisli composition (>=>): decode chains via flatMap; encode composes in
  // the reverse direction.
  val kleisli = {
    object laws {
      def decode[A, B](codec1: Codec[A, B], codec2: Codec[B, _], a: A): Boolean =
        (codec1 >=> codec2).decode(a) == codec1.decode(a).right.flatMap(codec2.decode)
      def encode[B, C](codec1: Codec[_, B], codec2: Codec[B, C], c: C): Boolean =
        (codec1 >=> codec2).encode(c) == codec1.encode(codec2.encode(c))
    }
    forAll { (n: Int, m: Int, b: Boolean) =>
      laws.decode(Codec.int, Constraint.greaterOrEqual(m), if (b) Some(Seq(n.toString)) else None) &&
        laws.encode(Codec.int, Constraint.greaterOrEqual(m), n)
    }
  }
  // Alternative (||): the first succeeding codec wins on decode; encode
  // dispatches on the Either side.
  val orElse = {
    object laws {
      def decode[A](codec1: Codec[A, _], codec2: Codec[A, _], a: A): Boolean =
        (codec1 || codec2).decode(a) == (codec1.decode(a) match {
          case Right(b) => Right(Left(b))
          case Left(_) => codec2.decode(a).right.map(c => Right(c))
        })
      def encode[A, B, C](codec1: Codec[A, B], codec2: Codec[A, C], bOrC: Either[B, C]): Boolean =
        (codec1 || codec2).encode(bOrC) == (bOrC match {
          case Left(b) => codec1.encode(b)
          case Right(c) => codec2.encode(c)
        })
    }
    forAll { (s: Option[Seq[String]]) => laws.decode(Codec.boolean, Codec.int, s) } && // FIXME Should use a more specific generator
      forAll { (bOrN: Either[Boolean, Int]) => laws.encode(Codec.boolean, Codec.int, bOrN) }
  }
  // Optional (?): decode never fails (failures become None); encode flattens.
  val opt = {
    object laws {
      def decode[A](codec: Codec[Option[A], _], a: Option[A]): Boolean =
        succeeds(codec.decode(a).right.toOption, codec.?.decode(a))
      def encode[A, B](codec: Codec[Option[A], B], maybeB: Option[B]): Boolean =
        codec.?.encode(maybeB) == (maybeB flatMap codec.encode)
    }
    forAll { (maybeN: Option[Int]) =>
      laws.decode(Codec.int, maybeN.map(n => Seq(n.toString))) &&
        laws.encode(Codec.int, maybeN)
    }
  }
  // Conjunction (&&): errors from both constraints accumulate; encode is the
  // second constraint's encode.
  val and = {
    def laws[A](constraint1: Constraint[A], constraint2: Constraint[A], a: A): Boolean = {
      val constraint = constraint1 && constraint2
      val decode = constraint.decode(a) == ((constraint1.decode(a), constraint2.decode(a)) match {
        case (Right(`a`), Right(`a`)) => Right(a)
        case (Right(`a`), Left(es)) => Left(es)
        case (Left(es), Right(`a`)) => Left(es)
        case (Left(es1), Left(es2)) => Left(es1 ++ es2) // Errors are accumulated
      })
      val encode = constraint.encode(a) == constraint2.encode(a)
      decode && encode
    }
    forAll { (x: Int, y: Int, z: Int) => laws(Constraint.greaterOrEqual(x), Constraint.greaterOrEqual(y), z) }
  }
  property("combinators") = kleisli && orElse && opt && and
  // True iff the result is Right and carries exactly `a`.
  def succeeds[A](a: A, result: Either[Seq[Throwable], A]): Boolean = result match {
    case Right(ra) => a == ra
    case Left(_) => false
  }
  // True iff the result is Left with exactly the expected set of errors.
  def fails(expectedErrs: Seq[Throwable], result: Either[Seq[Throwable], _]): Boolean = result match {
    case Right(_) => false
    case Left(es) => expectedErrs.to[Set] == es.to[Set]
  }
}
| julienrf/play-forms | play-forms/src/test/scala/julienrf/forms/codecs/CodecTest.scala | Scala | mit | 5,407 |
package com.monsanto.arch.cloudformation.model
import spray.json._
import DefaultJsonProtocol._
import scala.collection.immutable.ListMap
/**
* Created by Ryan Richt on 2/15/15
*/
/**
 * A CloudFormation "Mappings" entry: a named two-level lookup table
 * (top-level key -> second-level key -> value). The value formatter is
 * captured at construction time so heterogeneous Mapping[_] instances can
 * still be serialised later.
 */
case class Mapping[A](name: String, map: Map[String, Map[String, A]])(implicit val formatter: JsonFormat[A]){
  // Exposes the value type so the writer below can recover it from a Mapping[_].
  type T = A
}
object Mapping extends DefaultJsonProtocol {
  implicit object seqFormat extends JsonWriter[Seq[Mapping[_]]] {
    implicit val format: JsonWriter[Mapping[_]] = new JsonWriter[Mapping[_]]{
      def write(obj: Mapping[_]) = {
        // Recover the erased value type (obj.T) together with its captured formatter.
        implicit val foo: JsonFormat[obj.T] = obj.formatter.asInstanceOf[JsonFormat[obj.T]]
        val raw = obj.asInstanceOf[Mapping[obj.T]].map.toJson
        // NOTE(review): `raw` is built from `map` only, so this removes a
        // top-level map key literally named "name" (not the case-class field).
        // Confirm this is intentional before relying on mappings keyed "name".
        JsObject(raw.asJsObject.fields - "name")
      }
    }
    // Serialise the sequence as one JSON object keyed by mapping name,
    // preserving insertion order via ListMap.
    def write(objs: Seq[Mapping[_]]) = JsObject(ListMap(objs.map(o => o.name -> format.write(o)): _*))
  }
}
| joewing/cloudformation-template-generator | src/main/scala/com/monsanto/arch/cloudformation/model/Mapping.scala | Scala | bsd-3-clause | 869 |
/*
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package play.api.db.evolutions
import java.sql.{ Statement, Connection, SQLException }
import javax.inject.{ Inject, Provider, Singleton }
import javax.sql.DataSource
import scala.util.control.Exception.ignoring
import play.api.db.{ Database, DBApi }
import play.api.{ Configuration, Environment, Mode, Logger, PlayException }
import play.core.{ HandleWebCommandSupport, WebCommands }
/**
 * Run evolutions on application startup. Automatically runs on construction.
 */
@Singleton
class ApplicationEvolutions @Inject() (
    config: EvolutionsConfig,
    reader: EvolutionsReader,
    evolutions: EvolutionsApi,
    dynamicEvolutions: DynamicEvolutions,
    dbApi: DBApi,
    environment: Environment,
    webCommands: WebCommands) {

  private val logger = Logger(classOf[ApplicationEvolutions])

  /**
   * Checks the evolutions state. Called on construction.
   */
  def start(): Unit = {
    webCommands.addHandler(new EvolutionsWebCommands(evolutions, reader, config))

    // allow db modules to write evolution files
    dynamicEvolutions.create()

    dbApi.databases().foreach(runEvolutions)
  }

  /**
   * Computes the pending scripts for one database and either applies them or
   * fails, depending on the application mode and the per-datasource
   * configuration (autoApply / autoApplyDowns).
   */
  private def runEvolutions(database: Database): Unit = {
    val db = database.name
    val dbConfig = config.forDatasource(db)
    if (dbConfig.enabled) {
      withLock(database, dbConfig) {
        val scripts = evolutions.scripts(db, reader)
        val hasDown = scripts.exists(_.isInstanceOf[DownScript])
        val autocommit = dbConfig.autocommit
        if (scripts.nonEmpty) {
          import Evolutions.toHumanReadableScript
          environment.mode match {
            case Mode.Test => evolutions.evolve(db, scripts, autocommit)
            case Mode.Dev if dbConfig.autoApply => evolutions.evolve(db, scripts, autocommit)
            case Mode.Prod if !hasDown && dbConfig.autoApply => evolutions.evolve(db, scripts, autocommit)
            case Mode.Prod if hasDown && dbConfig.autoApply && dbConfig.autoApplyDowns => evolutions.evolve(db, scripts, autocommit)
            case Mode.Prod if hasDown =>
              logger.warn(s"Your production database [$db] needs evolutions, including downs! \\n\\n${toHumanReadableScript(scripts)}")
              logger.warn(s"Run with -Dplay.modules.evolutions.db.$db.autoApply=true and -Dplay.modules.evolutions.db.$db.autoApplyDowns=true if you want to run them automatically, including downs (be careful, especially if your down evolutions drop existing data)")
              throw InvalidDatabaseRevision(db, toHumanReadableScript(scripts))
            case Mode.Prod =>
              logger.warn(s"Your production database [$db] needs evolutions! \\n\\n${toHumanReadableScript(scripts)}")
              logger.warn(s"Run with -Dplay.modules.evolutions.db.$db.autoApply=true if you want to run them automatically (be careful)")
              throw InvalidDatabaseRevision(db, toHumanReadableScript(scripts))
            case _ => throw InvalidDatabaseRevision(db, toHumanReadableScript(scripts))
          }
        }
      }
    }
  }

  /**
   * Runs the given block while holding the cross-node evolutions lock when
   * locking is enabled for this datasource, otherwise just runs the block.
   *
   * The connection is now released even when creating the lock table or
   * acquiring the lock throws (previously those ran outside the try/finally
   * guarding `unlock`, leaking the connection on failure).
   */
  private def withLock(db: Database, dbConfig: EvolutionsDatasourceConfig)(block: => Unit): Unit = {
    if (dbConfig.useLocks) {
      val ds = db.dataSource
      val url = db.url
      val c = ds.getConnection
      try {
        c.setAutoCommit(false)
        val s = c.createStatement()
        try {
          createLockTableIfNecessary(url, c, s)
          lock(c, s)
          block
        } finally {
          // Commits, then closes both the statement and the connection.
          unlock(c, s)
        }
      } finally {
        // Safety net for failures before the inner try (setAutoCommit /
        // createStatement): closing an already-closed connection is a JDBC no-op.
        ignoring(classOf[SQLException])(c.close())
      }
    } else {
      block
    }
  }

  /**
   * Creates the single-row play_evolutions_lock table on first use, choosing
   * Oracle-specific DDL when the JDBC URL indicates an Oracle database.
   */
  private def createLockTableIfNecessary(url: String, c: Connection, s: Statement): Unit = {
    import ApplicationEvolutions._
    try {
      // Probe for the table; an exception means it does not exist yet.
      val r = s.executeQuery("select lock from play_evolutions_lock")
      r.close()
    } catch {
      case e: SQLException =>
        c.rollback()
        val createScript = url match {
          case OracleJdbcUrl() => CreatePlayEvolutionsLockOracleSql
          case _ => CreatePlayEvolutionsLockSql
        }
        s.execute(createScript)
        s.executeUpdate("insert into play_evolutions_lock (lock) values (1)")
    }
  }

  /**
   * Acquires the lock row with SELECT ... FOR UPDATE NOWAIT, retrying up to
   * `attempts` times (1s apart) if another node currently holds it.
   */
  private def lock(c: Connection, s: Statement, attempts: Int = 5): Unit = {
    try {
      s.executeQuery("select lock from play_evolutions_lock where lock = 1 for update nowait")
    } catch {
      case e: SQLException =>
        if (attempts == 0) throw e
        else {
          logger.warn("Exception while attempting to lock evolutions (other node probably has lock), sleeping for 1 sec")
          c.rollback()
          Thread.sleep(1000)
          lock(c, s, attempts - 1)
        }
    }
  }

  /** Releases the lock by committing, then closes the statement and connection. */
  private def unlock(c: Connection, s: Statement): Unit = {
    ignoring(classOf[SQLException])(s.close())
    ignoring(classOf[SQLException])(c.commit())
    ignoring(classOf[SQLException])(c.close())
  }

  start() // on construction
}
private object ApplicationEvolutions {

  // Oracle needs different lock-table DDL; detected from the JDBC URL.
  val OracleJdbcUrl = "^jdbc:oracle:.*".r

  // Single-row table locked with SELECT ... FOR UPDATE to coordinate
  // evolutions across nodes sharing the same database.
  val CreatePlayEvolutionsLockSql =
    """
      create table play_evolutions_lock (
        lock int not null primary key
      )
    """

  val CreatePlayEvolutionsLockOracleSql =
    """
      CREATE TABLE play_evolutions_lock (
        lock Number(10,0) Not Null Enable,
        CONSTRAINT play_evolutions_lock_pk PRIMARY KEY (lock)
      )
    """
}
/**
 * Evolutions configuration for a given datasource.
 */
trait EvolutionsDatasourceConfig {
  /** Whether evolutions are enabled at all for this datasource. */
  def enabled: Boolean
  /** Whether evolution scripts are run in autocommit mode. */
  def autocommit: Boolean
  /** Whether a database lock is taken to coordinate evolutions across nodes. */
  def useLocks: Boolean
  /** Whether pending (up) evolutions are applied automatically. */
  def autoApply: Boolean
  /** Whether pending down evolutions are applied automatically. */
  def autoApplyDowns: Boolean
}
/**
 * Evolutions configuration for all datasources.
 */
trait EvolutionsConfig {
  /** Returns the evolutions configuration for the named datasource. */
  def forDatasource(db: String): EvolutionsDatasourceConfig
}
/**
 * Default (immutable) evolutions datasource configuration.
 *
 * @param enabled        whether evolutions are enabled for this datasource
 * @param autocommit     whether evolution scripts run in autocommit mode
 * @param useLocks       whether a database lock coordinates evolutions across nodes
 * @param autoApply      whether pending (up) evolutions are applied automatically
 * @param autoApplyDowns whether pending down evolutions are applied automatically
 */
case class DefaultEvolutionsDatasourceConfig(
  enabled: Boolean,
  autocommit: Boolean,
  useLocks: Boolean,
  autoApply: Boolean,
  autoApplyDowns: Boolean) extends EvolutionsDatasourceConfig
/**
 * Default evolutions configuration: per-datasource overrides with a shared fallback.
 */
class DefaultEvolutionsConfig(defaultDatasourceConfig: EvolutionsDatasourceConfig,
  datasources: Map[String, EvolutionsDatasourceConfig]) extends EvolutionsConfig {
  /** Returns the datasource-specific config, or the default if none is defined. */
  def forDatasource(db: String) = datasources.getOrElse(db, defaultDatasourceConfig)
}
/**
 * A provider that creates an EvolutionsConfig from the play.api.Configuration.
 *
 * Supports both the current `play.modules.evolutions.*` keys and the
 * deprecated top-level `evolutions.*` / `applyEvolutions.*` /
 * `applyDownEvolutions.*` keys (the latter with a deprecation warning).
 */
@Singleton
class DefaultEvolutionsConfigParser @Inject() (configuration: Configuration) extends Provider[EvolutionsConfig] {

  private val logger = Logger(classOf[DefaultEvolutionsConfigParser])

  def get = parse()

  def parse(): EvolutionsConfig = {
    val config = configuration.getConfig("play.modules.evolutions")

    // Find all the defined datasources, both using the old format, and the new format
    def datasourcesAt(c: Option[Configuration], key: String) = c.flatMap(_.getConfig(key)).fold(Set.empty[String])(_.subKeys)
    val datasources = datasourcesAt(config, "db") ++
      datasourcesAt(Some(configuration), "applyEvolutions") ++
      datasourcesAt(Some(configuration), "applyDownEvolutions")

    // Note: When removing the deprecated config options, make sure you move the defaults to reference.conf
    // Reads a boolean from the new location, falling back to the deprecated key
    // (logging a warning when that fallback is used), then to the default.
    def loadBoolean(key: String, oldKey: Option[String], default: Boolean): Boolean = {
      config.flatMap(_.getBoolean(key))
        .orElse(oldKey.flatMap(okey => configuration.getBoolean(okey).map { value =>
          logger.warn(s"Configuration option $okey is deprecated, use play.modules.evolutions.$key instead")
          value
        }))
        .getOrElse(default)
    }

    // Load defaults
    val autocommit = loadBoolean("autocommit", Some("evolutions.autocommit"), true)
    val useLocks = loadBoolean("useLocks", Some("evolutions.use.locks"), false)
    val autoApply = loadBoolean("autoApply", None, false)
    val autoApplyDowns = loadBoolean("autoApplyDowns", None, false)

    val defaultConfig = new DefaultEvolutionsDatasourceConfig(true, autocommit, useLocks, autoApply,
      autoApplyDowns)

    // Load config specific to datasources
    val datasourceConfig = datasources.map { datasource =>
      datasource -> {
        // Per-datasource keys inherit the defaults computed above.
        def loadDsBoolean(key: String, oldKey: Option[String], default: Boolean) = {
          loadBoolean(s"db.$datasource.$key", oldKey.map(_ + "." + datasource), default)
        }
        val enabled = loadDsBoolean("enabled", None, true)
        val autocommit = loadDsBoolean("autocommit", None, defaultConfig.autocommit)
        val useLocks = loadDsBoolean("useLocks", None, defaultConfig.useLocks)
        val autoApply = loadDsBoolean("autoApply", Some("applyEvolutions"), defaultConfig.autoApply)
        val autoApplyDowns = loadDsBoolean("autoApplyDowns", Some("applyDownEvolutions"), defaultConfig.autoApplyDowns)
        new DefaultEvolutionsDatasourceConfig(enabled, autocommit, useLocks, autoApply, autoApplyDowns)
      }
    }.toMap

    new DefaultEvolutionsConfig(defaultConfig, datasourceConfig)
  }

  /**
   * Convert configuration sections of key-boolean pairs to a set of enabled keys.
   */
  def enabledKeys(configuration: Configuration, section: String): Set[String] = {
    configuration.getConfig(section).fold(Set.empty[String]) { conf =>
      conf.keys.filter(conf.getBoolean(_).getOrElse(false))
    }
  }
}
/**
 * Default implementation for optional dynamic evolutions: a no-op hook that
 * DB modules can override to generate evolution files before they are read.
 */
@Singleton
class DynamicEvolutions {
  def create(): Unit = ()
}
/**
 * Web command handler for applying evolutions on application start.
 */
@Singleton
class EvolutionsWebCommands @Inject() (evolutions: EvolutionsApi, reader: EvolutionsReader, config: EvolutionsConfig) extends HandleWebCommandSupport {
  /**
   * Handles the `/@evolutions/...` routes: applying pending scripts for a
   * database, or marking a failed evolution revision as resolved, then
   * redirecting back to the original URL. Returns None for any other path.
   */
  def handleWebCommand(request: play.api.mvc.RequestHeader, buildLink: play.core.BuildLink, path: java.io.File): Option[play.api.mvc.Result] = {
    val applyEvolutions = """/@evolutions/apply/([a-zA-Z0-9_]+)""".r
    val resolveEvolutions = """/@evolutions/resolve/([a-zA-Z0-9_]+)/([0-9]+)""".r

    // Where to send the browser afterwards (defaults to the application root).
    lazy val redirectUrl = request.queryString.get("redirect").filterNot(_.isEmpty).map(_(0)).getOrElse("/")

    request.path match {
      case applyEvolutions(db) => {
        Some {
          val scripts = evolutions.scripts(db, reader)
          evolutions.evolve(db, scripts, config.forDatasource(db).autocommit)
          // Reload the application so it starts against the migrated schema.
          buildLink.forceReload()
          play.api.mvc.Results.Redirect(redirectUrl)
        }
      }
      case resolveEvolutions(db, rev) => {
        Some {
          evolutions.resolve(db, rev.toInt)
          buildLink.forceReload()
          play.api.mvc.Results.Redirect(redirectUrl)
        }
      }
      case _ => None
    }
  }
}
/**
 * Exception thrown when the database is not up to date.
 *
 * @param db the database name
 * @param script the script to be run to resolve the conflict.
 */
case class InvalidDatabaseRevision(db: String, script: String) extends PlayException.RichDescription(
  "Database '" + db + "' needs evolution!",
  "An SQL script need to be run on your database.") {

  def subTitle = "This SQL script must be run:"
  def content = script

  // Browser-side snippet that triggers the "apply evolutions" web command and
  // then returns the user to the page they came from.
  private val javascript = """
        document.location = '/@evolutions/apply/%s?redirect=' + encodeURIComponent(location)
    """.format(db).trim

  // Rendered in the Play error page: a description plus an "apply now" button.
  def htmlDescription = {
    <span>An SQL script will be run on your database -</span>
    <input name="evolution-button" type="button" value="Apply this script now!" onclick={ javascript }/>
  }.mkString
}
| jyotikamboj/container | pf-framework/src/play-jdbc/src/main/scala/play/api/db/evolutions/ApplicationEvolutions.scala | Scala | mit | 11,355 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.stats
import java.net.{DatagramSocket, DatagramPacket, InetAddress}
import java.util.Calendar
import java.text.SimpleDateFormat
import akka.actor.{Props, ActorSystem, Actor}
import akka.actor.Actor.Receive
/**
 * A UDP datagram to deliver: the raw statsd payload plus its destination.
 *
 * @param msg  the statsd-formatted payload (e.g. "name:1|c")
 * @param host destination host name
 * @param port destination UDP port
 */
case class Message(msg : String, host : String , port : Int)
/**
 * Actor that delivers [[Message]]s as UDP datagrams. A single shared socket
 * is reused for all sends and closed on JVM shutdown.
 */
class SenderActor extends Actor {

  private val dsocket = new DatagramSocket()

  sys addShutdownHook {
    dsocket.close()
  }

  override def receive: Receive = {
    case Message(msg, host, port) =>
      val address = InetAddress.getByName(host)
      // Size the packet by the encoded byte length, not the character count:
      // for non-ASCII payloads bytes.length > msg.length, and using msg.length
      // would silently truncate the datagram.
      val bytes = msg.getBytes()
      val packet = new DatagramPacket(bytes, bytes.length, address, port)
      dsocket.send(packet)
  }
}
/**
 * Fire-and-forget statsd client: builds metric lines and ships them over UDP
 * via an internal actor. `path` may contain the placeholders {MachineName}
 * and {DateTime}, substituted at send time.
 */
class StatsSender (path : String , host : String = "localhost", port : Int = 8125) {
  // Per-instance actor system used solely to deliver packets asynchronously.
  object Sender {
    val system = ActorSystem("mySystem")
    val actor = system.actorOf(Props[SenderActor], "SenderActor")
    def send(message: String) {
      actor ! Message(message, host , port)
    }
  }
  // Timestamp for the {DateTime} placeholder.
  // NOTE(review): "hh" is the 12-hour clock with no AM/PM marker, so two times
  // a day produce the same string — confirm whether "HH" (24-hour) was intended.
  private def getCurrentTimeStr : String = {
    val now = Calendar.getInstance().getTime()
    val dateFormat = new SimpleDateFormat("ddMMyyyy_hhmm")
    dateFormat.format(now)
  }
  // Short host name (portion before the first dot) for {MachineName}.
  private def getMachineName : String = {
    java.net.InetAddress.getLocalHost().getHostName().split('.')(0)
  }
  // Expands the placeholders in `p` and appends the sanitised action name
  // (dots -> dashes, spaces -> underscores) as the final metric segment.
  private def getName(p : String , action : String) : String = {
    p.replace("{MachineName}", getMachineName).replace("{DateTime}", getCurrentTimeStr) + "." + action.replace(".","-").replace(" ","_")
  }
  /** Sends a statsd counter ("|c") sample with the given increment. */
  def sendCounts(action : String, num : Int) {
    val message = getName(path, action) + ":" + num + "|c"
    Sender.send(message)
  }
  /** Sends a statsd timing ("|ms") sample. */
  def sendTimings(action : String, num : Int) {
    val message = getName(path, action) + ":" + num + "|ms"
    Sender.send(message)
  }
  /** Sends a statsd gauge ("|g") value. */
  def sendGauges(action : String, num : Int) {
    val message = getName(path, action) + ":" + num + "|g"
    Sender.send(message)
  }
  // NOTE(review): statsd sets are conventionally "name:value|s"; this emits
  // "name|s" with no value — confirm the receiving end accepts this form.
  def sendSets(action : String) {
    val message = getName(path, action) + "|s"
    Sender.send(message)
  }
}
| nruppin/CM-Well | server/cmwell-util/src/main/scala/cmwell/stats/StatsSender.scala | Scala | apache-2.0 | 2,755 |
package demo.components
import japgolly.scalajs.react.ReactComponentB
import japgolly.scalajs.react.vdom.prefix_<^._
object ReactTreeViewInfo {
  // Static, prop-less info card describing the ReactTreeView demo component.
  val component = ReactComponentB[Unit]("ReactTreeViewInfo")
    .render(P => {
      InfoTemplate(componentFilePath = "treeviews/ReactTreeView.scala")(
        <.h3("React TreeView :"),
        <.p("Tree View Component with search feature")
      )
    }).buildU

  /** Instantiates the component (it takes no props). */
  def apply() = component()
}
| elacin/scalajs-react-components | demo/src/main/scala/demo/components/ReactTreeViewInfo.scala | Scala | apache-2.0 | 432 |
/*
* Copyright © 2015-2019 the contributors (see Contributors.md).
*
* This file is part of Knora.
*
* Knora is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Knora is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with Knora. If not, see <http://www.gnu.org/licenses/>.
*/
package org.knora.webapi.responders.v1
import java.util.UUID
import akka.actor.{ActorRef, Props}
import akka.testkit.ImplicitSender
import com.typesafe.config.{Config, ConfigFactory}
import org.knora.webapi.SharedOntologyTestDataADM._
import org.knora.webapi.SharedTestDataADM._
import org.knora.webapi._
import org.knora.webapi.app.{APPLICATION_MANAGER_ACTOR_NAME, ApplicationActor}
import org.knora.webapi.messages.store.sipimessages.SipiConversionFileRequestV1
import org.knora.webapi.messages.store.triplestoremessages._
import org.knora.webapi.messages.v1.responder.resourcemessages.{LocationV1, ResourceFullGetRequestV1, ResourceFullResponseV1}
import org.knora.webapi.messages.v1.responder.valuemessages._
import org.knora.webapi.messages.v2.responder.standoffmessages._
import org.knora.webapi.util.IriConversions._
import org.knora.webapi.util.{MutableTestIri, StringFormatter}
import scala.concurrent.duration._
/**
 * Static data for testing [[ValuesResponderV1]].
 */
object ValuesResponderV1Spec {

    // Raise Akka log levels so responder activity shows up in test output.
    val config: Config = ConfigFactory.parseString(
        """
         akka.loglevel = "DEBUG"
         akka.stdout-loglevel = "DEBUG"
        """.stripMargin)

    // Projects exercised by the spec.
    private val incunabulaProjectIri = INCUNABULA_PROJECT_IRI
    private val anythingProjectIri = ANYTHING_PROJECT_IRI

    // Well-known test resources from the loaded RDF data.
    private val zeitglöckleinIri = "http://rdfh.ch/0803/c5058f3a"
    private val miscResourceIri = "http://rdfh.ch/0803/miscResource"
    private val aThingIri = "http://rdfh.ch/0001/a-thing"

    // Users with different project memberships, used to exercise permissions.
    private val incunabulaUser = SharedTestDataADM.incunabulaMemberUser
    private val imagesUser = SharedTestDataADM.imagesUser01
    private val anythingUser = SharedTestDataADM.anythingUser1
}
/**
* Tests [[ValuesResponderV1]].
*/
class ValuesResponderV1Spec extends CoreSpec(ValuesResponderV1Spec.config) with ImplicitSender {
implicit private val stringFormatter: StringFormatter = StringFormatter.getGeneralInstance
import ValuesResponderV1Spec._
/* we need to run our app with the mocked sipi actor */
override lazy val appActor: ActorRef = system.actorOf(Props(new ApplicationActor with ManagersWithMockedSipi).withDispatcher(KnoraDispatchers.KnoraActorDispatcher), name = APPLICATION_MANAGER_ACTOR_NAME)
override lazy val rdfDataObjects = List(
RdfDataObject(path = "_test_data/responders.v1.ValuesResponderV1Spec/incunabula-data.ttl", name = "http://www.knora.org/data/0803/incunabula"),
RdfDataObject(path = "_test_data/demo_data/images-demo-data.ttl", name = "http://www.knora.org/data/00FF/images"),
RdfDataObject(path = "_test_data/all_data/anything-data.ttl", name = "http://www.knora.org/data/0001/anything")
)
// The default timeout for receiving reply messages from actors.
private val timeout = 30.seconds
// IRIs that are generated by tests and used by subsequent tests.
private val commentIri = new MutableTestIri
private val firstValueIriWithResourceRef = new MutableTestIri
private val secondValueIriWithResourceRef = new MutableTestIri
private val standoffLinkValueIri = new MutableTestIri
private val currentSeqnumValueIri = new MutableTestIri
private val currentPubdateValueIri = new MutableTestIri
private val linkObjLinkValueIri = new MutableTestIri
private val currentColorValueIri = new MutableTestIri
private val currentGeomValueIri = new MutableTestIri
private val partOfLinkValueIri = new MutableTestIri
// a sample set of standoff tags
private val sampleStandoff: Vector[StandoffTagV2] = Vector(
StandoffTagV2(
standoffTagClassIri = OntologyConstants.Standoff.StandoffParagraphTag.toSmartIri,
startPosition = 0,
endPosition = 10,
uuid = UUID.randomUUID(),
originalXMLID = None,
startIndex = 0
),
StandoffTagV2(
standoffTagClassIri = OntologyConstants.Standoff.StandoffBoldTag.toSmartIri,
startPosition = 0,
endPosition = 7,
uuid = UUID.randomUUID(),
originalXMLID = None,
startIndex = 1
)
)
private val dummyMapping = MappingXMLtoStandoff(
namespace = Map.empty[String, Map[String, Map[String, XMLTag]]],
defaultXSLTransformation = None
)
private def checkComment1aResponse(response: CreateValueResponseV1, utf8str: String, standoff: Seq[StandoffTagV2] = Seq.empty[StandoffTagV2]): Unit = {
assert(response.rights == 8, "rights was not 8")
assert(response.value.asInstanceOf[TextValueV1].utf8str == utf8str, "comment value did not match")
if (standoff.nonEmpty) {
response.value match {
case textValueWithStandoff: TextValueWithStandoffV1 =>
assert(textValueWithStandoff.standoff.sortBy(standoffTag => (standoffTag.standoffTagClassIri.toString, standoffTag.startPosition)) == standoff.sortBy(standoffTag => (standoffTag.standoffTagClassIri.toString, standoffTag.startPosition)), "standoff did not match")
case _ => throw AssertionException("response should be of type TextValueWithStandoffV1")
}
}
commentIri.set(response.id)
}
private def checkValueGetResponse(response: ValueGetResponseV1): Unit = {
assert(response.rights == 8, "rights was not 8")
assert(response.value.asInstanceOf[TextValueV1].utf8str == "Comment 1a", "comment value did not match")
}
private def checkValueGetResponseWithStandoff(response: ValueGetResponseV1): Unit = {
assert(response.rights == 6, "rights was not 6")
assert(response.value.asInstanceOf[TextValueWithStandoffV1].utf8str == "Zusammengebunden mit zwei weiteren Drucken von Johann Amerbach", "comment utf8str value did not match")
// expected Standoff information for <http://rdfh.ch/0803/e41ab5695c/values/d3398239089e04> in incunabula-data.ttl
val standoff = Vector(
StandoffTagV2(
standoffTagClassIri = OntologyConstants.Standoff.StandoffRootTag.toSmartIri,
startPosition = 0,
endPosition = 62,
uuid = UUID.fromString("4800e53e-3835-498e-b658-6cc4f93ab894"),
originalXMLID = None,
startIndex = 0
), StandoffTagV2(
standoffTagClassIri = OntologyConstants.Standoff.StandoffBoldTag.toSmartIri,
startPosition = 21,
endPosition = 25,
uuid = UUID.fromString("4bc24696-5dde-4ced-9687-6f8e4519efe8"),
originalXMLID = None,
startIndex = 1,
startParentIndex = Some(0)
)
)
assert(response.value.asInstanceOf[TextValueWithStandoffV1].standoff.sortBy(_.standoffTagClassIri) == standoff.sortBy(_.standoffTagClassIri), "standoff did not match")
}
private def checkComment1bResponse(response: ChangeValueResponseV1, utf8str: String, standoff: Seq[StandoffTagV2] = Seq.empty[StandoffTagV2]): Unit = {
assert(response.rights == 8, "rights was not 8")
assert(response.value.asInstanceOf[TextValueV1].utf8str == utf8str, "comment value did not match")
if (standoff.nonEmpty) {
response.value match {
case textValueWithStandoff: TextValueWithStandoffV1 =>
assert(textValueWithStandoff.standoff.sortBy(standoffTag => (standoffTag.standoffTagClassIri.toString, standoffTag.startPosition)) == standoff.sortBy(standoffTag => (standoffTag.standoffTagClassIri.toString, standoffTag.startPosition)), "standoff did not match")
case _ => throw AssertionException("response should be of type TextValueWithStandoffV1")
}
}
commentIri.set(response.id)
}
private def checkOrderInResource(response: ResourceFullResponseV1): Unit = {
val comments = response.props.get.properties.filter(_.pid == "http://www.knora.org/ontology/0803/incunabula#book_comment").head
assert(comments.values == Vector(
TextValueSimpleV1(utf8str = "Comment 1b"),
TextValueSimpleV1("Comment 2")
), "Values of book_comment did not match")
}
private def checkTextValue(expected: TextValueV1, received: TextValueV1): Unit = {
assert(received.utf8str == expected.utf8str)
// if standoff is expected, compare the standoff tags
expected match {
case expectedWithStandoff: TextValueWithStandoffV1 =>
assert(received.asInstanceOf[TextValueWithStandoffV1].resource_reference == expectedWithStandoff.resource_reference)
assert(received.asInstanceOf[TextValueWithStandoffV1].standoff.map(_.standoffTagClassIri).sorted == expectedWithStandoff.standoff.map(_.standoffTagClassIri).sorted)
assert(received.asInstanceOf[TextValueWithStandoffV1].standoff.sortBy(standoffTag => (standoffTag.standoffTagClassIri.toString, standoffTag.startPosition)) == expectedWithStandoff.standoff.sortBy(standoffTag => (standoffTag.standoffTagClassIri.toString, standoffTag.startPosition)))
case _ =>
}
}
private def getLastModificationDate(resourceIri: IRI): Option[String] = {
val lastModSparqlQuery = queries.sparql.v1.txt.getLastModificationDate(
triplestore = settings.triplestoreType,
resourceIri = resourceIri
).toString()
storeManager ! SparqlSelectRequest(lastModSparqlQuery)
expectMsgPF(timeout) {
case response: SparqlSelectResponse =>
val rows = response.results.bindings
assert(rows.size <= 1, s"Resource $resourceIri has more than one instance of knora-base:lastModificationDate")
if (rows.size == 1) {
Some(rows.head.rowMap("lastModificationDate"))
} else {
None
}
}
}
private def checkImageFileValueChange(received: ChangeFileValueResponseV1, request: ChangeFileValueRequestV1): Unit = {
assert(received.locations.size == 1, "Expected one file value to have been changed")
received.locations.foreach {
location: LocationV1 => assert(location.origname == request.file.originalFilename, "wrong original file name")
}
}
"The values responder" should {
"add a new text value without Standoff" in {
val lastModBeforeUpdate = getLastModificationDate(zeitglöckleinIri)
val utf8str = "Comment 1a"
responderManager ! CreateValueRequestV1(
resourceIri = zeitglöckleinIri,
propertyIri = "http://www.knora.org/ontology/0803/incunabula#book_comment",
value = TextValueSimpleV1(utf8str = utf8str),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: CreateValueResponseV1 => checkComment1aResponse(msg, utf8str)
}
// Check that the resource's last modification date got updated.
val lastModAfterUpdate = getLastModificationDate(zeitglöckleinIri)
lastModBeforeUpdate != lastModAfterUpdate should ===(true)
}
"attempt to add a duplicate text value without standoff" in {
val utf8str = "Comment 1a"
responderManager ! CreateValueRequestV1(
resourceIri = zeitglöckleinIri,
propertyIri = "http://www.knora.org/ontology/0803/incunabula#book_comment",
value = TextValueSimpleV1(utf8str = utf8str),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[DuplicateValueException] should ===(true)
}
}
"query a text value without Standoff" in {
responderManager ! ValueGetRequestV1(
valueIri = commentIri.get,
userProfile = incunabulaUser
)
expectMsgPF(timeout) {
case msg: ValueGetResponseV1 => checkValueGetResponse(msg)
}
}
"query a text value containing Standoff" in {
responderManager ! ValueGetRequestV1(
valueIri = "http://rdfh.ch/0803/e41ab5695c/values/d3398239089e04",
userProfile = incunabulaUser
)
expectMsgPF(timeout) {
case msg: ValueGetResponseV1 =>
checkValueGetResponseWithStandoff(msg)
}
}
"query a standoff link as an ordinary value" in {
responderManager ! ValueGetRequestV1(
valueIri = "http://rdfh.ch/0001/a-thing-with-text-values/values/0",
userProfile = anythingUser
)
expectMsgPF(timeout) {
case msg: ValueGetResponseV1 => msg.rights should ===(2)
}
}
"query a LinkValue" in {
responderManager ! LinkValueGetRequestV1(
subjectIri = "http://rdfh.ch/0803/8a0b1e75",
predicateIri = "http://www.knora.org/ontology/0803/incunabula#partOf",
objectIri = zeitglöckleinIri,
userProfile = incunabulaUser
)
expectMsgPF(timeout) {
case msg: ValueGetResponseV1 =>
msg.valuetype should ===(OntologyConstants.KnoraBase.LinkValue)
msg.value should ===(LinkValueV1(
subjectIri = "http://rdfh.ch/0803/8a0b1e75",
predicateIri = "http://www.knora.org/ontology/0803/incunabula#partOf",
objectIri = zeitglöckleinIri,
referenceCount = 1
))
msg.rights should ===(2)
}
}
"add a new version of a text value without Standoff" in {
val lastModBeforeUpdate = getLastModificationDate(zeitglöckleinIri)
val utf8str = "Comment 1b"
val oldIri = commentIri.get
responderManager ! ChangeValueRequestV1(
valueIri = commentIri.get,
value = TextValueSimpleV1(utf8str = utf8str),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: ChangeValueResponseV1 => checkComment1bResponse(msg, utf8str)
}
// Check that the resource's last modification date got updated.
val lastModAfterUpdate = getLastModificationDate(zeitglöckleinIri)
lastModBeforeUpdate != lastModAfterUpdate should ===(true)
// Check that the permissions and UUID were deleted from the previous version of the value.
val sparqlQuery =
s"""
|PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
|PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
|PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
|PREFIX knora-base: <http://www.knora.org/ontology/knora-base#>
|
|SELECT ?value ?uuid ?permissions WHERE {
| BIND(IRI("$oldIri") AS ?value)
|
| OPTIONAL {
| ?value knora-base:valueHasUUID ?uuid .
| }
|
| OPTIONAL {
| ?value knora-base:hasPermissions ?permissions .
| }
|}
""".stripMargin
storeManager ! SparqlSelectRequest(sparqlQuery)
expectMsgPF(timeout) {
case sparqlSelectResponse: SparqlSelectResponse =>
assert(sparqlSelectResponse.results.bindings.head.rowMap.keySet == Set("value"))
}
}
"not add a new version of a value that's exactly the same as the current version" in {
val utf8str = "Comment 1b"
responderManager ! ChangeValueRequestV1(
valueIri = commentIri.get,
value = TextValueSimpleV1(utf8str = utf8str),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[DuplicateValueException] should ===(true)
}
}
"not create a new value that would duplicate an existing value" in {
val utf8str = "Comment 1b"
responderManager ! CreateValueRequestV1(
resourceIri = zeitglöckleinIri,
propertyIri = "http://www.knora.org/ontology/0803/incunabula#book_comment",
value = TextValueSimpleV1(utf8str = utf8str),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[DuplicateValueException] should ===(true)
}
}
"not add a new version of a value that would duplicate an existing value" in {
val utf8str = "GW 4168"
responderManager ! ChangeValueRequestV1(
valueIri = "http://rdfh.ch/0803/c5058f3a/values/184e99ca01",
value = TextValueSimpleV1(utf8str = utf8str),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[DuplicateValueException] should ===(true)
}
}
"insert valueHasOrder correctly for each value" in {
responderManager ! CreateValueRequestV1(
resourceIri = zeitglöckleinIri,
propertyIri = "http://www.knora.org/ontology/0803/incunabula#book_comment",
value = TextValueSimpleV1("Comment 2"),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: CreateValueResponseV1 => ()
}
responderManager ! ResourceFullGetRequestV1(
iri = zeitglöckleinIri,
userADM = incunabulaUser
)
expectMsgPF(timeout) {
case msg: ResourceFullResponseV1 => checkOrderInResource(msg)
}
}
"return the version history of a value" in {
responderManager ! ValueVersionHistoryGetRequestV1(
resourceIri = zeitglöckleinIri,
propertyIri = "http://www.knora.org/ontology/0803/incunabula#book_comment",
currentValueIri = commentIri.get,
userProfile = incunabulaUser
)
expectMsgPF(timeout) {
case msg: ValueVersionHistoryGetResponseV1 => msg.valueVersions.length should ===(2)
}
}
"mark a value as deleted" in {
val lastModBeforeUpdate = getLastModificationDate(zeitglöckleinIri)
responderManager ! DeleteValueRequestV1(
valueIri = commentIri.get,
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: DeleteValueResponseV1 => commentIri.set(msg.id)
}
responderManager ! ValueGetRequestV1(
valueIri = commentIri.get,
userProfile = incunabulaUser
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[NotFoundException] should ===(true)
}
// Check that the resource's last modification date got updated.
val lastModAfterUpdate = getLastModificationDate(zeitglöckleinIri)
lastModBeforeUpdate != lastModAfterUpdate should ===(true)
}
"not add a new value to a nonexistent resource" in {
responderManager ! CreateValueRequestV1(
resourceIri = "http://rdfh.ch/0803/nonexistent",
propertyIri = "http://www.knora.org/ontology/0803/incunabula#book_comment",
value = TextValueSimpleV1("Comment 1"),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[NotFoundException] should ===(true)
}
}
"not add a new value to a deleted resource" in {
responderManager ! CreateValueRequestV1(
resourceIri = "http://rdfh.ch/0803/9935159f67",
propertyIri = "http://www.knora.org/ontology/0803/incunabula#book_comment",
value = TextValueSimpleV1("Comment 1"),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[NotFoundException] should ===(true)
}
}
"not add a new version of a deleted value" in {
responderManager ! ChangeValueRequestV1(
valueIri = commentIri.get,
value = TextValueSimpleV1("Comment 1c"),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[NotFoundException] should ===(true)
}
}
"not add a new value to a resource that the user doesn't have permission to modify" in {
responderManager ! CreateValueRequestV1(
resourceIri = "http://rdfh.ch/0803/e41ab5695c",
propertyIri = "http://www.knora.org/ontology/0803/incunabula#book_comment",
value = TextValueSimpleV1("Comment 1"),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[ForbiddenException] should ===(true)
}
}
"not add a new value of the wrong type" in {
responderManager ! CreateValueRequestV1(
resourceIri = "http://rdfh.ch/0803/21abac2162",
propertyIri = "http://www.knora.org/ontology/0803/incunabula#pubdate",
value = TextValueSimpleV1("this is not a date"),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[OntologyConstraintException] should ===(true)
}
}
"not add a new version to a value that the user doesn't have permission to modify" in {
responderManager ! ChangeValueRequestV1(
valueIri = "http://rdfh.ch/0803/c5058f3a/values/c3295339",
value = TextValueSimpleV1("Zeitglöcklein des Lebens und Leidens Christi modified"),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[ForbiddenException] should ===(true)
}
}
"not add a new version of a value of the wrong type" in {
responderManager ! ChangeValueRequestV1(
valueIri = "http://rdfh.ch/0803/c5058f3a/values/cfd09f1e01",
value = TextValueSimpleV1("this is not a date"),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure =>
msg.cause.isInstanceOf[OntologyConstraintException] should ===(true)
}
}
"not add a new value that would violate a cardinality restriction" in {
// The cardinality of incunabula:partOf in incunabula:page is 1, and page http://rdfh.ch/0803/4f11adaf is already part of a book.
responderManager ! CreateValueRequestV1(
resourceIri = "http://rdfh.ch/0803/4f11adaf",
propertyIri = "http://www.knora.org/ontology/0803/incunabula#partOf",
value = LinkUpdateV1(targetResourceIri = "http://rdfh.ch/0803/e41ab5695c"),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[OntologyConstraintException] should ===(true)
}
// The cardinality of incunabula:seqnum in incunabula:page is 0-1, and page http://rdfh.ch/0803/4f11adaf already has a seqnum.
responderManager ! CreateValueRequestV1(
resourceIri = "http://rdfh.ch/0803/4f11adaf",
propertyIri = "http://www.knora.org/ontology/0803/incunabula#seqnum",
value = IntegerValueV1(1),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[OntologyConstraintException] should ===(true)
}
}
"create a color value" in {
val color = "#000000"
responderManager ! CreateValueRequestV1(
resourceIri = miscResourceIri,
propertyIri = "http://www.knora.org/ontology/0803/incunabula#miscHasColor",
value = ColorValueV1(color),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID)
expectMsgPF(timeout) {
case msg: CreateValueResponseV1 =>
currentColorValueIri.set(msg.id)
msg.value should ===(ColorValueV1(color))
}
}
"change an existing color value" in {
val color = "#FFFFFF"
responderManager ! ChangeValueRequestV1(
value = ColorValueV1(color),
userProfile = incunabulaUser,
valueIri = currentColorValueIri.get,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: ChangeValueResponseV1 =>
currentColorValueIri.set(msg.id)
msg.value should ===(ColorValueV1(color))
}
}
"create a geometry value" in {
val geom = "{\\"status\\":\\"active\\",\\"lineColor\\":\\"#ff3333\\",\\"lineWidth\\":2,\\"points\\":[{\\"x\\":0.5516074450084602,\\"y\\":0.4444444444444444},{\\"x\\":0.2791878172588832,\\"y\\":0.5}],\\"type\\":\\"rectangle\\",\\"original_index\\":0}"
responderManager ! CreateValueRequestV1(
resourceIri = miscResourceIri,
propertyIri = "http://www.knora.org/ontology/0803/incunabula#miscHasGeometry",
value = GeomValueV1(geom),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID)
expectMsgPF(timeout) {
case msg: CreateValueResponseV1 =>
currentGeomValueIri.set(msg.id)
msg.value should ===(GeomValueV1(geom))
}
}
"change a geometry value for a region" in {
val geom = "{\\"status\\":\\"active\\",\\"lineColor\\":\\"#ff4433\\",\\"lineWidth\\":1,\\"points\\":[{\\"x\\":0.5516074450084602,\\"y\\":0.4444444444444444},{\\"x\\":0.2791878172588832,\\"y\\":0.5}],\\"type\\":\\"rectangle\\",\\"original_index\\":0}"
responderManager ! ChangeValueRequestV1(
value = GeomValueV1(geom),
valueIri = currentGeomValueIri.get,
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: ChangeValueResponseV1 =>
currentGeomValueIri.set(msg.id)
msg.value should ===(GeomValueV1(geom))
}
}
"add a new text value with Standoff" in {
val utf8str = "Comment 1aa"
responderManager ! CreateValueRequestV1(
resourceIri = zeitglöckleinIri,
propertyIri = "http://www.knora.org/ontology/0803/incunabula#book_comment",
value = TextValueWithStandoffV1(utf8str = utf8str, standoff = sampleStandoff, mapping = dummyMapping, mappingIri = "http://rdfh.ch/standoff/mappings/StandardMapping"),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: CreateValueResponseV1 => checkComment1aResponse(msg, utf8str, sampleStandoff)
}
}
"attempt to add a duplicate text value with standoff" in {
val utf8str = "Comment 1aa"
responderManager ! CreateValueRequestV1(
resourceIri = zeitglöckleinIri,
propertyIri = "http://www.knora.org/ontology/0803/incunabula#book_comment",
value = TextValueWithStandoffV1(utf8str = utf8str, standoff = sampleStandoff, mapping = dummyMapping, mappingIri = "http://rdfh.ch/standoff/mappings/StandardMapping"),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[DuplicateValueException] should ===(true)
}
}
"add a new version of a text value with Standoff" in {
val utf8str = "Comment 1bb"
responderManager ! ChangeValueRequestV1(
valueIri = commentIri.get,
value = TextValueWithStandoffV1(utf8str = utf8str, standoff = sampleStandoff, mapping = dummyMapping, mappingIri = "http://rdfh.ch/standoff/mappings/StandardMapping"),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: ChangeValueResponseV1 => checkComment1bResponse(msg, utf8str, sampleStandoff)
}
}
"attempt to add a redundant version of a text value with standoff" in {
val utf8str = "Comment 1bb"
responderManager ! ChangeValueRequestV1(
valueIri = commentIri.get,
value = TextValueWithStandoffV1(utf8str = utf8str, standoff = sampleStandoff, mapping = dummyMapping, mappingIri = "http://rdfh.ch/standoff/mappings/StandardMapping"),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[DuplicateValueException] should ===(true)
}
}
"add a new text value containing a Standoff resource reference, and create a hasStandoffLinkTo direct link and a corresponding LinkValue" in {
val textValueWithResourceRef = TextValueWithStandoffV1(
utf8str = "This comment refers to another resource",
standoff = Vector(
StandoffTagV2(
dataType = Some(StandoffDataTypeClasses.StandoffLinkTag),
standoffTagClassIri = OntologyConstants.KnoraBase.StandoffLinkTag.toSmartIri,
startPosition = 31,
endPosition = 39,
attributes = Vector(StandoffTagIriAttributeV2(standoffPropertyIri = OntologyConstants.KnoraBase.StandoffTagHasLink.toSmartIri, value = zeitglöckleinIri)),
uuid = UUID.randomUUID(),
originalXMLID = None,
startIndex = 0
)
),
resource_reference = Set(zeitglöckleinIri),
mappingIri = "http://rdfh.ch/standoff/mappings/StandardMapping",
mapping = dummyMapping
)
responderManager ! CreateValueRequestV1(
resourceIri = "http://rdfh.ch/0803/21abac2162",
propertyIri = "http://www.knora.org/ontology/0803/incunabula#book_comment",
value = textValueWithResourceRef,
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case CreateValueResponseV1(newValue: TextValueWithStandoffV1, _, newValueIri: IRI, _) =>
firstValueIriWithResourceRef.set(newValueIri)
checkTextValue(received = newValue, expected = textValueWithResourceRef)
}
responderManager ! LinkValueGetRequestV1(
subjectIri = "http://rdfh.ch/0803/21abac2162",
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = zeitglöckleinIri,
userProfile = incunabulaUser
)
// Since this is the first Standoff resource reference between the source and target resources, we should
// now have version 1 of a LinkValue, with a reference count of 1.
expectMsgPF(timeout) {
case msg: ValueGetResponseV1 =>
msg.valuetype should ===(OntologyConstants.KnoraBase.LinkValue)
msg.value should ===(LinkValueV1(
subjectIri = "http://rdfh.ch/0803/21abac2162",
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = zeitglöckleinIri,
referenceCount = 1
))
msg.rights should ===(2)
}
val sparqlQuery = queries.sparql.v1.txt.findLinkValueByObject(
triplestore = settings.triplestoreType,
subjectIri = "http://rdfh.ch/0803/21abac2162",
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = zeitglöckleinIri
).toString()
storeManager ! SparqlSelectRequest(sparqlQuery)
// The new LinkValue should have no previous version, and there should be a direct link between the resources.
expectMsgPF(timeout) {
case response: SparqlSelectResponse =>
val rows = response.results.bindings
rows.groupBy(_.rowMap("linkValue")).size should ===(1)
rows.exists(_.rowMap("objPred") == OntologyConstants.KnoraBase.PreviousValue) should ===(false)
rows.head.rowMap.get("directLinkExists").exists(_.toBoolean) should ===(true)
}
}
"add a new version of a text value containing a Standoff resource reference, without needlessly making a new version of the LinkValue" in {
// The new version contains two references to the same resource.
val textValueWithResourceRef = TextValueWithStandoffV1(
utf8str = "This updated comment refers to another resource",
standoff = Vector(
StandoffTagV2(
dataType = Some(StandoffDataTypeClasses.StandoffLinkTag),
standoffTagClassIri = OntologyConstants.KnoraBase.StandoffLinkTag.toSmartIri,
startPosition = 0,
endPosition = 4,
attributes = Vector(StandoffTagIriAttributeV2(standoffPropertyIri = OntologyConstants.KnoraBase.StandoffTagHasLink.toSmartIri, value = zeitglöckleinIri)),
uuid = UUID.randomUUID(),
originalXMLID = None,
startIndex = 0
),
StandoffTagV2(
dataType = Some(StandoffDataTypeClasses.StandoffLinkTag),
standoffTagClassIri = OntologyConstants.KnoraBase.StandoffLinkTag.toSmartIri,
startPosition = 39,
endPosition = 47,
attributes = Vector(StandoffTagIriAttributeV2(standoffPropertyIri = OntologyConstants.KnoraBase.StandoffTagHasLink.toSmartIri, value = zeitglöckleinIri)),
uuid = UUID.randomUUID(),
originalXMLID = None,
startIndex = 1
)
),
resource_reference = Set(zeitglöckleinIri),
mapping = dummyMapping,
mappingIri = "http://rdfh.ch/standoff/mappings/StandardMapping"
)
responderManager ! ChangeValueRequestV1(
valueIri = firstValueIriWithResourceRef.get,
value = textValueWithResourceRef,
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case ChangeValueResponseV1(newValue: TextValueWithStandoffV1, _, newValueIri: IRI, _) =>
firstValueIriWithResourceRef.set(newValueIri)
checkTextValue(received = newValue, expected = textValueWithResourceRef)
}
responderManager ! LinkValueGetRequestV1(
subjectIri = "http://rdfh.ch/0803/21abac2162",
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = zeitglöckleinIri,
userProfile = incunabulaUser
)
// Since the new version still refers to the same resource, the reference count of the LinkValue should not
// change.
expectMsgPF(timeout) {
case msg: ValueGetResponseV1 =>
msg.valuetype should ===(OntologyConstants.KnoraBase.LinkValue)
msg.value should ===(LinkValueV1(
subjectIri = "http://rdfh.ch/0803/21abac2162",
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = zeitglöckleinIri,
referenceCount = 1
))
msg.rights should ===(2)
}
val sparqlQuery = queries.sparql.v1.txt.findLinkValueByObject(
triplestore = settings.triplestoreType,
subjectIri = "http://rdfh.ch/0803/21abac2162",
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = zeitglöckleinIri
).toString()
storeManager ! SparqlSelectRequest(sparqlQuery)
// There should be no new version of the LinkValue, and the direct link should still be there.
expectMsgPF(timeout) {
case response: SparqlSelectResponse =>
val rows = response.results.bindings
rows.groupBy(_.rowMap("linkValue")).size should ===(1)
rows.exists(_.rowMap("objPred") == OntologyConstants.KnoraBase.PreviousValue) should ===(false)
rows.head.rowMap.get("directLinkExists").exists(_.toBoolean) should ===(true)
}
}
"add another new text value containing a Standoff resource reference, and make a new version of the LinkValue" in {
val textValueWithResourceRef = TextValueWithStandoffV1(
utf8str = "This remark refers to another resource",
standoff = Vector(
StandoffTagV2(
dataType = Some(StandoffDataTypeClasses.StandoffLinkTag),
standoffTagClassIri = OntologyConstants.KnoraBase.StandoffLinkTag.toSmartIri,
startPosition = 30,
endPosition = 38,
attributes = Vector(StandoffTagIriAttributeV2(standoffPropertyIri = OntologyConstants.KnoraBase.StandoffTagHasLink.toSmartIri, value = zeitglöckleinIri)),
uuid = UUID.randomUUID(),
originalXMLID = None,
startIndex = 0
)
),
resource_reference = Set(zeitglöckleinIri),
mapping = dummyMapping,
mappingIri = "http://rdfh.ch/standoff/mappings/StandardMapping"
)
responderManager ! CreateValueRequestV1(
resourceIri = "http://rdfh.ch/0803/21abac2162",
propertyIri = "http://www.knora.org/ontology/0803/incunabula#book_comment",
value = textValueWithResourceRef,
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case CreateValueResponseV1(newValue: TextValueWithStandoffV1, _, newValueIri: IRI, _) =>
secondValueIriWithResourceRef.set(newValueIri)
checkTextValue(received = newValue, expected = textValueWithResourceRef)
}
responderManager ! LinkValueGetRequestV1(
subjectIri = "http://rdfh.ch/0803/21abac2162",
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = zeitglöckleinIri,
userProfile = incunabulaUser
)
// Now that we've added a different TextValue that refers to the same resource, we should have version 2
// of the LinkValue, with a reference count of 2.
expectMsgPF(timeout) {
case msg: ValueGetResponseV1 =>
msg.valuetype should ===(OntologyConstants.KnoraBase.LinkValue)
msg.value should ===(LinkValueV1(
subjectIri = "http://rdfh.ch/0803/21abac2162",
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = zeitglöckleinIri,
referenceCount = 2
))
msg.rights should ===(2)
}
val sparqlQuery = queries.sparql.v1.txt.findLinkValueByObject(
triplestore = settings.triplestoreType,
subjectIri = "http://rdfh.ch/0803/21abac2162",
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = zeitglöckleinIri
).toString()
storeManager ! SparqlSelectRequest(sparqlQuery)
// It should have a previousValue pointing to the previous version, and the direct link should
// still be there.
expectMsgPF(timeout) {
case response: SparqlSelectResponse =>
val rows = response.results.bindings
rows.groupBy(_.rowMap("linkValue")).size should ===(1)
rows.exists(_.rowMap("objPred") == OntologyConstants.KnoraBase.PreviousValue) should ===(true)
rows.head.rowMap.get("directLinkExists").exists(_.toBoolean) should ===(true)
}
}
"add a new version of a text value with the Standoff resource reference removed, and make a new version of the LinkValue" in {
val textValue = TextValueSimpleV1(utf8str = "No resource reference here")
responderManager ! ChangeValueRequestV1(
valueIri = firstValueIriWithResourceRef.get,
value = textValue,
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case ChangeValueResponseV1(newValue: TextValueSimpleV1, _, newValueIri: IRI, _) =>
firstValueIriWithResourceRef.set(newValueIri)
checkTextValue(received = textValue, expected = newValue)
}
responderManager ! LinkValueGetRequestV1(
subjectIri = "http://rdfh.ch/0803/21abac2162",
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = zeitglöckleinIri,
userProfile = incunabulaUser
)
// Version 3 of the LinkValue should have a reference count of 1.
expectMsgPF(timeout) {
case msg: ValueGetResponseV1 =>
msg.valuetype should ===(OntologyConstants.KnoraBase.LinkValue)
msg.value should ===(LinkValueV1(
subjectIri = "http://rdfh.ch/0803/21abac2162",
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = zeitglöckleinIri,
referenceCount = 1
))
msg.rights should ===(2)
}
val sparqlQuery = queries.sparql.v1.txt.findLinkValueByObject(
triplestore = settings.triplestoreType,
subjectIri = "http://rdfh.ch/0803/21abac2162",
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = zeitglöckleinIri
).toString()
storeManager ! SparqlSelectRequest(sparqlQuery)
// The LinkValue should point to its previous version, and the direct link should still be there.
expectMsgPF(timeout) {
case response: SparqlSelectResponse =>
standoffLinkValueIri.set(response.results.bindings.head.rowMap("linkValue"))
val rows = response.results.bindings
rows.groupBy(_.rowMap("linkValue")).size should ===(1)
rows.exists(_.rowMap("objPred") == OntologyConstants.KnoraBase.PreviousValue) should ===(true)
rows.head.rowMap.get("directLinkExists").exists(_.toBoolean) should ===(true)
}
// The LinkValue should have 3 versions in its version history.
responderManager ! ValueVersionHistoryGetRequestV1(
resourceIri = "http://rdfh.ch/0803/21abac2162",
propertyIri = OntologyConstants.KnoraBase.HasStandoffLinkToValue,
currentValueIri = standoffLinkValueIri.get,
userProfile = incunabulaUser
)
expectMsgPF(timeout) {
case msg: ValueVersionHistoryGetResponseV1 => msg.valueVersions.length should ===(3)
}
}
"delete a hasStandoffLinkTo direct link when the reference count of the corresponding LinkValue reaches 0" in {
val textValue = TextValueSimpleV1(utf8str = "No resource reference here either")
responderManager ! ChangeValueRequestV1(
valueIri = secondValueIriWithResourceRef.get,
value = textValue,
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case ChangeValueResponseV1(newValue: TextValueSimpleV1, _, newValueIri: IRI, _) =>
secondValueIriWithResourceRef.set(newValueIri)
checkTextValue(received = newValue, expected = textValue)
}
// The new version of the LinkValue should be marked as deleted.
responderManager ! LinkValueGetRequestV1(
subjectIri = "http://rdfh.ch/0803/21abac2162",
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = zeitglöckleinIri,
userProfile = incunabulaUser
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[NotFoundException] should ===(true)
}
val sparqlQuery = queries.sparql.v1.txt.findLinkValueByObject(
triplestore = settings.triplestoreType,
subjectIri = "http://rdfh.ch/0803/21abac2162",
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = zeitglöckleinIri,
includeDeleted = true
).toString()
storeManager ! SparqlSelectRequest(sparqlQuery)
// The LinkValue should point to its previous version. There should be no direct link.
expectMsgPF(timeout) {
case response: SparqlSelectResponse =>
standoffLinkValueIri.unset()
val rows = response.results.bindings
rows.groupBy(_.rowMap("linkValue")).size should ===(1)
rows.exists(row => row.rowMap("objPred") == OntologyConstants.KnoraBase.IsDeleted && row.rowMap("objObj").toBoolean) should ===(true)
rows.exists(_.rowMap("objPred") == OntologyConstants.KnoraBase.PreviousValue) should ===(true)
rows.head.rowMap.get("directLinkExists").exists(_.toBoolean) should ===(false)
}
}
"recreate the hasStandoffLinkTo direct link when a new standoff resource reference is added" in {
val textValueWithResourceRef = TextValueWithStandoffV1(
utf8str = "This updated comment refers again to another resource",
standoff = Vector(
StandoffTagV2(
dataType = Some(StandoffDataTypeClasses.StandoffLinkTag),
standoffTagClassIri = OntologyConstants.KnoraBase.StandoffLinkTag.toSmartIri,
startPosition = 45,
endPosition = 53,
attributes = Vector(StandoffTagIriAttributeV2(standoffPropertyIri = OntologyConstants.KnoraBase.StandoffTagHasLink.toSmartIri, value = zeitglöckleinIri)),
uuid = UUID.randomUUID(),
originalXMLID = None,
startIndex = 0
)
),
resource_reference = Set(zeitglöckleinIri),
mapping = dummyMapping,
mappingIri = "http://rdfh.ch/standoff/mappings/StandardMapping"
)
responderManager ! ChangeValueRequestV1(
valueIri = firstValueIriWithResourceRef.get,
value = textValueWithResourceRef,
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case ChangeValueResponseV1(newValue: TextValueWithStandoffV1, _, newValueIri: IRI, _) =>
firstValueIriWithResourceRef.set(newValueIri)
checkTextValue(received = newValue, expected = textValueWithResourceRef)
}
responderManager ! LinkValueGetRequestV1(
subjectIri = "http://rdfh.ch/0803/21abac2162",
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = zeitglöckleinIri,
userProfile = incunabulaUser
)
// There should now be a new LinkValue with no previous versions and a reference count of 1, and
// there should once again be a direct link.
expectMsgPF(timeout) {
case msg: ValueGetResponseV1 =>
msg.valuetype should ===(OntologyConstants.KnoraBase.LinkValue)
msg.value should ===(LinkValueV1(
subjectIri = "http://rdfh.ch/0803/21abac2162",
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = zeitglöckleinIri,
referenceCount = 1
))
msg.rights should ===(2)
}
val sparqlQuery = queries.sparql.v1.txt.findLinkValueByObject(
triplestore = settings.triplestoreType,
subjectIri = "http://rdfh.ch/0803/21abac2162",
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = zeitglöckleinIri
).toString()
storeManager ! SparqlSelectRequest(sparqlQuery)
expectMsgPF(timeout) {
case response: SparqlSelectResponse =>
val rows = response.results.bindings
rows.groupBy(_.rowMap("linkValue")).size should ===(1)
rows.exists(_.rowMap("objPred") == OntologyConstants.KnoraBase.PreviousValue) should ===(false)
rows.head.rowMap.get("directLinkExists").exists(_.toBoolean) should ===(true)
}
}
// Creates an incunabula:seqnum integer value (4) on a page resource and remembers
// the new value's IRI for the following "change" test.
// NOTE(review): the tests in this spec share mutable IRI holders
// (currentSeqnumValueIri, currentPubdateValueIri, ...) and rely on running in
// declaration order.
"add a new Integer value (seqnum of a page)" in {
val seqnum = 4
responderManager ! CreateValueRequestV1(
resourceIri = "http://rdfh.ch/0803/8a0b1e75",
propertyIri = "http://www.knora.org/ontology/0803/incunabula#seqnum",
value = IntegerValueV1(seqnum),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case CreateValueResponseV1(newValue: IntegerValueV1, _, newValueIri: IRI, _) =>
// Keep the IRI of the freshly created value so the next test can change it.
currentSeqnumValueIri.set(newValueIri)
newValue should ===(IntegerValueV1(seqnum))
}
}
// Updates the integer value created above (4 -> 8) via its stored IRI and checks
// that the responder echoes the new integer back.
"change an existing Integer value (seqnum of a page)" in {
val seqnum = 8
responderManager ! ChangeValueRequestV1(
value = IntegerValueV1(seqnum),
userProfile = incunabulaUser,
valueIri = currentSeqnumValueIri.get,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case ChangeValueResponseV1(newValue: IntegerValueV1, _, newValueIri: IRI, _) =>
newValue should ===(IntegerValueV1(seqnum))
}
}
// Creates a date range stored as Julian Day Numbers (2451545 with YEAR precision
// to 2457044 with DAY precision, Gregorian calendar) and expects it rendered as
// "2000" .. "2015-01-21" CE.
"add a new Date value (pubdate of a book)" in {
// great resource to verify that expected conversion result from and to JDC is correct:
// https://www.fourmilab.ch/documents/calendar/
responderManager ! CreateValueRequestV1(
resourceIri = "http://rdfh.ch/0803/21abac2162",
propertyIri = "http://www.knora.org/ontology/0803/incunabula#pubdate",
value = JulianDayNumberValueV1(
dateval1 = 2451545,
dateval2 = 2457044,
dateprecision1 = KnoraPrecisionV1.YEAR,
dateprecision2 = KnoraPrecisionV1.DAY,
calendar = KnoraCalendarV1.GREGORIAN
),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: CreateValueResponseV1 =>
// Save the value IRI so the next test can replace this date.
currentPubdateValueIri.set(msg.id)
msg.value should ===(DateValueV1("2000", "2015-01-21", "CE", "CE", KnoraCalendarV1.GREGORIAN))
}
}
// Replaces the pubdate with a single Julian-calendar day: JDN 2265854, which the
// responder is expected to render as 1491-07-28 CE (both endpoints identical).
"change an existing date (pubdate of a book)" in {
responderManager ! ChangeValueRequestV1(
value = JulianDayNumberValueV1(
dateval1 = 2265854,
dateval2 = 2265854,
dateprecision1 = KnoraPrecisionV1.DAY,
dateprecision2 = KnoraPrecisionV1.DAY,
calendar = KnoraCalendarV1.JULIAN
),
userProfile = incunabulaUser,
valueIri = currentPubdateValueIri.get,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: ChangeValueResponseV1 =>
currentPubdateValueIri.set(msg.id)
msg.value should ===(DateValueV1("1491-07-28", "1491-07-28", "CE", "CE", KnoraCalendarV1.JULIAN))
}
}
// Creates a knora-base:hasLinkTo link to the Zeitglöcklein book and verifies:
// (1) the returned LinkV1, (2) via SPARQL that the new LinkValue has no previous
// version and a direct link triple exists, and (3) that the source resource's
// last modification date changed.
"create a link between two resources" in {
val resourceIri = "http://rdfh.ch/0803/cb1a74e3e2f6"
val lastModBeforeUpdate = getLastModificationDate(resourceIri)
val createValueRequest = CreateValueRequestV1(
resourceIri = resourceIri,
propertyIri = OntologyConstants.KnoraBase.HasLinkTo,
value = LinkUpdateV1(
targetResourceIri = zeitglöckleinIri
),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
responderManager ! createValueRequest
expectMsgPF(timeout) {
case CreateValueResponseV1(linkV1: LinkV1, _, newLinkValueIri: IRI, _) =>
// Remember the LinkValue IRI for the change/delete tests below.
linkObjLinkValueIri.set(newLinkValueIri)
linkV1.targetResourceIri should ===(zeitglöckleinIri)
linkV1.valueResourceClass should ===(Some("http://www.knora.org/ontology/0803/incunabula#book"))
}
// The new LinkValue should have no previous version, and there should be a direct link between the resources.
val sparqlQuery = queries.sparql.v1.txt.findLinkValueByObject(
triplestore = settings.triplestoreType,
subjectIri = resourceIri,
predicateIri = OntologyConstants.KnoraBase.HasLinkTo,
objectIri = zeitglöckleinIri
).toString()
storeManager ! SparqlSelectRequest(sparqlQuery)
expectMsgPF(timeout) {
case response: SparqlSelectResponse =>
val rows = response.results.bindings
// Exactly one LinkValue describes this (subject, predicate, object) triple.
rows.groupBy(_.rowMap("linkValue")).size should ===(1)
rows.exists(_.rowMap("objPred") == OntologyConstants.KnoraBase.PreviousValue) should ===(false)
rows.head.rowMap.get("directLinkExists").exists(_.toBoolean) should ===(true)
}
// Check that the resource's last modification date got updated.
val lastModAfterUpdate = getLastModificationDate(resourceIri)
lastModBeforeUpdate != lastModAfterUpdate should ===(true)
}
// Re-submitting the identical link must be rejected with a DuplicateValueException.
"not create a duplicate link" in {
val createValueRequest = CreateValueRequestV1(
resourceIri = "http://rdfh.ch/0803/cb1a74e3e2f6",
propertyIri = OntologyConstants.KnoraBase.HasLinkTo,
value = LinkUpdateV1(
targetResourceIri = zeitglöckleinIri
),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
responderManager ! createValueRequest
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[DuplicateValueException] should ===(true)
}
}
// A link whose target violates the property's object class constraint must be
// rejected with an OntologyConstraintException.
"not create a link that points to a resource of the wrong class" in {
responderManager ! CreateValueRequestV1(
resourceIri = miscResourceIri,
propertyIri = "http://www.knora.org/ontology/0803/incunabula#miscHasBook", // can only point to an incunabula:book
value = LinkUpdateV1(
targetResourceIri = "http://rdfh.ch/0803/8a0b1e75" // an incunabula:page, not an incunabula:book
),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[OntologyConstraintException] should ===(true)
}
}
// Points the previously created link at a different resource and verifies both
// sides of the versioning story in the triplestore: the old LinkValue is marked
// deleted (with a previousValue and no direct link), while the new LinkValue has
// no previous version and a direct link. Also checks the source resource's last
// modification date changed.
"change a link" in {
val linkSourceIri = "http://rdfh.ch/0803/cb1a74e3e2f6"
val linkTargetIri = "http://rdfh.ch/0803/21abac2162"
val lastModBeforeUpdate = getLastModificationDate(linkSourceIri)
val changeValueRequest = ChangeValueRequestV1(
value = LinkUpdateV1(
targetResourceIri = linkTargetIri
),
userProfile = incunabulaUser,
valueIri = linkObjLinkValueIri.get,
apiRequestID = UUID.randomUUID
)
responderManager ! changeValueRequest
expectMsgPF(timeout) {
case ChangeValueResponseV1(linkValue: LinkV1, _, newLinkValueIri: IRI, _) =>
// Track the new LinkValue IRI for the delete test below.
linkObjLinkValueIri.set(newLinkValueIri)
linkValue.targetResourceIri should ===(linkTargetIri)
}
// The old LinkValue should be deleted now, and the old direct link should have been removed.
val oldLinkValueSparqlQuery = queries.sparql.v1.txt.findLinkValueByObject(
triplestore = settings.triplestoreType,
subjectIri = linkSourceIri,
predicateIri = OntologyConstants.KnoraBase.HasLinkTo,
objectIri = zeitglöckleinIri,
includeDeleted = true
).toString()
storeManager ! SparqlSelectRequest(oldLinkValueSparqlQuery)
expectMsgPF(timeout) {
case response: SparqlSelectResponse =>
val rows = response.results.bindings
rows.groupBy(_.rowMap("linkValue")).size should ===(1)
rows.exists(row => row.rowMap("objPred") == OntologyConstants.KnoraBase.IsDeleted && row.rowMap("objObj").toBoolean) should ===(true)
rows.exists(_.rowMap("objPred") == OntologyConstants.KnoraBase.PreviousValue) should ===(true)
rows.head.rowMap.get("directLinkExists").exists(_.toBoolean) should ===(false)
}
// The new LinkValue should have no previous version, and there should be a direct link between the resources.
val newLinkValueSparqlQuery = queries.sparql.v1.txt.findLinkValueByObject(
triplestore = settings.triplestoreType,
subjectIri = linkSourceIri,
predicateIri = OntologyConstants.KnoraBase.HasLinkTo,
objectIri = linkTargetIri
).toString()
storeManager ! SparqlSelectRequest(newLinkValueSparqlQuery)
expectMsgPF(timeout) {
case response: SparqlSelectResponse =>
val rows = response.results.bindings
rows.groupBy(_.rowMap("linkValue")).size should ===(1)
rows.exists(_.rowMap("objPred") == OntologyConstants.KnoraBase.PreviousValue) should ===(false)
rows.head.rowMap.get("directLinkExists").exists(_.toBoolean) should ===(true)
}
// Check that the link source's last modification date got updated.
val lastModAfterUpdate = getLastModificationDate(linkSourceIri)
lastModBeforeUpdate != lastModAfterUpdate should ===(true)
}
// Deletes the link with a comment and verifies in the triplestore that the final
// LinkValue version is marked deleted, carries the delete comment, points to its
// previous version, and that no direct link remains.
"delete a link between two resources" in {
val linkSourceIri = "http://rdfh.ch/0803/cb1a74e3e2f6"
val linkTargetIri = "http://rdfh.ch/0803/21abac2162"
val lastModBeforeUpdate = getLastModificationDate(linkSourceIri)
val comment = "This link is no longer needed"
responderManager ! DeleteValueRequestV1(
valueIri = linkObjLinkValueIri.get,
deleteComment = Some(comment),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: DeleteValueResponseV1 => linkObjLinkValueIri.set(msg.id)
}
val deletedLinkValueSparqlQuery = queries.sparql.v1.txt.findLinkValueByObject(
triplestore = settings.triplestoreType,
subjectIri = linkSourceIri,
predicateIri = OntologyConstants.KnoraBase.HasLinkTo,
objectIri = linkTargetIri,
includeDeleted = true
).toString()
storeManager ! SparqlSelectRequest(deletedLinkValueSparqlQuery)
expectMsgPF(timeout) {
case response: SparqlSelectResponse =>
val rows = response.results.bindings
rows.groupBy(_.rowMap("linkValue")).size should ===(1)
rows.exists(row => row.rowMap("objPred") == OntologyConstants.KnoraBase.IsDeleted && row.rowMap("objObj").toBoolean) should ===(true)
rows.exists(_.rowMap("objPred") == OntologyConstants.KnoraBase.PreviousValue) should ===(true)
rows.head.rowMap.get("directLinkExists").exists(_.toBoolean) should ===(false)
rows.exists(row => row.rowMap("objPred") == OntologyConstants.KnoraBase.DeleteComment && row.rowMap("objObj") == comment) should ===(true)
}
// Check that the link source's last modification date got updated.
val lastModAfterUpdate = getLastModificationDate(linkSourceIri)
lastModBeforeUpdate != lastModAfterUpdate should ===(true)
}
// Re-targets the partOf link of a page to another book via a hard-coded LinkValue
// IRI, then stores the resulting new LinkValue IRI for the follow-up test.
"change the partOf property of a page" in {
// A test UserProfile.
val userProfile = SharedTestDataADM.incunabulaCreatorUser
val linkTargetIri = "http://rdfh.ch/0803/e41ab5695c"
// Seed the shared holder with the known current partOf LinkValue of the page.
partOfLinkValueIri.set("http://rdfh.ch/0803/8a0b1e75/values/3a7b5130-22c2-4400-a794-062b7a3e3436")
val changeValueRequest = ChangeValueRequestV1(
value = LinkUpdateV1(
targetResourceIri = linkTargetIri
),
userProfile = userProfile,
valueIri = partOfLinkValueIri.get,
apiRequestID = UUID.randomUUID
)
responderManager ! changeValueRequest
expectMsgPF(timeout) {
case ChangeValueResponseV1(linkValue: LinkV1, _, newLinkValueIri: IRI, _) =>
// save valueIri for next test
partOfLinkValueIri.set(newLinkValueIri)
linkValue.targetResourceIri should ===(linkTargetIri)
}
}
// Submitting the unchanged target again must fail with a DuplicateValueException.
"try to change the partOf property of a page, but submit the current target Iri" in {
// A test UserADM.
val userProfile = SharedTestDataADM.incunabulaProjectAdminUser
val linkTargetIri = "http://rdfh.ch/0803/e41ab5695c"
val changeValueRequest = ChangeValueRequestV1(
value = LinkUpdateV1(
targetResourceIri = linkTargetIri
),
userProfile = userProfile,
valueIri = partOfLinkValueIri.get, // use valueIri from previous test
apiRequestID = UUID.randomUUID
)
responderManager ! changeValueRequest
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[DuplicateValueException] should ===(true)
}
}
// Creates a book_comment text value together with a metadata comment on the value
// itself, and checks both come back in the response.
"add a new text value with a comment" in {
val comment = "This is a comment"
val metaComment = "This is a metacomment"
responderManager ! CreateValueRequestV1(
resourceIri = zeitglöckleinIri,
propertyIri = "http://www.knora.org/ontology/0803/incunabula#book_comment",
value = TextValueSimpleV1(utf8str = comment),
comment = Some(metaComment),
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: CreateValueResponseV1 =>
msg.value.toString should ===(comment)
msg.comment should ===(Some(metaComment))
}
}
// Attaches a comment to an existing value without changing the value itself, and
// checks the resource's last modification date was bumped.
"add a comment to a value" in {
val lastModBeforeUpdate = getLastModificationDate(zeitglöckleinIri)
val comment = Some("This is wrong. I am the author!")
val changeCommentRequest = ChangeCommentRequestV1(
valueIri = "http://rdfh.ch/0803/c5058f3a/values/8653a672",
comment = comment,
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
responderManager ! changeCommentRequest
expectMsgPF(timeout) {
case msg: ChangeValueResponseV1 =>
// The underlying text value must be untouched by the comment change.
msg.value should ===(TextValueSimpleV1(utf8str = "Berthold, der Bruder"))
msg.comment should ===(comment)
}
}
// Replaces the still-image file value of a page via a (stubbed) Sipi conversion
// request; the detailed assertions live in checkImageFileValueChange.
"add a new image file value to an incunabula:page" in {
val fileRequest = SipiConversionFileRequestV1(
originalFilename = "Chlaus.jpg",
originalMimeType = "image/jpeg",
projectShortcode = "0803",
filename = "./test_server/images/Chlaus.jpg",
userProfile = incunabulaUser.asUserProfileV1
)
val fileChangeRequest = ChangeFileValueRequestV1(
resourceIri = "http://rdfh.ch/0803/8a0b1e75",
file = fileRequest,
apiRequestID = UUID.randomUUID,
userProfile = incunabulaUser)
responderManager ! fileChangeRequest
expectMsgPF(timeout) {
case msg: ChangeFileValueResponseV1 => checkImageFileValueChange(msg, fileChangeRequest)
}
}
// Changes a hierarchical list value on an image resource.
// NOTE(review): the IRI presumably identifies the "winter" node of the seasons
// list in the test data — verify against the images demo ontology fixtures.
"change the season of a image:bild from summer to winter" in {
val winter = "http://rdfh.ch/lists/00FF/eda2792605"
responderManager ! ChangeValueRequestV1(
value = HierarchicalListValueV1(winter),
userProfile = imagesUser,
valueIri = "http://rdfh.ch/00FF/d208fb9357d5/values/bc90a9c5091004",
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case ChangeValueResponseV1(newListValue: HierarchicalListValueV1, _, _, _) =>
newListValue should ===(HierarchicalListValueV1(winter))
}
}
// Creates a new hierarchical list value (season) on another image resource.
"create a season of a image:bild" in {
val summer = "http://rdfh.ch/lists/00FF/526f26ed04"
responderManager ! CreateValueRequestV1(
value = HierarchicalListValueV1(summer),
userProfile = imagesUser,
propertyIri = s"$IMAGES_ONTOLOGY_IRI#jahreszeit",
resourceIri = "http://rdfh.ch/00FF/691e7e2244d5",
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case CreateValueResponseV1(newListValue: HierarchicalListValueV1, _, _, _) =>
newListValue should ===(HierarchicalListValueV1(summer))
}
}
// Round-trips a decimal value on an anything:Thing.
"add a decimal value to an anything:Thing" in {
val decimalValue = DecimalValueV1(BigDecimal("5.6"))
responderManager ! CreateValueRequestV1(
value = decimalValue,
userProfile = anythingUser,
propertyIri = "http://www.knora.org/ontology/0001/anything#hasDecimal",
resourceIri = aThingIri,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case CreateValueResponseV1(newDecimalValue: DecimalValueV1, _, _, _) =>
newDecimalValue should ===(decimalValue)
}
}
// Round-trips an interval value whose endpoints need full BigDecimal precision
// (they differ only in the 16th decimal place).
"add an interval value to an anything:Thing" in {
val intervalValue = IntervalValueV1(timeval1 = BigDecimal("1000000000000000.0000000000000001"), timeval2 = BigDecimal("1000000000000000.0000000000000002"))
responderManager ! CreateValueRequestV1(
value = intervalValue,
userProfile = anythingUser,
propertyIri = "http://www.knora.org/ontology/0001/anything#hasInterval",
resourceIri = aThingIri,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case CreateValueResponseV1(newIntervalValue: IntervalValueV1, _, _, _) =>
newIntervalValue should ===(intervalValue)
}
}
// Round-trips a color value (hex RGB string).
"add a color value to an anything:Thing" in {
val colorValue = ColorValueV1("#4169E1")
responderManager ! CreateValueRequestV1(
value = colorValue,
userProfile = anythingUser,
propertyIri = "http://www.knora.org/ontology/0001/anything#hasColor",
resourceIri = aThingIri,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case CreateValueResponseV1(newColorValue: ColorValueV1, _, _, _) =>
newColorValue should ===(colorValue)
}
}
// TODO: commented out because of compaibility issues with the GUI
/*"add a geometry value to an anything:Thing" in {
val geomValue = GeomValueV1("{\\"status\\":\\"active\\",\\"lineColor\\":\\"#ff3333\\",\\"lineWidth\\":2,\\"points\\":[{\\"x\\":0.5516074450084602,\\"y\\":0.4444444444444444},{\\"x\\":0.2791878172588832,\\"y\\":0.5}],\\"type\\":\\"rectangle\\",\\"original_index\\":0}")
actorUnderTest ! CreateValueRequestV1(
value = geomValue,
userProfile = anythingUser,
propertyIri = "http://www.knora.org/ontology/0001/anything#hasGeometry",
resourceIri = aThingIri,
projectIri = anythingProjectIri,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case CreateValueResponseV1(newGeomValue: GeomValueV1, _ , _, _, _) =>
newGeomValue should ===(geomValue)
}
}
"add a geoname value to an anything:Thing" in {
val geonameValue = GeonameValueV1("2661602")
actorUnderTest ! CreateValueRequestV1(
value = geonameValue,
userProfile = anythingUser,
propertyIri = "http://www.knora.org/ontology/0001/anything#hasGeoname",
resourceIri = aThingIri,
projectIri = anythingProjectIri,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case CreateValueResponseV1(newGeonameValue: GeonameValueV1, _ , _, _, _) =>
newGeonameValue should ===(geonameValue)
}
}*/
// Round-trips a boolean value on an anything:Thing.
"add a boolean value to an anything:Thing" in {
val booleanValue = BooleanValueV1(true)
responderManager ! CreateValueRequestV1(
value = booleanValue,
userProfile = anythingUser,
propertyIri = "http://www.knora.org/ontology/0001/anything#hasBoolean",
resourceIri = aThingIri,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case CreateValueResponseV1(newBooleanValue: BooleanValueV1, _, _, _) =>
newBooleanValue should ===(booleanValue)
}
}
// Round-trips a URI value on an anything:Thing.
"add a URI value to an anything:Thing" in {
val uriValue = UriValueV1("http://dhlab.unibas.ch")
responderManager ! CreateValueRequestV1(
value = uriValue,
userProfile = anythingUser,
propertyIri = "http://www.knora.org/ontology/0001/anything#hasUri",
resourceIri = aThingIri,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case CreateValueResponseV1(newUriValue: UriValueV1, _, _, _) =>
newUriValue should ===(uriValue)
}
}
// Exercises standoff-link reference counting: two text values both contain a
// standoff reference to the same target, so one hasStandoffLinkTo LinkValue with
// referenceCount = 2 exists. Deleting the first text value must decrement the
// count to 1 (new LinkValue version, direct link kept); deleting the second must
// mark the LinkValue deleted and remove the direct link. The resource's last
// modification date must change after each delete.
"delete two text values containing the same standoff resource reference" in {
val thingWithTextValues = "http://rdfh.ch/0001/a-thing-with-text-values"
val firstTextValue = "http://rdfh.ch/0001/a-thing-with-text-values/values/1"
val secondTextValue = "http://rdfh.ch/0001/a-thing-with-text-values/values/2"
val lastModBeforeFirstDelete = getLastModificationDate(thingWithTextValues)
// Check that the link value has an initial reference count of 2.
responderManager ! LinkValueGetRequestV1(
subjectIri = thingWithTextValues,
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = aThingIri,
userProfile = anythingUser
)
expectMsgPF(timeout) {
case msg: ValueGetResponseV1 =>
msg.valuetype should ===(OntologyConstants.KnoraBase.LinkValue)
msg.value should ===(LinkValueV1(
subjectIri = thingWithTextValues,
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = aThingIri,
referenceCount = 2
))
msg.rights should ===(2)
}
val initialLinkValueSparqlQuery = queries.sparql.v1.txt.findLinkValueByObject(
triplestore = settings.triplestoreType,
subjectIri = thingWithTextValues,
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = aThingIri
).toString()
storeManager ! SparqlSelectRequest(initialLinkValueSparqlQuery)
// It should have no previousValue, and the direct link should exist.
expectMsgPF(timeout) {
case response: SparqlSelectResponse =>
val rows = response.results.bindings
rows.groupBy(_.rowMap("linkValue")).size should ===(1)
rows.exists(_.rowMap("objPred") == OntologyConstants.KnoraBase.PreviousValue) should ===(false)
rows.head.rowMap.get("directLinkExists").exists(_.toBoolean) should ===(true)
}
// Now delete the first text value.
responderManager ! DeleteValueRequestV1(
valueIri = firstTextValue,
userProfile = anythingUser,
apiRequestID = UUID.randomUUID
)
val deletedFirstTextValue = expectMsgPF(timeout) {
case msg: DeleteValueResponseV1 => msg.id
}
// A deleted value must no longer be retrievable.
responderManager ! ValueGetRequestV1(
valueIri = deletedFirstTextValue,
userProfile = anythingUser
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[NotFoundException] should ===(true)
}
// Check that the resource's last modification date got updated.
val lastModAfterFirstDelete = getLastModificationDate(thingWithTextValues)
lastModBeforeFirstDelete != lastModAfterFirstDelete should ===(true)
// The link value should now have a reference count of 1.
responderManager ! LinkValueGetRequestV1(
subjectIri = thingWithTextValues,
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = aThingIri,
userProfile = anythingUser
)
expectMsgPF(timeout) {
case msg: ValueGetResponseV1 =>
msg.valuetype should ===(OntologyConstants.KnoraBase.LinkValue)
msg.value should ===(LinkValueV1(
subjectIri = thingWithTextValues,
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = aThingIri,
referenceCount = 1
))
msg.rights should ===(2)
}
// It should have a previousValue, and the direct link should still exist.
val decrementedLinkValueSparqlQuery = queries.sparql.v1.txt.findLinkValueByObject(
triplestore = settings.triplestoreType,
subjectIri = thingWithTextValues,
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = aThingIri
).toString()
storeManager ! SparqlSelectRequest(decrementedLinkValueSparqlQuery)
expectMsgPF(timeout) {
case response: SparqlSelectResponse =>
val rows = response.results.bindings
rows.groupBy(_.rowMap("linkValue")).size should ===(1)
rows.exists(_.rowMap("objPred") == OntologyConstants.KnoraBase.PreviousValue) should ===(true)
rows.head.rowMap.get("directLinkExists").exists(_.toBoolean) should ===(true)
}
// Now delete the second text value.
responderManager ! DeleteValueRequestV1(
valueIri = secondTextValue,
userProfile = anythingUser,
apiRequestID = UUID.randomUUID
)
val deletedSecondTextValue = expectMsgPF(timeout) {
case msg: DeleteValueResponseV1 => msg.id
}
responderManager ! ValueGetRequestV1(
valueIri = deletedSecondTextValue,
userProfile = anythingUser
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[NotFoundException] should ===(true)
}
// Check that the resource's last modification date got updated.
val lastModAfterSecondDelete = getLastModificationDate(thingWithTextValues)
lastModBeforeFirstDelete != lastModAfterSecondDelete should ===(true)
// The new version of the LinkValue should be marked as deleted.
responderManager ! LinkValueGetRequestV1(
subjectIri = thingWithTextValues,
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = aThingIri,
userProfile = anythingUser
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure => msg.cause.isInstanceOf[NotFoundException] should ===(true)
}
// The LinkValue should point to its previous version. There should be no direct link.
val deletedLinkValueSparqlQuery = queries.sparql.v1.txt.findLinkValueByObject(
triplestore = settings.triplestoreType,
subjectIri = thingWithTextValues,
predicateIri = OntologyConstants.KnoraBase.HasStandoffLinkTo,
objectIri = aThingIri,
includeDeleted = true
).toString()
storeManager ! SparqlSelectRequest(deletedLinkValueSparqlQuery)
expectMsgPF(timeout) {
case response: SparqlSelectResponse =>
standoffLinkValueIri.unset()
val rows = response.results.bindings
rows.groupBy(_.rowMap("linkValue")).size should ===(1)
rows.exists(row => row.rowMap("objPred") == OntologyConstants.KnoraBase.IsDeleted && row.rowMap("objObj").toBoolean) should ===(true)
rows.exists(_.rowMap("objPred") == OntologyConstants.KnoraBase.PreviousValue) should ===(true)
rows.head.rowMap.get("directLinkExists").exists(_.toBoolean) should ===(false)
}
}
// A text value with a standoff link pointing at a resource that does not exist
// must be rejected with a NotFoundException.
"not add a text value containing a standoff reference to a nonexistent resource" in {
val nonexistentIri = "http://rdfh.ch/0803/nonexistent"
val textValueWithResourceRef = TextValueWithStandoffV1(
utf8str = "This comment refers to another resource",
standoff = Vector(
StandoffTagV2(
standoffTagClassIri = OntologyConstants.KnoraBase.StandoffLinkTag.toSmartIri,
dataType = Some(StandoffDataTypeClasses.StandoffLinkTag),
// Character offsets of the linked phrase within utf8str.
startPosition = 31,
endPosition = 39,
startIndex = 0,
attributes = Vector(StandoffTagIriAttributeV2(standoffPropertyIri = OntologyConstants.KnoraBase.StandoffTagHasLink.toSmartIri, value = nonexistentIri)),
uuid = UUID.randomUUID(),
originalXMLID = None
)
),
resource_reference = Set(nonexistentIri),
mapping = ResourcesResponderV1SpecFullData.dummyMapping,
mappingIri = "http://rdfh.ch/standoff/mappings/StandardMapping"
)
responderManager ! CreateValueRequestV1(
resourceIri = zeitglöckleinIri,
propertyIri = "http://www.knora.org/ontology/0803/incunabula#book_comment",
value = textValueWithResourceRef,
userProfile = incunabulaUser,
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: akka.actor.Status.Failure =>
msg.cause.isInstanceOf[NotFoundException] should ===(true)
}
}
// Round-trips a simple text value carrying a language tag.
"add a new text value with language" in {
responderManager ! CreateValueRequestV1(
value = TextValueSimpleV1(utf8str = "Hello World!", language = Some("en")),
userProfile = anythingUser,
propertyIri = "http://www.knora.org/ontology/0001/anything#hasText",
resourceIri = "http://rdfh.ch/0001/a-thing-with-text-valuesLanguage",
apiRequestID = UUID.randomUUID
)
expectMsgPF(timeout) {
case msg: CreateValueResponseV1 =>
msg.value should ===(TextValueSimpleV1(utf8str = "Hello World!", language = Some("en")))
}
}
}
}
| musicEnfanthen/Knora | webapi/src/test/scala/org/knora/webapi/responders/v1/ValuesResponderV1Spec.scala | Scala | agpl-3.0 | 87,723 |
/*
* Copyright 2016 Codnos Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.codnos.dbgp.internal.commands
import com.codnos.dbgp.api.StatusChangeHandler
import com.codnos.dbgp.internal.arguments.ArgumentConfiguration.Builder._
import com.codnos.dbgp.internal.arguments.ArgumentFormat._
import com.codnos.dbgp.internal.commands.run.{RunCommand, RunCommandHandler}
import com.codnos.dbgp.internal.impl.StatusChangeHandlerFactory
import org.mockito.Matchers._
import org.mockito.Mockito._
/**
 * Tests the DBGP `run` continuation command: the wire-message format produced on
 * the client side, and the engine dispatch performed by the handler.
 */
class RunSpec extends CommandSpec {
// `run` takes a single numeric transaction-id argument ("-i").
val argumentConfiguration = configuration.withCommand("run", numeric("i")).build
"Command" should "have message constructed from the parameters" in {
val command = new RunCommand("456")
command should have(
'name ("run"),
'message ("run -i 456"),
// Responses are correlated by transaction id, hence the "status:456" key.
'handlerKey ("status:456")
)
}
"CommandHandler" should "register status change handler and run" in {
val handler = new RunCommandHandler(engine, new StatusChangeHandlerFactory, argumentConfiguration)
handler.channelRead(ctx, "run -i 456")
// The handler must subscribe to status changes and then resume execution.
verify(engine).registerStatusChangeHandler(any(classOf[StatusChangeHandler]))
verify(engine).run()
}
}
| Codnos/dbgp-interfaces | src/test/scala/com/codnos/dbgp/internal/commands/RunSpec.scala | Scala | apache-2.0 | 1,703 |
// Compiler positive test: summons a by-name implicit Functor[Rec] via
// util.lazily. The point is that this compiles; the summoned value is discarded.
// NOTE(review): do not "simplify" this file — its exact shape is what the
// compiler test exercises.
object Test {
util.lazily[Functor[Rec]]
}
| martijnhoekstra/scala | test/files/pos/byname-implicits-31/byname-implicits-29/Main_2.scala | Scala | apache-2.0 | 44 |
package scalacookbook.chapter06
/**
* Created by liguodong on 2016/7/5.
*/
/**
 * Cookbook example: an object may declare Predef-style type aliases for
 * java.lang types, plus a paired type/value alias for a collection companion,
 * then define and use a simple case class. Running it prints the sibling list,
 * e.g. List(Person(Kim), Person(Julia), Person(Kenny)).
 */
object Main extends App {
  // Aliases mirroring what scala.Predef provides for the java.lang hierarchy.
  type Throwable = java.lang.Throwable
  type Exception = java.lang.Exception
  type Error = java.lang.Error
  // A type alias together with a value alias for its companion object.
  type Seq[+A] = scala.collection.Seq[A]
  val Seq = scala.collection.Seq

  case class Person(name: String)

  // The same three siblings as before, built by mapping names through the
  // case-class constructor instead of listing Person(...) literals.
  val siblings = List("Kim", "Julia", "Kenny").map(Person.apply)
  println(siblings)
}
| liguodongIOT/java-scala-mix-sbt | src/main/scala/scalacookbook/chapter06/Main.scala | Scala | apache-2.0 | 417 |
/*
* Copyright (C) 2016 University of Basel, Graphics and Vision Research Group
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package scalismo.ui.view.util
import java.awt.Color
import scala.swing.event.ValueChanged
import scala.swing.{Label, Slider}
/**
 * A Slider that paints three labels — minimum, maximum and current value —
 * each rendered through [[formattedValue]]. The value label tracks the knob as
 * the user drags it.
 */
class FancySlider extends Slider {
// intended to be overwritten in subclasses if needed
def formattedValue(sliderValue: Int): String = sliderValue.toString
// Min/max are rendered dimmed, the current value in black.
val minLabel: Label = new Label {
foreground = Color.GRAY
}
val maxLabel: Label = new Label {
foreground = Color.GRAY
}
val valueLabel: Label = new Label {
foreground = Color.BLACK
}
// Refreshes the three label texts and re-registers the slider's label table.
// Only reassigns `labels` when at least one text actually changed, so the
// component is not invalidated on every event.
protected def updateLabels(): Unit = {
if (paintLabels) {
val texts = Seq(min, max, value) map formattedValue
val tuples = texts.zip(Seq(minLabel, maxLabel, valueLabel))
val needsUpdate = tuples.exists { case (v, l) => l.text != v }
if (needsUpdate) {
tuples.foreach { case (v, l) => l.text = v }
// Hidden labels are dropped from the label table.
labels = Seq((min, minLabel), (max, maxLabel), (value, valueLabel)).filter(_._2.visible).toMap
}
}
}
paintLabels = true
// Keep the labels in sync whenever this slider's own value changes.
reactions += {
case ValueChanged(c) if c eq this => updateLabels()
}
}
| unibas-gravis/scalismo-ui | src/main/scala/scalismo/ui/view/util/FancySlider.scala | Scala | gpl-3.0 | 1,794 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.parquet
import org.apache.hadoop.fs.Path
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.sql.types._
import org.apache.spark.sql.{SQLConf, QueryTest}
import org.apache.spark.sql.catalyst.expressions.Row
import org.apache.spark.sql.test.TestSQLContext
import org.apache.spark.sql.test.TestSQLContext._
/**
* A test suite that tests various Parquet queries.
*/
class ParquetQuerySuiteBase extends QueryTest with ParquetTest {
  val sqlContext = TestSQLContext

  // Basic projections with WHERE clauses, with and without a table alias.
  test("simple select queries") {
    withParquetTable((0 until 10).map(i => (i, i.toString)), "t") {
      checkAnswer(sql("SELECT _1 FROM t where t._1 > 5"), (6 until 10).map(Row.apply(_)))
      checkAnswer(sql("SELECT _1 FROM t as tmp where tmp._1 < 5"), (0 until 5).map(Row.apply(_)))
    }
  }

  // INSERT INTO must append: after the insert every row appears twice.
  test("appending") {
    val data = (0 until 10).map(i => (i, i.toString))
    createDataFrame(data).toDF("c1", "c2").registerTempTable("tmp")
    withParquetTable(data, "t") {
      sql("INSERT INTO TABLE t SELECT * FROM tmp")
      checkAnswer(table("t"), (data ++ data).map(Row.fromTuple))
    }
    catalog.unregisterTable(Seq("tmp"))
  }

  // INSERT OVERWRITE must replace: the table contains exactly one copy of the data.
  test("overwriting") {
    val data = (0 until 10).map(i => (i, i.toString))
    createDataFrame(data).toDF("c1", "c2").registerTempTable("tmp")
    withParquetTable(data, "t") {
      sql("INSERT OVERWRITE TABLE t SELECT * FROM tmp")
      checkAnswer(table("t"), data.map(Row.fromTuple))
    }
    catalog.unregisterTable(Seq("tmp"))
  }

  // Self-joins must deduplicate expression IDs; null join keys must not match.
  test("self-join") {
    // 4 rows, cells of column 1 of row 2 and row 4 are null
    val data = (1 to 4).map { i =>
      val maybeInt = if (i % 2 == 0) None else Some(i)
      (maybeInt, i.toString)
    }
    withParquetTable(data, "t") {
      val selfJoin = sql("SELECT * FROM t x JOIN t y WHERE x._1 = y._1")
      val queryOutput = selfJoin.queryExecution.analyzed.output
      assertResult(4, "Field count mismatches")(queryOutput.size)
      // Fixed: the failure message was missing the `s` interpolator, so
      // "$selfJoin" was printed literally instead of the query plan.
      assertResult(2, s"Duplicated expression ID in query plan:\\n $selfJoin") {
        queryOutput.filter(_.name == "_1").map(_.exprId).size
      }
      checkAnswer(selfJoin, List(Row(1, "1", 1, "1"), Row(3, "3", 3, "3")))
    }
  }

  // Reading an array element out of a nested struct field.
  test("nested data - struct with array field") {
    // Fixed: "val_$i" lacked the `s` interpolator, so every row carried the
    // literal string "val_$i" instead of distinct values per row.
    val data = (1 to 10).map(i => Tuple1((i, Seq(s"val_$i"))))
    withParquetTable(data, "t") {
      checkAnswer(sql("SELECT _1._2[0] FROM t"), data.map {
        case Tuple1((_, Seq(string))) => Row(string)
      })
    }
  }

  // Reading a struct field out of an array element.
  test("nested data - array of struct") {
    // Fixed: same missing `s` interpolator as above.
    val data = (1 to 10).map(i => Tuple1(Seq(i -> s"val_$i")))
    withParquetTable(data, "t") {
      checkAnswer(sql("SELECT _1[0]._2 FROM t"), data.map {
        case Tuple1(Seq((_, string))) => Row(string)
      })
    }
  }

  // A column referenced only by a pushed-down filter must still be resolvable.
  test("SPARK-1913 regression: columns only referenced by pushed down filters should remain") {
    withParquetTable((1 to 10).map(Tuple1.apply), "t") {
      checkAnswer(sql("SELECT _1 FROM t WHERE _1 < 10"), (1 to 9).map(Row.apply(_)))
    }
  }

  // Dictionary-encoded string columns must decode correctly under aggregation
  // and filtering. `i / 100` yields ten groups ("run_0".."run_9") of 100 rows.
  test("SPARK-5309 strings stored using dictionary compression in parquet") {
    withParquetTable((0 until 1000).map(i => ("same", "run_" + i / 100, 1)), "t") {
      checkAnswer(sql("SELECT _1, _2, SUM(_3) FROM t GROUP BY _1, _2"),
        (0 until 10).map(i => Row("same", "run_" + i, 100)))
      checkAnswer(sql("SELECT _1, _2, SUM(_3) FROM t WHERE _2 = 'run_5' GROUP BY _1, _2"),
        List(Row("same", "run_5", 100)))
    }
  }

  // Decimal and timestamp columns must survive a write/read round trip.
  test("SPARK-6917 DecimalType should work with non-native types") {
    val data = (1 to 10).map(i => Row(Decimal(i, 18, 0), new java.sql.Timestamp(i)))
    val schema = StructType(List(StructField("d", DecimalType(18, 0), false),
      StructField("time", TimestampType, false)).toArray)
    withTempPath { file =>
      val df = sqlContext.createDataFrame(sparkContext.parallelize(data), schema)
      df.write.parquet(file.getCanonicalPath)
      val df2 = sqlContext.read.parquet(file.getCanonicalPath)
      checkAnswer(df2, df.collect().toSeq)
    }
  }

  // The mergeSchema reader option controls whether partition schemas are merged:
  // disabled -> 2 columns (one data column + partition column), enabled -> 3.
  test("SPARK-8990 DataFrameReader.parquet() should respect user specified options") {
    withTempPath { dir =>
      val basePath = dir.getCanonicalPath
      sqlContext.range(0, 10).toDF("a").write.parquet(new Path(basePath, "foo=1").toString)
      sqlContext.range(0, 10).toDF("b").write.parquet(new Path(basePath, "foo=a").toString)
      assertResult(2) {
        // Disables schema merging via data source option
        sqlContext.read.option("mergeSchema", "false").parquet(basePath).columns.length
      }
      assertResult(3) {
        // Enables schema merging via data source option
        sqlContext.read.option("mergeSchema", "true").parquet(basePath).columns.length
      }
    }
  }
}
// Runs the shared Parquet query tests with the data source API enabled.
class ParquetDataSourceOnQuerySuite extends ParquetQuerySuiteBase with BeforeAndAfterAll {
// Captured at construction time so afterAll can restore the user's setting.
val originalConf = sqlContext.conf.parquetUseDataSourceApi
override protected def beforeAll(): Unit = {
sqlContext.conf.setConf(SQLConf.PARQUET_USE_DATA_SOURCE_API, "true")
}
override protected def afterAll(): Unit = {
sqlContext.setConf(SQLConf.PARQUET_USE_DATA_SOURCE_API, originalConf.toString)
}
}
// Runs the shared Parquet query tests with the data source API disabled.
class ParquetDataSourceOffQuerySuite extends ParquetQuerySuiteBase with BeforeAndAfterAll {
// Captured at construction time so afterAll can restore the user's setting.
val originalConf = sqlContext.conf.parquetUseDataSourceApi
override protected def beforeAll(): Unit = {
sqlContext.conf.setConf(SQLConf.PARQUET_USE_DATA_SOURCE_API, "false")
}
override protected def afterAll(): Unit = {
sqlContext.setConf(SQLConf.PARQUET_USE_DATA_SOURCE_API, originalConf.toString)
}
}
| andrewor14/iolap | sql/core/src/test/scala/org/apache/spark/sql/parquet/ParquetQuerySuite.scala | Scala | apache-2.0 | 6,319 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.qscript.rewrites
import slamdata.Predef.{Map => _, _}
import quasar.RenderTreeT
import quasar.contrib.iota._
import quasar.fp.PrismNT
import quasar.qscript._
import iotaz.CopK
import matryoshka.{Hole => _, _}
import matryoshka.implicits._
import scalaz.{~>, Functor}
import scalaz.syntax.functor._
/**
 * Rewrites every embedded map function (`FreeMap`/`JoinFunc`) of a QScript
 * node through `MapFuncCore.normalized`, leaving the node's shape untouched.
 * Nodes that are not recognized (or `Unreferenced`) pass through unchanged
 * via `prismGF`.
 */
class NormalizeQScriptFreeMap[T[_[_]]: BirecursiveT: EqualT: ShowT: RenderTreeT]
extends Normalize[T] {
// Injections used to pattern-match the coproduct cases we normalize.
val QSC = CopK.Inject[QScriptCore[T, ?], QSNorm]
val EJ = CopK.Inject[EquiJoin[T, ?], QSNorm]
// FToNorm: attempts to view an F-node as a normalizable QSNorm node.
// NormToG: re-embeds the rewritten QSNorm node into the target functor G.
// prismGF: fallback embedding for nodes that are left as-is.
def norm[F[_], G[_]: Functor](
FToNorm: F ~> OptNorm,
NormToG: QSNorm ~> G,
prismGF: PrismNT[G, F])
: F[T[G]] => G[T[G]] =
ftg => FToNorm(ftg) match {
case Some(QSC(value)) => value match {
case Map(src, fm) =>
NormToG(QSC.inj(Map(src, recNorm(fm))))
case LeftShift(src, struct, status, tpe, undef, repair) =>
NormToG(QSC.inj(LeftShift(
src,
recNorm(struct),
status,
tpe,
undef,
MapFuncCore.normalized(repair))))
case Reduce(src, bucket, reducers, repair) =>
NormToG(QSC.inj(Reduce(
src,
bucket.map(MapFuncCore.normalized(_)),
reducers.map(_.map(MapFuncCore.normalized(_))),
MapFuncCore.normalized(repair))))
case Sort(src, bucket, order) =>
NormToG(QSC.inj(Sort(
src,
bucket.map(MapFuncCore.normalized(_)),
order map { case (fm, dir) => (MapFuncCore.normalized(fm), dir) })))
case Union(src, lBranch, rBranch) =>
NormToG(QSC.inj(Union(
src,
branchNorm(lBranch),
branchNorm(rBranch))))
case Filter(src, fm) =>
NormToG(QSC.inj(Filter(src, recNorm(fm))))
case Subset(src, from, op, count) =>
NormToG(QSC.inj(Subset(
src,
branchNorm(from),
op,
branchNorm(count))))
// Unreferenced carries no map functions; pass it through unchanged.
case Unreferenced() =>
prismGF(ftg)
}
// EquiJoin: normalize both branches, each key pair, and the combiner.
case Some(EJ(EquiJoin(src, lBranch, rBranch, key, tpe, combine))) =>
NormToG(EJ.inj(EquiJoin(
src,
branchNorm(lBranch),
branchNorm(rBranch),
key map { case (l, r) => (MapFuncCore.normalized(l), MapFuncCore.normalized(r)) },
tpe,
MapFuncCore.normalized(combine))))
// Anything FToNorm does not recognize is embedded untouched.
case _ => prismGF(ftg)
}
// Normalizes a RecFreeMap by linearizing, rewriting, and re-wrapping it.
private def recNorm(fm: RecFreeMap[T]): RecFreeMap[T] =
RecFreeS.fromFree(MapFuncCore.normalized(fm.linearize))
}
object NormalizeQScriptFreeMap {
  /** Rewrites `qs` bottom-up, normalizing every embedded map function. */
  def apply[T[_[_]]: BirecursiveT: EqualT: ShowT: RenderTreeT](
      qs: T[QScriptNormalized[T, ?]])
      : T[QScriptNormalized[T, ?]] =
    qs.transCata[T[QScriptNormalized[T, ?]]](new NormalizeQScriptFreeMap[T].normQS)
}
| slamdata/quasar | qscript/src/main/scala/quasar/qscript/rewrites/NormalizeQScriptFreeMap.scala | Scala | apache-2.0 | 3,437 |
package almanac.api
import almanac.model.{Criteria, Metric, MetricsQuery}
import scala.concurrent.Future
/** Asynchronous facade over the almanac metrics store. */
trait AlmanacService {
/** Creates a new metrics namespace ("space"). */
def createSpace(space: String): Unit
/** Fetches the metrics matching `query`. */
def retrieve(query: MetricsQuery): Future[Seq[Metric]]
// Batched streaming retrieval; not implemented yet (throws NotImplementedError).
def stream(query: MetricsQuery, batch: Int): Stream[Seq[Metric]] = ???
/** Lists bucket names matching `criteria`. */
def buckets(criteria: Criteria): Future[Seq[String]]
/** Lists bucket names matching `criteria` restricted by `pattern`. */
def buckets(criteria: Criteria, pattern: String): Future[Seq[String]]
// Distinct recorded values for `fact` within `bucket`; empty geohash
// presumably means "no geographic scoping" — TODO confirm against callers.
def distinctValues(fact: String, bucket: String, geohash: String = ""): Future[Seq[String]]
/** Records one or more metrics. */
def record(metrics: Metric*): Unit
}
| adcade/almanac-oss | src/main/scala/almanac/api/AlmanacService.scala | Scala | mit | 566 |
package com.morenware.tvcrawler.config
/**
* Created by david on 20/04/2016.
*/
// Mutable configuration bean describing one site to crawl.
class CrawlerConfig(
var name: String, // human-readable crawler name
var siteId: String, // unique identifier for the site
var baseUrl: String, // root URL crawling starts from
var language: String, // content language of the site
var sections: List[WebsiteSection]) { // site sections to visit
}
| dfernandezm/tv-crawler-scala | src/main/scala/com/morenware/tvcrawler/config/CrawlerConfig.scala | Scala | mit | 331 |
package ch.autoecole.api
import akka.actor._
import akka.io._
import java.net.InetSocketAddress
import spray.can._
import spray.routing._
import spray.http._
object Main extends App {
  implicit val system = ActorSystem()
  // Spin up the HTTP service actor and bind it on all interfaces, port 8080.
  val serviceActor = system.actorOf(Props[ExampleServiceActor], name = "example-service")
  IO(Http) ! Http.Bind(serviceActor, interface = "0.0.0.0", port = 8080)
}
// Actor wrapper that runs the spray route defined in ExampleService.
class ExampleServiceActor extends Actor with ExampleService {
def actorRefFactory = context
def receive = runRoute(route)
}
trait ExampleService extends HttpService {
  import MediaTypes._

  /**
   * Root route: renders a small HTML page showing the linked database
   * address. Robustness fix: read the env vars with `sys.env.getOrElse`
   * instead of `sys.env(...)` so a missing Docker link renders "unknown"
   * rather than failing the request with NoSuchElementException.
   */
  val route =
    path("") {
      get {
        respondWithMediaType(`text/html`) {
          complete {
            val dbHost = sys.env.getOrElse("DB_PORT_5432_TCP_ADDR", "unknown")
            val dbPort = sys.env.getOrElse("DB_PORT_5432_TCP_PORT", "unknown")
            <html>
              <body>
                <h1>Hello World!</h1>
                <p>Database host: <pre>{dbHost}:{dbPort}</pre></p>
              </body>
            </html>
          }
        }
      }
    }
}
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.curve
import org.junit.runner.RunWith
import org.locationtech.geomesa.curve.NormalizedDimension.{NormalizedLat, NormalizedLon}
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
// Checks that lat/lon normalization maps the coordinate range onto
// [0, 2^precision - 1] bins and that denormalize returns bin midpoints.
class NormalizedDimensionTest extends Specification {
val precision = 31
val NormLat = NormalizedLat(precision)
val NormLon = NormalizedLon(precision)
val maxBin = (math.pow(2, precision) - 1).toInt // note: at 31 bits this is Int.MaxValue
"NormalizedDimension" should {
"Round-trip normalize minimum" >> {
NormLat.normalize(NormLat.denormalize(0)) mustEqual 0
NormLon.normalize(NormLon.denormalize(0)) mustEqual 0
}
"Round-trip normalize maximum" >> {
NormLat.normalize(NormLat.denormalize(maxBin)) mustEqual maxBin
NormLon.normalize(NormLon.denormalize(maxBin)) mustEqual maxBin
}
"Normalize mininimum" >> {
NormLat.normalize(NormLat.min) mustEqual 0
NormLon.normalize(NormLon.min) mustEqual 0
}
"Normalize maximum" >> {
NormLat.normalize(NormLat.max) mustEqual maxBin
NormLon.normalize(NormLon.max) mustEqual maxBin
}
"Denormalize [0,max - 1] to bin middle" >> {
// for any index/bin denormalize will return value in middle of range of coordinates
// that could result in index/bin from normalize call
val latExtent = NormLat.max - NormLat.min
val lonExtent = NormLon.max - NormLon.min
// toLong before +1 avoids Int overflow when maxBin == Int.MaxValue.
val latWidth = latExtent / (maxBin.toLong + 1)
val lonWidth = lonExtent / (maxBin.toLong + 1)
NormLat.denormalize(0) mustEqual NormLat.min + latWidth / 2d
NormLat.denormalize(maxBin) mustEqual NormLat.max - latWidth / 2d
NormLon.denormalize(0) mustEqual NormLon.min + lonWidth / 2d
NormLon.denormalize(maxBin) mustEqual NormLon.max - lonWidth / 2d
}
}
}
| aheyne/geomesa | geomesa-z3/src/test/scala/org/locationtech/geomesa/curve/NormalizedDimensionTest.scala | Scala | apache-2.0 | 2,346 |
package org.catapult.sa.fulgurite.spark
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{BytesWritable, LongWritable}
import org.apache.hadoop.mapreduce.{InputSplit, JobContext, RecordReader, TaskAttemptContext}
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.apache.spark.Logging
import org.apache.spark.deploy.SparkHadoopUtil
object GeoTiffBinaryInputFormat {
  // Hadoop configuration keys controlling record length and byte range.
  val RECORD_LENGTH_PROPERTY = "org.catapult.sa.spark.GeoTiffBinaryInputFormat.recordLength"
  val RECORD_START_OFFSET_PROPERTY = "org.catapult.sa.spark.GeoTiffBinaryInputFormat.startOffset"
  val RECORD_END_OFFSET_PROPERTY = "org.catapult.sa.spark.GeoTiffBinaryInputFormat.endOffset"

  /** Fixed record length (bytes) configured for this job. */
  def getRecordLength(context: JobContext): Int =
    confValue(context, RECORD_LENGTH_PROPERTY).toInt

  /** Byte offset at which records start. */
  def getStartOffset(context: JobContext): Long =
    confValue(context, RECORD_START_OFFSET_PROPERTY).toLong

  /** Byte offset at which records end. */
  def getEndOffset(context: JobContext): Long =
    confValue(context, RECORD_END_OFFSET_PROPERTY).toLong

  // Shared lookup against the job's Hadoop configuration.
  private def confValue(context: JobContext, key: String): String =
    SparkHadoopUtil.get.getConfigurationFromJobContext(context).get(key)
}
// Fixed-length binary input format: splits are aligned so each one contains
// only whole records of `recordLength` bytes.
class GeoTiffBinaryInputFormat extends FileInputFormat[LongWritable, BytesWritable] with Logging {
// Lazily initialized from the job configuration on first isSplitable call;
// -1 means "not yet read".
private var recordLength = -1
/**
 * Override of isSplitable to ensure initial computation of the record length
 */
override def isSplitable(context: JobContext, filename: Path): Boolean = {
if (recordLength == -1) {
recordLength = GeoTiffBinaryInputFormat.getRecordLength(context)
}
if (recordLength <= 0) {
logDebug("record length is less than 0, file cannot be split")
false
} else {
true
}
}
/**
 * This input format overrides computeSplitSize() to make sure that each split
 * only contains full records. Each InputSplit passed to FixedLengthBinaryRecordReader
 * will start at the first byte of a record, and the last byte will the last byte of a record.
 */
override def computeSplitSize(blockSize: Long, minSize: Long, maxSize: Long): Long = {
val defaultSize = super.computeSplitSize(blockSize, minSize, maxSize)
// If the default size is less than the length of a record, make it equal to it
// Otherwise, make sure the split size is as close to possible as the default size,
// but still contains a complete set of records, with the first record
// starting at the first byte in the split and the last record ending with the last byte
if (defaultSize < recordLength) {
recordLength.toLong
} else {
// Rounds defaultSize down to a whole multiple of recordLength.
(Math.floor(defaultSize / recordLength) * recordLength).toLong
}
}
/**
 * Create a FixedLengthBinaryRecordReader
 */
override def createRecordReader(split: InputSplit, context: TaskAttemptContext)
: RecordReader[LongWritable, BytesWritable] = {
new GeoTiffBinaryRecordReader
}
}
| SatelliteApplicationsCatapult/fulgurite | fulgurite-core/src/main/scala/org/catapult/sa/fulgurite/spark/GeoTiffBinaryInputFormat.scala | Scala | lgpl-3.0 | 2,973 |
package org.jetbrains.plugins.scala.worksheet.ammonite.runconfiguration
import javax.swing.Icon
import com.intellij.execution.configurations.{ConfigurationFactory, ConfigurationType}
import com.intellij.openapi.project.DumbAware
import org.jetbrains.plugins.scala.icons.Icons
import org.jetbrains.plugins.scala.worksheet.WorksheetBundle
// IntelliJ run-configuration type for executing Ammonite scripts; DumbAware
// so it stays available while indexing.
class AmmoniteRunConfigurationType extends ConfigurationType with DumbAware {
private val factory = new AmmoniteRunConfigurationFactory(this)
override def getId: String = "ScalaAmmoniteRunConfigurationType"
override def getDisplayName: String = WorksheetBundle.message("ammonite.config.display.name")
override def getConfigurationTypeDescription: String = WorksheetBundle.message("ammonite.config.run.ammonite.script")
override def getConfigurationFactories: Array[ConfigurationFactory] = Array[ConfigurationFactory](factory)
override def getIcon: Icon = Icons.SCALA_CONSOLE
}
| JetBrains/intellij-scala | scala/worksheet/src/org/jetbrains/plugins/scala/worksheet/ammonite/runconfiguration/AmmoniteRunConfigurationType.scala | Scala | apache-2.0 | 935 |
package hlt
import java.io.{FileWriter, IOException}
/**
 * Minimal append-only logger writing one message per line to a shared
 * FileWriter. `initialize` must be called before the first `addLog`.
 */
object DebugLog {
  private var writer: FileWriter = _

  /**
   * Appends `message` followed by a newline and flushes immediately so the
   * log survives abrupt termination. IOExceptions are printed to stderr and
   * otherwise swallowed — logging must never abort the caller.
   */
  def addLog(message: String): Unit =
    try {
      writer.write(message)
      // Fix: the previous revision contained '\\n' (a two-character sequence,
      // not a valid char literal); a single newline character is intended.
      writer.write('\n')
      writer.flush()
    } catch {
      case e: IOException =>
        e.printStackTrace()
    }

  private[hlt] def initialize(f: FileWriter): Unit = writer = f
}
| lanyudhy/Halite-II | airesources/Scala/hlt/DebugLog.scala | Scala | mit | 377 |
/*
* Copyright 2010 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.logging
import java.util.concurrent.ConcurrentHashMap
import java.util.{logging => javalog}
import scala.annotation.{tailrec, varargs}
import scala.collection.JavaConverters._
import scala.collection.Map
// replace java's ridiculous log levels with the standard ones.
// Scala-friendly log levels layered on java.util.logging.Level; values are
// chosen to interleave with the JUL built-ins.
sealed abstract class Level(val name: String, val value: Int) extends javalog.Level(name, value) {
// for java compat: lets Java callers obtain the singleton via get().
def get(): Level = this
}
object Level {
case object OFF extends Level("OFF", Int.MaxValue)
case object FATAL extends Level("FATAL", 1000)
case object CRITICAL extends Level("CRITICAL", 970)
case object ERROR extends Level("ERROR", 930)
case object WARNING extends Level("WARNING", 900)
case object INFO extends Level("INFO", 800)
case object DEBUG extends Level("DEBUG", 500)
case object TRACE extends Level("TRACE", 400)
case object ALL extends Level("ALL", Int.MinValue)
// All levels, ordered from most to least restrictive; lookups below scan it.
private[logging] val AllLevels: Seq[Level] =
Seq(OFF, FATAL, CRITICAL, ERROR, WARNING, INFO, DEBUG, TRACE, ALL)
/**
 * Associate [[java.util.logging.Level]] and `Level` by their integer
 * values. If there is no match, we return `None`.
 */
def fromJava(level: javalog.Level): Option[Level] =
AllLevels.find(_.value == level.intValue)
/**
 * Get a `Level` by its name, or `None` if there is no match.
 * Matching is case-sensitive (names are upper-case).
 */
def parse(name: String): Option[Level] = AllLevels.find(_.name == name)
}
/**
 * Typically mixed into `Exceptions` to indicate what [[Level]]
 * they should be logged at.
 *
 * @see Finagle's `com.twitter.finagle.Failure`.
 */
trait HasLogLevel {
/** The level this exception should be logged at. */
def logLevel: Level
}
object HasLogLevel {

  /**
   * Walks `t` and its chain of causes and extracts the `logLevel` of the
   * first [[HasLogLevel]] encountered.
   *
   * @note the first match wins even if a later cause carries a more
   *       severe level.
   * @return `None` when neither `t` nor any of its causes is a
   *         [[HasLogLevel]]; `Some` of the first one found otherwise.
   */
  def unapply(t: Throwable): Option[Level] =
    Iterator
      .iterate(t)(_.getCause)
      .takeWhile(_ != null)
      .collectFirst { case hll: HasLogLevel => hll.logLevel }
}
class LoggingException(reason: String) extends Exception(reason)
/**
 * Scala wrapper for logging. Delegates to a `java.util.logging.Logger`
 * while exposing sprintf-style and lazily-evaluated log methods.
 */
class Logger protected(val name: String, private val wrapped: javalog.Logger) {
// wrapped methods: thin pass-throughs to the underlying JUL logger.
def addHandler(handler: javalog.Handler) = wrapped.addHandler(handler)
def getFilter() = wrapped.getFilter()
def getHandlers() = wrapped.getHandlers()
def getLevel() = wrapped.getLevel()
def getParent() = wrapped.getParent()
def getUseParentHandlers() = wrapped.getUseParentHandlers()
def isLoggable(level: javalog.Level) = wrapped.isLoggable(level)
def log(record: javalog.LogRecord) = wrapped.log(record)
def removeHandler(handler: javalog.Handler) = wrapped.removeHandler(handler)
def setFilter(filter: javalog.Filter) = wrapped.setFilter(filter)
def setLevel(level: javalog.Level) = wrapped.setLevel(level)
def setUseParentHandlers(use: Boolean) = wrapped.setUseParentHandlers(use)
override def toString = {
"<%s name='%s' level=%s handlers=%s use_parent=%s>".format(getClass.getName, name, getLevel(),
getHandlers().toList.mkString("[", ", ", "]"), if (getUseParentHandlers()) "true" else "false")
}
/**
 * Log a message, with sprintf formatting, at the desired level.
 */
@varargs
final def log(level: Level, message: String, items: Any*): Unit =
log(level, null: Throwable, message, items: _*)
/**
 * Log a message, with sprintf formatting, at the desired level, and
 * attach an exception and stack trace. The message is lazily formatted if
 * formatting is required.
 */
@varargs
final def log(level: Level, thrown: Throwable, message: String, items: Any*) {
val myLevel = getLevel
// A null level means "inherit" — treat as loggable and let JUL decide.
if ((myLevel eq null) || (level.intValue >= myLevel.intValue)) {
val record =
if (items.size > 0) new LazyLogRecordUnformatted(level, message, items: _*)
else new LogRecord(level, message)
record.setLoggerName(wrapped.getName)
if (thrown ne null) {
record.setThrown(thrown)
}
wrapped.log(record)
}
}
final def apply(level: Level, message: String, items: Any*) = log(level, message, items: _*)
// Fix: forward the varargs with `: _*` like the overload above; previously
// the whole Seq was passed as a single format item, producing wrong output.
final def apply(level: Level, thrown: Throwable, message: String, items: Any*) = log(level, thrown, message, items: _*)
// convenience methods:
@varargs
def fatal(msg: String, items: Any*) = log(Level.FATAL, msg, items: _*)
@varargs
def fatal(thrown: Throwable, msg: String, items: Any*) = log(Level.FATAL, thrown, msg, items: _*)
@varargs
def critical(msg: String, items: Any*) = log(Level.CRITICAL, msg, items: _*)
@varargs
def critical(thrown: Throwable, msg: String, items: Any*) = log(Level.CRITICAL, thrown, msg, items: _*)
@varargs
def error(msg: String, items: Any*) = log(Level.ERROR, msg, items: _*)
@varargs
def error(thrown: Throwable, msg: String, items: Any*) = log(Level.ERROR, thrown, msg, items: _*)
@varargs
def warning(msg: String, items: Any*) = log(Level.WARNING, msg, items: _*)
@varargs
def warning(thrown: Throwable, msg: String, items: Any*) = log(Level.WARNING, thrown, msg, items: _*)
@varargs
def info(msg: String, items: Any*) = log(Level.INFO, msg, items: _*)
@varargs
def info(thrown: Throwable, msg: String, items: Any*) = log(Level.INFO, thrown, msg, items: _*)
@varargs
def debug(msg: String, items: Any*) = log(Level.DEBUG, msg, items: _*)
@varargs
def debug(thrown: Throwable, msg: String, items: Any*) = log(Level.DEBUG, thrown, msg, items: _*)
@varargs
def trace(msg: String, items: Any*) = log(Level.TRACE, msg, items: _*)
@varargs
def trace(thrown: Throwable, msg: String, items: Any*) = log(Level.TRACE, thrown, msg, items: _*)
def debugLazy(msg: => AnyRef): Unit = logLazy(Level.DEBUG, null, msg)
def traceLazy(msg: => AnyRef): Unit = logLazy(Level.TRACE, null, msg)
/**
 * Log a message, with lazy (call-by-name) computation of the message,
 * at the desired level.
 */
def logLazy(level: Level, message: => AnyRef): Unit = logLazy(level, null: Throwable, message)
/**
 * Log a message, with lazy (call-by-name) computation of the message,
 * and attach an exception and stack trace.
 */
def logLazy(level: Level, thrown: Throwable, message: => AnyRef): Unit = {
val myLevel = getLevel
if ((myLevel eq null) || (level.intValue >= myLevel.intValue)) {
val record = new LazyLogRecord(level, message)
record.setLoggerName(wrapped.getName)
if (thrown ne null) {
record.setThrown(thrown)
}
wrapped.log(record)
}
}
// convenience methods:
def ifFatal(message: => AnyRef) = logLazy(Level.FATAL, message)
def ifFatal(thrown: Throwable, message: => AnyRef) = logLazy(Level.FATAL, thrown, message)
def ifCritical(message: => AnyRef) = logLazy(Level.CRITICAL, message)
def ifCritical(thrown: Throwable, message: => AnyRef) = logLazy(Level.CRITICAL, thrown, message)
def ifError(message: => AnyRef) = logLazy(Level.ERROR, message)
def ifError(thrown: Throwable, message: => AnyRef) = logLazy(Level.ERROR, thrown, message)
def ifWarning(message: => AnyRef) = logLazy(Level.WARNING, message)
def ifWarning(thrown: Throwable, message: => AnyRef) = logLazy(Level.WARNING, thrown, message)
def ifInfo(message: => AnyRef) = logLazy(Level.INFO, message)
def ifInfo(thrown: Throwable, message: => AnyRef) = logLazy(Level.INFO, thrown, message)
def ifDebug(message: => AnyRef) = logLazy(Level.DEBUG, message)
def ifDebug(thrown: Throwable, message: => AnyRef) = logLazy(Level.DEBUG, thrown, message)
def ifTrace(message: => AnyRef) = logLazy(Level.TRACE, message)
def ifTrace(thrown: Throwable, message: => AnyRef) = logLazy(Level.TRACE, thrown, message)
/**
 * Remove all existing log handlers. Handlers are closed (best effort,
 * exceptions ignored) before being removed.
 */
def clearHandlers() = {
// some custom Logger implementations may return null from getHandlers
val handlers = getHandlers()
if (handlers ne null) {
for (handler <- handlers) {
try {
handler.close()
} catch { case _: Throwable => () }
removeHandler(handler)
}
}
}
}
// A Logger that discards everything: backed by a JUL logger pinned to OFF.
object NullLogger extends Logger("null", {
  val underlying = javalog.Logger.getLogger("null")
  underlying.setLevel(Level.OFF)
  underlying
})
// Factory/registry of Logger instances; iterating it walks every logger
// created so far.
object Logger extends Iterable[Logger] {
// Name -> Level lookup, built once from Level.AllLevels.
private[this] val levelNamesMap: Map[String, Level] =
Level.AllLevels.map { level =>
level.name -> level
}.toMap
// Numeric value -> Level lookup, built once from Level.AllLevels.
private[this] val levelsMap: Map[Int, Level] =
Level.AllLevels.map { level =>
level.value -> level
}.toMap
// A cache of scala Logger objects by name.
// Using a low concurrencyLevel (2), with the assumption that we aren't ever creating too
// many loggers at the same time.
private[this] val loggersCache = new ConcurrentHashMap[String, Logger](128, 0.75f, 2)
// A cache of LoggerFactory functions passed into Logger.configure.
@volatile private[this] var loggerFactoryCache = List[() => Logger]()
// The JUL root logger, wrapped; named "".
private[logging] val root: Logger = get("")
// ----- convenience methods:
/** OFF is used to turn off logging entirely. */
def OFF = Level.OFF
/** Describes an event which will cause the application to exit immediately, in failure. */
def FATAL = Level.FATAL
/** Describes an event which will cause the application to fail to work correctly, but
 * keep attempt to continue. The application may be unusable.
 */
def CRITICAL = Level.CRITICAL
/** Describes a user-visible error that may be transient or not affect other users. */
def ERROR = Level.ERROR
/** Describes a problem which is probably not user-visible but is notable and/or may be
 * an early indication of a future error.
 */
def WARNING = Level.WARNING
/** Describes information about the normal, functioning state of the application. */
def INFO = Level.INFO
/** Describes information useful for general debugging, but probably not normal,
 * day-to-day use.
 */
def DEBUG = Level.DEBUG
/** Describes information useful for intense debugging. */
def TRACE = Level.TRACE
/** ALL is used to log everything. */
def ALL = Level.ALL
/**
 * Return a map of log level values to the corresponding Level objects.
 */
def levels: Map[Int, Level] = levelsMap
/**
 * Return a map of log level names to the corresponding Level objects.
 */
def levelNames: Map[String, Level] = levelNamesMap
/**
 * Reset logging to an initial state, where all logging is set at
 * INFO level and goes to the console (stderr). Any existing log
 * handlers are removed.
 */
def reset() = {
clearHandlers()
loggersCache.clear()
root.addHandler(new ConsoleHandler(new Formatter(), None))
}
/**
 * Remove all existing log handlers from all existing loggers.
 */
def clearHandlers() = {
foreach { logger =>
logger.clearHandlers()
logger.setLevel(null)
}
}
/**
 * Execute a block with a given set of handlers, reverting back to the original
 * handlers upon completion.
 */
def withLoggers(loggerFactories: List[() => Logger])(f: => Unit): Unit =
withLazyLoggers(loggerFactories.map(_()))(f)
/**
 * Execute a block with a given set of handlers, reverting back to the original
 * handlers upon completion.
 */
def withLazyLoggers(loggers: => List[Logger])(f: => Unit): Unit = {
// Hold onto a local copy of loggerFactoryCache in case Logger.configure is called within f.
val localLoggerFactoryCache = loggerFactoryCache
clearHandlers()
// Forcing `loggers` installs the temporary handlers before f runs.
loggers
f
reset()
loggerFactoryCache = localLoggerFactoryCache
loggerFactoryCache.foreach { _() }
}
/**
 * Return a logger for the given package name. If one doesn't already
 * exist, a new logger will be created and returned.
 */
def get(name: String): Logger = {
loggersCache.get(name) match {
case logger: Logger =>
logger
case null =>
val logger = new Logger(name, javalog.Logger.getLogger(name))
// putIfAbsent resolves the race when two threads create the same name.
val oldLogger = loggersCache.putIfAbsent(name, logger)
if (oldLogger != null) {
oldLogger
} else {
logger
}
}
}
/** An alias for `get(name)` */
def apply(name: String) = get(name)
// Sniffs the caller's class name off the stack at the given depth.
private def get(depth: Int): Logger = getForClassName(new Throwable().getStackTrace()(depth).getClassName)
/**
 * Return a logger for the class name of the class/object that called
 * this method. Normally you would use this in a "private val"
 * declaration on the class/object. The class name is determined
 * by sniffing around on the stack.
 */
def get(): Logger = get(2)
/** An alias for `get()` */
def apply() = get(2)
private def getForClassName(className: String) = {
// Strip the trailing '$' from Scala object class names.
if (className.endsWith("$")) {
get(className.substring(0, className.length - 1))
} else {
get(className)
}
}
/**
 * Return a logger for the package of the class given.
 */
def get(cls: Class[_]): Logger = getForClassName(cls.getName)
/** An alias for `get(class)` */
def apply(cls: Class[_]) = get(cls)
/**
 * Iterate the Logger objects that have been created.
 */
def iterator: Iterator[Logger] = loggersCache.values.iterator.asScala
/**
 * Reset all the loggers and register new loggers
 * @note Only one logger is permitted per namespace
 */
def configure(loggerFactories: List[() => Logger]) {
loggerFactoryCache = loggerFactories
clearHandlers()
loggerFactories.foreach { _() }
}
}
| BuoyantIO/twitter-util | util-logging/src/main/scala/com/twitter/logging/Logger.scala | Scala | apache-2.0 | 14,299 |
package ml.wolfe.nlp
import scala.collection.mutable
/**
 * Declares the two participant types of an object-graph relationship.
 * @author Ingolf Becker
 */
trait ObjectGraphRelation {
type Parent
type Child
}
/**
* An object graph keeps track of all the relation
* @author Sebastian Riedel
* Ingolf Becker
*
*/
trait ObjectGraph[types <: ObjectGraphRelation] {
/**
* Links one child to one parent with the specified relation.
* @param parent The parent object
* @param child The Iterable of child object
* @return The Child
*/
def link1to1(parent: types#Parent, child: types#Child): types#Child
/**
* Links all children to the parent with the specified relation.
* @param parent The parent object
* @param children The Iterable of child objects
* @return The children
*/
def link1toN[I <: Iterable[types#Child]](parent: types#Parent, children: I): I
/**
* Gets the parent of this child under this relation
* @param child The child to find the parent of
* @return The parent
*/
def receive(child: types#Child): types#Parent
}
/**
* A simple implementation of an ObjectGraph based on a mutable HashMap.
* @tparam types The instance of ObjectGraphRelation describing the types of this ObjectGraph.
*/
/**
 * A simple in-memory ObjectGraph backed by a mutable child-to-parent map.
 *
 * Not thread-safe; relinking a child silently overwrites its previous
 * parent, and `receive` throws NoSuchElementException for unknown children.
 *
 * @tparam types the ObjectGraphRelation describing the parent/child types
 */
class SimpleObjectGraph[types <: ObjectGraphRelation] extends ObjectGraph[types] {
  private val map = new mutable.HashMap[types#Child, types#Parent]()

  def link1to1(parent: types#Parent, child: types#Child): types#Child = {
    map(child) = parent
    child
  }

  def link1toN[I <: Iterable[types#Child]](parent: types#Parent, children: I): I = {
    // Idiom fix: use `foreach` instead of `map` — the traversal is purely for
    // its side effect, so building and discarding a mapped collection was
    // wasted work.
    children.foreach(map(_) = parent)
    children
  }

  def receive(child: types#Child): types#Parent = map(child)
}
| wolfe-pack/wolfe | wolfe-nlp/src/main/scala/ml/wolfe/nlp/ObjectGraph.scala | Scala | apache-2.0 | 1,725 |
package is.hail.utils.richUtils
import java.io.{OutputStream, OutputStreamWriter}
import is.hail.expr.ir.ExecuteContext
import is.hail.io.FileWriteMetadata
import is.hail.rvd.RVDContext
import is.hail.sparkextras._
import is.hail.utils._
import is.hail.io.compress.{BGzipCodec, ComposableBGzipCodec, ComposableBGzipOutputStream}
import is.hail.io.fs.FS
import org.apache.hadoop
import org.apache.hadoop.io.compress.CompressionCodecFactory
import org.apache.spark.{NarrowDependency, Partition, Partitioner, TaskContext}
import org.apache.spark.rdd.RDD
import scala.reflect.ClassTag
import scala.collection.mutable
case class SubsetRDDPartition(index: Int, parentPartition: Partition) extends Partition
case class SupersetRDDPartition(index: Int, maybeParentPartition: Option[Partition]) extends Partition
class RichRDD[T](val r: RDD[T]) extends AnyVal {
/** Returns an RDD whose partitions are the parent's, reordered per `oldIndices`. */
def reorderPartitions(oldIndices: Array[Int])(implicit tct: ClassTag[T]): RDD[T] =
new ReorderedPartitionsRDD[T](r, oldIndices)
def forall(p: T => Boolean)(implicit tct: ClassTag[T]): Boolean = !exists(x => !p(x))
// True when some element satisfies `p`. Each partition is reduced to a
// single Boolean, then the per-partition results are OR-folded; note this
// runs one full Spark job (no cross-partition short-circuiting).
def exists(p: T => Boolean)(implicit tct: ClassTag[T]): Boolean = r.mapPartitions { it =>
Iterator(it.exists(p))
}.fold(false)(_ || _)
// Writes the RDD as a text table to `filename`, honoring the export type:
// CONCATENATED merges all parts into one file; the PARALLEL_* variants leave
// per-partition files and differ in where the header goes. Compression is
// inferred from the filename's extension.
def writeTable(ctx: ExecuteContext, filename: String, header: Option[String] = None, exportType: String = ExportType.CONCATENATED) {
val hConf = r.sparkContext.hadoopConfiguration
val codecFactory = new CompressionCodecFactory(hConf)
val codec = {
val codec = codecFactory.getCodec(new hadoop.fs.Path(filename))
// PARALLEL_COMPOSABLE needs the composable BGZip variant so the shards can
// later be concatenated into a valid bgzip stream.
if (codec != null && codec.isInstanceOf[BGzipCodec] && exportType == ExportType.PARALLEL_COMPOSABLE)
new ComposableBGzipCodec
else
codec
}
val fs = ctx.fs
fs.delete(filename, recursive = true) // overwriting by default
// CONCATENATED writes shards to a temp dir first, then merges into `filename`.
val parallelOutputPath =
if (exportType == ExportType.CONCATENATED)
ctx.createTmpPath("write-table-concatenated")
else
filename
// Prepend the header according to the export type (first shard only,
// every shard, or not at all when it is written as a separate file).
val rWithHeader: RDD[_] = header.map { h =>
if (r.getNumPartitions == 0 && exportType != ExportType.PARALLEL_SEPARATE_HEADER)
r.sparkContext.parallelize(List(h), numSlices = 1)
else {
exportType match {
case ExportType.CONCATENATED =>
r.mapPartitionsWithIndex { case (i, it) =>
if (i == 0)
Iterator(h) ++ it
else
it
}
case ExportType.PARALLEL_SEPARATE_HEADER =>
r
case ExportType.PARALLEL_COMPOSABLE =>
r
case ExportType.PARALLEL_HEADER_IN_SHARD =>
r.mapPartitions { it => Iterator(h) ++ it }
case _ => fatal(s"Unknown export type: $exportType")
}
}
}.getOrElse(r)
Option(codec) match {
case Some(x) => rWithHeader.saveAsTextFile(parallelOutputPath, x.getClass)
case None => rWithHeader.saveAsTextFile(parallelOutputPath)
}
if (exportType == ExportType.PARALLEL_SEPARATE_HEADER) {
val headerExt = fs.getCodecExtension(filename)
using(new OutputStreamWriter(fs.create(parallelOutputPath + "/header" + headerExt))) { out =>
header.foreach { h =>
out.write(h)
out.write('\\n')
}
}
}
if (exportType == ExportType.PARALLEL_COMPOSABLE) {
val ext = fs.getCodecExtension(filename)
val headerPath = parallelOutputPath + "/header" + ext
// For bgzip, wrap a no-compression stream so the header itself is a
// composable bgzip block.
val headerOs = if (ext == ".bgz") {
val os = fs.createNoCompression(headerPath)
new ComposableBGzipOutputStream(os)
} else {
fs.create(headerPath)
}
using(new OutputStreamWriter(headerOs)) { out =>
header.foreach { h =>
out.write(h)
out.write('\\n')
}
}
// this filename should sort after every partition
using(new OutputStreamWriter(fs.create(parallelOutputPath + "/part-composable-end" + ext))) { out =>
// do nothing, for bgzip, this will write the empty block
}
}
if (!fs.exists(parallelOutputPath + "/_SUCCESS"))
fatal("write failed: no success indicator found")
if (exportType == ExportType.CONCATENATED) {
fs.copyMerge(parallelOutputPath, filename, rWithHeader.getNumPartitions, header = false)
}
}
// Collects all elements locally in their original RDD order (zipWithIndex
// pins each element to its position before the collect).
def collectOrdered()(implicit tct: ClassTag[T]): Array[T] =
r.zipWithIndex().collect().sortBy(_._2).map(_._1)
/** First element satisfying `f`, if any (runs one job over the filtered RDD). */
def find(f: T => Boolean): Option[T] = r.filter(f).take(1).headOption
/** Collects the RDD's distinct elements into a local Set. */
def collectAsSet(): collection.Set[T] = {
  val seed = mutable.Set.empty[T]
  r.aggregate(seed)(
    (acc, elem) => acc += elem,
    (left, right) => left ++ right
  )
}
  /** Returns an RDD consisting only of the partitions of `r` whose indices are
    * listed in `keep`. No shuffle: new partition i is computed directly from
    * parent partition keep(i) via a narrow dependency.
    *
    * @param keep           sorted, in-range indices of the parent partitions to retain
    * @param newPartitioner partitioner advertised by the resulting RDD, if any
    */
  def subsetPartitions(keep: IndexedSeq[Int], newPartitioner: Option[Partitioner] = None)(implicit ct: ClassTag[T]): RDD[T] = {
    require(keep.length <= r.partitions.length,
      s"tried to subset to more partitions than exist ${keep.toSeq} ${r.partitions.toSeq}")
    require(keep.isIncreasing && (keep.isEmpty || (keep.head >= 0 && keep.last < r.partitions.length)),
      "values not sorted or not in range [0, number of partitions)")
    // Captured in a local val so the partition objects are available when
    // getPartitions runs on the new RDD.
    val parentPartitions = r.partitions
    new RDD[T](r.sparkContext, FastSeq(new NarrowDependency[T](r) {
      def getParents(partitionId: Int): Seq[Int] = FastSeq(keep(partitionId))
    })) {
      def getPartitions: Array[Partition] = keep.indices.map { i =>
        SubsetRDDPartition(i, parentPartitions(keep(i)))
      }.toArray
      def compute(split: Partition, context: TaskContext): Iterator[T] =
        r.compute(split.asInstanceOf[SubsetRDDPartition].parentPartition, context)
      @transient override val partitioner: Option[Partitioner] = newPartitioner
    }
  }
  /** Returns an RDD with `newNPartitions` partitions: old partition i of `r`
    * becomes new partition oldToNewPI(i); any new partition index with no old
    * counterpart is filled by `newPIPartition`.
    *
    * @param oldToNewPI     mapping from old partition index to new partition index
    *                       (distinct, in range [0, newNPartitions))
    * @param newNPartitions total number of partitions in the result
    * @param newPIPartition generator for partitions that have no parent
    * @param newPartitioner partitioner advertised by the resulting RDD, if any
    */
  def supersetPartitions(
    oldToNewPI: IndexedSeq[Int],
    newNPartitions: Int,
    newPIPartition: Int => Iterator[T],
    newPartitioner: Option[Partitioner] = None)(implicit ct: ClassTag[T]): RDD[T] = {
    require(oldToNewPI.length == r.partitions.length)
    require(oldToNewPI.forall(pi => pi >= 0 && pi < newNPartitions))
    require(oldToNewPI.areDistinct())
    val parentPartitions = r.partitions
    // Inverse mapping: new partition index -> old partition index (partial).
    val newToOldPI = oldToNewPI.zipWithIndex.toMap
    new RDD[T](r.sparkContext, FastSeq(new NarrowDependency[T](r) {
      def getParents(partitionId: Int): Seq[Int] = newToOldPI.get(partitionId) match {
        case Some(oldPI) => Array(oldPI)
        case None => Array.empty[Int]
      }
    })) {
      def getPartitions: Array[Partition] = Array.tabulate(newNPartitions) { i =>
        SupersetRDDPartition(i, newToOldPI.get(i).map(parentPartitions))
      }
      def compute(split: Partition, context: TaskContext): Iterator[T] = {
        split.asInstanceOf[SupersetRDDPartition].maybeParentPartition match {
          case Some(part) => r.compute(part, context)
          case None => newPIPartition(split.index)
        }
      }
      @transient override val partitioner: Option[Partitioner] = newPartitioner
    }
  }
  /** Runs a Spark job that returns the number of elements in each partition,
    * indexed by partition. */
  def countPerPartition()(implicit ct: ClassTag[T]): Array[Long] = {
    val sc = r.sparkContext
    sc.runJob(r, getIteratorSize _)
  }
  /** Writes each partition to its own file under `path` using `write`,
    * delegating to ContextRDD.writePartitions (with no index writer and no
    * per-partition setup).
    *
    * @param write        writes one partition's iterator to the stream and returns
    *                     counts — presumably (rows written, bytes written); confirm
    *                     against ContextRDD.writePartitions
    * @param stageLocally whether to stage output locally before copying
    * @return write metadata for each partition file
    */
  def writePartitions(
    ctx: ExecuteContext,
    path: String,
    stageLocally: Boolean,
    write: (Iterator[T], OutputStream) => (Long, Long)
  )(implicit tct: ClassTag[T]
  ): (Array[FileWriteMetadata]) =
    ContextRDD.weaken(r).writePartitions(ctx,
      path,
      null,
      stageLocally,
      (_) => null,
      (_, it, os, _) => write(it, os))
}
| danking/hail | hail/src/main/scala/is/hail/utils/richUtils/RichRDD.scala | Scala | mit | 7,474 |
// Presentation-compiler Scaladoc test fixture: verifies that the $BaseComment
// variable inherited from TV expands using the $BaseVar definition supplied on
// the subclass CV. NOTE(review): the /*s*/.../*e*/ markers appear to delimit
// the element under test — keep them and the @define text intact.
/**
 * @define BaseComment correctly got $BaseVar comment
 */
trait TV
/**
 * $BaseComment
 * @define BaseVar derived
 */
class /*s*/CV/*e*/ extends TV
| sschaef/scala-ide | org.scala-ide.sdt.core.tests/test-workspace/pc_doc/src/varz.scala | Scala | bsd-3-clause | 153 |
package com.fxlae.uuidstringifier.impl
import java.nio.ByteBuffer
import java.util.Base64.{Decoder, Encoder}
import java.util.{Base64, UUID}
import com.fxlae.uuidstringifier.UUIDStringifier
import scala.util.Try
/**
 * Converts UUIDs to and from Base64 strings.
 *
 * @param enc The encoder to use.
 * @param dec The decoder to use.
 */
class Base64Stringifier private (enc: Encoder, dec: Decoder)
    extends UUIDStringifier {

  /**
   * Encodes the 128 bits of a UUID as a Base64 string.
   *
   * @param uuid The UUID to transform.
   * @return The resulting string.
   */
  override def UUIDToString(uuid: UUID): String = {
    val raw = new Array[Byte](16)
    val buf = ByteBuffer.wrap(raw)
    buf.putLong(uuid.getMostSignificantBits)
    buf.putLong(uuid.getLeastSignificantBits)
    enc.encodeToString(raw)
  }

  /**
   * Decodes a Base64 string back into a UUID.
   *
   * @param string The input string.
   * @return Success(uuid), or Failure if the string is not valid Base64
   *         or does not decode to exactly 16 bytes.
   */
  override def UUIDFromString(string: String): Try[UUID] = Try {
    // IllegalArgumentException if string is not in a valid Base64 scheme.
    val decoded = dec.decode(string)
    require(decoded.length == 16,
      "Input string does not contain 16 bytes of data.")
    val buf = ByteBuffer.wrap(decoded)
    new UUID(buf.getLong, buf.getLong)
  }
}
/**
 * The companion object of the [[Base64Stringifier]] class.
 */
object Base64Stringifier {

  /**
   * Returns a [[Base64Stringifier]] instance using the basic Base64 alphabet.
   *
   * @param withPadding Indicates whether base 64 padding should be emitted or not.
   * @return [[Base64Stringifier]] instance.
   */
  // Explicit result type: public API should not rely on inference.
  def base64(withPadding: Boolean): Base64Stringifier = {
    val encoder =
      if (withPadding) Base64.getEncoder
      else Base64.getEncoder.withoutPadding
    new Base64Stringifier(encoder, Base64.getDecoder)
  }

  /**
   * Returns a [[Base64Stringifier]] instance that produces URL friendly strings
   * (the URL-safe alphabet, '-' and '_' instead of '+' and '/').
   *
   * @param withPadding Indicates whether base 64 padding should be emitted or not.
   * @return URL friendly [[Base64Stringifier]] instance.
   */
  def base64Url(withPadding: Boolean): Base64Stringifier = {
    val encoder =
      if (withPadding) Base64.getUrlEncoder
      else Base64.getUrlEncoder.withoutPadding
    new Base64Stringifier(encoder, Base64.getUrlDecoder)
  }
}
| fxlae/scala-uuid-stringifier | src/main/scala/com/fxlae/uuidstringifier/impl/Base64Stringifier.scala | Scala | mit | 2,595 |
package gapt.proofs.expansion
import gapt.expr._
import gapt.expr.formula.All
import gapt.expr.formula.Atom
import gapt.expr.formula.Formula
import gapt.expr.formula.Iff
import gapt.expr.formula.hol.HOLAtomConst
import gapt.expr.formula.hol.HOLPosition
import gapt.expr.subst.Substitution
import gapt.logic.Polarity
import gapt.proofs.context.Context
/** Eliminates predicate definitions from an expansion proof.
  *
  * An antecedent formula of the shape `∀xs (D(xs) <-> r(xs))` (matched by
  * [[DefinitionFormula]]) defines the atom constant `D`. For each such
  * definition this pass removes the definition formula, replaces expansions of
  * defined atoms by expansions of the defining formula, and introduces cuts to
  * reconnect positive and negative instances.
  */
object eliminateDefsET {
  // Matches ∀-closed iff-definitions D(vs) <-> r where the quantified
  // variables are exactly the arguments of D.
  object DefinitionFormula {
    def unapply( f: Formula ): Option[( List[Var], HOLAtomConst, Formula )] = f match {
      case All.Block( vs, Iff( Apps( d: HOLAtomConst, vs_ ), r ) ) if vs == vs_ =>
        Some( ( vs, d, r ) )
      case _ => None
    }
  }
  // Positions of the replacement subtrees inside the expansion of an Iff —
  // presumably the two implication directions; confirm against the ET layout.
  private val negReplPos = HOLPosition( 1, 2 )
  private val posReplPos = HOLPosition( 2, 1 )
  def apply( ep: ExpansionProof, pureFolWithoutEq: Boolean )( implicit ctx: Context ): ExpansionProof =
    apply( ep, pureFolWithoutEq, atomicExpansionET.getDefinedAtoms( ep ) )
  // Eliminates each definition constant in turn, then removes the remaining
  // atomic expansions.
  def apply( ep: ExpansionProof, pureFolWithoutEq: Boolean, definitions: Set[Const] )( implicit ctx: Context ): ExpansionProof =
    atomicExpansionET( definitions.foldLeft( ep )( apply( _, _, pureFolWithoutEq ) ), definitions, pureFolWithoutEq )
  // Eliminates a single definition constant from the proof; returns the proof
  // unchanged if no matching definition formula occurs in the antecedent.
  private def apply( ep: ExpansionProof, definitionConst: Const, pureFolWithoutEq: Boolean )( implicit ctx: Context ): ExpansionProof = {
    val definitionFormula @ DefinitionFormula( vs, _, definedFormula ) =
      ep.expansionSequent.antecedent.map( _.shallow ).find {
        case DefinitionFormula( _, `definitionConst`, _ ) => true
        case _ => false
      }.getOrElse( return ep )
    // Wraps a bare defined atom D(as) as a definition node over the
    // instantiated definiens.
    def mkDefAtom( as: Seq[Expr], pol: Polarity ) =
      ETDefinition(
        Substitution( vs zip as )( definedFormula ),
        ETAtom( definitionConst( as ).asInstanceOf[Atom], pol ) )
    // Collect, per argument tuple, the replacement expansions found in the
    // instances of the definition formula.
    val insts0 = for {
      ETWeakQuantifierBlock( `definitionFormula`, n, insts ) <- ep.expansionSequent.antecedent
      ( as, inst ) <- insts
      // DO NOT INLINE THIS! (otherwise the value of repls changes?!?!?)
      negRepls = inst( negReplPos )
      posRepls = inst( posReplPos )
      repls = negRepls ++ posRepls
      repl <- repls
    } yield as -> repl
    var insts = Map() ++ insts0.groupBy( _._1 ).view.mapValues( _.map( _._2 ) ).toMap
    // Everything except the definition formula itself.
    val rest = ep.expansionSequent.filterNot { et => et.polarity.inAnt && et.shallow == definitionFormula }
    // All (arguments, polarity) pairs at which the defined atom is used.
    val usesites: Set[( Seq[Expr], Polarity )] = rest.elements.flatMap { _.subProofs }.
      collect { case ETAtom( Apps( `definitionConst`, args ), pol ) => ( args, pol ) }.toSet
    insts = Map() ++ ( for ( as <- usesites.map( _._1 ) ++ insts.keys ) yield as -> insts.getOrElse( as, Vector() ) )
    if ( !pureFolWithoutEq ) {
      // Outside pure FOL without equality, generalize the instances so they
      // can be re-instantiated for every argument tuple.
      val newRepl = Vector() ++ ( for ( ( _, is ) <- insts; i <- is ) yield generalizeET( i, definedFormula ) )
      insts = for ( ( as, _ ) <- insts ) yield as -> Substitution( vs zip as )( newRepl ).toVector
    }
    // Ensure every use site has a replacement of the required polarity.
    insts = for ( ( as, is ) <- insts )
      yield as -> ( is ++ ( for {
        pol <- Polarity.values
        if usesites( as -> pol )
        if !is.exists( _.polarity == pol )
      } yield mkDefAtom( as, pol ) ) )
    // Replace each defined atom by the merge of the collected replacements of
    // matching polarity.
    def repl( et: ExpansionTree ): ExpansionTree =
      atomicExpansionET.mapDefinedAtom( et ) {
        case ( sh, Apps( `definitionConst`, as ), pol ) =>
          ETMerge( sh, pol, insts( as ).filter( _.polarity == pol ) )
      }
    // Cut positive against negative instances for each argument tuple.
    val newCuts = ETCut {
      for {
        ( _, is ) <- insts
        pos = is.filter( _.polarity.positive )
        neg = is.filter( _.polarity.negative )
        if pos.nonEmpty && neg.nonEmpty
      } yield ETMerge( pos ) -> ETMerge( neg )
    }
    val newES = ETMerge( newCuts +: rest.map( repl ) )
    eliminateMerges( ExpansionProof( newES ) )
  }
}
| gapt/gapt | core/src/main/scala/gapt/proofs/expansion/eliminateDefsET.scala | Scala | gpl-3.0 | 3,672 |
package nofs.restfs.query.ast
/** Abstract base type for statements of the query AST.
  * NOTE(review): currently has no members — presumably a marker supertype for
  * concrete statement nodes defined elsewhere; confirm. */
abstract class ProgramStm {
}
// Compiler test fixture: prints the toString of classOf[...] for value types,
// class types, arrays, and function types. NOTE(review): stdout is presumably
// compared against a .check file — do not change the printed lines or order.
class SomeClass
object Test {
  def main(args: Array[String]): Unit = {
    val cls: Predef.Class[SomeClass] = classOf[SomeClass]
    println("Value types:")
    println(classOf[Unit])
    println(classOf[Boolean])
    println(classOf[Byte])
    println(classOf[Short])
    println(classOf[Char])
    println(classOf[Int])
    println(classOf[Long])
    println(classOf[Float])
    println(classOf[Double])
    println("Class types")
    println(classOf[SomeClass])
    println(classOf[List[Array[Float]]])
    println(classOf[(String, Map[Int, String])])
    println("Arrays:")
    println(classOf[Array[Unit]])
    println(classOf[Array[Int]])
    println(classOf[Array[Double]])
    println(classOf[Array[List[String]]])
    println("Functions:")
    println(classOf[(Int, Int) => Unit])
    println(classOf[Int => Boolean])
  }
}
| shimib/scala | test/files/run/classof.scala | Scala | bsd-3-clause | 838 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtInteger, Input}
case class CP113(value: Int) extends CtBoxIdentifier(name = "Net profit on sale of fixed assets") with CtInteger with Input
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CP113.scala | Scala | apache-2.0 | 829 |
/*
* ecalogic: a tool for performing energy consumption analysis.
*
* Copyright (c) 2013, J. Neutelings, D. Peelen, M. Schoolderman
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* Neither the name of the Radboud University Nijmegen nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package nl.ru.cs.ecalogic
package model
import ast._
import interpreter.BaseInterpreter
import parser.ModelParser
import util._
import scala.collection.mutable
import scala.io.Source
import scala.util.Try
import java.io.File
import java.lang.reflect.Method
import java.net.{URI, URL}
/** A component model backed by a parsed ECM component definition.
  *
  * Wraps the AST `node` and interprets its functions via [[BaseInterpreter]];
  * component state variables are checked against their declared bounds, and
  * calls with a qualified name are dispatched reflectively to imported Java
  * classes.
  */
class ECMModel(val node: Component, protected val errorHandler: ErrorHandler = new DefaultErrorHandler()) extends ComponentModel with BaseInterpreter {
  import ECAException._
  // Immutable component state: a map from state-variable name to value.
  class CState private[ECMModel](val elements: Map[String, ECAValue]) extends ComponentState {
    protected def update(newElements: Map[String, ECAValue]) = new CState(newElements)
  }
  // Interpreter state: function-local bindings, component state, and whether
  // mutation of component state variables is currently permitted.
  protected case class IState(locals: Map[String, ECAValue], component: Map[String, ECAValue], mutation: Boolean) extends BaseInterpreterState {
    // Component variables shadow locals of the same name.
    def value(name: String) = component.get(name) orElse locals.get(name)
    def substitute(name: String, value: ECAValue, stackTrace: StackTrace) = {
      if (node.variables.contains(name)) {
        if (!mutation) {
          errorHandler.fatalError(new ECAException(s"Mutation of state variable '$name' is not allowed in this context.", stackTrace))
        }
        checkBoundaries(node.variables(name), value, Right(stackTrace))
        IState(locals, component.updated(name, value), mutation)
      } else
        IState(locals.updated(name, value), component, mutation)
    }
    // Runs `block` with fresh locals (the arguments); propagates the resulting
    // component state and returns the binding of `name` as the return value.
    def enterFunction(name: String, arguments: Map[String, ECAValue])(block: IState => IState) = {
      val postFunState = block(IState(arguments, component, mutation))
      (IState(locals, postFunState.component, mutation), postFunState.locals.get(name))
    }
  }
  // Construction-time validation: initial values must respect declared bounds.
  node.variables.values.foreach { v =>
    checkBoundaries(v, v.initialValue.getOrElse(v.upper), Left(v.initializer.map(_.value).getOrElse(v).position))
  }
  // Construction-time validation: phi, if present, must be nullary.
  node.functions.get("phi").filterNot(_.arity == 0).foreach { f =>
    errorHandler.fatalError(new ECAException(s"Phi function should have no parameters.", f))
  }
  val name = node.name
  // Variables without an explicit initializer start at their upper bound.
  val initialState = new CState(node.variables.map { case (k, v) => k -> v.initialValue.getOrElse(v.upper) })
  // Cache of resolved reflective methods plus the argument converter they need.
  private val methodCache = mutable.Map.empty[FunName, (Method, ECAValue => AnyRef)]
  // Eagerly resolved imported classes, keyed by alias.
  private val imports = node.imports.mapValues { i =>
    try {
      Class.forName(i.qualifiedName)
    } catch {
      case e: Exception => errorHandler.fatalError(new ECAException(s"Error loading imported class '${i.qualifiedName}': $e", i))
    }
  }
  // Fails fatally if `value` violates the declared [lower, upper] range.
  private def checkBoundaries(variable: CompVarDecl, value: ECAValue, info: Either[Position, StackTrace]) {
    if (value < variable.lower || value > variable.upper) {
      errorHandler.fatalError(new ECAException(s"Value $value exceeds the specified boundaries: ${variable.lower} <= ${variable.name} <= ${variable.upper}.",
        info.left.toOption, None, info.right.getOrElse(Seq.empty)))
    }
  }
  protected override val componentName = Some(name)
  protected val functions = node.functions
  // Dispatches a qualified call (alias::method) reflectively, trying ECAValue,
  // BigInt, then Int parameter signatures.
  // NOTE(review): the error messages below interpolate `$name`, which is the
  // *component* name val, not `methodName` — this looks like a bug; confirm.
  private def callReflective(call: FunCall, arguments: Seq[ECAValue], stackTrace: StackTraceBuilder): ECAValue = {
    val classAlias = call.name.prefix.get
    val methodName = call.name.name
    val clazz = imports.getOrElse(classAlias, errorHandler.fatalError(new ECAException(s"Undeclared class: '$classAlias'.", stackTrace.result(call))))
    val (method, converter) = methodCache.getOrElseUpdate(call.name, {
      Try((clazz.getMethod(methodName, Seq.fill(arguments.length)(classOf[ECAValue]):_*), (v: ECAValue) => v)) orElse
      Try((clazz.getMethod(methodName, Seq.fill(arguments.length)(classOf[BigInt]):_*) , (v: ECAValue) => v.toBigInt)) orElse
      //Try((clazz.getMethod(methodName, Seq.fill(arguments.length)(classOf[Long]):_*) , (v: ECAValue) => Long.box(v.toLong))) orElse
      Try((clazz.getMethod(methodName, Seq.fill(arguments.length)(classOf[Int]):_*) , (v: ECAValue) => Int.box(v.toInt))) getOrElse {
        errorHandler.fatalError(new ECAException(s"Method '$name' could not be found.", stackTrace.result(call)))
      }
    })
    val newStackTrace = stackTrace.callFunction(new FunName(methodName, Some(s"<external:${clazz.getName}>")), Some(call.position))
    val result = try {
      method.invoke(null, arguments.map(converter):_*)
    } catch {
      case e: Exception =>
        val trace = newStackTrace.result()
        errorHandler.fatalError(new ECAException(s"Exception while calling '$name': $e", trace.headOption.flatMap(_._2), Some(e), trace))
    }
    // Normalize the reflective result back into an ECAValue.
    result match {
      case v: ECAValue => v
      case v: BigInt => ECAValue.bigIntToValue(v)
      //case v: Long => ECAValue.bigIntToValue(v)
      case v: Integer => ECAValue.intToValue(v)
      case v =>
        errorHandler.fatalError(new ECAException(s"Method '$name' returned an unsupported result of type: '${v.getClass.getSimpleName}'.", newStackTrace.result()))
    }
  }
  // Intercepts qualified calls for reflective dispatch; everything else is
  // delegated to the base interpreter.
  override protected def evalExpression(expr: Expression, state: IState, stackTrace: StackTraceBuilder): (IState, ECAValue) = expr match {
    case call @ FunCall(qname, args) if qname.isPrefixed =>
      val (postArgsState, values) = evalExprList(args, state, stackTrace)
      (postArgsState, callReflective(call, values, stackTrace))
    case _ => super.evalExpression(expr, state, stackTrace)
  }
  // Energy and time costs of a component function (zero when undeclared).
  override def E(f: String) = node.componentFunctions.get(f).map(_.energy).getOrElse(ECAValue.Zero)
  override def T(f: String) = node.componentFunctions.get(f).map(_.time).getOrElse(ECAValue.Zero)
  // State transition of component function `f`, evaluated with zero arguments.
  override def delta(f: String)(s: CState) = node.componentFunctions.get(f).map { f =>
    new CState(evalFunction(f, Seq.fill(f.arity)(ECAValue.Zero), IState(Map.empty, s.elements, true),
      newStackTraceBuilder(new FunName("delta", Some("<internal>"))), None)._1.component)
  }.getOrElse(s)
  // Evaluates component function `f` with arguments `a`, returning the new
  // state and the result value.
  override def eval(f: String)(s: CState, a: Seq[ECAValue]) = node.componentFunctions.get(f).map { f =>
    val (stateEnv, result) = evalFunction(f, a, IState(Map.empty, s.elements, true),
      newStackTraceBuilder(new FunName("eval", Some("<internal>"))), None)
    (new CState(stateEnv.component), result)
  }.getOrElse((s, ECAValue.Zero))
  // Potential function phi; evaluated with mutation disabled.
  override def phi(s: CState) = node.functions.get("phi").map { f =>
    evalFunction(f, Seq.empty, IState(Map.empty, s.elements, false), newStackTraceBuilder(new FunName("phi", Some("<internal>"))), None)._2
  }.getOrElse(super.phi(s))
  override def functionArity(f: String) = node.componentFunctions.get(f).map(_.arity)
  override def hasFunctionInfo = true
}
object ECMModel {
  /** Parses `sourceText` into an [[ECMModel]].
    *
    * Warns when the source file name does not match the declared component
    * name; aborts via the error handler on parse failure.
    *
    * @param sourceText   the ECM source to parse
    * @param sourceURI    origin of the source, used for diagnostics, if known
    * @param errorHandler handler to report through; a default one is created
    *                     when absent
    */
  def fromSource(sourceText: String, sourceURI: Option[URI] = None, errorHandler: Option[ErrorHandler] = None) = {
    val eh = errorHandler.getOrElse(new DefaultErrorHandler(sourceText = Some(sourceText), sourceURI = sourceURI))
    val parser = new ModelParser(sourceText, eh)
    val node = parser.component()
    parser.expectEndOfFile()
    // Fixed: a stray apostrophe previously rendered the message as
    // "Parsing 'uri' 'failed."
    eh.successOrElse(s"Parsing${sourceURI.fold(" ")(u => s" '$u' ")}failed.")
    sourceURI.foreach { uri =>
      val path = uri.getPath
      val fileName = path.substring(path.lastIndexOf('/') + 1, path.length)
      if (fileName != node.name + ".ecm") {
        eh.warning(new ECAException(s"File name does not match component name '${node.name}'.", node))
      }
    }
    eh.reset()
    new ECMModel(node, eh)
  }

  /** Loads a component model from `file`; the underlying source is closed even
    * when parsing fails (previously it was leaked). */
  def fromFile(file: File, errorHandler: Option[ErrorHandler] = None): ECMModel = {
    val source = Source.fromFile(file)
    try fromSource(source.mkString, Some(file.toURI), errorHandler)
    finally source.close()
  }

  /** Loads a component model from `url`; the underlying source is closed even
    * when parsing fails (previously it was leaked). */
  def fromURL(url: URL, errorHandler: Option[ErrorHandler] = None): ECMModel = {
    val source = Source.fromURL(url)
    try fromSource(source.mkString, Some(url.toURI), errorHandler)
    finally source.close()
  }
}
| squell/ecalogic | src/main/scala/nl/ru/cs/ecalogic/model/ECMModel.scala | Scala | bsd-3-clause | 9,375 |
import play.api.{ ApplicationLoader, Configuration, Environment, Mode }
import play.api.inject.guice.GuiceApplicationBuilder
/** Test helpers for bootstrapping a Play application with ReactiveMongo. */
object PlayUtil {
  /** Builds an ApplicationLoader.Context for a simple test-mode environment,
    * loading configuration from that environment. */
  def context = {
    val env = Environment.simple(mode = Mode.Test)
    ApplicationLoader.Context(
      env,
      None,
      new play.core.DefaultWebCommands(),
      Configuration.load(env),
      new play.api.inject.DefaultApplicationLifecycle()
    )
  }
  // Reads a string-list setting; Option(...) maps a null (missing key per the
  // underlying Config semantics — confirm) to None.
  @inline def stringList(config: Configuration, key: String) =
    Option(config.underlying getStringList key)
  /** Loads the minimal set of Play modules plus the ReactiveMongo module onto
    * the given builder. */
  def configure(initial: GuiceApplicationBuilder): GuiceApplicationBuilder =
    initial.load(
      new play.api.i18n.I18nModule(),
      new play.api.mvc.CookiesModule(),
      new play.api.inject.BuiltinModule(),
      new play.modules.reactivemongo.ReactiveMongoModule()
    )
}
| ReactiveMongo/Play-ReactiveMongo | src/test/play-2.6/PlayUtil.scala | Scala | apache-2.0 | 804 |
/*******************************************************************************
* This file is part of tiscaf.
*
* tiscaf is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Foobar is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with tiscaf. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package tiscaf
package let
import java.io.RandomAccessFile
import scala.concurrent._
import ExecutionContext.Implicits.global
// doesn't catch IO exception as far as they will be delegated to HServer.onError()
// doesn't catch IO exception as far as they will be delegated to HServer.onError()
/** Serves the static file at `path`, honouring a simple "bytes=N-" Range
  * header and answering 404 when the file is missing.
  *
  * @param path           filesystem path of the file to serve
  * @param bufSize        size of the copy buffer in bytes
  * @param plainAsDefault serve unknown extensions as text/plain instead of
  *                       application/octet-stream
  */
protected class FiLet(path : String, bufSize : Int, plainAsDefault : Boolean) extends HSimpleLet {

  def act(tk : HTalk) {
    val f = new java.io.File(path)
    if (f.exists && f.isFile) {

      // Parse the only supported Range form, "bytes=123-":
      // 0 = no Range header, >0 = valid offset, -1 = present but unusable.
      val shift = tk.req.header("Range") match {
        case None => 0L
        case Some(x) =>
          val range = x.toLowerCase.replace("bytes", "").replace("=", "")
          if (range.endsWith("-")) try {
            val tmp = range.substring(0, range.length - 1).trim.toLong
            if (tmp >= 0 && tmp < f.length) tmp else -1L
          } catch { case _: Exception => -1L }
          else -1L
      }

      shift match {
        case x if (x > 0) =>
          tk.setStatus(HStatus.PartialContent)
            .setHeader("Content-Range", "bytes " + shift + "-" + (f.length - 1) + "/" + f.length)
        case x if (x == 0) => // nothing additional
        case x if (x < 0) => tk.setStatus(HStatus.RequestRangeNotSatisfiable)
      }

      if (shift >= 0) {
        tk.setHeader("Last-Modified", HResponse.stdDateString(f.lastModified))
          .setContentLength(f.length - shift)

        // Content type by extension; fall back per plainAsDefault.
        val cType = HMime.exts.keysIterator.find(ext => path.toLowerCase.endsWith("." + ext)) match {
          case Some(e) => HMime.exts(e)
          case None => if (plainAsDefault) "text/plain" else "application/octet-stream"
        }
        tk.setContentType(cType)

        val ar = new Array[Byte](bufSize)
        val raf = new RandomAccessFile(f, "r")
        // Fix: close the file even if seeking or writing to the client throws
        // (the handle was previously leaked on failure).
        try {
          if (shift > 0) raf.seek(shift)

          @scala.annotation.tailrec
          def writeBuf : Unit = {
            val wasRead = raf.read(ar)
            if (wasRead > 0) {
              tk.write(ar, 0, wasRead)
              writeBuf
            }
          }
          writeBuf
        } finally raf.close()
      } // if (shift >= 0)
    } // if (f.exists && f.isFile)
    else new ErrLet(HStatus.NotFound) act (tk)
  }
}
| gnieh/tiscaf | core/src/main/scala/tiscaf/let/FiLet.scala | Scala | lgpl-3.0 | 2,971 |
package sri.mobile.examples.uiexplorer.components.android
import sri.core.ElementFactory._
import sri.core._
import sri.mobile.components.android.SwitchAndroid
import sri.mobile.examples.uiexplorer.{UIExample, UIExplorerBlock, UIExplorerPage}
import sri.universal.styles.UniversalStyleSheet
import scala.scalajs.js
import scala.scalajs.js.annotation.ScalaJSDefined
/** UIExplorer example page demonstrating the Android two-state switch. */
object SwitchAndroidExample extends UIExample {
  override val title: String = "SwitchAndroid"
  override val description: String = "Standard Android two-state toggle component."
  // NOTE(review): colorTrueSwitchIsOn, colorFalseSwitchIsOn and eventSwitchIsOn
  // are never read in render() below — presumably left over from a fuller
  // example; confirm before removing.
  case class State(trueSwitchIsOn: Boolean = true,falseSwitchIsOn: Boolean= false,colorTrueSwitchIsOn:Boolean = true,colorFalseSwitchIsOn:Boolean = false,eventSwitchIsOn: Boolean = false)
  @ScalaJSDefined
  class Component extends ReactComponent[Unit, State] {
    initialState(State())
    // Renders two switches whose values are bound to component state.
    def render() = {
      UIExplorerPage(
        UIExplorerBlock("Switches can be set to true or false")(
          SwitchAndroid(style = styles.bottom10,onValueChange = (value: Boolean) => setState(state.copy(falseSwitchIsOn = value)),value = state.falseSwitchIsOn)(),
          SwitchAndroid(onValueChange = (value: Boolean) => setState(state.copy(trueSwitchIsOn = value)),value = state.trueSwitchIsOn)()
        )
      )
    }
  }
  val ctor = getTypedConstructor(js.constructorOf[Component], classOf[Component])
  val component = () => createElementNoProps(ctor)
  object styles extends UniversalStyleSheet {
    val bottom10 = style(marginBottom := 10)
  }
}
| hamazy/sri | mobile-examples/src/main/scala/sri/mobile/examples/uiexplorer/components/android/SwitchAndroidExample.scala | Scala | apache-2.0 | 1,516 |
package b
class Foo
| tdyas/pants | examples/src/scala/org/pantsbuild/example/strict_deps/B.scala | Scala | apache-2.0 | 21 |
// Scalaz playground: typed equality, three-way compare, and enrichments.
// NOTE(review): `extends App` relies on DelayedInit; fine for a scratch file,
// but prefer an explicit main method in production code.
object BasicScalaZ extends App {
  import scalaz._
  import Scalaz._
  println( 1 === 1)     // scalaz typesafe equality => true
  println( "x" === "x") // true
  println( "x" === "y") // false
  println( 1.0 ?|? 2.0 ) // scalaz three-way comparison
  println( 1.0 max 2.0 )
  "hello".println // scalaz enrichment: prints the value itself
}
package lib
import play.api.{ApplicationLoader, Environment, Mode, Play}
/** Mix-in that builds and starts a test-mode Play application as a side effect
  * of trait initialization.
  * NOTE(review): nothing here stops the application afterwards — confirm the
  * test harness handles shutdown. */
trait TestApplication {
  private[this] val env = Environment(new java.io.File("."), this.getClass.getClassLoader, Mode.Test)
  private[this] val context = ApplicationLoader.createContext(env)
  private[this] val app = ApplicationLoader(context).load(context)
  Play.start(app)
}
| gheine/apidoc | app/test/lib/TestApplication.scala | Scala | mit | 357 |
package emmy.inference
import breeze.numerics.abs
import emmy.autodiff.{ Evaluable, Expression, Gradient, GradientContext, Node, Parameter, SampleContext, Variable }
import emmy.inference.aevb.VariablePosterior
import scala.collection.mutable
/** GradientContext that substitutes variables by their variational posteriors
  * and caches evaluables per expression.
  *
  * @param posteriors   maps a variable node to its posterior; the posterior's
  *                     Q replaces the variable during evaluation
  * @param dependencies when non-empty, restricts gradient evaluation: the
  *                     gradient of `n` w.r.t. a parameter `v` is only computed
  *                     if `n` is listed among `v`'s dependents
  */
class ModelGradientContext(
    posteriors: Map[Node, VariablePosterior],
    dependencies: Map[Node, Set[Node]] = Map.empty
)
  extends GradientContext {

  // Per-context memoization of evaluables (and of wrapped gradients), keyed by
  // the expression node. NOTE(review): not thread-safe — confirm contexts are
  // used from a single thread.
  private val cache = mutable.HashMap[AnyRef, Any]()

  override def apply[U[_], V, S](n: Expression[U, V, S]): Evaluable[U[V]] =
    n match {
      // NOTE(review): the type arguments in this pattern are unchecked
      // (erasure); correctness relies on the posteriors map being consistent.
      case v: Variable[U, V, S] if posteriors.contains(v) ⇒
        val q = posteriors(v).Q.asInstanceOf[Variable[U, V, S]]
        cache.getOrElseUpdate(n, apply(q)).asInstanceOf[Evaluable[U[V]]]
      case _ ⇒
        cache.getOrElseUpdate(n, wrap(n.eval(this), n.toString)).asInstanceOf[Evaluable[U[V]]]
    }

  // Wraps an evaluable so it is computed at most once per sample-context
  // iteration; the result is reused until the iteration number changes.
  private def wrap[U[_], V](eval: Evaluable[U[V]], name: String): Evaluable[U[V]] = new Evaluable[U[V]] {

    private var lastContext = -1
    private var lastValue: Option[U[V]] = None

    override def apply(ec: SampleContext): U[V] = {
      if (ec.iteration != lastContext) {
        lastValue = Some(eval(ec))
        //        println(s"Setting ${name} to ${lastValue.get}")
        //        val asDouble = lastValue.get.asInstanceOf[Double]
        //        val asStr = asDouble.toString
        //        if (asStr == "NaN" || asStr == "Infinity" || abs(asDouble) > 10.0) {
        //          if (asStr == "NaN" || asStr == "Infinity")
        //            assert(false)
        //        }
        lastContext = ec.iteration
      }
      lastValue.get
    }
  }

  // Gradient of expression `n` w.r.t. parameter `v`; returns None when the
  // dependency map says `n` does not depend on `v`, or when posteriors replace
  // `n` and its gradient is undefined.
  override def apply[W[_], U[_], V, T, S](
    n: Expression[U, V, S],
    v: Parameter[W, T]
  ): Gradient[W, U] = {
    // An empty/missing dependency entry means "evaluate unconditionally".
    val eval = dependencies.get(v).forall {
      _.contains(n)
    }
    if (eval) {
      if (posteriors.contains(n)) {
        val q = posteriors(n).Q
          .asInstanceOf[Variable[U, V, S]]
        val g = q.grad(this, v)
        //        println(s"Replacing ${n} by ${q}")
        g
      }
      else
        n.grad(this, v)
    }
    else {
      None
    }
    /*
    val result = n.grad(this, v)
    if (!eval && result.isDefined) {
      throw new Exception("Expression is not evaluated, but should be")
    }
    result
    */
  }
}
| fvlankvelt/emmy | src/main/scala/emmy/inference/ModelGradientContext.scala | Scala | apache-2.0 | 2,312 |
package controllers
import models.LoginSession
import play.api.i18n.MessagesApi
import play.api.mvc.{MessagesRequest, Request}
/** A MessagesRequest that additionally carries the authenticated login session.
  *
  * @param login the session of the logged-in user
  */
class AuthMessagesRequest[A] (
  val login: LoginSession,
  messagesApi: MessagesApi,
  request: Request[A]
) extends MessagesRequest[A](request, messagesApi)
| ruimo/store2 | app/controllers/AuthMessagesRequest.scala | Scala | apache-2.0 | 288 |
package com.ewolff.user_registration.gatling
import io.gatling.core.Predef._
import io.gatling.core.session.Expression
import io.gatling.http.Predef._
import io.gatling.jdbc.Predef._
import scala.concurrent.duration._
import org.springframework.boot.SpringApplication
import com.ewolff.user_registration.RegistrationApplication
/** Gatling load test: registers and then deletes a user against a locally
  * started Spring Boot registration application. */
class UserRegistration extends Simulation {

  // Feeder yielding a fresh, effectively unique e-mail address per iteration.
  val emailFeeder = Iterator.continually(Map("email" -> (scala.math.abs(java.util.UUID.randomUUID.getMostSignificantBits) + "_gatling@dontsend.com")))

  val httpProtocol = http
    .baseUrl("http://127.0.0.1:8080")
    .acceptHeader("text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
    .acceptEncodingHeader("gzip, deflate")
    .acceptLanguageHeader("en,en-us;q=0.5")
    .connectionHeader("keep-alive")
    .header("Cache-Control", "max-age=0")

  val formHeader = Map(
    "Content-Type" -> "application/x-www-form-urlencoded")

  // Scenario: load the pages, submit the registration form with a feeder
  // e-mail, then delete that user again; repeated 10 times per virtual user.
  val scn = scenario("Registration")
    .repeat(10) {
      (
        exec(http("GET index")
          .get("/"))
          .pause(88 milliseconds)
          .exec(http("GET css")
            .get("/css/bootstrap.min.css"))
          .pause(1)
          .exec(http("GET form")
            .get("/user"))
          .pause(7)
          .feed(emailFeeder)
          .exec(http("POST user data")
            .post("/user")
            .headers(formHeader)
            .formParam("firstname", "Eberhard")
            .formParam("name", "Wolff")
            .formParam("email", "${email}"))
          .pause(4)
          .exec(http("POST delete user")
            .post("/userdelete")
            .headers(formHeader)
            .formParam("email", "${email}")))
    }

  // Boots the application under test in-process before the simulation runs.
  // NOTE(review): nothing here waits for startup to complete or shuts the app
  // down afterwards — confirm this is intentional.
  SpringApplication.run(classOf[RegistrationApplication])

  setUp(scn.inject(rampUsers(5) during (10 seconds))).protocols(httpProtocol)
}
package de.choffmeister.secpwd.utils
import java.io.File
import java.io.IOException
import java.io.EOFException
import java.io.InputStream
import java.io.OutputStream
import java.io.ByteArrayOutputStream
import java.security.MessageDigest
import java.security.DigestInputStream
import java.security.DigestOutputStream
import scala.language.implicitConversions
/** Enriches an InputStream with a fixed-size "window" reader. */
class RichInputStream(val stream: InputStream) {

  /** Runs `inner` against a view of `stream` limited to exactly `size` bytes.
    * On exit any unconsumed bytes of the window are skipped, so the underlying
    * stream is left positioned directly after the window. The underlying
    * stream itself is never closed.
    *
    * @throws IndexOutOfBoundsException if `size` is negative
    * @throws EOFException              if the underlying stream ends before
    *                                   `size` bytes were available
    */
  def preSizedInner(size: Long)(inner: InputStream => Any) {
    if (size < 0) throw new IndexOutOfBoundsException()
    val wrapper = new PreSizedInnerInputStream(size, stream)
    try {
      inner(wrapper)
    } finally {
      wrapper.close()
    }
  }

  /** A window of `size` bytes over `inner`: reports EOF after `size` bytes and
    * throws EOFException if `inner` ends early. close() consumes the rest of
    * the window but does NOT close `inner`. */
  class PreSizedInnerInputStream(val size: Long, val inner: InputStream) extends InputStream {
    private var position = 0L

    override def read(): Int = {
      if (position < size) {
        val b = inner.read()
        if (b >= 0) {
          position += 1
          b
        } else throw new EOFException()
      } else -1
    }

    // Bulk read, resolving the old "Optimize" TODO: one delegated call
    // instead of a call per byte; never reads past the window.
    override def read(b: Array[Byte], off: Int, len: Int): Int = {
      if (len == 0) 0
      else if (position >= size) -1
      else {
        val toRead = math.min(len.toLong, size - position).toInt
        val wasRead = inner.read(b, off, toRead)
        if (wasRead < 0) throw new EOFException()
        position += wasRead
        wasRead
      }
    }

    override def close(): Unit = {
      // Drain the remainder of the window so the parent stream is positioned
      // right after it.
      while (position < size) read()
      super.close()
    }
  }
}
/** Enriches an OutputStream with fixed-size "window" and buffered writers. */
class RichOutputStream(val stream: OutputStream) {

  /** Runs `inner` against a view of `stream` that accepts exactly `size`
    * bytes. On exit the unused remainder of the window is zero-padded, so the
    * underlying stream always advances by exactly `size` bytes; writing past
    * the window throws IOException. The underlying stream itself is never
    * closed.
    *
    * @throws IndexOutOfBoundsException if `size` is negative
    */
  def preSizedInner(size: Long)(inner: OutputStream => Any) {
    if (size < 0) throw new IndexOutOfBoundsException()
    val wrapper = new PreSizedInnerOutputStream(size, stream)
    try {
      inner(wrapper)
    } finally {
      wrapper.close()
    }
  }

  /** Buffers everything `inner` writes, lets `after` inspect the full buffer
    * (e.g. to record its length), then copies the buffered bytes to `stream`. */
  def cached(after: ByteArrayOutputStream => Any)(inner: ByteArrayOutputStream => Any) {
    val cache = new ByteArrayOutputStream()
    inner(cache)
    after(cache)
    val buf = cache.toByteArray
    stream.write(buf)
  }

  /** A window of `size` bytes over `inner`: writing past the window throws
    * IOException; close() zero-fills the remainder but does NOT close `inner`. */
  class PreSizedInnerOutputStream(val size: Long, val inner: OutputStream) extends OutputStream {
    private var position = 0L

    override def write(b: Int): Unit = {
      if (position < size) {
        position += 1
        inner.write(b)
      } else throw new IOException()
    }

    // Bulk write, resolving the old "Optimize" TODO: one delegated call
    // instead of a call per byte; rejects writes that overflow the window.
    override def write(b: Array[Byte], off: Int, len: Int): Unit = {
      if (len > size - position) throw new IOException()
      inner.write(b, off, len)
      position += len
    }

    override def close(): Unit = {
      // Pad the remainder of the window with zero bytes.
      while (position < size) write(0)
      super.close()
    }
  }
}
/** Implicit enrichments turning java.io streams into their Rich wrappers.
  * Explicit result types added: implicit conversions must not rely on type
  * inference (required for Scala 3 migration and clearer for callers). */
object RichStream {
  implicit def inputStreamToRichInputStream(stream: InputStream): RichInputStream = new RichInputStream(stream)
  implicit def outputStreamToRichOutputStream(stream: OutputStream): RichOutputStream = new RichOutputStream(stream)
}
package im.mange.acceptance.driveby.scalatest.browser
import im.mange.acceptance.driveby.scalatest.WebSpecification
import im.mange.driveby.Id
import im.mange.driveby.conditions.ValueEquals
import org.scalatest.Matchers
/** Browser acceptance test for the driveby `clear` command. */
class ClearSpec extends WebSpecification with Matchers {
  // typing into a text box and then clearing it should leave an empty value
  def `clear text in a textbox` {
    val id = Id("textBox")
    // serve a minimal page containing only the input under test
    given.page(<body><form><input type="text" id={id.id}/></form></body>)
      .enter(id, "text")
      .clear(id)
      .assert(ValueEquals(id, ""))
  }
} | alltonp/driveby | src/test/scala/im/mange/acceptance/driveby/scalatest/browser/ClearSpec.scala | Scala | apache-2.0 | 497 |
package Chapter04
/**
 * Worked examples for "Scala for the Impatient" chapter 4 (maps and tuples).
 * Each method is a self-contained demo; results are intentionally unused.
 */
object MapsAndTuples {
    // topics:
    // constructing a map
    // accessing map values
    // updating map values
    // iterating over maps
    // sorted maps
    // interoperating with java
    // tuples
    // zipping
    // hash table by default, tree map, map in general
    // collection of key/value pairs / (k,v) tuples
    // creating, querying and traversing maps
    // constructing a map
    def constructingAMap = {
        // Map[String, Int] immutable
        val scores = Map("Alice" -> 10, "Bob" -> 3, "Cindy" -> 8)
        import scala.collection.mutable
        val scoresMutable = mutable.Map("Alice" -> 10, "Bob" -> 3, "Cindy" -> 8)
        // new blank
        val scoresEmpty = mutable.Map[String, Int]()
        // I prefer
        val scoresEmpty2 = mutable.Map.empty[String, Int]
        // map = collection of pairs, Tuple2
        val scoresPairs = List(
            ("Alice", 10),
            ("Bob", 3),
            ("Cindy", 8)
        ).toMap
    }
    // accessing map values
    def accessingMapValues = {
        val scores = Map("Alice" -> 10, "Bob" -> 3, "Cindy" -> 8)
        // accessing by apply
        val bobsScore = scores("Bob")
        // if no key => exception
        // def default(key: K): V = throw new NoSuchElementException("key not found: " + key)
        // check key presence
        val bobsScore2 = if (scores.contains("Bob")) scores("Bob") else 0
        // shortcut
        val bobsScore3 = scores.getOrElse("Bob", 0)
        // more functional: monad Option
        val bobsScoreOption = scores.get("Bob") // Some or None
        // immutable maps can have 'default'
        val scoresWithDefault = scores.withDefaultValue(0)
        def tryToPredictValueByAI(str: String) = ???
        val scoresWithDefault2 = scores.withDefault(k => tryToPredictValueByAI(k))
    }
    // updating map values
    def updatingMapValues = {
        def mutableMaps = {
            import scala.collection.mutable
            val scores = mutable.Map("Alice" -> 10, "Bob" -> 3, "Cindy" -> 8)
            // in mutable map you can update value or add a new one, etc
            scores("Bob") = 10
            scores("Fred") = 7
            // or add/update a few pairs
            scores += ("Bob" -> 10, "Fred" -> 7)
            // or remove a pair
            scores -= "Alice"
        }
        def immutableMaps = {
            val scores = Map("Alice" -> 10, "Bob" -> 3, "Cindy" -> 8)
            // obtain a new modified map
            val updatedScores = scores + ("Bob" -> 10, "Fred" -> 7)
            // you can use a mutable reference to immutable map
            // easy way to share data, but may lead to data inconsistency
            var scoresMutableRef = scores + ("Bob" -> 10, "Fred" -> 7)
            scoresMutableRef += ("Bob" -> 10, "Fred" -> 7)
            scoresMutableRef -= "Alice"
        }
    }
    // iterating over maps
    def iteratingOverMaps = {
        val scores = Map("Alice" -> 10, "Bob" -> 3, "Cindy" -> 8)
        // loop over all k/v pairs using pattern matching
        for ((k,v) <- scores) println(s"key: ${k}, value: ${v}")
        // only keys
        for (k <- scores.keySet) println(s"key: ${k}")
        // only values
        for (v <- scores.values) println(s"value: ${v}")
        // reverse a map, naive approach
        // (duplicate values collapse to a single key in the reversed map)
        for ((k,v) <- scores) yield (v,k)
    }
    // sorted maps
    def sortedMaps = {
        import scala.collection.mutable
        // maps: hash table by default, tree
        // hast table unordered
        // sorted keys // TreeMap red-black tree
        val scores = mutable.SortedMap("Alice" -> 10, "Bob" -> 3, "Cindy" -> 8)
        // keys in insertion order
        val insordScores = mutable.LinkedHashMap("Alice" -> 10, "Bob" -> 3, "Cindy" -> 8)
    }
    // interoperating with java
    def interoperatingWithJava = {
        import scala.collection.mutable
        // ? java have a mutable tree map that scala have not (SortedMAp?)
        // JavaConverters gives explicit asScala/asJava bridges (preferred over
        // the old implicit JavaConversions)
        import scala.collection.JavaConverters._
        // from java to scala
        val scores: mutable.Map[String, Int] = new java.util.TreeMap[String, Int]().asScala
        // java.util.Properties = HashTable[Object, Object]
        val props: mutable.Map[String, String] = System.getProperties.asScala
        // from scala to java
        import java.awt.font.TextAttribute._
        val attrs = Map(FAMILY -> "Serif", SIZE -> 12) // scala map
        val font = new java.awt.Font(attrs.asJava)
    }
    // tuples
    def tuples = {
        // pairs is a simplest tuples: Tuple2(x, y)
        val tup3 = (1, 3.14, "Fred") // Tuple3[Int, Double, String]
        val name = tup3._3
        val (i, d, s) = tup3 // pattern matching
        println(s"int: ${i}, double: ${d}, string: ${s}")
        // useful for compound results
        val (upper, lower) = "New York".partition(_.isUpper)
    }
    // zipping
    def zipping = {
        // produce pairs for processing together
        val symbols = List("<", "-", ">")
        val counts = List(2, 10, 2)
        val pairs = symbols zip counts
        // s*n repeats the symbol string n times
        for ((s,n) <- pairs) print(s*n)
    }
}
/**
 * Exercise solutions for "Scala for the Impatient" chapter 4.
 * Methods are illustrative and not wired to any caller.
 */
object MapsAndTuples_Exercises {
    // map of prices; with discount
    def ex1 = {
        def gizmosOnSail = {
            val gizmos = Map("phone" -> 30, "sail" -> 40, "acesspoint" -> 5)
            // yield a new map with 10% off every price
            val withDiscount = for ((k, v) <- gizmos) yield (k, v * 0.9)
        }
    }
    // read words from a file; count words with mutable map
    def ex2 = {
        import scala.collection.mutable
        def wordCount(path: String) = {
            val counter = mutable.Map.empty[String, Int].withDefaultValue(0)
            def processNextToken(str: String) = counter(str) = counter(str) + 1
            val in = new java.util.Scanner(new java.io.File(path))
            while (in.hasNext) processNextToken(in.next)
            for ((w,c) <- counter) println(s"word: '$w' count: $c")
        }
    }
    // ex2 with immutable map
    def ex3 = {
        def wordCount(path: String) = {
            // mutable reference!
            var counter = Map.empty[String, Int].withDefaultValue(0)
            // NOTE(review): on some Scala versions `updated` on a
            // `withDefaultValue` map drops the default — verify for the
            // project's Scala version
            def processNextToken(str: String) = counter.updated(str, counter(str) + 1)
            val in = new java.util.Scanner(new java.io.File(path))
            while (in.hasNext) counter = processNextToken(in.next)
            for ((w,c) <- counter) println(s"word: '$w' count: $c")
        }
    }
    // ex3 with a sorted map
    def ex4 = {
        def wordCount(path: String) = {
            import scala.collection.immutable
            // mutable reference!
            var counter = immutable.SortedMap.empty[String, Int].withDefaultValue(0)
            def processNextToken(str: String) = counter.updated(str, counter(str) + 1)
            val in = new java.util.Scanner(new java.io.File(path))
            while (in.hasNext) counter = processNextToken(in.next)
            for ((w,c) <- counter) println(s"word: '$w' count: $c")
        }
    }
    // ex4 with java TreeMap
    def ex5 = {
        def wordCount(path: String) = {
            import scala.collection.JavaConverters._
            // mutable map!
            val counter = new java.util.TreeMap[String, Int]().asScala.withDefaultValue(0)
            def processNextToken(str: String) = counter.update(str, counter(str) + 1)
            val in = new java.util.Scanner(new java.io.File(path))
            while (in.hasNext) processNextToken(in.next)
            for ((w,c) <- counter) println(s"word: '$w' count: $c")
        }
    }
    // linked hash map for week days, insertion order
    def ex6 = {
        def insertionOrder = {
            import java.util.Calendar
            import scala.collection.mutable
            val days = mutable.LinkedHashMap("Monday" -> Calendar.MONDAY)
            for ((k,v) <- List(
                "Tuesday" -> Calendar.TUESDAY,
                "Wednesday" -> Calendar.WEDNESDAY,
                "Thursday" -> Calendar.THURSDAY,
                "Friday" -> Calendar.FRIDAY)
            ) days.update(k, v)
            // iteration order matches insertion order for LinkedHashMap
            for ((k,v) <- days) println(s"name: ${k}, value: ${v}")
        }
    }
    // all java properties, aligned
    def ex7 = {
        def java2scala = {
            import scala.collection.JavaConverters._
            val props = java.lang.System.getProperties.asScala
            val longestKeySize = props.keys.maxBy(_.length).length
            // def aligned(str: String) = { str + (" " * (longestKeySize - str.length)) }
            // for ((k,v) <- props) println(s"${aligned(k)} | ${v}")
            for ((k,v) <- props) println(s"""${k.padTo(longestKeySize, ' ')} | ${v}""")
        }
    }
    // return (min, max) tuple
    def ex8 = {
        // NOTE: min/max throw on an empty array
        def minmaxPair(values: Array[Int]) = (values.min, values.max)
    }
    // return (lt, eq, gt) tuple
    def ex9 = {
        def lteqgtTuple(values: Array[Int], v: Int) = {
            // can be done in one pass
            val cntLess = values.count(_ < v)
            val cntGreat = values.count(_ > v)
            val cntEq = values.length - (cntLess + cntGreat)
            (cntLess, cntEq, cntGreat)
        }
    }
    // zip two words
    def ex10 = {
        "Hello" zip "World" // Vector((H,W), (e,o), (l,r), (l,l), (o,d))
        // codec? compress/decompress?
    }
}
| vasnake/scala-for-the-impatient | src/main/scala/Chapter04/MapsAndTuples.scala | Scala | gpl-3.0 | 9,333 |
package com.github.macpersia.planty.views.jira
import java.io.{File, PrintStream}
import java.net.URI
import java.time.LocalTime.NOON
import java.time.format.DateTimeFormatter
import java.time.format.DateTimeFormatter.ofPattern
import java.time._
import java.time.temporal.{ChronoUnit, TemporalUnit}
import java.util
import java.util.Collections._
import java.util._
import java.util.concurrent.TimeUnit.MINUTES
import com.github.macpersia.planty.views.jira.model._
import com.github.macpersia.planty.worklogs.WorklogReporting
import com.github.macpersia.planty.worklogs.model.{WorklogEntry, WorklogFilter}
import com.typesafe.scalalogging.LazyLogging
import play.api.libs.json.{JsError, JsSuccess}
import play.api.libs.ws.WS
import play.api.libs.ws.WSAuthScheme.BASIC
import play.api.libs.ws.ning.NingWSClient
import resource.managed
import scala.collection.JavaConversions._
import scala.collection.immutable
import scala.collection.parallel.immutable.ParSeq
import scala.concurrent.duration.{Duration, SECONDS}
import scala.concurrent.{Await, ExecutionContext}
import scala.util.{Failure, Success, Try}
/**
 * Connection details for a JIRA server.
 *
 * @param baseUri  root URI of the JIRA instance (with or without a trailing '/')
 * @param username login used for basic authentication
 * @param password password used for basic authentication
 */
case class ConnectionConfig(baseUri: URI,
                            username: String,
                            password: String
                           ) {
  /** `baseUri` rendered as a string guaranteed to end with '/'. */
  val baseUriWithSlash = {
    val raw = baseUri.toString
    if (raw.endsWith("/")) raw else raw + "/"
  }
}
/** Shared constants for [[JiraWorklogReporter]]. */
object JiraWorklogReporter extends LazyLogging {
  // ISO-8601 date format used when rendering CSV report rows
  val DATE_FORMATTER = DateTimeFormatter.ISO_DATE
}
/**
 * Produces worklog reports from a JIRA instance and creates/updates
 * worklogs through the JIRA REST API v2.
 *
 * NOTE(review): retrieveWorklogs/extractWorklogs currently bypass the live
 * REST search and read from the local cache — the original integration is
 * kept as commented-out code and marked "for demo purposes". Confirm before
 * relying on this class for live data.
 */
class JiraWorklogReporter(connConfig: ConnectionConfig, filter: JiraWorklogFilter)
                         (implicit execContext: ExecutionContext)
  extends LazyLogging with WorklogReporting {
  // time zone used to interpret worklog start dates
  val zoneId = filter.timeZone.toZoneId
  lazy val cacheManager = CacheManager.instance
  implicit val sslClient = NingWSClient()
  override def close(): Unit = {
    if (sslClient != null) sslClient.close()
  }
  // Orders worklogs by (day, issue key) so reports come out in a stable order.
  class WorklogComparator(worklogsMap: util.Map[Worklog, BasicIssue])
    extends Comparator[Worklog] {
    def compare(w1: Worklog, w2: Worklog) = {
      Ordering[(Long, String)].compare(
        (w1.started.toEpochDay, worklogsMap.get(w1).key),
        (w2.started.toEpochDay, worklogsMap.get(w2).key)
      )
    }
  }
  // Writes one "date, issueKey, hours" row per worklog to the given file,
  // or to stdout when no file is supplied.
  def printWorklogsAsCsv(outputFile: Option[File]) {
    for (csvPrintStream <- managed(
      if (outputFile.isDefined) new PrintStream(outputFile.get)
      else Console.out)) {
      for (entry <- retrieveWorklogs())
        printWorklogAsCsv(entry, csvPrintStream, JiraWorklogReporter.DATE_FORMATTER)
    }
  }
  private def printWorklogAsCsv(entry: WorklogEntry, csvPs: PrintStream, formatter: DateTimeFormatter) {
    val date = formatter format entry.date
    csvPs.println(s"$date, ${entry.description}, ${entry.duration}")
  }
  override def retrieveWorklogs(): Seq[WorklogEntry] = {
    // val latestIssueTs = Await.result(
    //   cacheManager.latestIssueTimestamp(connConfig.baseUriWithSlash),
    //   Duration(10, SECONDS))
    // logger.debug(s"Previous timestamp for updates: $latestIssueTs")
    //
    // logger.debug(s"Searching the JIRA at ${connConfig.baseUriWithSlash} as ${connConfig.username}")
    //
    // val reqTimeout = Duration(1, MINUTES)
    //
    // val userUrl = connConfig.baseUriWithSlash + s"rest/api/2/user?username=${connConfig.username}"
    // val userReq = WS.clientUrl(userUrl)
    //   .withAuth(connConfig.username, connConfig.password, BASIC)
    //   .withHeaders("Content-Type" -> "application/json")
    // val userFuture = userReq.get()
    // val userResp = Await.result(userFuture, reqTimeout)
    // val userResult = userResp.json.validate[User].get
    // logger.debug("Current user's time zone: " + ZoneId.of(userResult.timeZone.get))
    //
    // val dateTimeFormatter: DateTimeFormatter = ofPattern("yyyy-MM-dd")
    // val fromDateFormatted: String = dateTimeFormatter.format(filter.fromDate)
    // val toDateFormatted: String = dateTimeFormatter.format(filter.toDate)
    //
    // val searchUrl = connConfig.baseUriWithSlash + "rest/api/2/search"
    // val jql: String = Seq(filter.jiraQuery, s"updated>=$fromDateFormatted AND created<=$toDateFormatted")
    //   .mkString(" AND ")
    // val maxResults = 1000
    // val searchReq = WS.clientUrl(searchUrl)
    //   .withAuth(connConfig.username, connConfig.password, BASIC)
    //   .withHeaders("Content-Type" -> "application/json")
    //   .withQueryString(
    //     "jql" -> jql,
    //     "maxResults" -> s"$maxResults",
    //     "fields" -> "updated,created"
    //   )
    // def fetchMatchingIssues(startAt: Int, acc: Seq[BasicIssue]): Try[Seq[BasicIssue]] = {
    //   val searchFuture = searchReq.withQueryString("startAt" -> s"$startAt").get()
    //   val searchResp = Await.result(searchFuture, reqTimeout)
    //   searchResp.json.validate[SearchResult] match {
    //     case JsSuccess(result, path) =>
    //       val issues: Seq[BasicIssue] = acc ++ result.issues
    //       if (issues.size < result.total)
    //         return fetchMatchingIssues(startAt = issues.size, acc = issues)
    //       else
    //         return Success(issues)
    //
    //     case JsError(errors) =>
    //       for (e <- errors) logger.error(e.toString())
    //       logger.debug("The body of search response: \\n" + searchResp.body)
    //       return Failure(new RuntimeException("Search Failed!"))
    //   }
    // }
    // val issues = fetchMatchingIssues(startAt = 0, acc = Nil).get
    // val worklogsMap: util.Map[Worklog, BasicIssue] = extractWorklogs(issues, Option(latestIssueTs))
    // TODO: This is a temporary replacement for the integration above, for demo purposes
    val issues = Await.result(cacheManager.listIssues(connConfig.baseUriWithSlash), Duration(10, SECONDS))
    val worklogsMap: util.Map[Worklog, BasicIssue] = extractWorklogs(issues.toList, Option.empty)
    return toWorklogEntries(worklogsMap)
  }
  // Resolves the cached worklog for the given day, then delegates to the
  // id-based overload.
  // NOTE(review): `worklog.get` throws NoSuchElementException when no cached
  // worklog matches worklogDate — consider surfacing a clearer error.
  def updateWorklogHours(issueKey: String, worklogDate: LocalDate, hoursSpent: Double): Int = {
    val worklog = Await.result(
      cacheManager.listWorklogs(connConfig.baseUriWithSlash, issueKey), Duration(30, SECONDS)
    ).find(w => w.started == worklogDate)
    updateWorklogHours(issueKey, worklog.get.id, hoursSpent)
  }
  // PUTs the new timeSpentSeconds to the worklog resource; returns the HTTP
  // status code of the response.
  def updateWorklogHours(issueKey: String, worklogId: String, hoursSpent: Double): Int = {
    val reqTimeout = Duration(1, MINUTES)
    val updateUrl = connConfig.baseUriWithSlash + s"rest/api/2/issue/$issueKey/worklog/$worklogId"
    val updateReq = WS.clientUrl(updateUrl)
      .withAuth(connConfig.username, connConfig.password, BASIC)
      .withHeaders(
        "Content-Type" -> "application/json"
      //      ).withQueryString(
      //        "_" -> s"${nonce.toEpochSecond}"
      )
    /*
        "timeSpent": "5m",
        "timeSpentSeconds": "300",
        "author": {
          "self": "${connConfig.baseUriWithSlash}api/2/user?username=${connConfig.username}"
        },
        "updateAuthor": {
          "self": "${connConfig.baseUriWithSlash}api/2/user?username=${connConfig.username}"
        },
        "visibility": {
          "type": "group",
          "value": "jira-developers"
        },
        "comment": "Testing JIRA...",
        "id" : "${issueId}"
    */
    val secondsSpent = Math.round(hoursSpent * 60 * 60).toInt
    val updateFuture = updateReq.put(
      s"""
         | {
         |    "timeSpentSeconds": ${secondsSpent}
         | }
      """.stripMargin)
    val updateResp = Await.result(updateFuture, reqTimeout)
    logger.debug("The update response JSON: " + updateResp.body)
    //    logger.debug("The update response JSON: " + updateResp.json)
    //    updateResp.json.validate[CatsSearchResult] match {
    //      case JsSuccess(searchResult, path) =>
    //        val worklogsMap: util.Map[CatsWorklog, BasicIssue] = extractWorklogs(searchResult)
    //        return toWorklogEntries(worklogsMap)
    //      case JsError(errors) =>
    //        for (e <- errors) logger.error(e.toString())
    //        logger.debug("The body of search response: \\n" + updateResp.body)
    //        throw new RuntimeException("Search Failed!")
    //    }
    updateResp.status
  }
  // POSTs a new worklog started at noon local time (start of day + 12h) of
  // the given date; returns the HTTP status code of the response.
  def createWorklog(issueKey: String, worklogDate: LocalDate, zone: ZoneId, hoursSpent: Double, comment: String): Int = {
    val reqTimeout = Duration(1, MINUTES)
    val createUrl = connConfig.baseUriWithSlash + s"rest/api/2/issue/$issueKey/worklog"
    val createReq = WS.clientUrl(createUrl)
      .withAuth(connConfig.username, connConfig.password, BASIC)
      .withHeaders(
        "Content-Type" -> "application/json"
      )
    //    {
    //      "comment": "I did some work here.",
    //      "visibility": {
    //        "type": "group",
    //        "value": "jira-developers"
    //      },
    //      "started": "2016-05-18T12:19:04.126+0000",
    //      "timeSpentSeconds": 12000
    //    }
    val secondsSpent = Math.round(hoursSpent * 60 * 60).toInt
    // NOTE(review): `comment` is interpolated unescaped into the JSON body —
    // quotes/backslashes in it would produce invalid JSON.
    val formattedDate: String = jiraDTFormatter.format(worklogDate.atStartOfDay(zone).plusHours(12))
    val createFuture = createReq.post(
      s"""
         | {
         |    "comment": "${comment}",
         |    "started": "${formattedDate}",
         |    "timeSpentSeconds": ${secondsSpent}
         | }
      """.stripMargin)
    val createResp = Await.result(createFuture, reqTimeout)
    logger.debug("The create response JSON: " + createResp.body)
    createResp.status
  }
  // Sorts the (worklog -> issue) map with WorklogComparator and converts each
  // key to a WorklogEntry, so entries come back ordered by (date, issue key).
  def toWorklogEntries(worklogsMap: util.Map[Worklog, BasicIssue]): Seq[WorklogEntry] = {
    if (worklogsMap.isEmpty)
      return Seq.empty
    else {
      val sortedWorklogsMap: util.SortedMap[Worklog, BasicIssue] = new util.TreeMap(new WorklogComparator(worklogsMap))
      sortedWorklogsMap.putAll(worklogsMap)
      val worklogEntries =
        for (worklog <- sortedWorklogsMap.keySet.iterator)
          yield toWorklogEntry(sortedWorklogsMap, worklog)
      return worklogEntries.toSeq
    }
  }
  // Converts one worklog to a report entry; duration is fractional hours.
  def toWorklogEntry(sortedReverseMap: util.SortedMap[Worklog, BasicIssue], worklog: Worklog) = {
    val issueKey = sortedReverseMap.get(worklog).key
    val secondsSpent = worklog.timeSpentSeconds.toDouble
    val hoursPerLog = secondsSpent / 60 / 60
    new WorklogEntry(
      date = worklog.started.atStartOfDay(zoneId).toLocalDate,
      description = issueKey,
      duration = hoursPerLog)
  }
  // Collects, per issue, the worklogs matching the filter's author and period.
  def extractWorklogs(issues: Seq[BasicIssue], prevSyncTimestamp: Option[ZonedDateTime])
  : util.Map[Worklog, BasicIssue] = {
    val worklogsMap: util.Map[Worklog, BasicIssue] = synchronizedMap(new util.HashMap)
    val myWorklogs: util.List[Worklog] = synchronizedList(new util.LinkedList)
    val baseUrlOption = Option(connConfig.baseUriWithSlash)
    for (issue <- issues.map(_.copy(baseUrl = baseUrlOption)).par) {
      val cachedIssue = Await.result(
        cacheManager.getIssueByBaseUrlAndId(issue.baseUrl.get, issue.id),
        Duration(30, SECONDS)
      )
      val allWorklogs =
        if (
        // TODO: This is a temporary deactivation for the condition below, for demo purposes
        // NOTE(review): `&&` binds tighter than `||`, so `false &&` only
        // disables the first disjunct — this branch can still run when
        // cachedIssue is empty or the issue is newer than the cache.
          false &&
            prevSyncTimestamp.isEmpty || cachedIssue.isEmpty || (issue.updated isAfter cachedIssue.get.updated)) {
          //val issueUrl = connConfig.baseUri.toString + s"/rest/api/2/issue/${issue.key}"
          //logger.debug(s"Retrieving issue ${issue.key} at $issueUrl")
          //
          //val issueReq = WS.clientUrl(issueUrl)
          //  .withAuth(connConfig.username, connConfig.password, BASIC)
          //  .withHeaders("Content-Type" -> "application/json")
          //        val searchFuture = issueReq.get()
          //        val searchResp = Await.result(searchFuture, reqTimeout)
          //        val issueResult = searchResp.json.validate[FullIssue].get
          retrieveWorklogsFromRestAPI(issue, connConfig.username, connConfig.password)
        } else
          Await.result(cacheManager.listWorklogs(issue.baseUrl.get, issue.key), Duration(30, SECONDS))
      for (worklog <- allWorklogs) {
        // fall back to the connection's username when no author filter is set
        val author = filter.author match {
          case None => connConfig.username
          case Some(username) => if (!username.trim.isEmpty) username else connConfig.username
        }
        if (isLoggedBy(author, worklog)
          && isWithinPeriod(filter.fromDate, filter.toDate, worklog)) {
          myWorklogs.add(worklog)
          worklogsMap.put(worklog, issue)
        }
      }
    }
    return worklogsMap
  }
  // Fetches up to 1000 worklogs of an issue from the REST API, updates the
  // cache on success, and returns the (base-url/issue-key enriched) worklogs.
  // NOTE(review): the username/password parameters are unused — the request
  // authenticates with connConfig instead.
  private def retrieveWorklogsFromRestAPI(issue: BasicIssue, username: String, password: String): ParSeq[Worklog] = {
    val worklogsUrl = s"${connConfig.baseUriWithSlash}rest/api/2/issue/${issue.key}/worklog"
    val reqTimeout = Duration(2, MINUTES)
    val worklogsReq = WS.clientUrl(worklogsUrl)
      .withAuth(connConfig.username, connConfig.password, BASIC)
      .withHeaders("Content-Type" -> "application/json")
      .withQueryString("maxResults" -> "1000")
    val respFuture = worklogsReq.get()
    val resp = Await.result(respFuture, reqTimeout)
    resp.json.validate[IssueWorklogs] match {
      case JsSuccess(issueWorklogs, path) =>
        val baseUrl = connConfig.baseUriWithSlash
        val enhancedWorklogs = issueWorklogs.worklogs.map(_.map(w => w.copy(
          issueKey = Option(issue.key), baseUrl = Option(baseUrl)
        )))
        val enhancedIssueWorklogs = issueWorklogs.copy(
          baseUrl = Option(baseUrl), issueKey = Option(issue.key), worklogs = enhancedWorklogs
        )
        cacheManager.updateIssueWorklogs(enhancedIssueWorklogs) onSuccess {
          case lastError => if (lastError.ok)
            cacheManager.updateIssue(issue)
        }
        return (enhancedIssueWorklogs.worklogs getOrElse immutable.Seq.empty).par
      case JsError(errors) =>
        for (e <- errors) logger.error(e.toString())
        logger.debug("The body of search response: \\n" + resp.body)
        throw new RuntimeException("Retrieving Worklogs Failed!")
    }
  }
  // case-insensitive author match
  def isLoggedBy(username: String, worklog: Worklog): Boolean = {
    worklog.author.name.equalsIgnoreCase(username)
  }
  // inclusive on both period endpoints
  def isWithinPeriod(fromDate: LocalDate, toDate: LocalDate, worklog: Worklog): Boolean = {
    val startDate = worklog.started.atStartOfDay(zoneId).toLocalDate
    startDate.isEqual(fromDate) || startDate.isEqual(toDate) ||
      (startDate.isAfter(fromDate) && startDate.isBefore(toDate))
  }
  // Renders minutes as "H h" or "H h, M m".
  def toFuzzyDuration(totalMinutes: Int): String = {
    val hours = totalMinutes / 60
    val minutes = totalMinutes % 60
    if (minutes == 0)
      s"$hours h"
    else
      s"$hours h, $minutes m"
  }
}
| macpersia/planty-jira-view | src/main/scala/com/github/macpersia/planty/views/jira/JiraWorklogReporter.scala | Scala | apache-2.0 | 14,483 |
// sbt scripted-test fixture: presumably compiles only when PublishedIvy.x is
// resolvable from the published ivy artifact — TODO confirm against the test.
object UseIvy
{
  def yi = PublishedIvy.x
} | matheshar/simple-build-tool | src/sbt-test/dependency-management/configurations/changes/ivy/Use.scala | Scala | bsd-3-clause | 42 |
package svez.akka.stream.stages
import akka.NotUsed
import akka.stream.scaladsl.GraphDSL.Implicits._
import akka.stream.scaladsl.{Flow, GraphDSL, Partition}
import akka.stream.{FanOutShape2, Graph, Outlet}
import cats.data.Ior._
import cats.data.{Ior, NonEmptyList, Validated, ValidatedNel}
import scala.util.{Failure, Success, Try}
/**
 * Akka Streams fan-out stages that split "sum-like" containers
 * (Either, Try, Validated, ValidatedNel, Ior) into two typed outlets,
 * plus implicit classes naming the outlets of each resulting shape.
 */
object partitions {
  // Routes Left values to out0 and Right values to out1.
  object PartitionEither {
    def apply[A, B](): Graph[FanOutShape2[Either[A, B], A, B], NotUsed] =
      GraphDSL.create[FanOutShape2[Either[A, B], A, B]]() { implicit builder: GraphDSL.Builder[NotUsed] ⇒
        // .get is safe here: the partition routes each element to the
        // matching branch before these flows unwrap it
        val left = builder.add(Flow[Either[A, B]].map (_.left.get))
        val right = builder.add(Flow[Either[A, B]].map (_.right.get))
        val partition = builder.add(Partition[Either[A, B]](2, _.fold(_ ⇒ 0, _ ⇒ 1)))
        partition ~> left
        partition ~> right
        new FanOutShape2[Either[A, B], A, B](partition.in, left.out, right.out)
      }
  }
  implicit class EitherShape[A, B](val shape: FanOutShape2[Either[A, B], A, B]) extends AnyVal {
    def left : Outlet[A] = shape.out0
    def right: Outlet[B] = shape.out1
  }
  // Routes Failure throwables to out0 and Success values to out1.
  object PartitionTry {
    def apply[T](): Graph[FanOutShape2[Try[T], Throwable, T], NotUsed] =
      GraphDSL.create[FanOutShape2[Try[T], Throwable, T]]() { implicit builder ⇒
        val success = builder.add(Flow[Try[T]].collect { case Success(a) ⇒ a })
        val failure = builder.add(Flow[Try[T]].collect { case Failure(t) ⇒ t })
        val partition = builder.add(Partition[Try[T]](2, _.map(_ ⇒ 1).getOrElse(0)))
        partition ~> failure
        partition ~> success
        new FanOutShape2[Try[T], Throwable, T](partition.in, failure.out, success.out)
      }
  }
  implicit class TryShape[T](val shape: FanOutShape2[Try[T], Throwable, T]) extends AnyVal {
    def failure: Outlet[Throwable] = shape.out0
    def success: Outlet[T] = shape.out1
  }
  // Converts Validated to Either and reuses PartitionEither.
  object PartitionValidated {
    def apply[E, A](): Graph[FanOutShape2[Validated[E, A], E, A], NotUsed] =
      GraphDSL.create[FanOutShape2[Validated[E, A], E, A]]() { implicit builder: GraphDSL.Builder[NotUsed] ⇒
        val toEither = builder.add(Flow.fromFunction((v: Validated[E, A]) ⇒ v.toEither))
        val either = builder.add(PartitionEither[E, A]())
        toEither ~> either.in
        new FanOutShape2[Validated[E, A], E, A](toEither.in, either.left, either.right)
      }
  }
  implicit class ValidatedShape[E, A](val shape: FanOutShape2[Validated[E, A], E, A]) extends AnyVal{
    def invalid: Outlet[E] = shape.out0
    def valid  : Outlet[A] = shape.out1
  }
  // Like PartitionValidated, but flattens the NonEmptyList of errors so the
  // invalid outlet emits individual E values.
  object PartitionValidatedNel {
    def apply[E, A](): Graph[FanOutShape2[ValidatedNel[E, A], E, A], NotUsed] =
      GraphDSL.create[FanOutShape2[ValidatedNel[E, A], E, A]]() { implicit builder: GraphDSL.Builder[NotUsed] ⇒
        val validated = builder.add(PartitionValidated[NonEmptyList[E], A]())
        new FanOutShape2[ValidatedNel[E, A], E, A](
          validated.in,
          validated.invalid.mapConcat(_.toList).outlet,
          validated.valid
        )
      }
  }
  implicit class ValidatedNelShape[E, A](val shape: FanOutShape2[ValidatedNel[E, A], E, A]) extends AnyVal{
    def invalid: Outlet[E] = shape.out0
    def valid  : Outlet[A] = shape.out1
  }
  // An Ior.Both element is expanded to two elements: its Left part followed
  // by its Right part; the pair is then split like an Either.
  object PartitionIor {
    def apply[A, B](): Graph[FanOutShape2[Ior[A, B], A, B], NotUsed] =
      GraphDSL.create[FanOutShape2[Ior[A, B], A, B]]() { implicit builder: GraphDSL.Builder[NotUsed] ⇒
        val flatten = builder.add(Flow[Ior[A, B]]
          .mapConcat(_.fold(a ⇒ List(Left(a)), b ⇒ List(Right(b)), (a, b) ⇒ List(Left(a), Right(b))))
          .map(_.onlyLeftOrRight.get)
        )
        val either = builder.add(PartitionEither[A, B]())
        flatten ~> either.in
        new FanOutShape2[Ior[A, B], A, B](flatten.in, either.left, either.right)
      }
  }
  implicit class IorShape[A, B](val shape: FanOutShape2[Ior[A, B], A, B]) extends AnyVal{
    def left : Outlet[A] = shape.out0
    def right: Outlet[B] = shape.out1
  }
} | svezfaz/akka-stream-fp | core/src/main/scala/svez/akka/stream/stages/partitions.scala | Scala | apache-2.0 | 4,040 |
package pl.touk.nussknacker.ui.security.oidc
import com.auth0.jwk.{JwkProvider, JwkProviderBuilder}
import pl.touk.nussknacker.engine.util.config.URIExtensions
import pl.touk.nussknacker.ui.security.oauth2.{JwtConfiguration, OAuth2Configuration}
import pl.touk.nussknacker.ui.security.oauth2.ProfileFormat.OIDC
import sttp.client.{NothingT, SttpBackend}
import sttp.model.MediaType
import java.net.URI
import java.security.PublicKey
import java.util.NoSuchElementException
import scala.concurrent.{ExecutionContext, Future}
/**
 * OIDC authentication settings, adaptable into the generic
 * [[OAuth2Configuration]]. Endpoint fields left as None are expected to be
 * filled in via `withDiscovery`; accessing `oAuth2Configuration` or
 * `jwkProvider` without them throws NoSuchElementException.
 *
 * NOTE(review): the error messages say "must provided" (sic) — runtime
 * strings left unchanged here; fix wording in a code change.
 */
case class OidcAuthenticationConfiguration(usersFile: URI,
                                           anonymousUserRole: Option[String] = None,
                                           issuer: URI,
                                           clientId: String,
                                           clientSecret: Option[String],
                                           redirectUri: Option[URI] = None,
                                           audience: Option[String] = None,
                                           scope: String = "openid profile",
                                           // The following values are used for overriding the ones obtained
                                           // from the OIDC Discovery or in case it is not supported at all.
                                           // They may be relative to the issuer.
                                           authorizationEndpoint: Option[URI] = None,
                                           tokenEndpoint: Option[URI] = None,
                                           userinfoEndpoint: Option[URI] = None,
                                           jwksUri: Option[URI] = None,
                                           rolesClaim: Option[String] = None,
                                          ) extends URIExtensions {
  // Adapter to the generic OAuth2 configuration; endpoints are resolved
  // relative to the issuer. Fails fast when a required endpoint or the
  // client secret is missing.
  lazy val oAuth2Configuration: OAuth2Configuration = OAuth2Configuration(
    usersFile = usersFile,
    authorizeUri = authorizationEndpoint.map(resolveAgainstIssuer)
      .getOrElse(throw new NoSuchElementException("An authorizationEndpoint must provided or OIDC Discovery available")),
    clientSecret = clientSecret
      .getOrElse(throw new NoSuchElementException("PKCE not yet supported, provide a client secret")),
    clientId = clientId,
    profileUri = userinfoEndpoint.map(resolveAgainstIssuer)
      .getOrElse(throw new NoSuchElementException("An userinfoEndpoint must provided or OIDC Discovery available")),
    profileFormat = Some(OIDC),
    accessTokenUri = tokenEndpoint.map(resolveAgainstIssuer)
      .getOrElse(throw new NoSuchElementException("A tokenEndpoint must provided or OIDC Discovery available")),
    redirectUri = redirectUri,
    jwt = Some(new JwtConfiguration {
      // access tokens are treated as JWTs only when an audience is configured
      def accessTokenIsJwt: Boolean = OidcAuthenticationConfiguration.this.audience.isDefined
      def userinfoFromIdToken: Boolean = true
      def audience: Option[String] = OidcAuthenticationConfiguration.this.audience
      def authServerPublicKey: Option[PublicKey] = None
      def idTokenNonceVerificationRequired: Boolean = false
    }),
    authorizeParams = Map("response_type" -> "code", "scope" -> scope),
    accessTokenParams = Map("grant_type" -> "authorization_code"),
    accessTokenRequestContentType = MediaType.ApplicationXWwwFormUrlencoded.toString(),
    anonymousUserRole = anonymousUserRole
  )
  // JWK provider backed by the (resolved) jwksUri; required for JWT validation.
  lazy val jwkProvider: JwkProvider = new JwkProviderBuilder(
    jwksUri.map(resolveAgainstIssuer)
      .getOrElse(throw new NoSuchElementException("A jwksUri must provided or OIDC Discovery available"))
      .toURL
  ).build()
  // Fills any endpoint still missing from the result of OIDC Discovery on
  // the issuer; explicitly configured values take precedence.
  def withDiscovery(implicit ec: ExecutionContext, sttpBackend: SttpBackend[Future, Nothing, NothingT]): OidcAuthenticationConfiguration = {
    val discoveredConfiguration = OidcDiscovery(issuer)
    copy(
      authorizationEndpoint = authorizationEndpoint.orElse(discoveredConfiguration.map(_.authorizationEndpoint)),
      tokenEndpoint = tokenEndpoint.orElse(discoveredConfiguration.map(_.tokenEndpoint)),
      userinfoEndpoint = userinfoEndpoint.orElse(discoveredConfiguration.map(_.userinfoEndpoint)),
      jwksUri = jwksUri.orElse(discoveredConfiguration.map(_.jwksUri))
    )
  }
  // relative endpoints resolve against the issuer (with a trailing slash)
  private def resolveAgainstIssuer(uri: URI): URI = issuer.withTrailingSlash.resolve(uri)
}
| TouK/nussknacker | security/src/main/scala/pl/touk/nussknacker/ui/security/oidc/OidcAuthenticationConfiguration.scala | Scala | apache-2.0 | 4,298 |
package com.sksamuel.scapegoat.inspections.collections
import com.sksamuel.scapegoat.{Inspection, InspectionContext, Inspector, Levels}
/**
 * Scapegoat inspection that warns on emptiness checks written as
 * `xs.size != 0` or `xs.size > 0` (and the same with `.length`) on
 * Traversables, where `nonEmpty` expresses the intent directly.
 *
 * @author Stephen Samuel
 */
class AvoidSizeNotEqualsZero
  extends Inspection(
    text = "Avoid Traversable.size != 0",
    defaultLevel = Levels.Warning,
    description = "Checks for use of Traversable.size.",
    explanation = ".size can be slow for some data structures, prefer .nonEmpty, which is O(1)."
  ) {

  def inspector(context: InspectionContext): Inspector =
    new Inspector(context) {
      override def postTyperTraverser =
        new context.Traverser {
          import context.global._

          // encoded method names matched on the receiver
          private val Size = TermName("size")
          private val Length = TermName("length")

          override def inspect(tree: Tree): Unit = {
            tree match {
              // matches `q.size != 0`, `q.size > 0`, `q.length != 0`,
              // `q.length > 0` ($bang$eq / $greater are the encoded names)
              case Apply(
                    Select(Select(q, Length | Size), TermName("$bang$eq") | TermName("$greater")),
                    List(Literal(Constant(0)))
                  ) if isTraversable(q) =>
                context.warn(tree.pos, self, tree.toString.take(100))
              case _ => continue(tree)
            }
          }
        }
    }
}
| sksamuel/scalac-scapegoat-plugin | src/main/scala/com/sksamuel/scapegoat/inspections/collections/AvoidSizeNotEqualsZero.scala | Scala | apache-2.0 | 1,210 |
/*
* Copyright (c) 2013 Christos KK Loverdos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ckkloverdos.thrift3r.descriptor
import com.ckkloverdos.thrift3r.codec.Codec
/**
 * Pairs a field's descriptor with the codec used to (de)serialize it.
 * The accessors below simply delegate to the underlying descriptor.
 *
 * @author Christos KK Loverdos <loverdos@gmail.com>
 */
final case class FieldInfo(field: FieldDescriptor, codec: Codec[_]) {
  def jvmType = field.jvmType
  def jvmClass = field.jvmClass
  def id = field.id
  def name = field.name
}
| loverdos/thrift3r | src/main/scala/com/ckkloverdos/thrift3r/descriptor/FieldInfo.scala | Scala | apache-2.0 | 943 |
package scalan.monads
import scalan.collections.ListOps
import scalan._
import scala.reflect.runtime.universe._
/**
* Created by slesarenko on 05/01/15.
*/
import language.higherKinds // Disable warnings for type constructor polymorphism
/**
 * Staged monad abstraction for the scalan DSL: a `Monad` type class over a
 * container `F[_]` together with derived combinators (`sequence`, `traverse`,
 * `foldM`, `filterM`, ...), an identity-monad instance and a state-like
 * `Oper` instance. Fix applied: `toString` previously interpolated a stray
 * closing brace (`s"Monad[${name}}]"`), printing e.g. `Monad[List}]`.
 */
trait Monads extends Base with ListOps { self: MonadsDsl =>
  /**
   * Monad type class over staged values. Instances provide `unit` and either
   * `flatMap` or `map` + `join` (each default is defined in terms of the others,
   * so overriding one pair breaks the mutual recursion).
   */
  trait Monad[F[_]] extends Functor[F] {
    implicit def thisCont: Cont[F] = this
    /** Lifts a staged value into the monad. */
    def unit[A:Elem](a: Rep[A]): Rep[F[A]]
    def flatMap[A:Elem,B:Elem](ma: Rep[F[A]])(f: Rep[A] => Rep[F[B]]): Rep[F[B]] =
      join(map(ma)(f))
    def map[A:Elem,B:Elem](ma: Rep[F[A]])(f: Rep[A] => Rep[B]): Rep[F[B]] =
      flatMap(ma)(a => unit(f(a)))
    def map2[A:Elem,B:Elem,C:Elem](ma: Rep[F[A]], mb: Rep[F[B]])(f: (Rep[A], Rep[B]) => Rep[C]): Rep[F[C]] =
      flatMap(ma)(a => map(mb)(b => f(a, b)))
    /** Turns a list of monadic values into a monadic list, preserving element order. */
    def sequence[A:Elem](lma: Rep[List[F[A]]]): Rep[F[List[A]]] =
      lma.foldRight[F[List[A]]](unit(SList.empty[A])) { (p: Rep[(F[A],F[List[A]])]) =>
        val Pair(ma, mla) = p
        map2(ma, mla)(_ :: _)
      }
    /** Maps each element through an effectful function and collects the results. */
    def traverse[A:Elem,B:Elem](la: Lst[A])(f: Rep[A] => Rep[F[B]]): Rep[F[List[B]]] =
      la.foldRight[F[List[B]]](unit(SList.empty[B])){ (in: Rep[(A,F[List[B]])]) =>
        val Pair(a, mlb) = in
        map2(f(a), mlb)(_ :: _)
      }
    // // For `List`, the `replicateM` function will generate a list of lists.
    // // It will contain all the lists of length `n` with elements selected from the
    // // input list.
    // // For `Option`, it will generate either `Some` or `None` based on whether the
    // // input is `Some` or `None`. The `Some` case will contain a list of length `n`
    // // that repeats the element in the input `Option`.
    // // The general meaning of `replicateM` is described very well by the
    // // implementation `sequence(List.fill(n)(ma))`. It repeats the `ma` monadic value
    // // `n` times and gathers the results in a single value, where the monad `M`
    // // determines how values are actually combined.
    //
    // // Recursive version:
    // def _replicateM[A](n: Int, ma: F[A]): F[List[A]] =
    //   if (n <= 0) unit(List[A]()) else map2(ma, replicateM(n - 1, ma))(_ :: _)
    //
    // Using `sequence` and the `List.fill` function of the standard library:
    def replicateM[A:Elem](n: Rep[Int])(ma: Rep[F[A]]): Rep[F[List[A]]] =
      sequence(SList.replicate(n, ma))
    def replicateM_[A:Elem](n: Rep[Int])(f: Rep[F[A]]): Rep[F[Unit]] =
      foreachM(SList.replicate(n, f))(skip)
    /** Monadic left fold over a staged list. */
    def foldM[A:Elem,B:Elem](la: Lst[A])(z: Rep[B])(f: (Rep[B],Rep[A]) => Rep[F[B]]): Rep[F[B]] =
      la.foldLeft[F[B]](unit(z)){ (in: Rep[(F[B],A)]) =>
        val Pair(fb, a) = in
        flatMap(fb)(b => f(b,a))
      }
    def foldM_[A:Elem,B:Elem](l: Lst[A])(z: Rep[B])(f: (Rep[B],Rep[A]) => Rep[F[B]]): Rep[F[Unit]] =
      skip { foldM(l)(z)(f) }
    def foreachM[A:Elem](l: Rep[List[A]])(f: Rep[A] => Rep[F[Unit]]): Rep[F[Unit]] =
      foldM_(l)(())((u,a) => skip(f(a)))
    /** Kleisli composition, alias of `compose`. */
    def seq[A:Elem,B:Elem,C:Elem](f: Rep[A] => Rep[F[B]], g: Rep[B] => Rep[F[C]]): Rep[A => F[C]] =
      compose(f, g)
    def as[A:Elem,B:Elem](a: Rep[F[A]])(b: Rep[B]): Rep[F[B]] = map(a)(_ => b)
    /** Discards the result, keeping only the effect. */
    def skip[A:Elem](a: Rep[F[A]]): Rep[F[Unit]] = as(a)(())
    def when[A:Elem](b: Rep[Boolean])(fa: => Rep[F[A]]): Rep[F[Boolean]] =
      IF (b) { as(fa)(true) } ELSE { unit(false) }
    // def forever[A,B](a: F[A]): F[B] = {
    //   lazy val t: F[B] = forever(a)
    //   a flatMap (_ => t)
    // }
    // def while_(a: F[Boolean])(b: F[Unit]): F[Unit] = {
    //   lazy val t: F[Unit] = while_(a)(b)
    //   a flatMap (c => skip(when(c)(t)))
    // }
    // def doWhile[A:Elem](a: Rep[F[A]])(cond: Rep[A] => Rep[F[Boolean]]): Rep[F[Unit]] = for {
    //   a1 <- a
    //   ok <- cond(a1)
    //   _ <- IF (ok) { doWhile(a)(cond) } else unit(())
    // } yield ()
    def compose[A:Elem,B:Elem,C:Elem](f: Rep[A] => Rep[F[B]], g: Rep[B] => Rep[F[C]]): Rep[A => F[C]] =
      fun {a => flatMap(f(a))(g)}
    // def _flatMap[A,B](ma: F[A])(f: A => F[B]): F[B] =
    //   compose((_:Unit) => ma, f)(())
    def join[A:Elem](mma: Rep[F[F[A]]]): Rep[F[A]] = flatMap(mma)(ma => ma)
    def filterM[A:Elem](ms: Lst[A])(f: Rep[A] => Rep[F[Boolean]]): Rep[F[List[A]]] =
      ms.foldRight(unit(SList.empty[A])){ (in: Rep[(A,F[List[A]])]) =>
        val Pair(x, y) = in
        val h = compose(f, (b: Rep[Boolean]) => IF (b) THEN {map2(unit(x),y)(_ :: _)} ELSE { y })
        h(x)
      }
    // FIX: was s"Monad[${name}}]" — the doubled brace leaked a literal '}' into the output.
    override def toString = s"Monad[${name}]"
    // syntax
    implicit def toMonadic[A:Elem](a: Rep[F[A]]): Monadic[F,A] =
      new Monadic[F,A] { val F = Monad.this; val eA = element[A]; def get = a }
  }
  /** Infix syntax wrapper delegating to the captured `Monad` instance `F`. */
  trait Monadic[F[_],A] {
    val F: Monad[F]
    implicit val eA: Elem[A]
    import F._
    def get: Rep[F[A]]
    private val a = get
    def map[B:Elem](f: Rep[A] => Rep[B]): Rep[F[B]] = F.map(a)(f)
    def flatMap[B:Elem](f: Rep[A] => Rep[F[B]]): Rep[F[B]] = F.flatMap(a)(f)
    def **[B:Elem](b: Rep[F[B]]) = F.map2(a,b)(Pair(_,_))
    def *>[B:Elem](b: Rep[F[B]]) = F.map2(a,b)((_,b) => b)
    def map2[B:Elem,C:Elem](b: Rep[F[B]])(f: (Rep[A],Rep[B]) => Rep[C]): Rep[F[C]] = F.map2(a,b)(f)
    def as[B:Elem](b: Rep[B]): Rep[F[B]] = F.as(a)(b)
    def skip: Rep[F[Unit]] = F.skip(a)
    def replicateM(n: Int) = F.replicateM(n)(a)
    def replicateM_(n: Int) = F.replicateM_(n)(a)
    //def withFilter(p: Rep[A] => Rep[Boolean]): Rep[F[A]] = a
  }
  object Monad {
    /** Summons the implicit Monad instance for `F`. */
    def apply[F[_]:Monad]: Monad[F] = implicitly[Monad[F]]
  }
  type Id[A] = A
  /** Container evidence for the identity "container". */
  trait IdCont extends Cont[Id] {
    def tag[T](implicit tT: WeakTypeTag[T]) = tT
    def lift[T](implicit eT: Elem[T]) = eT
    def unlift[T](implicit eFT: Elem[Id[T]]) = eFT
    def getElem[T](fa: Rep[Id[T]]) = !!!("Operation is not supported by Id container " + fa)
    def unapply[T](e: Elem[_]) = Some(e.asElem[Id[T]])
  }
  implicit val identityMonad: Monad[Id] = new Monad[Id] with IdCont {
    def unit[A:Elem](a: Rep[A]) = a
    override def flatMap[A:Elem,B:Elem](a: Rep[A])(f: Rep[A] => Rep[B]) = f(a)
  }
  // State-threading monad: a function from an Int state to (new state, result).
  type Oper[A] = Int => (Int, A)
  trait OperCont extends Cont[Oper] {
    def tag[T](implicit tT: WeakTypeTag[T]) = weakTypeTag[Oper[T]]
    def lift[T](implicit eT: Elem[T]) = funcElement(element[Int], element[(Int,T)])
    def unlift[T](implicit eFT: Elem[Oper[T]]) = eFT.eRange.eSnd
    def getElem[T](fa: Rep[Oper[T]]) = !!!("Operation is not supported by Oper container " + fa)
    def unapply[T](e: Elem[_]) = e match {
      case te: FuncElem[_, _] => Some(te.asElem[Oper[T]])
      case _ => None
    }
  }
  implicit val operationMonad: Monad[Oper] = new Monad[Oper] with OperCont {
    def unit[A:Elem](a: Rep[A]) = fun {i => (i, a)}
    // Threads the Int state through sequenced operations.
    override def flatMap[A:Elem,B:Elem](op: Rep[Oper[A]])(f: Rep[A] => Rep[Oper[B]]) =
      fun { (i: Rep[Int]) =>
        val Pair(i1, a) = op(i)
        val op1 = f(a)
        op1(i1)
      }
  }
  /** Natural transformation embedding plain values into `Oper` via `eval`. */
  object IdOper extends (Id ~> Oper) {
    def cIn = container[Id]
    def cOut = container[Oper]
    def apply[A:Elem](i: Rep[Id[A]]): Rep[Oper[A]] = eval(i)(element[A])
  }
  /** Identity natural transformation on `Oper`. */
  object OperOper extends (Oper ~> Oper) {
    def cIn = container[Oper]
    def cOut = container[Oper]
    def apply[A:Elem](i: Rep[Oper[A]]): Rep[Oper[A]] = i
  }
}
/** Aggregates the monad-related DSL modules into one mix-in surface. */
trait MonadsDsl extends ScalanDsl with Monads
  with FreesDsl
  with CoproductsDsl
  with ReadersDsl
  with StatesDsl
  with FreeStatesDsl
  with FreeMsDsl
{
  // Embeds a plain staged value into the Oper state monad via console_eval.
  def eval[A:Elem](v: Rep[A]): Rep[Oper[A]] = fun {i => console_eval(i,v)}
}
/** Standard (sequential, unstaged) implementation stack of [[MonadsDsl]]. */
trait MonadsDslStd extends ScalanDslStd
  with MonadsDsl
  with FreesDslStd
  with CoproductsDslStd
  with ReadersDslStd
  with StatesDslStd
  with FreeStatesDslStd
  with FreeMsDslStd
{
}
/** Staged (expression/IR) implementation stack of [[MonadsDsl]]. */
trait MonadsDslExp extends ScalanDslExp
  with MonadsDsl
  with FreesDslExp
  with CoproductsDslExp
  with ReadersDslExp
  with StatesDslExp
  with FreeStatesDslExp
  with FreeMsDslExp
{
}
| PCMNN/scalan-ce | effects/src/test/scala/scalan/monads/Monads.scala | Scala | apache-2.0 | 7,993 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.javaapi.http.internal
import java.{ lang => jl, util => ju }
import java.util.{ function => juf }
import io.gatling.commons.validation.{ safely, SuccessWrapper, Validation }
import io.gatling.core.session.{ Expression, Session => ScalaSession }
import io.gatling.core.session.el._
import io.gatling.http.response.Response
import io.gatling.javaapi.core.{ CheckBuilder, Session }
import io.gatling.javaapi.core.internal.Expressions._
import io.gatling.javaapi.core.internal.JavaExpression
import io.gatling.javaapi.http.HttpProtocolBuilder
/**
 * Internal bridge turning Java-API conditions (EL strings or java.util.function
 * callbacks) into Scala `Expression`s for `HttpProtocolBuilder.checkIf`.
 */
object ScalaHttpProtocolBuilderConditions {
  // Condition given as a Gatling EL string, e.g. "#{myFlag}".
  def untyped(context: io.gatling.http.protocol.HttpProtocolBuilder, condition: String): Untyped =
    new Untyped(context, condition.el)
  // Condition given as a Java function of the session only.
  def untyped(context: io.gatling.http.protocol.HttpProtocolBuilder, condition: JavaExpression[jl.Boolean]): Untyped =
    new Untyped(context, javaBooleanFunctionToExpression(condition))
  /** Builder step: attaches the checks guarded by a session-only condition. */
  final class Untyped(context: io.gatling.http.protocol.HttpProtocolBuilder, condition: Expression[Boolean]) {
    def then_(checkBuilders: ju.List[CheckBuilder]): HttpProtocolBuilder =
      new HttpProtocolBuilder(context.checkIf(condition)(HttpChecks.toScalaChecks(checkBuilders): _*))
  }
  // Condition given as a Java BiFunction of (response, session); exceptions are
  // captured by safely() and surface as validation failures.
  def typed(context: io.gatling.http.protocol.HttpProtocolBuilder, condition: juf.BiFunction[Response, Session, jl.Boolean]): Typed =
    new Typed(context, (u, session) => safely()(condition.apply(u, new Session(session)).booleanValue.success))
  /** Builder step: attaches the checks guarded by a (response, session) condition. */
  final class Typed(context: io.gatling.http.protocol.HttpProtocolBuilder, condition: (Response, ScalaSession) => Validation[Boolean]) {
    def then_(checkBuilders: ju.List[CheckBuilder]): HttpProtocolBuilder =
      new HttpProtocolBuilder(context.checkIf(condition)(HttpChecks.toScalaChecks(checkBuilders): _*))
  }
}
| gatling/gatling | gatling-http-java/src/main/scala/io/gatling/javaapi/http/internal/ScalaHttpProtocolBuilderConditions.scala | Scala | apache-2.0 | 2,415 |
package pl.writeonly.son2.apis.chain
// Wraps a partial function from an input key (presumably a format/converter name —
// confirm against callers) to a handler of type F; `get.isDefinedAt` tells whether
// this link of the chain can handle a given key.
class ChainImpl[F](val get: PartialFunction[String, F])
| writeonly/son2 | scallions-core/scallions-apis/src/main/scala/pl/writeonly/son2/apis/chain/ChainImpl.scala | Scala | apache-2.0 | 94 |
package gitbucket.core.servlet
import java.io.File
import java.util
import java.util.Date
import scala.util.Using
import gitbucket.core.api
import gitbucket.core.api.JsonFormat.Context
import gitbucket.core.model.WebHook
import gitbucket.core.plugin.{GitRepositoryRouting, PluginRegistry}
import gitbucket.core.service.IssuesService.IssueSearchCondition
import gitbucket.core.service.WebHookService._
import gitbucket.core.service._
import gitbucket.core.util.Implicits._
import gitbucket.core.util._
import gitbucket.core.model.Profile.profile.blockingApi._
import gitbucket.core.model.activity.{
BaseActivityInfo,
CloseIssueInfo,
CreateBranchInfo,
CreateTagInfo,
CreateWikiPageInfo,
DeleteBranchInfo,
DeleteTagInfo,
DeleteWikiInfo,
EditWikiPageInfo,
PushInfo
}
import gitbucket.core.util.JGitUtil.CommitInfo
// Imported names have higher precedence than names, defined in other files.
// If Database is not bound by explicit import, then "Database" refers to the Database introduced by the wildcard import above.
import gitbucket.core.servlet.Database
import org.eclipse.jgit.api.Git
import org.eclipse.jgit.http.server.GitServlet
import org.eclipse.jgit.lib._
import org.eclipse.jgit.transport._
import org.eclipse.jgit.transport.resolver._
import org.slf4j.LoggerFactory
import javax.servlet.ServletConfig
import javax.servlet.http.{HttpServletRequest, HttpServletResponse}
import org.eclipse.jgit.diff.DiffEntry.ChangeType
import org.eclipse.jgit.internal.storage.file.FileRepository
import org.json4s.Formats
import org.json4s.jackson.Serialization._
/**
* Provides Git repository via HTTP.
*
* This servlet provides only Git repository functionality.
* Authentication is provided by [[GitAuthenticationFilter]].
*/
class GitRepositoryServlet extends GitServlet with SystemSettingsService {

  private val logger = LoggerFactory.getLogger(classOf[GitRepositoryServlet])
  private implicit val jsonFormats: Formats = gitbucket.core.api.JsonFormat.jsonFormats

  override def init(config: ServletConfig): Unit = {
    // Install GitBucket's push hooks and repository path resolution before
    // delegating to JGit's GitServlet initialization.
    // (Removed an unused local: `val root = new File(Directory.RepositoryHome)`
    // was computed here but never referenced.)
    setReceivePackFactory(new GitBucketReceivePackFactory())
    setRepositoryResolver(new GitBucketRepositoryResolver)
    super.init(config)
  }

  override def service(req: HttpServletRequest, res: HttpServletResponse): Unit = {
    val agent = req.getHeader("USER-AGENT")
    val index = req.getRequestURI.indexOf(".git")
    if (index >= 0 && (agent == null || agent.toLowerCase.indexOf("git") < 0)) {
      // A non-git user agent hit a *.git URL: redirect browsers to the repository page.
      val paths = req.getRequestURI.substring(0, index).split("/")
      res.sendRedirect(baseUrl(req) + "/" + paths.dropRight(1).last + "/" + paths.last)
    } else if (req.getMethod.toUpperCase == "POST" && req.getRequestURI.endsWith("/info/lfs/objects/batch")) {
      // Git LFS batch endpoint (JSON, not Git wire protocol).
      withLockRepository(req) {
        serviceGitLfsBatchAPI(req, res)
      }
    } else {
      // response for git client
      withLockRepository(req) {
        super.service(req, res)
      }
    }
  }

  // Runs `f` under the repository lock when an upstream filter marked the request
  // with a lock key; otherwise runs it directly.
  private def withLockRepository[T](req: HttpServletRequest)(f: => T): T = {
    if (req.hasAttribute(Keys.Request.RepositoryLockKey)) {
      LockUtil.lock(req.getAttribute(Keys.Request.RepositoryLockKey).asInstanceOf[String]) {
        f
      }
    } else {
      f
    }
  }

  /**
   * Provides GitLFS Batch API
   * https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
   */
  protected def serviceGitLfsBatchAPI(req: HttpServletRequest, res: HttpServletResponse): Unit = {
    val batchRequest = read[GitLfs.BatchRequest](req.getInputStream)
    val settings = loadSystemSettings()
    settings.baseUrl match {
      case None => {
        throw new IllegalStateException("lfs.server_url is not configured.")
      }
      case Some(baseUrl) => {
        val index = req.getRequestURI.indexOf(".git")
        if (index >= 0) {
          req.getRequestURI.substring(0, index).split("/").reverse match {
            case Array(repository, owner, _*) =>
              // Each object gets a short-lived (10 min) Blowfish-signed authorization token.
              val timeout = System.currentTimeMillis + (60000 * 10) // 10 min.
              val batchResponse = batchRequest.operation match {
                case "upload" =>
                  GitLfs.BatchUploadResponse(
                    "basic",
                    batchRequest.objects.map { requestObject =>
                      GitLfs.BatchResponseObject(
                        requestObject.oid,
                        requestObject.size,
                        true,
                        GitLfs.Actions(
                          upload = Some(
                            GitLfs.Action(
                              href = baseUrl + "/git-lfs/" + owner + "/" + repository + "/" + requestObject.oid,
                              header =
                                Map("Authorization" -> StringUtil.encodeBlowfish(s"$timeout ${requestObject.oid}")),
                              expires_at = new Date(timeout)
                            )
                          )
                        )
                      )
                    }
                  )
                case "download" =>
                  GitLfs.BatchUploadResponse(
                    "basic",
                    batchRequest.objects.map { requestObject =>
                      GitLfs.BatchResponseObject(
                        requestObject.oid,
                        requestObject.size,
                        true,
                        GitLfs.Actions(
                          download = Some(
                            GitLfs.Action(
                              href = baseUrl + "/git-lfs/" + owner + "/" + repository + "/" + requestObject.oid,
                              header =
                                Map("Authorization" -> StringUtil.encodeBlowfish(s"$timeout ${requestObject.oid}")),
                              expires_at = new Date(timeout)
                            )
                          )
                        )
                      )
                    }
                  )
              }
              res.setContentType("application/vnd.git-lfs+json")
              Using.resource(res.getWriter) { out =>
                out.print(write(batchResponse))
                out.flush()
              }
          }
        }
      }
    }
  }
}
class GitBucketRepositoryResolver extends RepositoryResolver[HttpServletRequest] {

  /** Maps a repository name from the URL to its on-disk Git directory. */
  override def open(req: HttpServletRequest, name: String): Repository =
    // A plugin may claim this URL via a routing rule; if so, rewrite the
    // path into the plugin's local storage under GitBucketHome.
    PluginRegistry().getRepositoryRouting("/" + name) match {
      case Some(GitRepositoryRouting(urlPattern, localPath, _)) =>
        val rewritten = urlPattern.r.replaceFirstIn(name, localPath)
        new FileRepository(new File(Directory.GitBucketHome, rewritten))
      case None =>
        // Default location: <RepositoryHome>/<owner>/<repository>.git
        new FileRepository(new File(Directory.RepositoryHome, name))
    }
}
class GitBucketReceivePackFactory extends ReceivePackFactory[HttpServletRequest] with SystemSettingsService {

  private val logger = LoggerFactory.getLogger(classOf[GitBucketReceivePackFactory])

  /** Builds a ReceivePack for a push, wiring GitBucket's commit hooks onto it. */
  override def create(request: HttpServletRequest, db: Repository): ReceivePack = {
    val pack = new ReceivePack(db)
    // Repositories served through a plugin routing are left untouched:
    // the plugin is responsible for its own hooks.
    if (PluginRegistry().getRepositoryRouting(request.gitRepositoryPath).isEmpty) {
      val pusher = request.getAttribute(Keys.Request.UserName).asInstanceOf[String]

      logger.debug("requestURI: " + request.getRequestURI)
      logger.debug("pusher:" + pusher)

      val segments = request.paths
      val owner = segments(1)
      val repoName = segments(2).stripSuffix(".git")

      logger.debug("repository:" + owner + "/" + repoName)

      val settings = loadSystemSettings()
      val baseUrl = settings.baseUrl(request)
      val sshUrl = settings.sshUrl(owner, repoName)

      if (repoName.endsWith(".wiki")) {
        // Wiki pushes only need post-receive processing (activity + gollum web hooks).
        pack.setPostReceiveHook(
          new WikiCommitHook(owner, repoName.stripSuffix(".wiki"), pusher, baseUrl, sshUrl)
        )
      } else {
        // Regular repositories get the full pre/post receive pipeline.
        val hook = new CommitLogHook(owner, repoName, pusher, baseUrl, sshUrl)
        pack.setPreReceiveHook(hook)
        pack.setPostReceiveHook(hook)
      }
    }
    pack
  }
}
import scala.jdk.CollectionConverters._
/**
 * Pre/post receive hook for regular (non-wiki) repositories. On pre-receive it
 * runs plugin hooks and snapshots the set of already-known commit ids; on
 * post-receive it records activity, closes referenced issues, updates pull
 * requests and fires web hooks — all inside one database transaction.
 */
class CommitLogHook(owner: String, repository: String, pusher: String, baseUrl: String, sshUrl: Option[String])
    extends PostReceiveHook
    with PreReceiveHook
    with RepositoryService
    with AccountService
    with IssuesService
    with ActivityService
    with MergeService
    with PullRequestService
    with WebHookService
    with LabelsService
    with PrioritiesService
    with MilestonesService
    with WebHookPullRequestService
    with WebHookPullRequestReviewCommentService
    with CommitsService
    with SystemSettingsService
    with RequestCache {
  private val logger = LoggerFactory.getLogger(classOf[CommitLogHook])
  // Commit ids that existed before this push; filled in onPreReceive so that
  // onPostReceive can tell which commits are genuinely new.
  private var existIds: Seq[String] = Nil
  def onPreReceive(receivePack: ReceivePack, commands: java.util.Collection[ReceiveCommand]): Unit = {
    Database() withTransaction { implicit session =>
      try {
        commands.asScala.foreach { command =>
          // call pre-commit hook
          // The first error returned by any plugin hook rejects the command.
          PluginRegistry().getReceiveHooks
            .flatMap(_.preReceive(owner, repository, receivePack, command, pusher, false))
            .headOption
            .foreach { error =>
              command.setResult(ReceiveCommand.Result.REJECTED_OTHER_REASON, error)
            }
        }
        Using.resource(Git.open(Directory.getRepositoryDir(owner, repository))) { git =>
          existIds = JGitUtil.getAllCommitIds(git)
        }
      } catch {
        case ex: Exception => {
          logger.error(ex.toString, ex)
          throw ex
        }
      }
    }
  }
  def onPostReceive(receivePack: ReceivePack, commands: java.util.Collection[ReceiveCommand]): Unit = {
    val settings = loadSystemSettings()
    Database() withTransaction { implicit session =>
      try {
        Using.resource(Git.open(Directory.getRepositoryDir(owner, repository))) { git =>
          JGitUtil.removeCache(git)
          // Guards against processing the same commit twice within one push
          // (e.g. when several refs contain it).
          val pushedIds = scala.collection.mutable.Set[String]()
          commands.asScala.foreach { command =>
            logger.debug(s"commandType: ${command.getType}, refName: ${command.getRefName}")
            implicit val apiContext: Context = api.JsonFormat.Context(baseUrl, sshUrl)
            // refName(1) is "heads" or "tags"; everything after is the branch/tag name.
            val refName = command.getRefName.split("/")
            val branchName = refName.drop(2).mkString("/")
            val commits = if (refName(1) == "tags") {
              Nil
            } else {
              command.getType match {
                case ReceiveCommand.Type.DELETE => Nil
                case _ => JGitUtil.getCommitLog(git, command.getOldId.name, command.getNewId.name)
              }
            }
            val repositoryInfo = getRepository(owner, repository).get
            // Update default branch if repository is empty and pushed branch is not current default branch
            if (JGitUtil.isEmpty(git) && commits.nonEmpty && branchName != repositoryInfo.repository.defaultBranch) {
              saveRepositoryDefaultBranch(owner, repository, branchName)
              // Change repository HEAD
              Using.resource(Git.open(Directory.getRepositoryDir(owner, repository))) { git =>
                git.getRepository.updateRef(Constants.HEAD, true).link(Constants.R_HEADS + branchName)
              }
            }
            // Retrieve all issue count in the repository
            val issueCount =
              countIssue(IssueSearchCondition(state = "open"), IssueSearchOption.Issues, owner -> repository) +
                countIssue(IssueSearchCondition(state = "closed"), IssueSearchOption.Issues, owner -> repository)
            // Extract new commit and apply issue comment
            val defaultBranch = repositoryInfo.repository.defaultBranch
            val newCommits = commits.flatMap { commit =>
              if (!existIds.contains(commit.id) && !pushedIds.contains(commit.id)) {
                if (issueCount > 0) {
                  pushedIds.add(commit.id)
                  createIssueComment(owner, repository, commit)
                  // close issues
                  // Only commits landing on the default branch via a normal update may close issues.
                  if (refName(1) == "heads" && branchName == defaultBranch && command.getType == ReceiveCommand.Type.UPDATE) {
                    getAccountByUserName(pusher).foreach { pusherAccount =>
                      closeIssuesFromMessage(commit.fullMessage, pusher, owner, repository).foreach { issueId =>
                        getIssue(owner, repository, issueId.toString).foreach { issue =>
                          callIssuesWebHook("closed", repositoryInfo, issue, pusherAccount, settings)
                          val closeIssueInfo =
                            CloseIssueInfo(owner, repository, pusherAccount.userName, issue.issueId, issue.title)
                          recordActivity(closeIssueInfo)
                          PluginRegistry().getIssueHooks
                            .foreach(_.closedByCommitComment(issue, repositoryInfo, commit.fullMessage, pusherAccount))
                        }
                      }
                    }
                  }
                }
                Some(commit)
              } else None
            }
            // set PR as merged
            val pulls = getPullRequestsByBranch(owner, repository, branchName, Some(false))
            pulls.foreach { pull =>
              if (commits.exists { c =>
                  c.id == pull.commitIdTo
                }) {
                markMergeAndClosePullRequest(pusher, owner, repository, pull)
                getAccountByUserName(pusher).foreach { pusherAccount =>
                  callPullRequestWebHook("closed", repositoryInfo, pull.issueId, pusherAccount, settings)
                }
              }
            }
            // record activity
            if (refName(1) == "heads") {
              command.getType match {
                case ReceiveCommand.Type.CREATE =>
                  val createBranchInfo = CreateBranchInfo(owner, repository, pusher, branchName)
                  recordActivity(createBranchInfo)
                case ReceiveCommand.Type.UPDATE =>
                  val pushInfo = PushInfo(owner, repository, pusher, branchName, newCommits)
                  recordActivity(pushInfo)
                case ReceiveCommand.Type.DELETE =>
                  val deleteBranchInfo = DeleteBranchInfo(owner, repository, pusher, branchName)
                  recordActivity(deleteBranchInfo)
                case _ =>
              }
            } else if (refName(1) == "tags") {
              command.getType match {
                case ReceiveCommand.Type.CREATE =>
                  val createTagInfo = CreateTagInfo(owner, repository, pusher, branchName)
                  recordActivity(createTagInfo)
                case ReceiveCommand.Type.DELETE =>
                  val deleteTagInfo = DeleteTagInfo(owner, repository, pusher, branchName)
                  recordActivity(deleteTagInfo)
                case _ =>
              }
            }
            // Re-sync open pull requests whose base/head involves this branch.
            if (refName(1) == "heads") {
              command.getType match {
                case ReceiveCommand.Type.CREATE | ReceiveCommand.Type.UPDATE |
                    ReceiveCommand.Type.UPDATE_NONFASTFORWARD =>
                  getAccountByUserName(pusher).foreach { pusherAccount =>
                    updatePullRequests(owner, repository, branchName, pusherAccount, "synchronize", settings)
                  }
                case _ =>
              }
            }
            // call web hook
            callWebHookOf(owner, repository, WebHook.Push, settings) {
              for {
                pusherAccount <- getAccountByUserName(pusher)
                ownerAccount <- getAccountByUserName(owner)
              } yield {
                WebHookPushPayload(
                  git,
                  pusherAccount,
                  command.getRefName,
                  repositoryInfo,
                  newCommits,
                  ownerAccount,
                  newId = command.getNewId(),
                  oldId = command.getOldId()
                )
              }
            }
            if (command.getType == ReceiveCommand.Type.CREATE) {
              callWebHookOf(owner, repository, WebHook.Create, settings) {
                for {
                  pusherAccount <- getAccountByUserName(pusher)
                  ownerAccount <- getAccountByUserName(owner)
                } yield {
                  val refType = if (refName(1) == "tags") "tag" else "branch"
                  WebHookCreatePayload(
                    pusherAccount,
                    repositoryInfo,
                    ownerAccount,
                    ref = branchName,
                    refType = refType
                  )
                }
              }
            }
            // call post-commit hook
            PluginRegistry().getReceiveHooks
              .foreach(_.postReceive(owner, repository, receivePack, command, pusher, false))
          }
        }
        // update repository last modified time.
        updateLastActivityDate(owner, repository)
      } catch {
        case ex: Exception => {
          logger.error(ex.toString, ex)
          throw ex
        }
      }
    }
  }
}
/**
 * Post-receive hook for wiki repositories: records wiki-page activity and fires
 * the "gollum" web hook for markdown pages touched by the push.
 */
class WikiCommitHook(owner: String, repository: String, pusher: String, baseUrl: String, sshUrl: Option[String])
    extends PostReceiveHook
    with WebHookService
    with AccountService
    with RepositoryService
    with ActivityService
    with SystemSettingsService
    with RequestCache {
  private val logger = LoggerFactory.getLogger(classOf[WikiCommitHook])
  override def onPostReceive(receivePack: ReceivePack, commands: util.Collection[ReceiveCommand]): Unit = {
    val settings = loadSystemSettings()
    Database() withTransaction { implicit session =>
      try {
        // NOTE(review): only the first ref command of the push is processed here —
        // presumably wiki pushes always update a single branch; confirm.
        commands.asScala.headOption.foreach { command =>
          implicit val apiContext: Context = api.JsonFormat.Context(baseUrl, sshUrl)
          val refName = command.getRefName.split("/")
          // (old, new) commit ids; None for tag pushes and ref deletions.
          val commitIds = if (refName(1) == "tags") {
            None
          } else {
            command.getType match {
              case ReceiveCommand.Type.DELETE => None
              case _ => Some((command.getOldId.getName, command.getNewId.name))
            }
          }
          commitIds.foreach {
            case (oldCommitId, newCommitId) =>
              // One (action, fileName, commitId) triple per touched markdown file per commit.
              val commits = Using.resource(Git.open(Directory.getWikiRepositoryDir(owner, repository))) { git =>
                JGitUtil.getCommitLog(git, oldCommitId, newCommitId).flatMap { commit =>
                  val diffs = JGitUtil.getDiffs(git, None, commit.id, false, false)
                  diffs.collect {
                    case diff if diff.newPath.toLowerCase.endsWith(".md") =>
                      val action = mapToAction(diff.changeType)
                      val fileName = diff.newPath
                      updateLastActivityDate(owner, repository)
                      buildWikiRecord(action, owner, repository, commit, fileName).foreach(recordActivity)
                      (action, fileName, commit.id)
                  }
                }
              }
              // Collapse multiple commits per page: keep the first action and the last commit id.
              val pages = commits
                .groupBy { case (_, fileName, _) => fileName }
                .map {
                  case (fileName, commits) =>
                    val (commitHeadAction, _, _) = commits.head
                    val (_, _, commitLastId) = commits.last
                    (commitHeadAction, fileName, commitLastId)
                }
              callWebHookOf(owner, repository, WebHook.Gollum, settings) {
                for {
                  pusherAccount <- getAccountByUserName(pusher)
                  repositoryUser <- getAccountByUserName(owner)
                  repositoryInfo <- getRepository(owner, repository)
                } yield {
                  WebHookGollumPayload(pages.toSeq, repositoryInfo, repositoryUser, pusherAccount)
                }
              }
          }
        }
      } catch {
        case ex: Exception => {
          logger.error(ex.toString, ex)
          throw ex
        }
      }
    }
  }
  // Translates a JGit diff change type into the gollum action name.
  private[this] def mapToAction(changeType: ChangeType): String = changeType match {
    case ChangeType.ADD | ChangeType.RENAME => "created"
    case ChangeType.MODIFY => "edited"
    case ChangeType.DELETE => "deleted"
    case other =>
      logger.error(s"Unsupported Wiki action: $other")
      "unsupported action"
  }
  // Builds the activity-feed record for one wiki page change; None for
  // unsupported actions (e.g. COPY diffs).
  private[this] def buildWikiRecord(
    action: String,
    owner: String,
    repo: String,
    commit: CommitInfo,
    fileName: String
  ): Option[BaseActivityInfo] = {
    // Page name is the markdown file name without its ".md" extension.
    val pageName = fileName.dropRight(".md".length)
    action match {
      case "created" => Some(CreateWikiPageInfo(owner, repo, commit.committerName, pageName))
      case "edited" => Some(EditWikiPageInfo(owner, repo, commit.committerName, pageName, commit.id))
      case "deleted" => Some(DeleteWikiInfo(owner, repo, commit.committerName, pageName))
      case other =>
        logger.info(s"Attempted to build wiki record for unsupported action: $other")
        None
    }
  }
}
/**
 * JSON payload model for the Git LFS batch API
 * (https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md).
 * Field names intentionally match the wire format (including `expires_at`),
 * since they are (de)serialized reflectively via json4s.
 */
object GitLfs {
  // Incoming batch request: "upload" or "download" plus the requested objects.
  case class BatchRequest(
    operation: String,
    transfers: Seq[String],
    objects: Seq[BatchRequestObject]
  )
  case class BatchRequestObject(
    oid: String,
    size: Long
  )
  // Response envelope (used for both upload and download batches).
  case class BatchUploadResponse(
    transfer: String,
    objects: Seq[BatchResponseObject]
  )
  case class BatchResponseObject(
    oid: String,
    size: Long,
    authenticated: Boolean,
    actions: Actions
  )
  // Exactly one of download/upload is populated depending on the operation.
  case class Actions(
    download: Option[Action] = None,
    upload: Option[Action] = None
  )
  case class Action(
    href: String,
    header: Map[String, String] = Map.empty,
    expires_at: Date
  )
  case class Error(
    message: String
  )
}
| takezoe/gitbucket | src/main/scala/gitbucket/core/servlet/GitRepositoryServlet.scala | Scala | apache-2.0 | 22,090 |
package com.sksamuel.scapegoat.io
import scala.xml.Node
import com.sksamuel.scapegoat.{Feedback, Warning}
/**
 * Renders scapegoat feedback as a Checkstyle-format XML report
 * (consumable by Scalastyle/Checkstyle tooling), grouped by source file.
 */
object ScalastyleReportWriter extends ReportWriter {
  // Checkstyle schema version declared in the report root element.
  private val checkstyleVersion = "5.0"
  private val scapegoat = "scapegoat"
  override protected val fileName = "scapegoat-scalastyle.xml"
  private def toXML(feedback: Feedback): Node =
    <checkstyle version={checkstyleVersion} generatedBy={scapegoat}>
      {feedback.warningsWithMinimalLevel.groupBy(_.sourceFileFull).map(fileToXml)}
    </checkstyle>
  // One <file> element per source file, containing its warnings.
  private def fileToXml(fileWarningMapEntry: (String, Seq[Warning])): Node = {
    val (file, warnings) = fileWarningMapEntry
    <file name={file}>
      {warnings.map(warningToXml)}
    </file>
  }
  // orNull: a null attribute value makes scala-xml omit the attribute entirely.
  private def warningToXml(warning: Warning) =
    <error line={warning.line.toString} message={warning.text} severity={warning.level.toString} source={
      warning.inspection
    } snippet={warning.snippet.orNull} explanation={warning.explanation}></error>
  override protected def generate(feedback: Feedback): String = toXML(feedback).toString()
}
| sksamuel/scalac-scapegoat-plugin | src/main/scala/com/sksamuel/scapegoat/io/ScalastyleReportWriter.scala | Scala | apache-2.0 | 1,080 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v2
import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtInteger}
import uk.gov.hmrc.ct.computations.HmrcAccountingPeriod
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
import uk.gov.hmrc.ct.ct600.v2.calculations.CorporationTaxCalculator
// CT600 (v2) box B43: the financial year — presumably the first financial year
// covered by the accounting period; confirm against the CT600 form guidance.
case class B43(value: Int) extends CtBoxIdentifier("Financial Year") with CtInteger
object B43 extends CorporationTaxCalculator with Calculated[B43, ComputationsBoxRetriever] {
  // Derived from the HMRC accounting period: CP1 = start date, CP2 = end date.
  override def calculate(fieldValueRetriever: ComputationsBoxRetriever): B43 =
    financialYear1(
      HmrcAccountingPeriod(fieldValueRetriever.retrieveCP1(), fieldValueRetriever.retrieveCP2())
    )
}
| keithhall/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v2/B43.scala | Scala | apache-2.0 | 1,284 |
package part02
import scala.annotation.tailrec
object Optional {

  /**
   * Computes `n!`.
   *
   * Returns `None` for negative `n`, and also when the result would overflow
   * `Int` (any `n >= 13`, since 13! = 6,227,020,800 > Int.MaxValue). The
   * previous implementation silently wrapped around and returned a wrong
   * `Some` value for those inputs.
   */
  def factorial(n: Int): Option[Int] = {
    // Accumulate in a Long so Int-range overflow can be detected exactly:
    // the check fires as soon as the running product leaves the Int range,
    // long before the product could overflow a Long (acc <= 2^31 and i <= 2^31
    // at the moment of each multiplication, so acc * i < 2^63).
    @tailrec
    def fac(i: Int, acc: Long): Option[Int] =
      if (acc > Int.MaxValue) None      // the product only grows; overflow is final
      else if (i == 0) Some(acc.toInt)
      else fac(i - 1, acc * i)

    if (n < 0) None else fac(n, 1L)
  }

  // Not yet implemented; kept as a placeholder exactly as before.
  def findPersonByName(lastName: String, firstName: Option[String] = None) = ???
}
| leanovate/scala-schulung | src/main/scala/part02/Optional.scala | Scala | mit | 338 |
package outwatch
import org.scalajs.dom.Element
import outwatch.interpreter.SnabbdomOps
import colibri.{Source, Observable}
import scala.scalajs.js
/** An imperative edit command applied to a DOM element's list of child VNodes. */
sealed trait ChildCommand
object ChildCommand {
  /** How a child is addressed: by its snabbdom key or by its rendered DOM element. */
  sealed trait ChildId
  object ChildId {
    case class Key(key: outwatch.Key.Value) extends ChildId
    case class Element(elem: org.scalajs.dom.Element) extends ChildId
  }
  case class Append(node: VNode) extends ChildCommand
  case class Prepend(node: VNode) extends ChildCommand
  case class ReplaceAll(list: js.Array[VNode]) extends ChildCommand
  case class Insert(index: Int, node: VNode) extends ChildCommand
  case class Replace(index: Int, node: VNode) extends ChildCommand
  case class Move(fromIndex: Int, toIndex: Int) extends ChildCommand
  case class Remove(index: Int) extends ChildCommand
  case class ReplaceId(id: ChildId, node: VNode) extends ChildCommand
  case class InsertBeforeId(id: ChildId, node: VNode) extends ChildCommand
  case class InsertBehindId(id: ChildId, node: VNode) extends ChildCommand
  case class MoveId(fromId: ChildId, toIndex: Int) extends ChildCommand
  case class MoveBeforeId(fromId: ChildId, toId: ChildId) extends ChildCommand
  case class MoveBehindId(fromId: ChildId, toId: ChildId) extends ChildCommand
  case class RemoveId(id: ChildId) extends ChildCommand
  /**
   * Interprets a stream of command batches against a mutable child list and
   * emits the resulting children as a modifier after each batch. The js.Array
   * is shared across emissions, so commands are cumulative.
   */
  def stream[F[_] : Source](valueStream: F[Seq[ChildCommand]]): VDomModifier = VDomModifier.delay {
    val children = new js.Array[VNodeProxyNode]
    Observable.map(valueStream) { cmds =>
      // Resolves a ChildId to its current index in `children`; -1 when absent.
      val idToIndex: ChildId => Int = {
        case ChildId.Key(key) => children.indexWhere { tree =>
          tree.proxy.key.fold(false)((k: Key.Value) => k == key)
        }
        case ChildId.Element(element) => children.indexWhere { tree =>
          tree.proxy.elm.fold(false)((e: Element) => e == element)
        }
      }
      def isSaneIndex(index: Int): Boolean = index >= 0 && index < children.length
      // NOTE(review): unlike its siblings, this does not guard with isSaneIndex —
      // confirm callers never pass an out-of-range index here.
      def replaceByIndex(index: Int, node: VNode): Unit = {
        children(index) = VNodeProxyNode(SnabbdomOps.toSnabbdom(node))
      }
      def moveByIndex(fromIndex: Int, toIndex: Int): Unit = {
        if (isSaneIndex(fromIndex) && isSaneIndex(toIndex) && fromIndex != toIndex) {
          val tree = children.remove(fromIndex)
          children.insert(toIndex, tree)
        }
      }
      def insertByIndex(index: Int, node: VNode): Unit = {
        if (isSaneIndex(index)) {
          children.insert(index, VNodeProxyNode(SnabbdomOps.toSnabbdom(node)))
        }
      }
      def removeByIndex(index: Int): Unit = {
        if (isSaneIndex(index)) {
          children.remove(index)
          ()
        }
      }
      // Apply the batch in order; unknown ids resolve to -1 and become no-ops
      // in the guarded helpers.
      cmds foreach {
        case Append(node) =>
          children.push(VNodeProxyNode(SnabbdomOps.toSnabbdom(node)))
          ()
        case Prepend(node) =>
          children.prepend(VNodeProxyNode(SnabbdomOps.toSnabbdom(node)))
        case ReplaceAll(list) =>
          children.clear()
          list.foreach { node =>
            children.push(VNodeProxyNode(SnabbdomOps.toSnabbdom(node)))
          }
        case Insert(index, node) =>
          insertByIndex(index, node)
        case InsertBeforeId(id, node) =>
          insertByIndex(idToIndex(id), node)
        case InsertBehindId(id, node) =>
          val index = idToIndex(id)
          insertByIndex(if (index == -1) -1 else index + 1, node)
        case Replace(index, node) =>
          replaceByIndex(index, node)
        case ReplaceId(id, node) =>
          replaceByIndex(idToIndex(id), node)
        case Move(fromIndex, toIndex) =>
          moveByIndex(fromIndex, toIndex)
        case MoveId(fromId, toIndex) =>
          moveByIndex(idToIndex(fromId), toIndex)
        case MoveBeforeId(fromId, toId) =>
          moveByIndex(idToIndex(fromId), idToIndex(toId))
        case MoveBehindId(fromId, toId) =>
          val toIdx = idToIndex(toId)
          moveByIndex(idToIndex(fromId), if (toIdx == -1) -1 else toIdx + 1)
        case Remove(index) =>
          removeByIndex(index)
        case RemoveId(id) =>
          removeByIndex(idToIndex(id))
      }
      CompositeModifier(children)
    }
  }
}
| OutWatch/outwatch | outwatch/src/main/scala/outwatch/ChildCommand.scala | Scala | apache-2.0 | 4,146 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2007-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala.swing
import java.awt.{GridBagConstraints, GridBagLayout}
/** Layout constants for [[GridBagPanel]]. Each enumeration value's id is the
 *  corresponding integer constant of `java.awt.GridBagConstraints`, so the
 *  values convert losslessly to and from the AWT representation.
 */
object GridBagPanel {
  /** How a component is resized when its display area is larger than its
   *  requested size: not at all, horizontally, vertically, or both.
   */
  object Fill extends Enumeration {
    val None = Value(GridBagConstraints.NONE)
    val Horizontal = Value(GridBagConstraints.HORIZONTAL)
    val Vertical = Value(GridBagConstraints.VERTICAL)
    val Both = Value(GridBagConstraints.BOTH)
  }
  /** Where a component is placed within a cell larger than itself. Both
   *  absolute (compass-point) anchors and orientation-relative
   *  (line/page) anchors are provided, mirroring the AWT constants.
   */
  object Anchor extends Enumeration {
    val North = Value(GridBagConstraints.NORTH)
    val NorthEast = Value(GridBagConstraints.NORTHEAST)
    val East = Value(GridBagConstraints.EAST)
    val SouthEast = Value(GridBagConstraints.SOUTHEAST)
    val South = Value(GridBagConstraints.SOUTH)
    val SouthWest = Value(GridBagConstraints.SOUTHWEST)
    val West = Value(GridBagConstraints.WEST)
    val NorthWest = Value(GridBagConstraints.NORTHWEST)
    val Center = Value(GridBagConstraints.CENTER)
    val PageStart = Value(GridBagConstraints.PAGE_START)
    val PageEnd = Value(GridBagConstraints.PAGE_END)
    val LineStart = Value(GridBagConstraints.LINE_START)
    val LineEnd = Value(GridBagConstraints.LINE_END)
    val FirstLineStart = Value(GridBagConstraints.FIRST_LINE_START)
    val FirstLineEnd = Value(GridBagConstraints.FIRST_LINE_END)
    val LastLineStart = Value(GridBagConstraints.LAST_LINE_START)
    val LastLineEnd = Value(GridBagConstraints.LAST_LINE_END)
  }
}
/**
* A panel that arranges its children in a grid. Layout details can be
* given for each cell of the grid.
*
* @see java.awt.GridBagLayout
*/
class GridBagPanel extends Panel with LayoutContainer {
  override lazy val peer = new javax.swing.JPanel(new GridBagLayout) with SuperMixin
  import GridBagPanel._
  // The layout manager is always the GridBagLayout installed above.
  private def layoutManager = peer.getLayout.asInstanceOf[GridBagLayout]
  /** Convenience conversion: an (x, y) pair denotes the grid cell at those
   *  coordinates with default settings for everything else.
   */
  implicit def pair2Constraints(p: (Int, Int)): Constraints = {
    val result = new Constraints
    result.gridx = p._1
    result.gridy = p._2
    result
  }
  /** Scala-friendly wrapper around `java.awt.GridBagConstraints`; every
   *  property delegates directly to the wrapped peer.
   */
  class Constraints(val peer: GridBagConstraints) extends Proxy {
    def self = peer
    def this(gridx: Int, gridy: Int,
             gridwidth: Int, gridheight: Int,
             weightx: Double, weighty: Double,
             anchor: Int, fill: Int, insets: Insets,
             ipadx: Int, ipady: Int) =
      this(new GridBagConstraints(gridx, gridy,
                                  gridwidth, gridheight,
                                  weightx, weighty,
                                  anchor, fill, insets,
                                  ipadx, ipady))
    def this() = this(new GridBagConstraints())
    def gridx: Int = peer.gridx
    def gridx_=(x: Int): Unit = { peer.gridx = x }
    def gridy: Int = peer.gridy
    def gridy_=(y: Int): Unit = { peer.gridy = y }
    /** Both grid coordinates at once, as a pair. */
    def grid: (Int, Int) = (gridx, gridy)
    def grid_=(c: (Int, Int)): Unit = {
      gridx = c._1
      gridy = c._2
    }
    def gridwidth: Int = peer.gridwidth
    def gridwidth_=(w: Int): Unit = { peer.gridwidth = w }
    def gridheight: Int = peer.gridheight
    def gridheight_=(h: Int): Unit = { peer.gridheight = h }
    def weightx: Double = peer.weightx
    def weightx_=(x: Double): Unit = { peer.weightx = x }
    def weighty: Double = peer.weighty
    def weighty_=(y: Double): Unit = { peer.weighty = y }
    // Anchor and Fill translate between the enumeration and the raw AWT int.
    def anchor: Anchor.Value = Anchor(peer.anchor)
    def anchor_=(a: Anchor.Value): Unit = { peer.anchor = a.id }
    def fill: Fill.Value = Fill(peer.fill)
    def fill_=(f: Fill.Value): Unit = { peer.fill = f.id }
    def insets: Insets = peer.insets
    def insets_=(i: Insets): Unit = { peer.insets = i }
    def ipadx: Int = peer.ipadx
    def ipadx_=(x: Int): Unit = { peer.ipadx = x }
    def ipady: Int = peer.ipady
    def ipady_=(y: Int): Unit = { peer.ipady = y }
  }
  /** Reads back the constraints currently associated with `comp`. */
  protected def constraintsFor(comp: Component) =
    new Constraints(layoutManager.getConstraints(comp.peer))
  // Any constraints object is acceptable for this layout.
  protected def areValid(c: Constraints): (Boolean, String) = (true, "")
  protected def add(c: Component, l: Constraints): Unit = { peer.add(c.peer, l.peer) }
}
| SethTisue/scala-swing | src/main/scala/scala/swing/GridBagPanel.scala | Scala | bsd-3-clause | 4,444 |
// Minimized compiler regression test (Scala 3 / dotty issue i7829): type
// inference must preserve singleton types (1) and union types (X | Y) when
// a value passes through `identity`, both directly and behind a type alias.
// The explicit type ascriptions ARE the assertions -- do not simplify them.
class X
class Y
object Test {
  type Id[T] = T
  val a: 1 = identity(1)
  val b: Id[1] = identity(1)
  val c: X | Y = identity(if (true) new X else new Y)
  val d: Id[X | Y] = identity(if (true) new X else new Y)
  // During implicit search the union A | B is widened to Base, so `getInv`
  // resolves via `invBase` rather than failing to find Inv[A | B].
  def impUnion: Unit = {
    class Base
    class A extends Base
    class B extends Base
    class Inv[T]
    implicit def invBase: Inv[Base] = new Inv[Base]
    def getInv[T](x: T)(implicit inv: Inv[T]): Int = 1
    val a: Int = getInv(if (true) new A else new B)
    // If we keep unions when doing the implicit search, this would give us: "no implicit argument of type Inv[X | Y]"
    val b: Int | Any = getInv(if (true) new A else new B)
  }
}
| lampepfl/dotty | tests/pos/i7829.scala | Scala | apache-2.0 | 669 |
package org.littlewings.lucene.multifield
import scala.collection.JavaConverters._
import org.apache.lucene.analysis.{Analyzer, AnalyzerWrapper}
import org.apache.lucene.analysis.core.KeywordAnalyzer
import org.apache.lucene.analysis.ja.JapaneseAnalyzer
import org.apache.lucene.document.{Document, Field, StringField, TextField}
import org.apache.lucene.index.{DirectoryReader, IndexWriter, IndexWriterConfig}
import org.apache.lucene.queryparser.classic.QueryParser
import org.apache.lucene.search.{IndexSearcher, Query, Sort, SortField, TopFieldCollector}
import org.apache.lucene.store.{Directory, RAMDirectory}
import org.apache.lucene.util.Version
/** Indexes a small set of (Japanese) book records into an in-memory Lucene
 *  directory and runs a series of field-targeted queries against it,
 *  demonstrating per-field analyzer selection: exact-match fields use a
 *  `KeywordAnalyzer`, free-text fields use the `JapaneseAnalyzer`.
 */
object LuceneMultiField {
  def main(args: Array[String]): Unit = {
    val version = Version.LUCENE_CURRENT
    val analyzer = createAnalyzer(version)
    val queryAnalyzer = createQueryAnalyzer(version)
    // `for (x <- closeable)` uses CloseableWrapper below to auto-close.
    for (directory <- new RAMDirectory) {
      registerDocuments(directory, version, analyzer)
      executeQuery(createQuery("*:*", version, queryAnalyzer),
        Sort.RELEVANCE,
        directory)
      executeQuery(createQuery("title:Lucene title:オープンソース", version, queryAnalyzer),
        new Sort(new SortField("title", SortField.Type.STRING, false)),
        directory)
      executeQuery(createQuery("tags:Lucene", version, queryAnalyzer),
        Sort.RELEVANCE,
        directory)
      executeQuery(createQuery("tags:Lucene", version, queryAnalyzer),
        new Sort(new SortField("tags", SortField.Type.STRING, true)),
        directory)
      executeQuery(createQuery("tags:Lucene +tags:Elasticsearch", version, queryAnalyzer),
        Sort.RELEVANCE,
        directory)
      executeQuery(createQuery("authors:株式会社 authors:関口", version, queryAnalyzer),
        Sort.RELEVANCE,
        directory)
      executeQuery(createQuery("authors:株式会社 authors:ロンウイット", version, queryAnalyzer),
        new Sort(new SortField("authors", SortField.Type.STRING, true)),
        directory)
    }
  }

  /** Analyzer used at indexing time: Japanese morphological analysis. */
  private def createAnalyzer(version: Version): Analyzer =
    new JapaneseAnalyzer(version)

  /** Per-field analyzer for query parsing. `isbn` and `tags` are indexed as
   *  whole keywords (see [[createDocument]]), so queries against them must
   *  not be tokenized; `title` and `authors` are analyzed text.
   */
  private def createQueryAnalyzer(version: Version): Analyzer =
    new AnalyzerWrapper(Analyzer.PER_FIELD_REUSE_STRATEGY) {
      override def getWrappedAnalyzer(fieldName: String): Analyzer =
        fieldName match {
          case "isbn" => new KeywordAnalyzer
          case "title" => createAnalyzer(version)
          case "tags" => new KeywordAnalyzer
          case "authors" => createAnalyzer(version)
          // Fix: the match used to be non-exhaustive, so a query naming any
          // other field would crash with a MatchError. Fall back to the
          // default (indexing) analyzer for unknown fields instead.
          case _ => createAnalyzer(version)
        }
    }

  /** Writes the three sample book documents into `directory` and commits. */
  private def registerDocuments(directory: Directory, version: Version, analyzer: Analyzer): Unit =
    for (writer <- new IndexWriter(directory,
                                   new IndexWriterConfig(version, analyzer))) {
      Array(
        createDocument(Map("isbn" -> "978-4774127804",
          "title" -> "Apache Lucene 入門 ~Java・オープンソース・全文検索システムの構築",
          "tags" -> Seq("Java", "Lucene", "全文検索", "オープンソース"),
          "authors" -> Seq("関口 宏司"))),
        createDocument(Map("isbn" -> "978-4774161631",
          "title" -> "[改訂新版] Apache Solr入門 オープンソース全文検索エンジン",
          "tags" -> Seq("Java", "Lucene", "Solr", "全文検索", "オープンソース"),
          "authors" -> Seq("大谷 純", "阿部 慎一朗", "大須賀 稔", "北野 太郎", "鈴木 教嗣", "平賀 一昭", "株式会社リクルートテクノロジーズ", "株式会社ロンウイット"))),
        createDocument(Map("isbn" -> "978-4048662024",
          "title" -> "高速スケーラブル検索エンジン ElasticSearch Server",
          "tags" -> Seq("Java", "Elasticsearch", "全文検索", "オープンソース"),
          "authors" -> Seq("Rafal Kuc", "Marek Rogozinski", "株式会社リクルートテクノロジーズ", "大岩 達也", "大谷 純", "兼山 元太", "水戸 祐介", "守谷 純之介")))
      ).foreach(writer.addDocument)
      writer.commit()
    }

  /** Builds a Lucene `Document` from the given entry. `isbn`/`tags` become
   *  stored `StringField`s (not tokenized); `title`/`authors` become stored
   *  `TextField`s (tokenized). Multi-valued fields add one field per value.
   */
  private def createDocument(entry: Map[String, Any]): Document = {
    val document = new Document
    document.add(new StringField("isbn", entry("isbn").toString, Field.Store.YES))
    document.add(new TextField("title", entry("title").toString, Field.Store.YES))
    for {
      Seq(tags @ _*) <- entry.get("tags")
      tag <- tags
    } {
      document.add(new StringField("tags", tag.toString, Field.Store.YES))
    }
    for {
      Seq(authors @ _*) <- entry.get("authors")
      author <- authors
    } {
      document.add(new TextField("authors", author.toString, Field.Store.YES))
    }
    document
  }

  /** Parses `queryString` with "title" as the default search field. */
  private def createQuery(queryString: String, version: Version, analyzer: Analyzer): Query =
    new QueryParser(version, "title", analyzer).parse(queryString)

  /** Runs `query` with the given sort and prints every hit's stored fields. */
  private def executeQuery(query: Query, sort: Sort, directory: Directory): Unit =
    for (reader <- DirectoryReader.open(directory)) {
      println(s"========== Start ExecuteQuery[$query] ==========")
      val searcher = new IndexSearcher(reader)
      val limit = 1000
      val collector =
        TopFieldCollector
          .create(sort,
                  limit,
                  true,
                  false,
                  false,
                  false)
      searcher.search(query, collector)
      val topDocs = collector.topDocs
      val hits = topDocs.scoreDocs
      hits.foreach { h =>
        val hitDoc = searcher.doc(h.doc)
        println(s"Doc, id[${h.doc}]:" + System.lineSeparator +
          hitDoc
            .getFields
            .asScala
            .map(f => s"${f.name}:${f.stringValue}")
            .mkString(" ", System.lineSeparator + " ", ""))
      }
      println(s"========== End ExecuteQuery[$query] ==========")
      println()
    }

  /** Enables `for (x <- closeable)` syntax with automatic close-on-exit. */
  implicit class CloseableWrapper[A <: AutoCloseable](val underlying: A) extends AnyVal {
    def foreach(fun: A => Unit): Unit =
      try {
        fun(underlying)
      } finally {
        underlying.close()
      }
  }
}
| kazuhira-r/lucene-examples | lucene-multi-field/src/main/scala/org/littlewings/lucene/multifield/LuceneMultiField.scala | Scala | mit | 6,386 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Mon Sep 21 15:05:06 EDT 2009
* @see LICENSE (MIT style license file).
*/
package scalation.scala2d
import scala.math.{abs, pow, sqrt}
import scalation.scala2d.Colors._
import scalation.scala2d.QCurve.{calcControlPoint, distance}
import scalation.scala2d.Shapes.{Dimension, Graphics, Graphics2D}
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `QCurve` class enhances the `QuadCurve.Double` class (from the `java.awt.geom`
* package) by allowing entities to move along such quadratic curves as well as
* lines. Although the curve could be developed as a quadratic function where
* 'y = ax^2 + bx + c'. The following quadratic bezier formulation is used:
* <br>
* p(t) = (x(t), y(t)) = [(1-t)^2 * p1] + [2 * (1-t) * t * pc] + [t^2 * p2].
* <br>
* @param p1 the starting point for the quad curve
* @param pc the control point for the quad curve
* @param p2 the ending point for the quad curve
* @param straight whether the quad curve is straight (i.e., a line)
*/
case class QCurve (var p1: R2 = R2 (0.0, 0.0),
                   var pc: R2 = R2 (0.0, 0.0),
                   var p2: R2 = R2 (0.0, 0.0),
                   var straight: Boolean = true)
     extends java.awt.geom.QuadCurve2D.Double (p1.x, p1.y, pc.x, pc.y, p2.x, p2.y)
     with CurvilinearShape
{
    /** Approximate arc length: half the perimeter of triangle (p1, pc, p2).
     *  NOTE(review): this is an estimate, exact only for straight lines.
     */
    lazy private val _length = (distance (p1, p2) + distance (p1, pc) + distance (p2, pc)) / 2.0

    /** Trajectory parameter t in [0., 1.] (how far along the curve). */
    var _traj = 0.0

    /** Number of discrete steps to take along the trajectory. */
    private var steps = 200

    /** Construct a straight line (degenerate quad curve) from `p1` to `p2`. */
    def this (p1: R2, p2: R2)
    {
        this (p1, calcControlPoint (p1, p2), p2, true)
    } // constructor

    /** Construct a curved quad curve; `bend` sets how far the control point
     *  lies off the p1-p2 line (1. => line length).
     */
    def this (p1: R2, p2: R2, bend: Double)
    {
        this (p1, calcControlPoint (p1, p2, bend), p2, false)
    } // constructor

    /** Construct a quad curve with an explicitly given control point `pc`. */
    def this (p1: R2, pc: R2, p2: R2)
    {
        this (p1, pc, p2, false)
    } // constructor

    /** Get the current trajectory parameter of the curve. */
    def traj: Double = _traj

    /** Set the trajectory parameter to a new value. */
    def traj_= (traj: Double) { _traj = traj }

    /** x-coordinate of the curve's center (Bezier point at t = 0.5 when curved). */
    def getCenterX (): Double =
    {
        if (straight) (p1.x + p2.x) / 2.0
        else (p1.x + 2.0 * pc.x + p2.x) / 4.0
    } // getCenterX

    /** y-coordinate of the curve's center (Bezier point at t = 0.5 when curved). */
    def getCenterY (): Double =
    {
        if (straight) (p1.y + p2.y) / 2.0
        else (p1.y + 2.0 * pc.y + p2.y) / 4.0
    } // getCenterY

    /** Reset this `QCurve` to a straight line from `_p1` to `_p2`.
     *  NOTE(review): does not reset `straight` to true -- a curve once bent
     *  stays flagged as curved; confirm whether that is intended.
     */
    def setLine (_p1: R2, _p2: R2)
    {
        p1 = _p1; p2 = _p2
        pc = calcControlPoint (p1, p2)         // control on the line => line
        super.setCurve (p1, pc, p2)
    } // setLine

    /** Reset this `QCurve` to a curve, computing the control point from `bend`. */
    def setLine (_p1: R2, _p2: R2, bend: Double)
    {
        p1 = _p1; p2 = _p2
        pc = calcControlPoint (p1, p2, bend)   // control off the line => curve
        straight = false
        super.setCurve (p1, pc, p2)
    } // setLine

    /** Reset this `QCurve` using an explicitly given control point `_pc`. */
    override def setLine (_p1: R2, _pc: R2, _p2: R2)
    {
        p1 = _p1; pc = _pc; p2 = _p2
        straight = false
        super.setCurve (p1, pc, p2)
    } // setLine

    /** The first/start point of the quad curve. */
    def getFirst: R2 = p1

    /** The start point, shifted from top-left to center coordinates for an
     *  object of the given `width` and `height` traversing the curve.
     */
    def getFirst (width: Double, height: Double): R2 =
    {
        R2 (p1.x + width / 2.0, p1.y + height / 2.0)
    } // getFirst

    /** The control point of the quad curve. */
    def getControl: R2 = pc

    /** The last/end point of the quad curve. */
    def getLast: R2 = p2

    /** The end point, shifted from top-left to center coordinates (see getFirst). */
    def getLast (width: Double, height: Double): R2 =
    {
        R2 (p2.x + width / 2.0, p2.y + height / 2.0)
    } // getLast

    /** Whether (x, y) and (xe, ye) are essentially the same, i.e., closer
     *  than one `step` apart (squared-distance comparison, no sqrt).
     */
    def isSame (x: Double, y: Double, xe: Double, ye: Double, step: Double): Boolean =
    {
        (xe - x) * (xe - x) + (ye - y) * (ye -y) < step * step
    } // isSame

    /** Point on the curve at the current trajectory `_traj`, via the
     *  quadratic Bezier formula p(t) = (1-t)^2 p1 + 2(1-t)t pc + t^2 p2.
     *  @see en.wikipedia.org/wiki/Bézier_curve#Quadratic_curves
     */
    def eval (): R2 =
    {
        R2 (pow (1.0-_traj, 2) * p1.x + 2.0 * (1.0-_traj) * _traj * pc.x + pow (_traj, 2) * p2.x,
            pow (1.0-_traj, 2) * p1.y + 2.0 * (1.0-_traj) * _traj * pc.y + pow (_traj, 2) * p2.y)
    } // eval

    /** The next point on the curve (one step beyond the current point), or
     *  null once the trajectory passes the end-point (t > 1.0); in that case
     *  the trajectory is reset to 0 so traversal can start over.
     */
    def next (): R2 =
    {
        var q: R2 = null                 // the next point along the curve
        if (_traj > 1.0) {
            _traj = 0.0                  // past the end: reset trajectory
        } else {
            q = eval ()                  // calculate the new point
        } // if
        _traj += 1.0 / steps.toDouble    // increment trajectory parameter
        // println ("QCurve.next: q = " + q)
        q
    } // next

    /** Like `next ()`, but shifted from center to top-left coordinates for an
     *  object of the given `width` and `height`; null past the end-point.
     */
    override def next (width: Double, height: Double): R2 =
    {
        val q = next ()
        if (q != null) R2 (q.x - width / 2.0, q.y - height / 2.0) else null
    } // next

    /** Set the number of steps tokens take while moving along the curve. */
    def setSteps (_steps: Int)
    {
        steps = _steps
    } // setSteps

    /** The (approximate) length of this `QCurve` (see `_length`). */
    def length: Double = _length

    /** Show the start, control and end points of the `QCurve`. */
    override def toString: String =
    {
        "QCurve ( " + p1 + " , " + pc + " , " + p2 + " )"
    } // toString

} // QCurve class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `QCurve` companion object provides formulas used by the `QCurve` class.
*/
object QCurve
{
    /** Tolerance for comparing real numbers. */
    private val EPSILON = 1E-7

    /** Slope of the line through `p1` and `p2`.
     *  Returns +/- infinity when the x-coordinates coincide, and NaN when the
     *  two points are identical (0/0) -- callers should avoid p1 == p2.
     *  @param p1  the starting point
     *  @param p2  the ending point
     */
    def slope (p1: R2, p2: R2): Double =
    {
        (p2.y - p1.y) / (p2.x - p1.x)
    } // slope

    /** Euclidean distance between points `p1` and `p2`.
     *  @param p1  the starting point
     *  @param p2  the ending point
     */
    def distance (p1: R2, p2: R2): Double =
    {
        sqrt (pow (p2.x - p1.x, 2) + pow (p2.y - p1.y, 2))
    } // distance

    /** Location of the control point: orthogonal to the midpoint of the
     *  p1-p2 line at distance `dist = bend * || p2 - p1 ||`. A bend of 0.0
     *  gives a straight line (control point = midpoint); 2.0/-2.0 gives a
     *  huge bend up-right/down-left.
     *  @param p1    the starting point
     *  @param p2    the ending point
     *  @param bend  the bend or curvature
     */
    def calcControlPoint (p1: R2, p2: R2, bend: Double = 0.0): R2 =
    {
        val mid = R2 ((p1.x + p2.x) / 2.0, (p1.y + p2.y) / 2.0)
        if (abs (bend) < EPSILON) {
            mid
        } else {
            val m = slope (p1, p2)
            val dist = bend * distance (p1, p2)
            if (m.isInfinity) {
                // vertical segment: offset horizontally
                R2 (mid.x + dist, mid.y)
            } else {
                R2 (mid.x + dist * m / sqrt (1.0 + pow (m, 2)), mid.y - dist / sqrt (1.0 + pow (m, 2)))
            } // if
        } // if
    } // calcControlPoint

} // QCurve object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `QCurveTest` object tests the `QCurve` classes' quad curves.
*/
object QCurveTest extends App
{
    // Three straight lines and six curves (bend +1.0 and -2.0) sharing the
    // same start point, drawn in red/blue/purple by direction.
    private val line1 = new QCurve (R2 (200, 200), R2 (400, 200))
    private val line2 = new QCurve (R2 (200, 200), R2 (200, 400))
    private val line3 = new QCurve (R2 (200, 200), R2 (400, 400))
    private val curve1 = new QCurve (R2 (200, 200), R2 (400, 200), 1.0)
    private val curve2 = new QCurve (R2 (200, 200), R2 (200, 400), 1.0)
    private val curve3 = new QCurve (R2 (200, 200), R2 (400, 400), 1.0)
    private val curve4 = new QCurve (R2 (200, 200), R2 (400, 200), -2.0)
    private val curve5 = new QCurve (R2 (200, 200), R2 (200, 400), -2.0)
    private val curve6 = new QCurve (R2 (200, 200), R2 (400, 400), -2.0)

    /** Drawing panel that renders each line with its two bent variants. */
    class Canvas extends Panel
    {
        setBackground (white)

        /** Paint the lines/curves into the canvas (drawing panel).
         *  @param gr  low-resolution graphics environment
         */
        override def paintComponent (gr: Graphics)
        {
            super.paintComponent (gr)
            val g2d = gr.asInstanceOf [Graphics2D]    // use hi-resolution
            g2d.setPaint (red)
            g2d.draw (line1)
            g2d.draw (curve1)
            g2d.draw (curve4)
            g2d.setPaint (blue)
            g2d.draw (line2)
            g2d.draw (curve2)
            g2d.draw (curve5)
            g2d.setPaint (purple)
            g2d.draw (line3)
            g2d.draw (curve3)
            g2d.draw (curve6)
        } // paintComponent

    } // Canvas class

    // Put the drawing canvas in the visualization frame
    new VizFrame ("QCurveTest", new Canvas (), 600, 600)

} // QCurveTest object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `QCurveTest2` object tests traversal of `QCurve`'s (quad curves).
*/
object QCurveTest2 extends App
{
    /** Frame that animates a ball moving along a straight line and two
     *  bent curves (drawn in red, green and blue), one after another.
     */
    class QCurveAnimator extends VizFrame ("QCurveTest2", null, 600, 600) with Runnable
    {
        // same endpoints, bends 0 (straight), +.5 and -.5
        val curve = Array (new QCurve (R2 (100, 200), R2 (500, 200)),
                           new QCurve (R2 (100, 200), R2 (500, 200), .5),
                           new QCurve (R2 (100, 200), R2 (500, 200), -.5))
        val ball = Ellipse ()

        /** Step the ball along each curve in turn, repainting every 50 ms,
         *  until `next` returns null (end of curve reached).
         */
        def run ()
        {
            val size = 10.0
            var loc: R2 = null
            for (i <- 0 until curve.length) {
                println ("Move ball along RGB curve " + i)
                loc = curve(i).next (size, size)
                while (loc != null) {
                    Thread.sleep (50)
                    ball.setFrame (loc.x, loc.y, size, size)
                    repaint ()
                    loc = curve(i).next (size, size)
                } // while
            } // for
        } // run

        /** Drawing panel: the three curves plus the ball's current position. */
        class Canvas extends Panel
        {
            setBackground (white)

            /** Paint the curves and the ball.
             *  @param gr  low-resolution graphics environment
             */
            override def paintComponent (gr: Graphics)
            {
                super.paintComponent (gr)
                val g2d = gr.asInstanceOf [Graphics2D]    // use hi-resolution
                g2d.setPaint (red)                        // R in RGB order
                g2d.draw (curve(0))
                g2d.setPaint (green)                      // G in RGB order
                g2d.draw (curve(1))
                g2d.setPaint (blue)                       // B in RGB order
                g2d.draw (curve(2))
                g2d.setPaint (purple)
                g2d.fill (ball)
            } // paintComponent

        } // Canvas class

        getContentPane ().add (new Canvas ())
        setVisible (true)

    } // QCurveAnimator class

    println ("Run QCurveTest2")
    new Thread (new QCurveAnimator ()).start ()

} // QCurveTest2 object
| NBKlepp/fda | scalation_1.2/src/main/scala/scalation/scala2d/QCurve.scala | Scala | mit | 16,256 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import scala.collection.mutable.HashSet
import scala.concurrent.duration._
import org.apache.spark.{CleanerListener, SparkConf}
import org.apache.spark.executor.DataReadMethod._
import org.apache.spark.executor.DataReadMethod.DataReadMethod
import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
import org.apache.spark.sql.catalyst.plans.logical.{BROADCAST, Join, JoinStrategyHint, SHUFFLE_HASH}
import org.apache.spark.sql.catalyst.util.DateTimeConstants
import org.apache.spark.sql.execution.{ExecSubqueryExpression, RDDScanExec, SparkPlan}
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
import org.apache.spark.sql.execution.columnar._
import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.{SQLTestUtils, SharedSparkSession}
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.storage.{RDDBlockId, StorageLevel}
import org.apache.spark.storage.StorageLevel.{MEMORY_AND_DISK_2, MEMORY_ONLY}
import org.apache.spark.unsafe.types.CalendarInterval
import org.apache.spark.util.{AccumulatorContext, Utils}
// Row type used to build a deliberately large dataset in the
// "too big for memory" test below.
private case class BigData(s: String)
class CachedTableSuite extends QueryTest with SQLTestUtils
with SharedSparkSession
with AdaptiveSparkPlanHelper {
import testImplicits._
  // Non-standard SparkConf for this fork of the suite: enables the Intel OAP
  // columnar plugin with off-heap memory and assorted columnar feature flags.
  // The commented-out lines are alternative knobs kept for reference.
  override def sparkConf: SparkConf =
    super.sparkConf
      .setAppName("test")
      .set("spark.sql.parquet.columnarReaderBatchSize", "4096")
      .set("spark.sql.sources.useV1SourceList", "avro")
      .set("spark.sql.extensions", "com.intel.oap.ColumnarPlugin")
      .set("spark.sql.execution.arrow.maxRecordsPerBatch", "4096")
      //.set("spark.shuffle.manager", "org.apache.spark.shuffle.sort.ColumnarShuffleManager")
      .set("spark.memory.offHeap.enabled", "true")
      .set("spark.memory.offHeap.size", "50m")
      .set("spark.sql.join.preferSortMergeJoin", "false")
      .set("spark.sql.columnar.codegen.hashAggregate", "false")
      .set("spark.oap.sql.columnar.wholestagecodegen", "false")
      .set("spark.sql.columnar.window", "false")
      .set("spark.unsafe.exceptionOnMemoryLeak", "false")
      //.set("spark.sql.columnar.tmp_dir", "/codegen/nativesql/")
      .set("spark.sql.columnar.sort.broadcastJoin", "true")
      .set("spark.oap.sql.columnar.preferColumnar", "true")
  // Register the shared test tables (e.g. `testData`) used by the cases below;
  // helper mixed in from the test harness.
  setupTestData()
override def afterEach(): Unit = {
try {
spark.catalog.clearCache()
} finally {
super.afterEach()
}
}
  // Id of the RDD backing `tableName`'s cached columnar buffers; fails the
  // test when the table is not cached at all.
  def rddIdOf(tableName: String): Int = {
    val plan = spark.table(tableName).queryExecution.sparkPlan
    plan.collect {
      case InMemoryTableScanExec(_, _, relation) =>
        relation.cacheBuilder.cachedColumnBuffers.id
      case _ =>
        fail(s"Table $tableName is not cached\\n" + plan)
    }.head
  }
  // True when partition 0 of the given RDD is actually stored in the block
  // manager (i.e., the cache has been materialized, not just registered).
  def isMaterialized(rddId: Int): Boolean = {
    val maybeBlock = sparkContext.env.blockManager.get(RDDBlockId(rddId, 0))
    maybeBlock.foreach(_ => sparkContext.env.blockManager.releaseLock(RDDBlockId(rddId, 0)))
    maybeBlock.nonEmpty
  }
  // True when partition 0 is materialized AND was read back via the expected
  // method (Memory vs Disk), verifying the requested storage level took effect.
  def isExpectStorageLevel(rddId: Int, level: DataReadMethod): Boolean = {
    val maybeBlock = sparkContext.env.blockManager.get(RDDBlockId(rddId, 0))
    val isExpectLevel = maybeBlock.forall(_.readMethod === level)
    maybeBlock.foreach(_ => sparkContext.env.blockManager.releaseLock(RDDBlockId(rddId, 0)))
    maybeBlock.nonEmpty && isExpectLevel
  }
  // Count InMemoryRelation nodes in a dataset's analyzed (with-cached-data)
  // plan, including relations reachable through subquery expressions.
  private def getNumInMemoryRelations(ds: Dataset[_]): Int = {
    val plan = ds.queryExecution.withCachedData
    var sum = plan.collect { case _: InMemoryRelation => 1 }.sum
    // transformAllExpressions is used only for traversal; each subquery's
    // plan is counted recursively and the expression returned unchanged.
    plan.transformAllExpressions {
      case e: SubqueryExpression =>
        sum += getNumInMemoryRelations(e.plan)
        e
    }
    sum
  }
  // Count in-memory table scans inside the subquery expressions of one node.
  private def getNumInMemoryTablesInSubquery(plan: SparkPlan): Int = {
    plan.expressions.flatMap(_.collect {
      case sub: ExecSubqueryExpression => getNumInMemoryTablesRecursively(sub.plan)
    }).sum
  }
  // Count InMemoryTableScanExec nodes in a physical plan, descending both
  // into each cached relation's own plan and into subquery expressions.
  private def getNumInMemoryTablesRecursively(plan: SparkPlan): Int = {
    collect(plan) {
      case inMemoryTable @ InMemoryTableScanExec(_, _, relation) =>
        getNumInMemoryTablesRecursively(relation.cachedPlan) +
          getNumInMemoryTablesInSubquery(inMemoryTable) + 1
      case p =>
        getNumInMemoryTablesInSubquery(p)
    }.sum
  }
  // --- Basic caching behavior: temp views, persist/unpersist, Dataset API ---
  test("cache temp table") {
    withTempView("tempTable") {
      testData.select("key").createOrReplaceTempView("tempTable")
      assertCached(sql("SELECT COUNT(*) FROM tempTable"), 0)
      spark.catalog.cacheTable("tempTable")
      assertCached(sql("SELECT COUNT(*) FROM tempTable"))
      uncacheTable("tempTable")
    }
  }
  test("unpersist an uncached table will not raise exception") {
    assert(None == spark.sharedState.cacheManager.lookupCachedData(testData))
    testData.unpersist(blocking = true)
    assert(None == spark.sharedState.cacheManager.lookupCachedData(testData))
    testData.unpersist(blocking = false)
    assert(None == spark.sharedState.cacheManager.lookupCachedData(testData))
    testData.persist()
    assert(None != spark.sharedState.cacheManager.lookupCachedData(testData))
    testData.unpersist(blocking = true)
    assert(None == spark.sharedState.cacheManager.lookupCachedData(testData))
    testData.unpersist(blocking = false)
    assert(None == spark.sharedState.cacheManager.lookupCachedData(testData))
  }
  test("cache table as select") {
    withTempView("tempTable") {
      sql("CACHE TABLE tempTable AS SELECT key FROM testData")
      assertCached(sql("SELECT COUNT(*) FROM tempTable"))
      uncacheTable("tempTable")
    }
  }
  test("uncaching temp table") {
    withTempView("tempTable1", "tempTable2") {
      testData.select("key").createOrReplaceTempView("tempTable1")
      testData.select("key").createOrReplaceTempView("tempTable2")
      spark.catalog.cacheTable("tempTable1")
      assertCached(sql("SELECT COUNT(*) FROM tempTable1"))
      assertCached(sql("SELECT COUNT(*) FROM tempTable2"))
      // Both views share the same underlying plan, so uncaching the second
      // view drops the cache entry for the first as well.
      uncacheTable("tempTable2")
      assertCached(sql("SELECT COUNT(*) FROM tempTable1"), 0)
    }
  }
  test("too big for memory") {
    withTempView("bigData") {
      // ~200 MB of rows: forces spill to disk under MEMORY_AND_DISK.
      val data = "*" * 1000
      sparkContext.parallelize(1 to 200000, 1).map(_ => BigData(data)).toDF()
        .createOrReplaceTempView("bigData")
      spark.table("bigData").persist(StorageLevel.MEMORY_AND_DISK)
      assert(spark.table("bigData").count() === 200000L)
      spark.table("bigData").unpersist(blocking = true)
    }
  }
  test("calling .cache() should use in-memory columnar caching") {
    spark.table("testData").cache()
    assertCached(spark.table("testData"))
    spark.table("testData").unpersist(blocking = true)
  }
  test("calling .unpersist() should drop in-memory columnar cache") {
    spark.table("testData").cache()
    spark.table("testData").count()
    spark.table("testData").unpersist(blocking = true)
    assertCached(spark.table("testData"), 0)
  }
  test("isCached") {
    spark.catalog.cacheTable("testData")
    assertCached(spark.table("testData"))
    assert(spark.table("testData").queryExecution.withCachedData match {
      case _: InMemoryRelation => true
      case _ => false
    })
    uncacheTable("testData")
    assert(!spark.catalog.isCached("testData"))
    assert(spark.table("testData").queryExecution.withCachedData match {
      case _: InMemoryRelation => false
      case _ => true
    })
  }
  // --- Query results over cached tables and the SQL CACHE/UNCACHE syntax ---
  test("SPARK-1669: cacheTable should be idempotent") {
    assert(!spark.table("testData").logicalPlan.isInstanceOf[InMemoryRelation])
    spark.catalog.cacheTable("testData")
    assertCached(spark.table("testData"))
    assertResult(1, "InMemoryRelation not found, testData should have been cached") {
      getNumInMemoryRelations(spark.table("testData"))
    }
    // Caching again must not stack a second InMemoryRelation on top of the
    // first (which would show up as a relation scanning another relation).
    spark.catalog.cacheTable("testData")
    assertResult(0, "Double InMemoryRelations found, cacheTable() is not idempotent") {
      spark.table("testData").queryExecution.withCachedData.collect {
        case r: InMemoryRelation if r.cachedPlan.isInstanceOf[InMemoryTableScanExec] => r
      }.size
    }
    uncacheTable("testData")
  }
  test("read from cached table and uncache") {
    spark.catalog.cacheTable("testData")
    checkAnswer(spark.table("testData"), testData.collect().toSeq)
    assertCached(spark.table("testData"))
    uncacheTable("testData")
    checkAnswer(spark.table("testData"), testData.collect().toSeq)
    assertCached(spark.table("testData"), 0)
  }
  test("SELECT star from cached table") {
    withTempView("selectStar") {
      sql("SELECT * FROM testData").createOrReplaceTempView("selectStar")
      spark.catalog.cacheTable("selectStar")
      checkAnswer(
        sql("SELECT * FROM selectStar WHERE key = 1"),
        Seq(Row(1, "1")))
      uncacheTable("selectStar")
    }
  }
  test("Self-join cached") {
    // Answer must be identical with and without the cache in place.
    val unCachedAnswer =
      sql("SELECT * FROM testData a JOIN testData b ON a.key = b.key").collect()
    spark.catalog.cacheTable("testData")
    checkAnswer(
      sql("SELECT * FROM testData a JOIN testData b ON a.key = b.key"),
      unCachedAnswer.toSeq)
    uncacheTable("testData")
  }
  test("'CACHE TABLE' and 'UNCACHE TABLE' SQL statement") {
    sql("CACHE TABLE testData")
    assertCached(spark.table("testData"))
    val rddId = rddIdOf("testData")
    assert(
      isMaterialized(rddId),
      "Eagerly cached in-memory table should have already been materialized")
    sql("UNCACHE TABLE testData")
    assert(!spark.catalog.isCached("testData"), "Table 'testData' should not be cached")
    // Block eviction is asynchronous, hence the eventually.
    eventually(timeout(10.seconds)) {
      assert(!isMaterialized(rddId), "Uncached in-memory table should have been unpersisted")
    }
  }
  test("CACHE TABLE tableName AS SELECT * FROM anotherTable") {
    withTempView("testCacheTable") {
      sql("CACHE TABLE testCacheTable AS SELECT * FROM testData")
      assertCached(spark.table("testCacheTable"))
      val rddId = rddIdOf("testCacheTable")
      assert(
        isMaterialized(rddId),
        "Eagerly cached in-memory table should have already been materialized")
      uncacheTable("testCacheTable")
      eventually(timeout(10.seconds)) {
        assert(!isMaterialized(rddId), "Uncached in-memory table should have been unpersisted")
      }
    }
  }
test("CACHE TABLE tableName AS SELECT ...") {
withTempView("testCacheTable") {
sql("CACHE TABLE testCacheTable AS SELECT key FROM testData LIMIT 10")
assertCached(spark.table("testCacheTable"))
val rddId = rddIdOf("testCacheTable")
assert(
isMaterialized(rddId),
"Eagerly cached in-memory table should have already been materialized")
uncacheTable("testCacheTable")
eventually(timeout(10.seconds)) {
assert(!isMaterialized(rddId), "Uncached in-memory table should have been unpersisted")
}
}
}
test("CACHE LAZY TABLE tableName") {
sql("CACHE LAZY TABLE testData")
assertCached(spark.table("testData"))
val rddId = rddIdOf("testData")
assert(
!isMaterialized(rddId),
"Lazily cached in-memory table shouldn't be materialized eagerly")
sql("SELECT COUNT(*) FROM testData").collect()
assert(
isMaterialized(rddId),
"Lazily cached in-memory table should have been materialized")
uncacheTable("testData")
eventually(timeout(10.seconds)) {
assert(!isMaterialized(rddId), "Uncached in-memory table should have been unpersisted")
}
}
/**
 * Caches `testData` with the given OPTIONS clause appended to the CACHE TABLE
 * statement, then verifies the resulting cache uses the expected read method
 * (i.e. the requested storage level).
 */
private def assertStorageLevel(cacheOptions: String, level: DataReadMethod): Unit = {
  val cacheStatement = "CACHE TABLE testData OPTIONS" + cacheOptions
  sql(cacheStatement)
  assertCached(spark.table("testData"))
  assert(isExpectStorageLevel(rddIdOf("testData"), level))
}
// OPTIONS('storageLevel' 'DISK_ONLY') must produce a disk-backed cache.
test("SQL interface support storageLevel(DISK_ONLY)") {
  assertStorageLevel("('storageLevel' 'DISK_ONLY')", Disk)
}
// Unrecognized option keys ('a', 'b') must be tolerated; the storage level
// still takes effect.
test("SQL interface support storageLevel(DISK_ONLY) with invalid options") {
  assertStorageLevel("('storageLevel' 'DISK_ONLY', 'a' '1', 'b' '2')", Disk)
}
test("SQL interface support storageLevel(MEMORY_ONLY)") {
  assertStorageLevel("('storageLevel' 'MEMORY_ONLY')", Memory)
}
// The OPTIONS clause also applies to the CACHE TABLE ... <query> form.
test("SQL interface cache SELECT ... support storageLevel(DISK_ONLY)") {
  withTempView("testCacheSelect") {
    sql("CACHE TABLE testCacheSelect OPTIONS('storageLevel' 'DISK_ONLY') SELECT * FROM testData")
    assertCached(spark.table("testCacheSelect"))
    val rddId = rddIdOf("testCacheSelect")
    assert(isExpectStorageLevel(rddId, Disk))
  }
}
// An unknown storage-level name must fail fast; the level name is upper-cased
// in the error message.
test("SQL interface support storageLevel(Invalid StorageLevel)") {
  val message = intercept[IllegalArgumentException] {
    sql("CACHE TABLE testData OPTIONS('storageLevel' 'invalid_storage_level')")
  }.getMessage
  assert(message.contains("Invalid StorageLevel: INVALID_STORAGE_LEVEL"))
}
// LAZY + OPTIONS: materialization stays deferred, and the requested level is
// honored once an action runs. Level names are case-insensitive ('disk_only').
test("SQL interface support storageLevel(with LAZY)") {
  sql("CACHE LAZY TABLE testData OPTIONS('storageLevel' 'disk_only')")
  assertCached(spark.table("testData"))
  val rddId = rddIdOf("testData")
  assert(
    !isMaterialized(rddId),
    "Lazily cached in-memory table shouldn't be materialized eagerly")
  sql("SELECT COUNT(*) FROM testData").collect()
  assert(
    isMaterialized(rddId),
    "Lazily cached in-memory table should have been materialized")
  assert(isExpectStorageLevel(rddId, Disk))
}
// The size statistic reported by a cached relation must match the actual byte
// size of testData (100 rows of (Int, String)).
test("InMemoryRelation statistics") {
  sql("CACHE TABLE testData")
  spark.table("testData").queryExecution.withCachedData.collect {
    case cached: InMemoryRelation =>
      // Per row: 4 bytes for the int key, the value string's character count,
      // plus 4 more bytes — presumably string-length overhead; TODO confirm
      // the exact row layout this mirrors.
      val actualSizeInBytes = (1 to 100).map(i => 4 + i.toString.length + 4).sum
      assert(cached.stats.sizeInBytes === actualSizeInBytes)
  }
}
// Dropping a temp view makes subsequent lookups fail analysis.
test("Drops temporary table") {
  withTempView("t1") {
    testData.select("key").createOrReplaceTempView("t1")
    spark.table("t1")
    spark.catalog.dropTempView("t1")
    intercept[AnalysisException](spark.table("t1"))
  }
}
// t1 and t2 are built from the same plan, so caching t1 makes t2 report cached
// too (one shared cache entry); dropping t1 must invalidate that shared entry.
test("Drops cached temporary table") {
  withTempView("t1", "t2") {
    testData.select("key").createOrReplaceTempView("t1")
    testData.select("key").createOrReplaceTempView("t2")
    spark.catalog.cacheTable("t1")
    assert(spark.catalog.isCached("t1"))
    assert(spark.catalog.isCached("t2"))
    spark.catalog.dropTempView("t1")
    intercept[AnalysisException](spark.table("t1"))
    assert(!spark.catalog.isCached("t2"))
  }
}
// Both the Catalog API (clearCache) and the SQL statement (CLEAR CACHE, which
// is case-insensitive) must leave the cache manager empty.
test("Clear all cache") {
  withTempView("t1", "t2") {
    sql("SELECT key FROM testData LIMIT 10").createOrReplaceTempView("t1")
    sql("SELECT key FROM testData LIMIT 5").createOrReplaceTempView("t2")
    spark.catalog.cacheTable("t1")
    spark.catalog.cacheTable("t2")
    spark.catalog.clearCache()
    assert(spark.sharedState.cacheManager.isEmpty)
    sql("SELECT key FROM testData LIMIT 10").createOrReplaceTempView("t1")
    sql("SELECT key FROM testData LIMIT 5").createOrReplaceTempView("t2")
    spark.catalog.cacheTable("t1")
    spark.catalog.cacheTable("t2")
    sql("Clear CACHE")
    assert(spark.sharedState.cacheManager.isEmpty)
  }
}
// The sizeInBytesStats accumulators registered by cached relations must become
// garbage-collectible (and be removed from AccumulatorContext) once the
// backing tables are uncached.
test("Ensure accumulators to be cleared after GC when uncacheTable") {
  withTempView("t1", "t2") {
    sql("SELECT key FROM testData LIMIT 10").createOrReplaceTempView("t1")
    sql("SELECT key FROM testData LIMIT 5").createOrReplaceTempView("t2")
    spark.catalog.cacheTable("t1")
    spark.catalog.cacheTable("t2")
    // Exercise both cached relations (twice) so their accumulators are used.
    sql("SELECT * FROM t1").count()
    sql("SELECT * FROM t2").count()
    sql("SELECT * FROM t1").count()
    sql("SELECT * FROM t2").count()
    val toBeCleanedAccIds = new HashSet[Long]
    val accId1 = spark.table("t1").queryExecution.withCachedData.collect {
      case i: InMemoryRelation => i.cacheBuilder.sizeInBytesStats.id
    }.head
    toBeCleanedAccIds += accId1
    // Bug fix: this previously read "t1" again (copy-paste), so t2's
    // accumulator was never tracked and a leak of it would go undetected.
    val accId2 = spark.table("t2").queryExecution.withCachedData.collect {
      case i: InMemoryRelation => i.cacheBuilder.sizeInBytesStats.id
    }.head
    toBeCleanedAccIds += accId2
    // Remove ids from the tracking set as the ContextCleaner reports them
    // cleaned; the other callbacks are irrelevant here.
    val cleanerListener = new CleanerListener {
      def rddCleaned(rddId: Int): Unit = {}
      def shuffleCleaned(shuffleId: Int): Unit = {}
      def broadcastCleaned(broadcastId: Long): Unit = {}
      def accumCleaned(accId: Long): Unit = {
        toBeCleanedAccIds.synchronized { toBeCleanedAccIds -= accId }
      }
      def checkpointCleaned(rddId: Long): Unit = {}
    }
    spark.sparkContext.cleaner.get.attachListener(cleanerListener)
    uncacheTable("t1")
    uncacheTable("t2")
    System.gc()
    // Cleaning is asynchronous and driven by GC, so poll.
    eventually(timeout(10.seconds)) {
      assert(toBeCleanedAccIds.synchronized { toBeCleanedAccIds.isEmpty },
        "batchStats accumulators should be cleared after GC when uncacheTable")
    }
    assert(AccumulatorContext.get(accId1).isEmpty)
    assert(AccumulatorContext.get(accId2).isEmpty)
  }
}
// SPARK-10327: the cache must still be used when the cached relation appears
// under aliases; all three scans of `abc` should hit the in-memory relation
// and none should fall back to scanning the original RDD.
test("SPARK-10327 Cache Table is not working while subquery has alias in its project list") {
  withTempView("abc") {
    sparkContext.parallelize((1, 1) :: (2, 2) :: Nil)
      .toDF("key", "value").selectExpr("key", "value", "key+1").createOrReplaceTempView("abc")
    spark.catalog.cacheTable("abc")
    val sparkPlan = sql(
      """select a.key, b.key, c.key from
        |abc a join abc b on a.key=b.key
        |join abc c on a.key=c.key""".stripMargin).queryExecution.sparkPlan
    assert(sparkPlan.collect { case e: InMemoryTableScanExec => e }.size === 3)
    assert(sparkPlan.collect { case e: RDDScanExec => e }.size === 0)
  }
}
/**
 * Asserts that the executed plan of `df` contains exactly `expected`
 * ShuffleExchangeExec operators.
 */
private def verifyNumExchanges(df: DataFrame, expected: Int): Unit = {
  val exchanges = collect(df.queryExecution.executedPlan) { case e: ShuffleExchangeExec => e }
  assert(exchanges.size == expected)
}
// Caching must preserve the child plan's partitioning and ordering so that
// downstream aggregations and joins over the cached data can avoid shuffles.
// Each section below checks the ShuffleExchangeExec count and the answer.
test("A cached table preserves the partitioning and ordering of its cached SparkPlan") {
  val table3x = testData.union(testData).union(testData)
  table3x.createOrReplaceTempView("testData3x")
  sql("SELECT key, value FROM testData3x ORDER BY key").createOrReplaceTempView("orderedTable")
  spark.catalog.cacheTable("orderedTable")
  assertCached(spark.table("orderedTable"))
  // Should not have an exchange as the query is already sorted on the group by key.
  verifyNumExchanges(sql("SELECT key, count(*) FROM orderedTable GROUP BY key"), 0)
  checkAnswer(
    sql("SELECT key, count(*) FROM orderedTable GROUP BY key ORDER BY key"),
    sql("SELECT key, count(*) FROM testData3x GROUP BY key ORDER BY key").collect())
  uncacheTable("orderedTable")
  spark.catalog.dropTempView("orderedTable")
  // Set up two tables distributed in the same way. Try this with the data distributed into
  // different number of partitions (1, 5 and 9).
  for (numPartitions <- 1 until 10 by 4) {
    withTempView("t1", "t2") {
      testData.repartition(numPartitions, $"key").createOrReplaceTempView("t1")
      testData2.repartition(numPartitions, $"a").createOrReplaceTempView("t2")
      spark.catalog.cacheTable("t1")
      spark.catalog.cacheTable("t2")
      // Joining them should result in no exchanges.
      verifyNumExchanges(sql("SELECT * FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a"), 0)
      checkAnswer(sql("SELECT * FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a"),
        sql("SELECT * FROM testData t1 JOIN testData2 t2 ON t1.key = t2.a"))
      // Grouping on the partition key should result in no exchanges
      verifyNumExchanges(sql("SELECT count(*) FROM t1 GROUP BY key"), 0)
      checkAnswer(sql("SELECT count(*) FROM t1 GROUP BY key"),
        sql("SELECT count(*) FROM testData GROUP BY key"))
      uncacheTable("t1")
      uncacheTable("t2")
    }
  }
  // Distribute the tables into non-matching number of partitions. Need to shuffle one side.
  withTempView("t1", "t2") {
    testData.repartition(6, $"key").createOrReplaceTempView("t1")
    testData2.repartition(3, $"a").createOrReplaceTempView("t2")
    spark.catalog.cacheTable("t1")
    spark.catalog.cacheTable("t2")
    val query = sql("SELECT key, value, a, b FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a")
    verifyNumExchanges(query, 1)
    // The 3-partition side is re-shuffled to match the larger side (6).
    assert(stripAQEPlan(query.queryExecution.executedPlan).outputPartitioning.numPartitions === 6)
    checkAnswer(
      query,
      testData.join(testData2, $"key" === $"a").select($"key", $"value", $"a", $"b"))
    uncacheTable("t1")
    uncacheTable("t2")
  }
  // One side of join is not partitioned in the desired way. Need to shuffle one side.
  withTempView("t1", "t2") {
    testData.repartition(6, $"value").createOrReplaceTempView("t1")
    testData2.repartition(6, $"a").createOrReplaceTempView("t2")
    spark.catalog.cacheTable("t1")
    spark.catalog.cacheTable("t2")
    val query = sql("SELECT key, value, a, b FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a")
    verifyNumExchanges(query, 1)
    assert(stripAQEPlan(query.queryExecution.executedPlan).outputPartitioning.numPartitions === 6)
    checkAnswer(
      query,
      testData.join(testData2, $"key" === $"a").select($"key", $"value", $"a", $"b"))
    uncacheTable("t1")
    uncacheTable("t2")
  }
  // Mispartitioned side adopts the larger side's partition count (12).
  withTempView("t1", "t2") {
    testData.repartition(6, $"value").createOrReplaceTempView("t1")
    testData2.repartition(12, $"a").createOrReplaceTempView("t2")
    spark.catalog.cacheTable("t1")
    spark.catalog.cacheTable("t2")
    val query = sql("SELECT key, value, a, b FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a")
    verifyNumExchanges(query, 1)
    assert(stripAQEPlan(query.queryExecution.executedPlan).
      outputPartitioning.numPartitions === 12)
    checkAnswer(
      query,
      testData.join(testData2, $"key" === $"a").select($"key", $"value", $"a", $"b"))
    uncacheTable("t1")
    uncacheTable("t2")
  }
  // One side of join is not partitioned in the desired way. Since the number of partitions of
  // the side that has already partitioned is smaller than the side that is not partitioned,
  // we shuffle both side.
  withTempView("t1", "t2") {
    testData.repartition(6, $"value").createOrReplaceTempView("t1")
    testData2.repartition(3, $"a").createOrReplaceTempView("t2")
    spark.catalog.cacheTable("t1")
    spark.catalog.cacheTable("t2")
    val query = sql("SELECT key, value, a, b FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a")
    verifyNumExchanges(query, 2)
    checkAnswer(
      query,
      testData.join(testData2, $"key" === $"a").select($"key", $"value", $"a", $"b"))
    uncacheTable("t1")
    uncacheTable("t2")
  }
  // repartition's column ordering is different from group by column ordering.
  // But they use the same set of columns.
  withTempView("t1") {
    testData.repartition(6, $"value", $"key").createOrReplaceTempView("t1")
    spark.catalog.cacheTable("t1")
    val query = sql("SELECT value, key from t1 group by key, value")
    verifyNumExchanges(query, 0)
    checkAnswer(
      query,
      testData.distinct().select($"value", $"key"))
    uncacheTable("t1")
  }
  // repartition's column ordering is different from join condition's column ordering.
  // We will still shuffle because hashcodes of a row depend on the column ordering.
  // If we do not shuffle, we may actually partition two tables in totally two different way.
  // See PartitioningSuite for more details.
  withTempView("t1", "t2") {
    val df1 = testData
    df1.repartition(6, $"value", $"key").createOrReplaceTempView("t1")
    val df2 = testData2.select($"a", $"b".cast("string"))
    df2.repartition(6, $"a", $"b").createOrReplaceTempView("t2")
    spark.catalog.cacheTable("t1")
    spark.catalog.cacheTable("t2")
    val query =
      sql("SELECT key, value, a, b FROM t1 t1 JOIN t2 t2 ON t1.key = t2.a and t1.value = t2.b")
    verifyNumExchanges(query, 1)
    assert(stripAQEPlan(query.queryExecution.executedPlan).outputPartitioning.numPartitions === 6)
    checkAnswer(
      query,
      df1.join(df2, $"key" === $"a" && $"value" === $"b").select($"key", $"value", $"a", $"b"))
    uncacheTable("t1")
    uncacheTable("t2")
  }
}
// SPARK-15870: a DataFrame captured before caching must stay executable and
// return the same answer after its cache entry is removed.
test("SPARK-15870 DataFrame can't execute after uncacheTable") {
  withTempView("selectStar") {
    val selectStar = sql("SELECT * FROM testData WHERE key = 1")
    selectStar.createOrReplaceTempView("selectStar")
    spark.catalog.cacheTable("selectStar")
    checkAnswer(
      selectStar,
      Seq(Row(1, "1")))
    uncacheTable("selectStar")
    checkAnswer(
      selectStar,
      Seq(Row(1, "1")))
  }
}
// SPARK-15915: cache lookup must go through canonicalized plans, so exactly
// one in-memory relation is found for the cached local relation.
test("SPARK-15915 Logical plans should use canonicalized plan when override sameResult") {
  withTempView("localRelation") {
    val localRelation = Seq(1, 2, 3).toDF()
    localRelation.createOrReplaceTempView("localRelation")
    spark.catalog.cacheTable("localRelation")
    assert(getNumInMemoryRelations(localRelation) == 1)
  }
}
// SPARK-19093: the cached t1 must be used both in the outer query and inside
// the NOT EXISTS subquery — hence two in-memory relations.
test("SPARK-19093 Caching in side subquery") {
  withTempView("t1") {
    Seq(1).toDF("c1").createOrReplaceTempView("t1")
    spark.catalog.cacheTable("t1")
    val ds =
      sql(
        """
          |SELECT * FROM t1
          |WHERE
          |NOT EXISTS (SELECT * FROM t1)
        """.stripMargin)
    assert(getNumInMemoryRelations(ds) == 2)
  }
}
// SPARK-19093: cached tables must be picked up inside nested predicate
// subqueries as well as in scalar / EXISTS / IN subqueries.
test("SPARK-19093 scalar and nested predicate query") {
  withTempView("t1", "t2", "t3", "t4") {
    Seq(1).toDF("c1").createOrReplaceTempView("t1")
    Seq(2).toDF("c1").createOrReplaceTempView("t2")
    Seq(1).toDF("c1").createOrReplaceTempView("t3")
    Seq(1).toDF("c1").createOrReplaceTempView("t4")
    spark.catalog.cacheTable("t1")
    spark.catalog.cacheTable("t2")
    spark.catalog.cacheTable("t3")
    spark.catalog.cacheTable("t4")
    // Nested predicate subquery
    val ds =
      sql(
        """
          |SELECT * FROM t1
          |WHERE
          |c1 IN (SELECT c1 FROM t2 WHERE c1 IN (SELECT c1 FROM t3 WHERE c1 = 1))
        """.stripMargin)
    assert(getNumInMemoryRelations(ds) == 3)
    // Scalar subquery and predicate subquery
    val ds2 =
      sql(
        """
          |SELECT * FROM (SELECT c1, max(c1) FROM t1 GROUP BY c1)
          |WHERE
          |c1 = (SELECT max(c1) FROM t2 GROUP BY c1)
          |OR
          |EXISTS (SELECT c1 FROM t3)
          |OR
          |c1 IN (SELECT c1 FROM t4)
        """.stripMargin)
    assert(getNumInMemoryRelations(ds2) == 4)
  }
}
// Disabled: uncaching a table must also un-cache derived cached plans, so
// stale data is never served after the underlying files are deleted.
ignore("SPARK-19765: UNCACHE TABLE should un-cache all cached plans that refer to this table") {
  withTable("t") {
    withTempPath { path =>
      Seq(1 -> "a").toDF("i", "j").write.parquet(path.getCanonicalPath)
      sql(s"CREATE TABLE t USING parquet LOCATION '${path.toURI}'")
      spark.catalog.cacheTable("t")
      spark.table("t").select($"i").cache()
      checkAnswer(spark.table("t").select($"i"), Row(1))
      assertCached(spark.table("t").select($"i"))
      Utils.deleteRecursively(path)
      spark.sessionState.catalog.refreshTable(TableIdentifier("t"))
      uncacheTable("t")
      // Reads must now observe the deleted files: zero rows, no cached plan.
      assert(spark.table("t").select($"i").count() == 0)
      assert(getNumInMemoryRelations(spark.table("t").select($"i")) == 0)
    }
  }
}
// Disabled: refreshByPath must refresh every cached plan that reads from the
// given path, not just one of them.
ignore("refreshByPath should refresh all cached plans with the specified path") {
  withTempDir { dir =>
    val path = dir.getCanonicalPath()
    spark.range(10).write.mode("overwrite").parquet(path)
    spark.read.parquet(path).cache()
    spark.read.parquet(path).filter($"id" > 4).cache()
    assert(spark.read.parquet(path).filter($"id" > 4).count() == 5)
    spark.range(20).write.mode("overwrite").parquet(path)
    spark.catalog.refreshByPath(path)
    assert(spark.read.parquet(path).count() == 20)
    assert(spark.read.parquet(path).filter($"id" > 4).count() == 15)
  }
}
// SPARK-19993: caching a query with an uncorrelated subquery must be reused
// for an identical query, and must NOT match when the subquery differs.
test("SPARK-19993 simple subquery caching") {
  withTempView("t1", "t2") {
    Seq(1).toDF("c1").createOrReplaceTempView("t1")
    Seq(2).toDF("c1").createOrReplaceTempView("t2")
    val sql1 =
      """
        |SELECT * FROM t1
        |WHERE
        |NOT EXISTS (SELECT * FROM t2)
      """.stripMargin
    sql(sql1).cache()
    val cachedDs = sql(sql1)
    assert(getNumInMemoryRelations(cachedDs) == 1)
    // Additional predicate in the subquery plan should cause a cache miss
    val cachedMissDs =
      sql(
        """
          |SELECT * FROM t1
          |WHERE
          |NOT EXISTS (SELECT * FROM t2 where c1 = 0)
        """.stripMargin)
    assert(getNumInMemoryRelations(cachedMissDs) == 0)
  }
}
// SPARK-19993: a cached query containing a correlated predicate subquery must
// be matched when the identical query text is executed again.
test("SPARK-19993 subquery caching with correlated predicates") {
  withTempView("t1", "t2") {
    Seq(1).toDF("c1").createOrReplaceTempView("t1")
    Seq(1).toDF("c1").createOrReplaceTempView("t2")
    // Simple correlated predicate in subquery
    val sqlText =
      """
        |SELECT * FROM t1
        |WHERE
        |t1.c1 in (SELECT t2.c1 FROM t2 where t1.c1 = t2.c1)
      """.stripMargin
    sql(sqlText).cache()
    val cachedDs = sql(sqlText)
    assert(getNumInMemoryRelations(cachedDs) == 1)
  }
}
// Disabled: with the underlying table cached, the query sees 2 in-memory
// relations; after caching the query itself, a recursive count sees 3.
ignore("SPARK-19993 subquery with cached underlying relation") {
  withTempView("t1") {
    Seq(1).toDF("c1").createOrReplaceTempView("t1")
    spark.catalog.cacheTable("t1")
    // underlying table t1 is cached as well as the query that refers to it.
    val sqlText =
      """
        |SELECT * FROM t1
        |WHERE
        |NOT EXISTS (SELECT * FROM t1)
      """.stripMargin
    val ds = sql(sqlText)
    assert(getNumInMemoryRelations(ds) == 2)
    val cachedDs = sql(sqlText).cache()
    assert(getNumInMemoryTablesRecursively(cachedDs.queryExecution.sparkPlan) == 3)
  }
}
// SPARK-19993: caching the whole query collapses nested / scalar / predicate
// subqueries into a single in-memory relation on re-execution.
// NOTE(review): "subqueris" in the test name is a typo, kept to preserve the
// externally visible test id.
test("SPARK-19993 nested subquery caching and scalar + predicate subqueris") {
  withTempView("t1", "t2", "t3", "t4") {
    Seq(1).toDF("c1").createOrReplaceTempView("t1")
    Seq(2).toDF("c1").createOrReplaceTempView("t2")
    Seq(1).toDF("c1").createOrReplaceTempView("t3")
    Seq(1).toDF("c1").createOrReplaceTempView("t4")
    // Nested predicate subquery
    val sql1 =
      """
        |SELECT * FROM t1
        |WHERE
        |c1 IN (SELECT c1 FROM t2 WHERE c1 IN (SELECT c1 FROM t3 WHERE c1 = 1))
      """.stripMargin
    sql(sql1).cache()
    val cachedDs = sql(sql1)
    assert(getNumInMemoryRelations(cachedDs) == 1)
    // Scalar subquery and predicate subquery
    val sql2 =
      """
        |SELECT * FROM (SELECT c1, max(c1) FROM t1 GROUP BY c1)
        |WHERE
        |c1 = (SELECT max(c1) FROM t2 GROUP BY c1)
        |OR
        |EXISTS (SELECT c1 FROM t3)
        |OR
        |c1 IN (SELECT c1 FROM t4)
      """.stripMargin
    sql(sql2).cache()
    val cachedDs2 = sql(sql2)
    assert(getNumInMemoryRelations(cachedDs2) == 1)
  }
}
// SPARK-23312: the CACHE_VECTORIZED_READER_ENABLED conf must toggle columnar
// support of the in-memory scan in both directions.
test("SPARK-23312: vectorized cache reader can be disabled") {
  Seq(true, false).foreach { vectorized =>
    withSQLConf(SQLConf.CACHE_VECTORIZED_READER_ENABLED.key -> vectorized.toString) {
      val df = spark.range(10).cache()
      df.queryExecution.executedPlan.foreach {
        case i: InMemoryTableScanExec =>
          assert(i.supportsColumnar == vectorized)
        case _ =>
      }
    }
  }
}
/**
 * Evaluates `f` and asserts that the evaluation submitted no Spark jobs.
 * A listener counts job starts while `f` runs; the listener is always removed
 * afterwards. Returns the result of `f`.
 */
private def checkIfNoJobTriggered[T](f: => T): T = {
  var jobCount = 0
  val countingListener = new SparkListener {
    override def onJobStart(jobStart: SparkListenerJobStart): Unit = jobCount += 1
  }
  sparkContext.addSparkListener(countingListener)
  try {
    val result = f
    // Drain the listener bus so every job-start event has been delivered.
    sparkContext.listenerBus.waitUntilEmpty()
    assert(jobCount === 0)
    result
  } finally {
    sparkContext.removeSparkListener(countingListener)
  }
}
// SPARK-23880: building the cached Dataset (including filter/sort planning)
// must not launch any Spark job — only the collect() afterwards may.
test("SPARK-23880 table cache should be lazy and don't trigger any jobs") {
  val cachedData = checkIfNoJobTriggered {
    spark.range(1002).filter($"id" > 1000).orderBy($"id".desc).cache()
  }
  assert(cachedData.collect === Seq(1001))
}
// Disabled: SPARK-24596 — uncaching a temp view must NOT cascade to caches
// built on top of it (t2 stays cached after t1 is uncached).
ignore("SPARK-24596 Non-cascading Cache Invalidation - uncache temporary view") {
  withTempView("t1", "t2") {
    sql("CACHE TABLE t1 AS SELECT * FROM testData WHERE key > 1")
    sql("CACHE TABLE t2 as SELECT * FROM t1 WHERE value > 1")
    assert(spark.catalog.isCached("t1"))
    assert(spark.catalog.isCached("t2"))
    sql("UNCACHE TABLE t1")
    assert(!spark.catalog.isCached("t1"))
    assert(spark.catalog.isCached("t2"))
  }
}
// Disabled: dropping the temp view likewise leaves dependent caches intact.
ignore("SPARK-24596 Non-cascading Cache Invalidation - drop temporary view") {
  withTempView("t1", "t2") {
    sql("CACHE TABLE t1 AS SELECT * FROM testData WHERE key > 1")
    sql("CACHE TABLE t2 as SELECT * FROM t1 WHERE value > 1")
    assert(spark.catalog.isCached("t1"))
    assert(spark.catalog.isCached("t2"))
    sql("DROP VIEW t1")
    assert(spark.catalog.isCached("t2"))
  }
}
// Disabled: dropping a PERSISTENT view, by contrast, DOES invalidate caches
// that were built from it (t2 becomes uncached).
ignore("SPARK-24596 Non-cascading Cache Invalidation - drop persistent view") {
  withTable("t") {
    spark.range(1, 10).toDF("key").withColumn("value", $"key" * 2)
      .write.format("json").saveAsTable("t")
    withView("t1") {
      withTempView("t2") {
        sql("CREATE VIEW t1 AS SELECT * FROM t WHERE key > 1")
        sql("CACHE TABLE t1")
        sql("CACHE TABLE t2 AS SELECT * FROM t1 WHERE value > 1")
        assert(spark.catalog.isCached("t1"))
        assert(spark.catalog.isCached("t2"))
        sql("DROP VIEW t1")
        assert(!spark.catalog.isCached("t2"))
      }
    }
  }
}
// Disabled: uncaching a persistent TABLE cascades through every dependent
// cached view (t, t1 and t2 all end up uncached).
ignore("SPARK-24596 Non-cascading Cache Invalidation - uncache table") {
  withTable("t") {
    spark.range(1, 10).toDF("key").withColumn("value", $"key" * 2)
      .write.format("json").saveAsTable("t")
    withTempView("t1", "t2") {
      sql("CACHE TABLE t")
      sql("CACHE TABLE t1 AS SELECT * FROM t WHERE key > 1")
      sql("CACHE TABLE t2 AS SELECT * FROM t1 WHERE value > 1")
      assert(spark.catalog.isCached("t"))
      assert(spark.catalog.isCached("t1"))
      assert(spark.catalog.isCached("t2"))
      sql("UNCACHE TABLE t")
      assert(!spark.catalog.isCached("t"))
      assert(!spark.catalog.isCached("t1"))
      assert(!spark.catalog.isCached("t2"))
    }
  }
}
// Caching a plan that carries a join-strategy hint must preserve the hint
// both before and after the DataFrame is cached and materialized.
test("Cache should respect the hint") {
  def testHint(df: Dataset[_], expectedHint: JoinStrategyHint): Unit = {
    val df2 = spark.range(2000).cache()
    df2.count()
    // NOTE(review): df2 is cached on every invocation and never unpersisted —
    // confirm this cannot pollute later tests in the suite.
    def checkHintExists(): Unit = {
      // Test the broadcast hint.
      val joinPlan = df.join(df2, "id").queryExecution.optimizedPlan
      val joinHints = joinPlan.collect {
        case Join(_, _, _, _, hint) => hint
      }
      assert(joinHints.size == 1)
      assert(joinHints(0).leftHint.get.strategy.contains(expectedHint))
      assert(joinHints(0).rightHint.isEmpty)
    }
    // Make sure the hint does exist when `df` is not cached.
    checkHintExists()
    df.cache()
    try {
      df.count()
      // Make sure the hint still exists when `df` is cached.
      checkHintExists()
    } finally {
      // Clean-up
      df.unpersist()
    }
  }
  // The hint is the root node
  testHint(broadcast(spark.range(1000)), BROADCAST)
  // The hint is under subquery alias
  testHint(broadcast(spark.range(1000)).as("df"), BROADCAST)
  // The hint is under filter
  testHint(broadcast(spark.range(1000)).filter($"id" > 100), BROADCAST)
  // If there are 2 adjacent hints, the top one takes effect.
  testHint(
    spark.range(1000)
      .hint("SHUFFLE_MERGE")
      .hint("SHUFFLE_HASH")
      .as("df"),
    SHUFFLE_HASH)
}
// Disabled: analyzeColumnCacheQuery must attach column statistics to the
// cached query incrementally — none at first, then v1, then c0 and v2 as well.
ignore("analyzes column statistics in cached query") {
  def query(): DataFrame = {
    spark.range(100)
      .selectExpr("id % 3 AS c0", "id % 5 AS c1", "2 AS c2")
      .groupBy("c0")
      .agg(avg("c1").as("v1"), sum("c2").as("v2"))
  }
  // First, checks if there is no column statistic in cached query
  val queryStats1 = query().cache.queryExecution.optimizedPlan.stats.attributeStats
  assert(queryStats1.map(_._1.name).isEmpty)
  val cacheManager = spark.sharedState.cacheManager
  val cachedData = cacheManager.lookupCachedData(query().logicalPlan)
  assert(cachedData.isDefined)
  val queryAttrs = cachedData.get.plan.output
  assert(queryAttrs.size === 3)
  val (c0, v1, v2) = (queryAttrs(0), queryAttrs(1), queryAttrs(2))
  // Analyzes one column in the query output
  cacheManager.analyzeColumnCacheQuery(spark, cachedData.get, v1 :: Nil)
  val queryStats2 = query().queryExecution.optimizedPlan.stats.attributeStats
  assert(queryStats2.map(_._1.name).toSet === Set("v1"))
  // Analyzes two more columns
  cacheManager.analyzeColumnCacheQuery(spark, cachedData.get, c0 :: v2 :: Nil)
  val queryStats3 = query().queryExecution.optimizedPlan.stats.attributeStats
  assert(queryStats3.map(_._1.name).toSet === Set("c0", "v1", "v2"))
}
// SPARK-27248: refreshing a cached table — whether addressed by its qualified
// or unqualified name — must recreate the cache with the ORIGINAL cache name
// and storage level, not the defaults.
test("SPARK-27248 refreshTable should recreate cache with same cache name and storage level") {
  // This section tests when a table is cached with its qualified name but it is refreshed with
  // its unqualified name.
  withTempDatabase { db =>
    withTempPath { path =>
      withTable(s"$db.cachedTable") {
        // Create table 'cachedTable' in temp db for testing purpose.
        spark.catalog.createTable(
          s"$db.cachedTable",
          "PARQUET",
          StructType(Array(StructField("key", StringType))),
          Map("LOCATION" -> path.toURI.toString))
        withCache(s"$db.cachedTable") {
          // Cache the table 'cachedTable' in temp db with qualified table name with storage level
          // MEMORY_ONLY, and then check whether the table is cached with expected name and
          // storage level.
          spark.catalog.cacheTable(s"$db.cachedTable", MEMORY_ONLY)
          assertCached(spark.table(s"$db.cachedTable"), s"$db.cachedTable", MEMORY_ONLY)
          assert(spark.catalog.isCached(s"$db.cachedTable"),
            s"Table '$db.cachedTable' should be cached.")
          // Refresh the table 'cachedTable' in temp db with qualified table name, and then check
          // whether the table is still cached with the same name and storage level.
          // Without bug fix 'SPARK-27248', the recreated cache storage level will be default
          // storage level 'MEMORY_AND_DISK', instead of 'MEMORY_ONLY'.
          spark.catalog.refreshTable(s"$db.cachedTable")
          assertCached(spark.table(s"$db.cachedTable"), s"$db.cachedTable", MEMORY_ONLY)
          assert(spark.catalog.isCached(s"$db.cachedTable"),
            s"Table '$db.cachedTable' should be cached after refreshing with its qualified name.")
          // Change the active database to the temp db and refresh the table with unqualified
          // table name, and then check whether the table is still cached with the same name and
          // storage level.
          // Without bug fix 'SPARK-27248', the recreated cache name will be changed to
          // 'cachedTable', instead of '$db.cachedTable'
          activateDatabase(db) {
            spark.catalog.refreshTable("cachedTable")
            assertCached(spark.table("cachedTable"), s"$db.cachedTable", MEMORY_ONLY)
            assert(spark.catalog.isCached("cachedTable"),
              s"Table '$db.cachedTable' should be cached after refreshing with its " +
                "unqualified name.")
          }
        }
      }
    }
    // This section tests when a table is cached with its unqualified name but it is refreshed
    // with its qualified name.
    withTempPath { path =>
      withTable("cachedTable") {
        // Create table 'cachedTable' in default db for testing purpose.
        spark.catalog.createTable(
          "cachedTable",
          "PARQUET",
          StructType(Array(StructField("key", StringType))),
          Map("LOCATION" -> path.toURI.toString))
        withCache("cachedTable") {
          // Cache the table 'cachedTable' in default db without qualified table name with storage
          // level 'MEMORY_AND_DISK2', and then check whether the table is cached with expected
          // name and storage level.
          spark.catalog.cacheTable("cachedTable", MEMORY_AND_DISK_2)
          assertCached(spark.table("cachedTable"), "cachedTable", MEMORY_AND_DISK_2)
          assert(spark.catalog.isCached("cachedTable"),
            "Table 'cachedTable' should be cached.")
          // Refresh the table 'cachedTable' in default db with unqualified table name, and then
          // check whether the table is still cached with the same name and storage level.
          // Without bug fix 'SPARK-27248', the recreated cache storage level will be default
          // storage level 'MEMORY_AND_DISK', instead of 'MEMORY_AND_DISK2'.
          spark.catalog.refreshTable("cachedTable")
          assertCached(spark.table("cachedTable"), "cachedTable", MEMORY_AND_DISK_2)
          assert(spark.catalog.isCached("cachedTable"),
            "Table 'cachedTable' should be cached after refreshing with its unqualified name.")
          // Change the active database to the temp db and refresh the table with qualified
          // table name, and then check whether the table is still cached with the same name and
          // storage level.
          // Without bug fix 'SPARK-27248', the recreated cache name will be changed to
          // 'default.cachedTable', instead of 'cachedTable'
          activateDatabase(db) {
            spark.catalog.refreshTable("default.cachedTable")
            assertCached(spark.table("default.cachedTable"), "cachedTable", MEMORY_AND_DISK_2)
            assert(spark.catalog.isCached("default.cachedTable"),
              "Table 'cachedTable' should be cached after refreshing with its qualified name.")
          }
        }
      }
    }
  }
}
// Disabled: interval values produced by CAST must be cacheable and queryable,
// and UNCACHE must clear the entry again.
ignore("cache supports for intervals") {
  withTable("interval_cache") {
    Seq((1, "1 second"), (2, "2 seconds"), (2, null))
      .toDF("k", "v").write.saveAsTable("interval_cache")
    // NOTE(review): temp view t1 is created here but never dropped (no
    // withTempView("t1") guard) — confirm it cannot leak into later tests.
    sql("CACHE TABLE t1 AS SELECT k, cast(v as interval) FROM interval_cache")
    assert(spark.catalog.isCached("t1"))
    checkAnswer(sql("SELECT * FROM t1 WHERE k = 1"),
      Row(1, new CalendarInterval(0, 0, DateTimeConstants.MICROS_PER_SECOND)))
    sql("UNCACHE TABLE t1")
    assert(!spark.catalog.isCached("t1"))
  }
}
// Disabled: SPARK-30494 — CREATE OR REPLACE of a view must drop the cached
// data of the old definition so no entry leaks in the cache manager. Covers
// temporary, global temporary and persistent views.
ignore("SPARK-30494 Fix the leak of cached data when replace an existing view") {
  withTempView("tempView") {
    spark.catalog.clearCache()
    sql("create or replace temporary view tempView as select 1")
    sql("cache table tempView")
    assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1")).isDefined)
    sql("create or replace temporary view tempView as select 1, 2")
    assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1")).isEmpty)
    sql("cache table tempView")
    assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1, 2")).isDefined)
  }
  withGlobalTempView("tempGlobalTempView") {
    spark.catalog.clearCache()
    sql("create or replace global temporary view tempGlobalTempView as select 1")
    sql("cache table global_temp.tempGlobalTempView")
    assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1")).isDefined)
    sql("create or replace global temporary view tempGlobalTempView as select 1, 2")
    assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1")).isEmpty)
    sql("cache table global_temp.tempGlobalTempView")
    assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1, 2")).isDefined)
  }
  withView("view1") {
    spark.catalog.clearCache()
    sql("create or replace view view1 as select 1")
    sql("cache table view1")
    sql("create or replace view view1 as select 1, 2")
    sql("cache table view1")
    // the cached plan of persisted view likes below,
    // we cannot use the same assertion of temp view.
    // SubqueryAlias
    // |
    // + View
    // |
    // + Project[1 AS 1]
    spark.sharedState.cacheManager.uncacheQuery(spark.table("view1"), cascade = false)
    // make sure there is no cached data leak
    assert(spark.sharedState.cacheManager.isEmpty)
  }
}
}
| Intel-bigdata/OAP | oap-native-sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala | Scala | apache-2.0 | 45,759 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.net.URLClassLoader
import scala.collection.JavaConverters._
import org.scalatest.Matchers
import org.apache.spark.{SparkContext, SparkException, SparkFunSuite, TestUtils}
/**
 * Tests for `MutableURLClassLoader` (parent-first) and
 * `ChildFirstURLClassLoader`: delegation order for classes and resources,
 * fallback behavior, and the driver's context classloader in local mode.
 *
 * All loaders are closed in `finally` blocks and the SparkContext/context
 * classloader in the last test are torn down in `finally`, so a failing
 * assertion cannot leak resources into later tests.
 */
class MutableURLClassLoaderSuite extends SparkFunSuite with Matchers {

  // Jar containing FakeClass1/2/3, all with toString "2". Used as the parent
  // classpath in the delegation tests.
  val urls2 = List(TestUtils.createJarWithClasses(
    classNames = Seq("FakeClass1", "FakeClass2", "FakeClass3"),
    toStringValue = "2")).toArray
  // Jar containing FakeClass1 and FakeClass2 (which extends FakeClass3, found
  // only in the parent jar), all with toString "1".
  val urls = List(TestUtils.createJarWithClasses(
    classNames = Seq("FakeClass1"),
    classNamesWithBase = Seq(("FakeClass2", "FakeClass3")), // FakeClass3 is in parent
    toStringValue = "1",
    classpathUrls = urls2)).toArray
  // Jars holding resource files for the getResources delegation tests.
  val fileUrlsChild = List(TestUtils.createJarWithFiles(Map(
    "resource1" -> "resource1Contents-child",
    "resource2" -> "resource2Contents"))).toArray
  val fileUrlsParent = List(TestUtils.createJarWithFiles(Map(
    "resource1" -> "resource1Contents-parent"))).toArray

  test("child first") {
    // Child-first loading must pick the child's copy ("1") even though the
    // parent also has FakeClass2, and must return the same Class on reload.
    val parentLoader = new URLClassLoader(urls2, null)
    val classLoader = new ChildFirstURLClassLoader(urls, parentLoader)
    try {
      val fakeClass = classLoader.loadClass("FakeClass2").getConstructor().newInstance()
      assert(fakeClass.toString === "1")
      val fakeClass2 = classLoader.loadClass("FakeClass2").getConstructor().newInstance()
      assert(fakeClass.getClass === fakeClass2.getClass)
    } finally {
      classLoader.close()
      parentLoader.close()
    }
  }

  test("parent first") {
    // Default (parent-first) delegation must pick the parent's copy ("2").
    val parentLoader = new URLClassLoader(urls2, null)
    val classLoader = new MutableURLClassLoader(urls, parentLoader)
    try {
      val fakeClass = classLoader.loadClass("FakeClass1").getConstructor().newInstance()
      assert(fakeClass.toString === "2")
      val fakeClass2 = classLoader.loadClass("FakeClass1").getConstructor().newInstance()
      assert(fakeClass.getClass === fakeClass2.getClass)
    } finally {
      classLoader.close()
      parentLoader.close()
    }
  }

  test("child first can fall back") {
    // FakeClass3 exists only in the parent jar, so child-first must fall back
    // to the parent and observe its toString ("2").
    val parentLoader = new URLClassLoader(urls2, null)
    val classLoader = new ChildFirstURLClassLoader(urls, parentLoader)
    try {
      val fakeClass = classLoader.loadClass("FakeClass3").getConstructor().newInstance()
      assert(fakeClass.toString === "2")
    } finally {
      classLoader.close()
      parentLoader.close()
    }
  }

  test("child first can fail") {
    // A class missing from both loaders must surface ClassNotFoundException.
    val parentLoader = new URLClassLoader(urls2, null)
    val classLoader = new ChildFirstURLClassLoader(urls, parentLoader)
    try {
      intercept[ClassNotFoundException] {
        classLoader.loadClass("FakeClassDoesNotExist").getConstructor().newInstance()
      }
    } finally {
      classLoader.close()
      parentLoader.close()
    }
  }

  test("default JDK classloader get resources") {
    // A plain URLClassLoader aggregates resources from parent and child:
    // resource1 exists in both jars, resource2 only in the child.
    val parentLoader = new URLClassLoader(fileUrlsParent, null)
    val classLoader = new URLClassLoader(fileUrlsChild, parentLoader)
    try {
      assert(classLoader.getResources("resource1").asScala.size === 2)
      assert(classLoader.getResources("resource2").asScala.size === 1)
    } finally {
      classLoader.close()
      parentLoader.close()
    }
  }

  test("parent first get resources") {
    val parentLoader = new URLClassLoader(fileUrlsParent, null)
    val classLoader = new MutableURLClassLoader(fileUrlsChild, parentLoader)
    try {
      assert(classLoader.getResources("resource1").asScala.size === 2)
      assert(classLoader.getResources("resource2").asScala.size === 1)
    } finally {
      classLoader.close()
      parentLoader.close()
    }
  }

  test("child first get resources") {
    // Child-first must return duplicated resources with the child's copy
    // ordered before the parent's.
    val parentLoader = new URLClassLoader(fileUrlsParent, null)
    val classLoader = new ChildFirstURLClassLoader(fileUrlsChild, parentLoader)
    try {
      val res1 = classLoader.getResources("resource1").asScala.toList
      assert(res1.size === 2)
      assert(classLoader.getResources("resource2").asScala.size === 1)
      // Kept on one statement: splitting `inOrderOnly` from its argument list
      // across a newline is parse-fragile in Scala 2.
      val contents = res1.map(scala.io.Source.fromURL(_).mkString)
      contents should contain inOrderOnly ("resource1Contents-child", "resource1Contents-parent")
    } finally {
      classLoader.close()
      parentLoader.close()
    }
  }

  test("driver sets context class loader in local mode") {
    // Test the case where the driver program sets a context classloader and then runs a job
    // in local mode. This is what happens when ./spark-submit is called with "local" as the
    // master.
    val original = Thread.currentThread().getContextClassLoader
    val className = "ClassForDriverTest"
    val jar = TestUtils.createJarWithClasses(Seq(className))
    val contextLoader = new URLClassLoader(Array(jar), Utils.getContextOrSparkClassLoader)
    Thread.currentThread().setContextClassLoader(contextLoader)
    val sc = new SparkContext("local", "driverLoaderTest")
    try {
      sc.makeRDD(1 to 5, 2).mapPartitions { x =>
        val loader = Thread.currentThread().getContextClassLoader
        // scalastyle:off classforname
        Class.forName(className, true, loader).getConstructor().newInstance()
        // scalastyle:on classforname
        Seq().iterator
      }.count()
    } catch {
      case e: SparkException if e.getMessage.contains("ClassNotFoundException") =>
        fail("Local executor could not find class", e)
      case t: Throwable => fail("Unexpected exception ", t)
    } finally {
      // Always stop the context and restore the original classloader so a
      // failure here cannot poison subsequent tests in the same JVM.
      sc.stop()
      Thread.currentThread().setContextClassLoader(original)
    }
  }
}
| WindCanDie/spark | core/src/test/scala/org/apache/spark/util/MutableURLClassLoaderSuite.scala | Scala | apache-2.0 | 6,093 |
package debop4s.data.slick.northwind.model
import java.sql.Blob
import debop4s.data.slick.model.{IntEntity, StringEntity}
import org.joda.time.DateTime
/** Value object grouping the postal-address columns shared by several Northwind
  * entities (Customer, Employee, Order, Supplier). All fields are optional
  * because the corresponding database columns are nullable. */
case class AddressComponent(address: Option[String] = None,
                            city: Option[String] = None,
                            region: Option[String] = None,
                            postalCode: Option[String] = None,
                            country: Option[String] = None)
/** Product category; `picture` holds the raw image blob from the database. */
case class Category(name: String,
                    description: Option[String] = None,
                    picture: Option[Blob] = None,
                    var id: Option[Int]) extends IntEntity
// Junction row of the Customer <-> CustomerDemographic many-to-many link table.
case class CustomerCustomerDemo(id: String, typeId: String)
// Customer demographic/segment descriptor keyed by `typeId`.
case class CustomerDemographic(typeId: String, desc: Option[String] = None)
/** Northwind customer; keyed by a string id (see `StringEntity`). */
case class Customer(companyName: String,
                    contactName: Option[String] = None,
                    contactTitle: Option[String] = None,
                    address: Option[AddressComponent] = None,
                    phone: Option[String] = None,
                    fax: Option[String] = None,
                    var id: Option[String]) extends StringEntity
/** Northwind employee.
  *
  * NOTE(review): the field name `titleOfCoutesy` is a typo of "titleOfCourtesy";
  * renaming it would break callers and the DB column mapping, so it is kept as-is.
  * `reportsTo` is the id of the manager employee, if any.
  */
case class Employee(lastname: String,
                    firstname: String,
                    title: Option[String] = None,
                    titleOfCoutesy: Option[String] = None,
                    birthDate: Option[DateTime] = None,
                    hireDate: Option[DateTime] = None,
                    address: Option[AddressComponent] = None,
                    homePhone: Option[String] = None,
                    extension: Option[String] = None,
                    photo: Option[Blob] = None,
                    notes: String,
                    reportsTo: Option[Int] = None,
                    photoPath: Option[String] = None,
                    salary: Option[Float] = None,
                    var id: Option[Int]) extends IntEntity {
  // Display name "firstname lastname"; UpperCamelCase kept for source compatibility.
  lazy val Salesperson = firstname + " " + lastname
}
/** One line item of an order.
  *
  * @param unitPrice price per unit at the time the order was placed
  * @param quantity  number of units ordered (defaults to 1)
  * @param discount  fractional discount in [0, 1], e.g. 0.15 for 15%
  */
case class OrderDetail(orderId: Int,
                       productId: Int,
                       unitPrice: BigDecimal = 0.0000,
                       quantity: Short = 1,
                       discount: Double = 0.0) {
  /** Line total after discount, rounded to two decimal places (cents).
    *
    * The Northwind "Order Details Extended" view rounds the total to cents
    * (`CONVERT(money, price * 100) / 100`); the previous port divided and
    * re-multiplied by 100 without rounding, which was a no-op. `math.round`
    * restores the intended cent precision.
    */
  lazy val extendedPrice: Double =
    math.round(unitPrice.toDouble * quantity.toDouble * (1.0 - discount) * 100.0) / 100.0
}
/** Order header; line items live in [[OrderDetail]]. `freight` is the shipping cost. */
case class Order(customerId: Option[String] = None,
                 employeeId: Option[Int] = None,
                 orderDate: Option[DateTime] = None,
                 requiredDate: Option[DateTime] = None,
                 shippedDate: Option[DateTime] = None,
                 shipVia: Option[Int] = None,
                 freight: BigDecimal = 0.0,
                 shipName: Option[String] = None,
                 shipAddress: Option[AddressComponent] = None,
                 var id: Option[Int]) extends IntEntity
/** Product in the catalog; `discontinued` products should no longer be ordered. */
case class Product(name: String,
                   supplierId: Option[Int] = None,
                   categoryId: Option[Int] = None,
                   quantityPerUnit: Option[String] = None,
                   unitPrice: Option[BigDecimal] = None,
                   unitsInStock: Option[Short] = None,
                   unitsOnOrder: Option[Short] = None,
                   reorderLevel: Option[Short] = None,
                   discontinued: Boolean = false,
                   var id: Option[Int]) extends IntEntity
// Sales region; referenced by Territory.regionId.
case class Region(id: Int, description: String)
/** Shipping company used by orders (Order.shipVia). */
case class Shipper(companyName: String,
                   phone: Option[String] = None,
                   var id: Option[Int]) extends IntEntity
/** Product supplier; `homepage` is free-form text from the original schema. */
case class Supplier(companyName: String,
                    contactName: Option[String] = None,
                    contactTitle: Option[String] = None,
                    address: Option[AddressComponent] = None,
                    phone: Option[String] = None,
                    fax: Option[String] = None,
                    homepage: Option[String] = None,
                    var id: Option[Int]) extends IntEntity
/** Sales territory within a [[Region]]; keyed by a string id. */
case class Territory(description: String,
                     regionId: Int,
                     var id: Option[String]) extends StringEntity
/*
* Copyright (c) 2016 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
import org.junit.Assert._
import org.junit.Test
/** Compile-time tests for `Nat` ranges.
  *
  * Each test materialises a bounded range of type-level naturals as an HList
  * and checks both its static type and runtime value via `assertTypedEquals`.
  * Every case is exercised twice: once with the symbolic syntax
  * (`*--*` closed, `:--:` open, `*--:` / `:--*` half-open) and once with the
  * word syntax (`BoundedRange[Inclusive[_], Exclusive[_]]` etc.).
  * Sections cover degenerate ranges (same endpoint), ascending ranges, and
  * descending ("down to") ranges.
  */
class natranges {
  import nat._
  import shapeless.testutil._
  //
  // Nil ranges: "X to same X"
  @Test
  def smallest_closed_range_symbolic_syntax = {
    import syntax.nat._
    assertTypedEquals[
      _1 :: HNil
    ](
      _1 :: HNil,
      the[_1 *--* _1].apply()
    )
  }
  @Test
  def smallest_closed_range_word_syntax = {
    import ops.nat.BoundedRange
    import BoundedRange.{Inclusive, Exclusive}
    assertTypedEquals[
      _1 :: HNil
    ](
      _1 :: HNil,
      the[BoundedRange[Inclusive[_1], Inclusive[_1]]].apply()
    )
  }
  @Test
  def smallest_open_range_symbolic_syntax = {
    import syntax.nat._
    assertTypedEquals[
      HNil
    ](
      HNil,
      the[_1 :--: _1].apply()
    )
  }
  @Test
  def smallest_open_range_word_syntax = {
    import ops.nat.BoundedRange
    import BoundedRange.{Inclusive, Exclusive}
    assertTypedEquals[
      HNil
    ](
      HNil,
      the[BoundedRange[Exclusive[_1], Exclusive[_1]]].apply()
    )
  }
  @Test
  def smallest_leftOpenRightClosed_range_symbolic_syntax = {
    import syntax.nat._
    assertTypedEquals[
      _1 :: HNil
    ](
      _1 :: HNil,
      the[_1 :--* _1].apply()
    )
  }
  @Test
  def smallest_leftOpenRightClosed_range_word_syntax = {
    import ops.nat.BoundedRange
    import BoundedRange.{Inclusive, Exclusive}
    assertTypedEquals[
      _1 :: HNil
    ](
      _1 :: HNil,
      the[BoundedRange[Exclusive[_1], Inclusive[_1]]].apply()
    )
  }
  @Test
  def smallest_leftClosedRightOpen_range_symbolic_syntax = {
    import syntax.nat._
    assertTypedEquals[
      _1 :: HNil
    ](
      _1 :: HNil,
      the[_1 *--: _1].apply()
    )
  }
  @Test
  def smallest_leftClosedRightOpen_range_word_syntax = {
    import ops.nat.BoundedRange
    import BoundedRange.{Inclusive, Exclusive}
    assertTypedEquals[
      _1 :: HNil
    ](
      _1 :: HNil,
      the[BoundedRange[Inclusive[_1], Exclusive[_1]]].apply()
    )
  }
  //
  // regular ranges: "X to Y"
  @Test
  def larger_closed_range_symbolic_syntax = {
    import syntax.nat._
    assertTypedEquals[
      _1 :: _2 :: _3 :: _4 :: HNil
    ](
      _1 :: _2 :: _3 :: _4 :: HNil,
      the[_1 *--* _4].apply()
    )
  }
  @Test
  def larger_closed_range_word_syntax = {
    import ops.nat.BoundedRange
    import BoundedRange.{Inclusive, Exclusive}
    assertTypedEquals[
      _1 :: _2 :: _3 :: _4 :: HNil
    ](
      _1 :: _2 :: _3 :: _4 :: HNil,
      the[BoundedRange[Inclusive[_1], Inclusive[_4]]].apply()
    )
  }
  @Test
  def larger_open_range_symbolic_syntax = {
    import syntax.nat._
    assertTypedEquals[
      _2 :: _3 :: HNil
    ](
      _2 :: _3 :: HNil,
      the[_1 :--: _4].apply()
    )
  }
  @Test
  def larger_open_range_word_syntax = {
    import ops.nat.BoundedRange
    import BoundedRange.{Inclusive, Exclusive}
    assertTypedEquals[
      _2 :: _3 :: HNil
    ](
      _2 :: _3 :: HNil,
      the[BoundedRange[Exclusive[_1], Exclusive[_4]]].apply()
    )
  }
  @Test
  def larger_leftClosedRightOpen_range_symbolic_syntax = {
    import syntax.nat._
    assertTypedEquals[
      _1 :: _2 :: _3 :: HNil
    ](
      _1 :: _2 :: _3 :: HNil,
      the[_1 *--: _4].apply()
    )
  }
  @Test
  def larger_leftClosedRightOpen_range_word_syntax = {
    import ops.nat.BoundedRange
    import BoundedRange.{Inclusive, Exclusive}
    assertTypedEquals[
      _1 :: _2 :: _3 :: HNil
    ](
      _1 :: _2 :: _3 :: HNil,
      the[BoundedRange[Inclusive[_1], Exclusive[_4]]].apply()
    )
  }
  @Test
  def larger_leftOpenRightClosed_range_symbolic_syntax = {
    import syntax.nat._
    assertTypedEquals[
      _2 :: _3 :: _4 :: HNil
    ](
      _2 :: _3 :: _4 :: HNil,
      the[_1 :--* _4].apply()
    )
  }
  @Test
  def larger_leftOpenRightClosed_range_word_syntax = {
    import ops.nat.BoundedRange
    import BoundedRange.{Inclusive, Exclusive}
    assertTypedEquals[
      _2 :: _3 :: _4 :: HNil
    ](
      _2 :: _3 :: _4 :: HNil,
      the[BoundedRange[Exclusive[_1], Inclusive[_4]]].apply()
    )
  }
  //
  // Reversed ranges: "X down to Y"
  @Test
  def reversed_closed_range_symbolic_syntax = {
    import syntax.nat._
    assertTypedEquals[
      _4 :: _3 :: _2 :: _1 :: HNil
    ](
      _4 :: _3 :: _2 :: _1 :: HNil,
      the[_4 *--* _1].apply()
    )
  }
  @Test
  def reversed_closed_range_word_syntax = {
    import ops.nat.BoundedRange
    import BoundedRange.{Inclusive, Exclusive}
    assertTypedEquals[
      _4 :: _3 :: _2 :: _1 :: HNil
    ](
      _4 :: _3 :: _2 :: _1 :: HNil,
      the[BoundedRange[Inclusive[_4], Inclusive[_1]]].apply()
    )
  }
  @Test
  def reversed_open_range_symbolic_syntax = {
    import syntax.nat._
    assertTypedEquals[
      _3 :: _2 :: HNil
    ](
      _3 :: _2 :: HNil,
      the[_4 :--: _1].apply()
    )
  }
  @Test
  def reversed_open_range_word_syntax = {
    import ops.nat.BoundedRange
    import BoundedRange.{Inclusive, Exclusive}
    assertTypedEquals[
      _3 :: _2 :: HNil
    ](
      _3 :: _2 :: HNil,
      the[BoundedRange[Exclusive[_4], Exclusive[_1]]].apply()
    )
  }
  @Test
  def reversed_leftClosedRightOpen_range_symbolic_syntax = {
    import syntax.nat._
    assertTypedEquals[
      _4 :: _3 :: _2 :: HNil
    ](
      _4 :: _3 :: _2 :: HNil,
      the[_4 *--: _1].apply()
    )
  }
  @Test
  def reversed_leftClosedRightOpen_range_word_syntax = {
    import ops.nat.BoundedRange
    import BoundedRange.{Inclusive, Exclusive}
    assertTypedEquals[
      _4 :: _3 :: _2 :: HNil
    ](
      _4 :: _3 :: _2 :: HNil,
      the[BoundedRange[Inclusive[_4], Exclusive[_1]]].apply()
    )
  }
  @Test
  def reversed_leftOpenRightClosed_range_symbolic_syntax = {
    import syntax.nat._
    assertTypedEquals[
      _3 :: _2 :: _1 :: HNil
    ](
      _3 :: _2 :: _1 :: HNil,
      the[_4 :--* _1].apply()
    )
  }
  @Test
  def reversed_leftOpenRightClosed_range_word_syntax = {
    import ops.nat.BoundedRange
    import BoundedRange.{Inclusive, Exclusive}
    assertTypedEquals[
      _3 :: _2 :: _1 :: HNil
    ](
      _3 :: _2 :: _1 :: HNil,
      the[BoundedRange[Exclusive[_4], Inclusive[_1]]].apply()
    )
  }
}
| rorygraves/perf_tester | corpus/shapeless/src/test/scala/shapeless/natranges.scala | Scala | apache-2.0 | 6,800 |
package kamkor
class Greeter(val greetMsg: String) {
  /** Returns the configured greeting followed by the addressee, space-separated. */
  def greet(to: String): String = {
    val parts = Seq(greetMsg, to)
    parts.mkString(" ")
  }
}
| kamkor/scala-seed | src/main/scala/kamkor/Greeter.scala | Scala | mit | 109 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.dataSet
import org.apache.calcite.plan.volcano.RelSubset
import org.apache.calcite.plan.{Convention, RelOptRule, RelOptRuleCall, RelTraitSet}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.calcite.rel.logical.{LogicalFilter, LogicalCorrelate, LogicalTableFunctionScan}
import org.apache.calcite.rex.RexNode
import org.apache.flink.table.plan.nodes.dataset.{DataSetConvention, DataSetCorrelate}
/**
* Rule to convert a LogicalCorrelate into a DataSetCorrelate.
*/
/** Converts a [[LogicalCorrelate]] whose right input is a table function scan
  * (optionally topped by a pushed-down filter) into a [[DataSetCorrelate]]. */
class DataSetCorrelateRule
  extends ConverterRule(
    classOf[LogicalCorrelate],
    Convention.NONE,
    DataSetConvention.INSTANCE,
    "DataSetCorrelateRule") {

  /** Fires only when the correlate's right input is a table function scan,
    * possibly with a single filter pushed on top of it. */
  override def matches(call: RelOptRuleCall): Boolean = {
    val join: LogicalCorrelate = call.rel(0).asInstanceOf[LogicalCorrelate]
    val right = join.getRight.asInstanceOf[RelSubset].getOriginal
    right match {
      // right node is a table function (binder unused, so use a type-only pattern)
      case _: LogicalTableFunctionScan => true
      // a filter is pushed above the table function
      case filter: LogicalFilter =>
        filter
          .getInput.asInstanceOf[RelSubset]
          .getOriginal
          .isInstanceOf[LogicalTableFunctionScan]
      case _ => false
    }
  }

  override def convert(rel: RelNode): RelNode = {
    val join: LogicalCorrelate = rel.asInstanceOf[LogicalCorrelate]
    val traitSet: RelTraitSet = rel.getTraitSet.replace(DataSetConvention.INSTANCE)
    val convInput: RelNode = RelOptRule.convert(join.getInput(0), DataSetConvention.INSTANCE)
    val right: RelNode = join.getInput(1)

    // Walks down the right input, carrying along an optional pushed-down filter
    // condition, until the table function scan is reached.
    def convertToCorrelate(relNode: RelNode, condition: Option[RexNode]): DataSetCorrelate = {
      relNode match {
        case rel: RelSubset =>
          convertToCorrelate(rel.getRelList.get(0), condition)
        case filter: LogicalFilter =>
          convertToCorrelate(
            filter.getInput.asInstanceOf[RelSubset].getOriginal,
            Some(filter.getCondition))
        case scan: LogicalTableFunctionScan =>
          new DataSetCorrelate(
            rel.getCluster,
            traitSet,
            convInput,
            scan,
            condition,
            rel.getRowType,
            join.getRowType,
            join.getJoinType,
            description)
      }
    }
    convertToCorrelate(right, None)
  }
}
/** Singleton instance used when registering the rule with the planner. */
object DataSetCorrelateRule {
  val INSTANCE: RelOptRule = new DataSetCorrelateRule
}
| DieBauer/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/rules/dataSet/DataSetCorrelateRule.scala | Scala | apache-2.0 | 3,377 |
/*
* Copyright (C) 2017
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package code
package model
import net.liftweb.common._
import net.liftweb.mapper._
import net.liftweb.util._
import net.liftweb.http._
import Helpers._
import code.mapper.MappedList
import com.roundeights.hasher.Implicits._
/** Lift mapper entity that remembers, per anonymous browser (identified by a
  * hashed cookie value plus IP and user agent), the user's sorted topic list. */
class SortedTopicCookie extends LongKeyedMapper[SortedTopicCookie] with IdPK {
  def getSingleton = SortedTopicCookie
  // 64-char hash identifying the browser cookie.
  object hash extends MappedString(this, 64) {
    override val defaultValue = ""
  }
  // IP address and user agent captured for the session.
  object ipaccess extends MappedString(this, 25)
  object useragent extends MappedString(this, 2000)
  // Serialized list of topic ids; "[]" means no topics yet.
  object topics extends MappedList(this, 2000) {
    override def defaultValue = "[]"
  }
}
/** Meta mapper / companion: table name plus lookup helper. */
object SortedTopicCookie extends SortedTopicCookie with LongKeyedMetaMapper[SortedTopicCookie] {
  override def dbTableName = "sortedtopiccookie"

  /** Returns the persisted cookie whose hash equals `i_hash`, or a fresh,
    * unsaved instance when none exists.
    *
    * NOTE(review): the newly created instance is given a random CRC32 hash
    * rather than `i_hash` — confirm this is intentional before changing.
    */
  def findOrCreateCookie(i_hash: String) = SortedTopicCookie.find(By(SortedTopicCookie.hash, i_hash)) match {
    case Full(cookie) => cookie
    case _ => SortedTopicCookie.create.hash(randomString(SortedTopicCookie.hash.maxLen).crc32)
  }
}
package inloopio.math.vector
/**
 * A single element of a vector, pairing a position with its value.
 *
 * Carrying the `index` alongside the value makes it easy to store sparse
 * vectors. Sparse storage pays off searching performance (consider
 * set(index, value) / get(index)), so DefaultVec remains the better default.
 *
 * @param index position of the element within the vector
 * @param value value stored at that position
 * @author Caoyuan Deng
 */
final case class VecItem(index: Int, value: Double) extends Serializable
| dcaoyuan/inloopio-libs | inloopio-math/src/main/scala/inloopio/math/vector/VecItem.scala | Scala | bsd-3-clause | 381 |
package com.arcusys.valamis.lesson.scorm.model.manifest
/**
 * A rule executed each time an SCO activity terminates.
 * @param conditions Set of conditions that define whether the action will be applied or not
 * @param action An action to perform if conditions evaluate to true
 */
class PostConditionRule(conditions: RuleConditionSet, val action: PostConditionAction.Value) extends ConditionRule(conditions)
package benchmark.oneServer.multipleClients
import benchmark.utils.Launcher
import benchmark.utils.writer.TransactionLifeCycleWriter
import scala.collection.mutable.ArrayBuffer
/** Benchmark: one server, several clients, each driving the full transaction
  * life cycle against a shared stream and dumping its timings to a CSV file. */
object MultipleTransactionLifeCyclesTest extends Launcher {
  override val clients = 4
  override val streamName = "stream"

  // Work size per client and payload size per transaction.
  private val txnCount = 1000000
  private val dataSize = 1

  private val clientThreads = ArrayBuffer[Thread]()
  private val rand = new scala.util.Random()

  def main(args: Array[String]): Unit = {
    // launch()
    val streamID = createStream(streamName, clients)
    launchClients(streamID)
    System.exit(0)
  }

  /** Starts one writer thread per client and blocks until they all finish. */
  override def launchClients(streamID: Int): Unit = {
    (1 to clients).foreach { clientId =>
      val thread = new Thread(new Runnable {
        override def run(): Unit = {
          // Random prefix keeps successive runs from clobbering each other's CSVs.
          val filename = rand.nextInt(100) + s"_${txnCount}TransactionLifeCycleWriterOSMC.csv"
          new TransactionLifeCycleWriter(streamID, clientId).run(txnCount, dataSize, filename)
        }
      })
      clientThreads += thread
    }
    clientThreads.foreach(_.start())
    clientThreads.foreach(_.join())
  }
}
| bwsw/tstreams-transaction-server | src/test/scala/benchmark/oneServer/multipleClients/MultipleTransactionLifeCyclesTest.scala | Scala | apache-2.0 | 1,105 |
package memcached
/** Signals that a new request was issued on a client that still has requests
  * in flight; callers must wait for those to finish before reusing the client. */
class BusyClientException
    extends IllegalStateException("This client already has pending requests, you have to wait until they are finished")
| daewon/til | scala/netty/src/main/scala/memcached/BusyClientException.scala | Scala | mpl-2.0 | 163 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.io.File
import java.util.{Optional, Properties}
import java.util.concurrent.atomic.AtomicBoolean
import kafka.cluster.Partition
import kafka.log.{Log, LogManager, LogOffsetSnapshot}
import kafka.utils._
import kafka.zk.KafkaZkClient
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.record.{CompressionType, MemoryRecords, SimpleRecord}
import org.apache.kafka.common.requests.FetchRequest.PartitionData
import org.easymock.EasyMock
import EasyMock._
import org.junit.Assert._
import org.junit.{After, Test}
import scala.jdk.CollectionConverters._
/** Tests that replication quotas are honoured when reading from the local log:
  * throttled out-of-sync followers get no data, while in-sync followers are
  * always served. The log and log manager are EasyMock mocks wired so that a
  * read of at least 1 byte yields one record and a 0-byte read yields none. */
class ReplicaManagerQuotasTest {
  val configs = TestUtils.createBrokerConfigs(2, TestUtils.MockZkConnect).map(KafkaConfig.fromProps(_, new Properties()))
  val time = new MockTime
  val metrics = new Metrics
  val record = new SimpleRecord("some-data-in-a-message".getBytes())
  val topicPartition1 = new TopicPartition("test-topic", 1)
  val topicPartition2 = new TopicPartition("test-topic", 2)
  val fetchInfo = Seq(
    topicPartition1 -> new PartitionData(0, 0, 100, Optional.empty()),
    topicPartition2 -> new PartitionData(0, 0, 100, Optional.empty()))
  var replicaManager: ReplicaManager = _
  // First partition read is under quota, the second exceeds it: only the first
  // partition's records should be returned.
  @Test
  def shouldExcludeSubsequentThrottledPartitions(): Unit = {
    setUpMocks(fetchInfo)
    val followerReplicaId = configs.last.brokerId
    val quota = mockQuota(1000000)
    expect(quota.isQuotaExceeded).andReturn(false).once()
    expect(quota.isQuotaExceeded).andReturn(true).once()
    replay(quota)
    val fetch = replicaManager.readFromLocalLog(
      replicaId = followerReplicaId,
      fetchOnlyFromLeader = true,
      fetchIsolation = FetchHighWatermark,
      fetchMaxBytes = Int.MaxValue,
      hardMaxBytesLimit = false,
      readPartitionInfo = fetchInfo,
      quota = quota,
      clientMetadata = None)
    assertEquals("Given two partitions, with only one throttled, we should get the first", 1,
      fetch.find(_._1 == topicPartition1).get._2.info.records.batches.asScala.size)
    assertEquals("But we shouldn't get the second", 0,
      fetch.find(_._1 == topicPartition2).get._2.info.records.batches.asScala.size)
  }
  // Quota exceeded on both reads: neither partition returns records.
  @Test
  def shouldGetNoMessagesIfQuotasExceededOnSubsequentPartitions(): Unit = {
    setUpMocks(fetchInfo)
    val followerReplicaId = configs.last.brokerId
    val quota = mockQuota(1000000)
    expect(quota.isQuotaExceeded).andReturn(true).once()
    expect(quota.isQuotaExceeded).andReturn(true).once()
    replay(quota)
    val fetch = replicaManager.readFromLocalLog(
      replicaId = followerReplicaId,
      fetchOnlyFromLeader = true,
      fetchIsolation = FetchHighWatermark,
      fetchMaxBytes = Int.MaxValue,
      hardMaxBytesLimit = false,
      readPartitionInfo = fetchInfo,
      quota = quota,
      clientMetadata = None)
    assertEquals("Given two partitions, with both throttled, we should get no messages", 0,
      fetch.find(_._1 == topicPartition1).get._2.info.records.batches.asScala.size)
    assertEquals("Given two partitions, with both throttled, we should get no messages", 0,
      fetch.find(_._1 == topicPartition2).get._2.info.records.batches.asScala.size)
  }
  // No quota violations: both partitions return records.
  @Test
  def shouldGetBothMessagesIfQuotasAllow(): Unit = {
    setUpMocks(fetchInfo)
    val followerReplicaId = configs.last.brokerId
    val quota = mockQuota(1000000)
    expect(quota.isQuotaExceeded).andReturn(false).once()
    expect(quota.isQuotaExceeded).andReturn(false).once()
    replay(quota)
    val fetch = replicaManager.readFromLocalLog(
      replicaId = followerReplicaId,
      fetchOnlyFromLeader = true,
      fetchIsolation = FetchHighWatermark,
      fetchMaxBytes = Int.MaxValue,
      hardMaxBytesLimit = false,
      readPartitionInfo = fetchInfo,
      quota = quota,
      clientMetadata = None)
    assertEquals("Given two partitions, with both non-throttled, we should get both messages", 1,
      fetch.find(_._1 == topicPartition1).get._2.info.records.batches.asScala.size)
    assertEquals("Given two partitions, with both non-throttled, we should get both messages", 1,
      fetch.find(_._1 == topicPartition2).get._2.info.records.batches.asScala.size)
  }
  // An in-sync follower must be served even when its partition is throttled.
  @Test
  def shouldIncludeInSyncThrottledReplicas(): Unit = {
    setUpMocks(fetchInfo, bothReplicasInSync = true)
    val followerReplicaId = configs.last.brokerId
    val quota = mockQuota(1000000)
    expect(quota.isQuotaExceeded).andReturn(false).once()
    expect(quota.isQuotaExceeded).andReturn(true).once()
    replay(quota)
    val fetch = replicaManager.readFromLocalLog(
      replicaId = followerReplicaId,
      fetchOnlyFromLeader = true,
      fetchIsolation = FetchHighWatermark,
      fetchMaxBytes = Int.MaxValue,
      hardMaxBytesLimit = false,
      readPartitionInfo = fetchInfo,
      quota = quota,
      clientMetadata = None)
    assertEquals("Given two partitions, with only one throttled, we should get the first", 1,
      fetch.find(_._1 == topicPartition1).get._2.info.records.batches.asScala.size)
    assertEquals("But we should get the second too since it's throttled but in sync", 1,
      fetch.find(_._1 == topicPartition2).get._2.info.records.batches.asScala.size)
  }
  @Test
  def testCompleteInDelayedFetchWithReplicaThrottling(): Unit = {
    // Set up DelayedFetch where there is data to return to a follower replica, either in-sync or out of sync
    def setupDelayedFetch(isReplicaInSync: Boolean): DelayedFetch = {
      val endOffsetMetadata = LogOffsetMetadata(messageOffset = 100L, segmentBaseOffset = 0L, relativePositionInSegment = 500)
      val partition: Partition = EasyMock.createMock(classOf[Partition])
      val offsetSnapshot = LogOffsetSnapshot(
        logStartOffset = 0L,
        logEndOffset = endOffsetMetadata,
        highWatermark = endOffsetMetadata,
        lastStableOffset = endOffsetMetadata)
      EasyMock.expect(partition.fetchOffsetSnapshot(Optional.empty(), fetchOnlyFromLeader = true))
        .andReturn(offsetSnapshot)
      val replicaManager: ReplicaManager = EasyMock.createMock(classOf[ReplicaManager])
      EasyMock.expect(replicaManager.getPartitionOrException(
        EasyMock.anyObject[TopicPartition], EasyMock.anyBoolean()))
        .andReturn(partition).anyTimes()
      // Leader-side throttling applies exactly when the replica is NOT in sync.
      EasyMock.expect(replicaManager.shouldLeaderThrottle(EasyMock.anyObject[ReplicaQuota], EasyMock.anyObject[Partition], EasyMock.anyObject[Int]))
        .andReturn(!isReplicaInSync).anyTimes()
      EasyMock.expect(partition.getReplica(1)).andReturn(None)
      EasyMock.replay(replicaManager, partition)
      val tp = new TopicPartition("t1", 0)
      val fetchPartitionStatus = FetchPartitionStatus(LogOffsetMetadata(messageOffset = 50L, segmentBaseOffset = 0L,
        relativePositionInSegment = 250), new PartitionData(50, 0, 1, Optional.empty()))
      val fetchMetadata = FetchMetadata(fetchMinBytes = 1,
        fetchMaxBytes = 1000,
        hardMaxBytesLimit = true,
        fetchOnlyLeader = true,
        fetchIsolation = FetchLogEnd,
        isFromFollower = true,
        replicaId = 1,
        fetchPartitionStatus = List((tp, fetchPartitionStatus))
      )
      new DelayedFetch(delayMs = 600, fetchMetadata = fetchMetadata, replicaManager = replicaManager,
        quota = null, clientMetadata = None, responseCallback = null) {
        override def forceComplete(): Boolean = true
      }
    }
    assertTrue("In sync replica should complete", setupDelayedFetch(isReplicaInSync = true).tryComplete())
    assertFalse("Out of sync replica should not complete", setupDelayedFetch(isReplicaInSync = false).tryComplete())
  }
  /** Builds a ReplicaManager backed by a mocked log/log manager and registers
    * the two test partitions, optionally marking the follower as in-sync. */
  def setUpMocks(fetchInfo: Seq[(TopicPartition, PartitionData)], record: SimpleRecord = this.record,
                 bothReplicasInSync: Boolean = false): Unit = {
    val zkClient: KafkaZkClient = EasyMock.createMock(classOf[KafkaZkClient])
    val scheduler: KafkaScheduler = createNiceMock(classOf[KafkaScheduler])
    //Create log which handles both a regular read and a 0 bytes read
    val log: Log = createNiceMock(classOf[Log])
    expect(log.logStartOffset).andReturn(0L).anyTimes()
    expect(log.logEndOffset).andReturn(20L).anyTimes()
    expect(log.highWatermark).andReturn(5).anyTimes()
    expect(log.lastStableOffset).andReturn(5).anyTimes()
    expect(log.logEndOffsetMetadata).andReturn(LogOffsetMetadata(20L)).anyTimes()
    //if we ask for len 1 return a message
    expect(log.read(anyObject(),
      maxLength = geq(1),
      isolation = anyObject(),
      minOneMessage = anyBoolean())).andReturn(
      FetchDataInfo(
        LogOffsetMetadata(0L, 0L, 0),
        MemoryRecords.withRecords(CompressionType.NONE, record)
      )).anyTimes()
    //if we ask for len = 0, return 0 messages
    expect(log.read(anyObject(),
      maxLength = EasyMock.eq(0),
      isolation = anyObject(),
      minOneMessage = anyBoolean())).andReturn(
      FetchDataInfo(
        LogOffsetMetadata(0L, 0L, 0),
        MemoryRecords.EMPTY
      )).anyTimes()
    replay(log)
    //Create log manager
    val logManager: LogManager = createMock(classOf[LogManager])
    //Return the same log for each partition as it doesn't matter
    expect(logManager.getLog(anyObject(), anyBoolean())).andReturn(Some(log)).anyTimes()
    expect(logManager.liveLogDirs).andReturn(Array.empty[File]).anyTimes()
    replay(logManager)
    val leaderBrokerId = configs.head.brokerId
    replicaManager = new ReplicaManager(configs.head, metrics, time, zkClient, scheduler, logManager,
      new AtomicBoolean(false), QuotaFactory.instantiate(configs.head, metrics, time, ""),
      new BrokerTopicStats, new MetadataCache(leaderBrokerId), new LogDirFailureChannel(configs.head.logDirs.size))
    //create the two replicas
    for ((p, _) <- fetchInfo) {
      val partition = replicaManager.createPartition(p)
      log.updateHighWatermark(5)
      partition.leaderReplicaIdOpt = Some(leaderBrokerId)
      partition.setLog(log, isFutureLog = false)
      partition.updateAssignmentAndIsr(
        assignment = Seq(leaderBrokerId, configs.last.brokerId),
        isr = if (bothReplicasInSync) Set(leaderBrokerId, configs.last.brokerId) else Set(leaderBrokerId),
        addingReplicas = Seq.empty,
        removingReplicas = Seq.empty
      )
    }
  }
  @After
  def tearDown(): Unit = {
    if (replicaManager != null)
      replicaManager.shutdown(false)
    metrics.close()
  }
  // Quota stub that claims every partition is subject to throttling; tests then
  // script isQuotaExceeded per read before calling replay.
  def mockQuota(bound: Long): ReplicaQuota = {
    val quota: ReplicaQuota = createMock(classOf[ReplicaQuota])
    expect(quota.isThrottled(anyObject())).andReturn(true).anyTimes()
    quota
  }
}
| sslavic/kafka | core/src/test/scala/unit/kafka/server/ReplicaManagerQuotasTest.scala | Scala | apache-2.0 | 11,434 |
package zeroformatter
import _root_.scalaz._
/** Formatter instances bridging zeroformatter with scalaz data types
  * (`Id`, `Maybe`, `IList`, `LazyList`). */
package object scalaz {
  // Formatter is invariant in its type parameter, so only xmap is supported.
  implicit val formatterInvariantFunctor: InvariantFunctor[Formatter] = new InvariantFunctor[Formatter] {
    override def xmap[A, B](fa: Formatter[A], f: A => B, g: B => A) = fa.xmap(f, g)
  }
  // Id.Id[T] is just T, so the underlying formatter can be reused directly.
  @inline
  implicit def idFormatter[T](implicit F: Formatter[T]): Formatter[Id.Id[T]] = F
  // Maybe shares the wire format of Option.
  implicit def maybeFormatter[T: Formatter]: Formatter[Maybe[T]] =
    Formatter[Option[T]].xmap(o => Maybe.fromOption(o), _.toOption)
  // IList wire format: Int length prefix (-1 encodes null) followed by the
  // elements serialized back to back.
  implicit def ilistFormatter[A](implicit F: Formatter[A]): Formatter[IList[A]] = new Formatter[IList[A]] {
    override val length = None
    override def serialize(encoder: Encoder, offset: Int, value: IList[A]) =
      if(value == null) encoder.writeInt(offset, -1)
      else {
        val length = value.length
        val byteSize =
          // When the element size is statically known, reserve the whole buffer
          // up front and write the length without a bounds check.
          F.length.fold(encoder.writeInt(offset, length))(
            l => {
              encoder.ensureCapacity(offset, 4 + l * length)
              encoder.writeIntUnsafe(offset, length)
            }
          )
        // Accumulate the running byte size so each element lands after the last.
        value.foldLeft(byteSize){ case (acc, v) =>
          acc + F.serialize(encoder, offset + acc, v)
        }
      }
    override def deserialize(decoder: Decoder) = {
      val length = decoder.readInt()
      if(length == -1) null
      else if(length < -1) throw FormatException(decoder.offset - 4, s"Invalid List length($length).")
      else {
        // Prepend while reading (O(1) per element), then reverse once at the end.
        var list: IList[A] = IList.empty
        var i = 0
        while(i < length) {
          list ::= F.deserialize(decoder)
          i += 1
        }
        list.reverse
      }
    }
  }
  // A LazyList over Id is fully evaluated, hence "eager".
  implicit def eagerListFormatter[A](implicit
    F: Formatter[A]
  ): Formatter[LazyList[Id.Id, A]] =
    LazyList.lazyListFormatter[Id.Id, A](F, F)
}
| pocketberserker/scala-zero-formatter | scalaz/src/main/scala/zeroformatter/scalaz/package.scala | Scala | mit | 1,758 |
import scala.tools.partest.ReplTest
/** Partest REPL check (t6086): a case class defined inside the REPL must be
  * inspectable via `scala.reflect.runtime.universe.typeOf`. The transcript of
  * running `code` is compared against the expected check file. */
object Test extends ReplTest {
  def code = """
    |case class X(s: String)
    |scala.reflect.runtime.universe.typeOf[X]
    |""".stripMargin
}
| yusuke2255/dotty | tests/pending/run/t6086-repl.scala | Scala | bsd-3-clause | 183 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.util
import org.scalatest.exceptions.TestFailedException
import org.apache.spark.SparkFunSuite
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.util.TestingUtils._
class TestingUtilsSuite extends SparkFunSuite {
  test("Comparing doubles using relative error.") {
    // Operator conventions in TestingUtils: ~= / !~= return a Boolean, while
    // ~== / !~== throw TestFailedException when the comparison fails.
    assert(23.1 ~== 23.52 relTol 0.02)
    assert(23.1 ~== 22.74 relTol 0.02)
    assert(23.1 ~= 23.52 relTol 0.02)
    assert(23.1 ~= 22.74 relTol 0.02)
    assert(!(23.1 !~= 23.52 relTol 0.02))
    assert(!(23.1 !~= 22.74 relTol 0.02))
    // Should throw exception with message when test fails.
    intercept[TestFailedException](23.1 !~== 23.52 relTol 0.02)
    intercept[TestFailedException](23.1 !~== 22.74 relTol 0.02)
    intercept[TestFailedException](23.1 ~== 23.63 relTol 0.02)
    intercept[TestFailedException](23.1 ~== 22.34 relTol 0.02)
    assert(23.1 !~== 23.63 relTol 0.02)
    assert(23.1 !~== 22.34 relTol 0.02)
    assert(23.1 !~= 23.63 relTol 0.02)
    assert(23.1 !~= 22.34 relTol 0.02)
    assert(!(23.1 ~= 23.63 relTol 0.02))
    assert(!(23.1 ~= 22.34 relTol 0.02))
    // Comparing against zero should fail the test and throw exception with message
    // saying that the relative error is meaningless in this situation.
    intercept[TestFailedException](0.1 ~== 0.0 relTol 0.032)
    intercept[TestFailedException](0.1 ~= 0.0 relTol 0.032)
    intercept[TestFailedException](0.1 !~== 0.0 relTol 0.032)
    intercept[TestFailedException](0.1 !~= 0.0 relTol 0.032)
    intercept[TestFailedException](0.0 ~== 0.1 relTol 0.032)
    intercept[TestFailedException](0.0 ~= 0.1 relTol 0.032)
    intercept[TestFailedException](0.0 !~== 0.1 relTol 0.032)
    intercept[TestFailedException](0.0 !~= 0.1 relTol 0.032)
    // Comparisons of numbers very close to zero.
    assert(10 * Double.MinPositiveValue ~== 9.5 * Double.MinPositiveValue relTol 0.01)
    assert(10 * Double.MinPositiveValue !~== 11 * Double.MinPositiveValue relTol 0.01)
    assert(-Double.MinPositiveValue ~== 1.18 * -Double.MinPositiveValue relTol 0.012)
    assert(-Double.MinPositiveValue ~== 1.38 * -Double.MinPositiveValue relTol 0.012)
  }
test("Comparing doubles using absolute error.") {
assert(17.8 ~== 17.99 absTol 0.2)
assert(17.8 ~== 17.61 absTol 0.2)
assert(17.8 ~= 17.99 absTol 0.2)
assert(17.8 ~= 17.61 absTol 0.2)
assert(!(17.8 !~= 17.99 absTol 0.2))
assert(!(17.8 !~= 17.61 absTol 0.2))
// Should throw exception with message when test fails.
intercept[TestFailedException](17.8 !~== 17.99 absTol 0.2)
intercept[TestFailedException](17.8 !~== 17.61 absTol 0.2)
intercept[TestFailedException](17.8 ~== 18.01 absTol 0.2)
intercept[TestFailedException](17.8 ~== 17.59 absTol 0.2)
assert(17.8 !~== 18.01 absTol 0.2)
assert(17.8 !~== 17.59 absTol 0.2)
assert(17.8 !~= 18.01 absTol 0.2)
assert(17.8 !~= 17.59 absTol 0.2)
assert(!(17.8 ~= 18.01 absTol 0.2))
assert(!(17.8 ~= 17.59 absTol 0.2))
// Comparisons of numbers very close to zero, and both side of zeros
assert(
Double.MinPositiveValue ~== 4 * Double.MinPositiveValue absTol 5 * Double.MinPositiveValue)
assert(
Double.MinPositiveValue !~== 6 * Double.MinPositiveValue absTol 5 * Double.MinPositiveValue)
assert(
-Double.MinPositiveValue ~== 3 * Double.MinPositiveValue absTol 5 * Double.MinPositiveValue)
assert(
Double.MinPositiveValue !~== -4 * Double.MinPositiveValue absTol 5 * Double.MinPositiveValue)
}
test("Comparing vectors using relative error.") {
// Comparisons of two dense vectors
assert(Vectors.dense(Array(3.1, 3.5)) ~== Vectors.dense(Array(3.130, 3.534)) relTol 0.01)
assert(Vectors.dense(Array(3.1, 3.5)) !~== Vectors.dense(Array(3.135, 3.534)) relTol 0.01)
assert(Vectors.dense(Array(3.1, 3.5)) ~= Vectors.dense(Array(3.130, 3.534)) relTol 0.01)
assert(Vectors.dense(Array(3.1, 3.5)) !~= Vectors.dense(Array(3.135, 3.534)) relTol 0.01)
assert(!(Vectors.dense(Array(3.1, 3.5)) !~= Vectors.dense(Array(3.130, 3.534)) relTol 0.01))
assert(!(Vectors.dense(Array(3.1, 3.5)) ~= Vectors.dense(Array(3.135, 3.534)) relTol 0.01))
// Should throw exception with message when test fails.
intercept[TestFailedException](
Vectors.dense(Array(3.1, 3.5)) !~== Vectors.dense(Array(3.130, 3.534)) relTol 0.01)
intercept[TestFailedException](
Vectors.dense(Array(3.1, 3.5)) ~== Vectors.dense(Array(3.135, 3.534)) relTol 0.01)
// Comparing against zero should fail the test and throw exception with message
// saying that the relative error is meaningless in this situation.
intercept[TestFailedException](
Vectors.dense(Array(3.1, 0.01)) ~== Vectors.dense(Array(3.13, 0.0)) relTol 0.01)
intercept[TestFailedException](
Vectors.dense(Array(3.1, 0.01)) ~== Vectors.sparse(2, Array(0), Array(3.13)) relTol 0.01)
// Comparisons of two sparse vectors
assert(Vectors.dense(Array(3.1, 3.5)) ~==
Vectors.sparse(2, Array(0, 1), Array(3.130, 3.534)) relTol 0.01)
assert(Vectors.dense(Array(3.1, 3.5)) !~==
Vectors.sparse(2, Array(0, 1), Array(3.135, 3.534)) relTol 0.01)
}
test("Comparing vectors using absolute error.") {
// Comparisons of two dense vectors
assert(Vectors.dense(Array(3.1, 3.5, 0.0)) ~==
Vectors.dense(Array(3.1 + 1E-8, 3.5 + 2E-7, 1E-8)) absTol 1E-6)
assert(Vectors.dense(Array(3.1, 3.5, 0.0)) !~==
Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7, 1 + 1E-3)) absTol 1E-6)
assert(Vectors.dense(Array(3.1, 3.5, 0.0)) ~=
Vectors.dense(Array(3.1 + 1E-8, 3.5 + 2E-7, 1E-8)) absTol 1E-6)
assert(Vectors.dense(Array(3.1, 3.5, 0.0)) !~=
Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7, 1 + 1E-3)) absTol 1E-6)
assert(!(Vectors.dense(Array(3.1, 3.5, 0.0)) !~=
Vectors.dense(Array(3.1 + 1E-8, 3.5 + 2E-7, 1E-8)) absTol 1E-6))
assert(!(Vectors.dense(Array(3.1, 3.5, 0.0)) ~=
Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7, 1 + 1E-3)) absTol 1E-6))
// Should throw exception with message when test fails.
intercept[TestFailedException](Vectors.dense(Array(3.1, 3.5, 0.0)) !~==
Vectors.dense(Array(3.1 + 1E-8, 3.5 + 2E-7, 1E-8)) absTol 1E-6)
intercept[TestFailedException](Vectors.dense(Array(3.1, 3.5, 0.0)) ~==
Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7, 1 + 1E-3)) absTol 1E-6)
// Comparisons of two sparse vectors
assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) ~==
Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-8, 2.4 + 1E-7)) absTol 1E-6)
assert(Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-8, 2.4 + 1E-7)) ~==
Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) absTol 1E-6)
assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) !~==
Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-3, 2.4)) absTol 1E-6)
assert(Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-3, 2.4)) !~==
Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) absTol 1E-6)
// Comparisons of a dense vector and a sparse vector
assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) ~==
Vectors.dense(Array(3.1 + 1E-8, 0, 2.4 + 1E-7)) absTol 1E-6)
assert(Vectors.dense(Array(3.1 + 1E-8, 0, 2.4 + 1E-7)) ~==
Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) absTol 1E-6)
assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) !~==
Vectors.dense(Array(3.1, 1E-3, 2.4)) absTol 1E-6)
}
}
| gioenn/xSpark | mllib/src/test/scala/org/apache/spark/mllib/util/TestingUtilsSuite.scala | Scala | apache-2.0 | 8,249 |
package util
import org.json4s._
import org.json4s.native.Serialization._
import org.scalatest.{FunSpec, ShouldMatchers}
import sample.Transfer.TransferRequest
import sample.util.JSONUtil
import sample.{Account, Person}
/**
 * Not a real test suite: each "test" simply serializes a sample object with
 * json4s and prints the resulting JSON, so a developer can inspect the format.
 */
class ObjectJsonViewer extends FunSpec with ShouldMatchers {
  // Serialization formats shared by all examples below.
  implicit def json4sFormats: Formats = JSONUtil.formats

  describe("ObjectJsonViewer") {
    it("is just used to show json result of an scala object") {
      // Case-class graph: serialized directly.
      val request = TransferRequest(1, Account("yy", 100.0), Account("xx", 100.0), 50.0)
      println(write(request))
    }
    it("can transform normal class to json") {
      // Plain (non-case) class with mutable fields also round-trips through json4s.
      val person = new Person
      person.name = "notyy"
      person.age = 37
      println(write(person))
    }
  }
}
| notyy/blogEngine | src/test/scala/util/ObjectJsonViewer.scala | Scala | apache-2.0 | 682 |
package tu.extensions.algorithms
import tu.extensions.Extensions
import tu.extensions.interfaces.{Generalizer, ContentModifier}
/**
*
* @author: Alexander Toschev
* Date: 12/10/12
* Time: 12:35 AM
*
*/
object Algorithms {
  /**
   * Instantiates the currently configured generalizer algorithm by loading the
   * registered [[Generalizer]] extension.
   *
   * @return the loaded implementation, viewed as a [[ContentModifier]]
   */
  // Idiomatic Scala: the last expression is the result; no explicit `return`
  // or procedure-style braces needed.
  def generalizer(): ContentModifier = Extensions.load[Generalizer]()
}
| keskival/2 | extensions/src/main/scala/tu/extensions/algorithms/Algorithms.scala | Scala | gpl-3.0 | 398 |
package sylvestris.core
import spray.json._
object NodeManifest {
  /**
   * Summons the implicit [[NodeManifest]] instance for `T`.
   * The explicit result type documents the public API and avoids relying on
   * inference for a library entry point.
   */
  def apply[T : NodeManifest]: NodeManifest[T] = implicitly[NodeManifest[T]]
}
trait NodeManifest[T] {
  // Tag identifying the node type `T` — presumably used to discriminate node
  // kinds in storage; confirm against GraphDb usage.
  implicit def tag: Tag
  // JSON (de)serializer for `T` payloads.
  implicit def jsonFormat: JsonFormat[T]
}
| drostron/sylvestris | core/src/main/scala/sylvestris/core/NodeManifest.scala | Scala | mit | 222 |
package sh.webserver
import java.net.ServerSocket
import java.util.concurrent.{ExecutorService, Executors}
import sh.webserver.request.Request
import scala.annotation.tailrec
/**
 * Minimal blocking HTTP-style server: binds to `port` and dispatches each
 * accepted connection to a [[RequestHandler]] on a fixed thread pool.
 */
class Server(port: Int) {
  /**
   * Binds the server socket and enters the accept loop. This call never
   * returns under normal operation.
   *
   * NOTE(review): the ServerSocket and thread pool are intentionally never
   * closed here because the accept loop runs forever; confirm there is no
   * graceful-shutdown requirement.
   */
  // `def start() { ... }` procedure syntax is deprecated; use an explicit
  // `: Unit =` result type instead.
  def start(): Unit = {
    val server = new ServerSocket(port)
    val pool = Executors.newFixedThreadPool(8)
    listen(server, pool)
  }

  /** Accepts connections forever; @tailrec guarantees constant stack usage. */
  @tailrec
  private def listen(server: ServerSocket, pool: ExecutorService): Unit = {
    val socket = server.accept()
    pool.execute(new RequestHandler(socket))
    listen(server, pool)
  }
}
| stefan-hering/scalaserver | src/main/sh/webserver/Server.scala | Scala | apache-2.0 | 527 |
package org.apache.spark.ml.feature
import com.collective.TestSparkContext
import org.apache.spark.sql.Row
import org.apache.spark.sql.types._
import org.scalatest.FlatSpec
/**
 * Tests GatherEncoder's `cover` parameter: keys are kept in order of total
 * frequency until the requested percentage of all observations is covered,
 * so lowering the cover threshold drops the rarest sites first.
 */
class GatherEncoderSpec extends FlatSpec with TestSparkContext {

  // Input rows are (cookie_id, array of {site, impressions}) records.
  val schema = StructType(Seq(
    StructField("cookie_id", StringType),
    StructField("sites", ArrayType(StructType(Seq(
      StructField("site", StringType),
      StructField("impressions", LongType
      ))), containsNull = false))
  ))

  val cookie1 = "cookie1"
  val cookie2 = "cookie2"
  val cookie3 = "cookie3"

  // Row counts are chosen so each site group adds a known slice of coverage;
  // the running totals are annotated on each group below.
  val dataset = sqlContext.createDataFrame(sc.parallelize(
    Seq.fill(250)(Row(cookie1, Array(   // 250 * 2 = 500 // total: 500  // cover: 50%
      Row("google.com", 12L),
      Row("cnn.com", 14L)
    ))) ++
    Seq.fill(100)(Row(cookie2, Array(   // 100 * 3 = 300 // total: 800  // cover: 80%
      Row("bbc.com", 20L),
      Row("auto.com", 1L),
      Row("moto.com", 3L)
    ))) ++
    Seq.fill(80)(Row(cookie3, Array(    // 80            // total: 880  // cover: 88%
      Row("sport.com", 100L)
    ))) ++
    Seq.fill(50)(Row(cookie3, Array(    // 50            // total: 930  // cover: 93%
      Row("netflix.com", 1L)
    ))) ++
    Seq.fill(40)(Row(cookie3, Array(    // 40            // total: 970  // cover: 97%
      Row("amazon.com", 1L)
    ))) ++
    Seq.fill(30)(Row(cookie3, Array(    // 30            // total: 1000 // cover: 100%
      Row("imdb.com", 1L)
    )))
  ), schema)

  // Common encoder configuration; each test only varies the cover threshold.
  val baseEncoder = new GatherEncoder()
    .setInputCol("sites")
    .setOutputCol("features")
    .setKeyCol("site")
    .setValueCol("impressions")

  "Gather Encoder" should "collect all keys when cover is 100.0" in {
    val encoder = baseEncoder.setCover(100.0)
    val features = encoder.fit(dataset)
    assert(features.modelKeys.length == 9)
  }

  it should "exclude imdb.com for 95% coverage" in {
    val encoder = baseEncoder.setCover(95.0)
    val features = encoder.fit(dataset)
    assert(features.modelKeys.length == 8)
    assert(!features.modelKeys.contains("imdb.com"))
  }

  it should "exclude amazon.com for 90% coverage" in {
    val encoder = baseEncoder.setCover(90.0)
    val features = encoder.fit(dataset)
    assert(features.modelKeys.length == 7)
    assert(!features.modelKeys.contains("amazon.com"))
  }

  it should "exclude netflix.com for 85% coverage" in {
    val encoder = baseEncoder.setCover(85.0)
    val features = encoder.fit(dataset)
    assert(features.modelKeys.length == 6)
    assert(!features.modelKeys.contains("netflix.com"))
  }

  it should "exclude sport.com for 75% coverage" in {
    val encoder = baseEncoder.setCover(75.0)
    val features = encoder.fit(dataset)
    assert(features.modelKeys.length == 5)
    assert(!features.modelKeys.contains("sport.com"))
  }
}
| codeaudit/spark-ext | sparkext-mllib/src/test/scala/org/apache/spark/ml/feature/GatherEncoderSpec.scala | Scala | apache-2.0 | 2,792 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.tree
import java.util.Locale
import scala.util.Try
import org.apache.spark.annotation.Since
import org.apache.spark.ml.PredictorParams
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared._
import org.apache.spark.ml.util.SchemaUtils
import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo, BoostingStrategy => OldBoostingStrategy, Strategy => OldStrategy}
import org.apache.spark.mllib.tree.impurity.{Entropy => OldEntropy, Gini => OldGini, Impurity => OldImpurity, Variance => OldVariance}
import org.apache.spark.mllib.tree.loss.{AbsoluteError => OldAbsoluteError, ClassificationLoss => OldClassificationLoss, LogLoss => OldLogLoss, Loss => OldLoss, SquaredError => OldSquaredError}
import org.apache.spark.sql.types.{DataType, DoubleType, StructType}
/**
* Parameters for Decision Tree-based algorithms.
*
* Note: Marked as private and DeveloperApi since this may be made public in the future.
*/
private[ml] trait DecisionTreeParams extends PredictorParams
  with HasCheckpointInterval with HasSeed with HasWeightCol {

  /**
   * Maximum depth of the tree (>= 0).
   * E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.
   * (default = 5)
   * @group param
   */
  final val maxDepth: IntParam =
    new IntParam(this, "maxDepth", "Maximum depth of the tree. (>= 0)" +
      " E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.",
      ParamValidators.gtEq(0))

  /**
   * Maximum number of bins used for discretizing continuous features and for choosing how to split
   * on features at each node. More bins give higher granularity.
   * Must be >= 2 and >= number of categories in any categorical feature.
   * (default = 32)
   * @group param
   */
  final val maxBins: IntParam = new IntParam(this, "maxBins", "Max number of bins for" +
    " discretizing continuous features.  Must be >=2 and >= number of categories for any" +
    " categorical feature.", ParamValidators.gtEq(2))

  /**
   * Minimum number of instances each child must have after split.
   * If a split causes the left or right child to have fewer than minInstancesPerNode,
   * the split will be discarded as invalid.
   * Should be >= 1.
   * (default = 1)
   * @group param
   */
  final val minInstancesPerNode: IntParam = new IntParam(this, "minInstancesPerNode", "Minimum" +
    " number of instances each child must have after split.  If a split causes the left or right" +
    " child to have fewer than minInstancesPerNode, the split will be discarded as invalid." +
    " Should be >= 1.", ParamValidators.gtEq(1))

  /**
   * Minimum fraction of the weighted sample count that each child must have after split.
   * If a split causes the fraction of the total weight in the left or right child to be less than
   * minWeightFractionPerNode, the split will be discarded as invalid.
   * Should be in the interval [0.0, 0.5).
   * (default = 0.0)
   * @group param
   */
  final val minWeightFractionPerNode: DoubleParam = new DoubleParam(this,
    "minWeightFractionPerNode", "Minimum fraction of the weighted sample count that each child " +
      "must have after split. If a split causes the fraction of the total weight in the left or " +
      "right child to be less than minWeightFractionPerNode, the split will be discarded as " +
      "invalid. Should be in interval [0.0, 0.5)",
    ParamValidators.inRange(0.0, 0.5, lowerInclusive = true, upperInclusive = false))

  /**
   * Minimum information gain for a split to be considered at a tree node.
   * Should be >= 0.0.
   * (default = 0.0)
   * @group param
   */
  final val minInfoGain: DoubleParam = new DoubleParam(this, "minInfoGain",
    "Minimum information gain for a split to be considered at a tree node.",
    ParamValidators.gtEq(0.0))

  /**
   * Maximum memory in MB allocated to histogram aggregation. If too small, then 1 node will be
   * split per iteration, and its aggregates may exceed this size.
   * (default = 256 MB)
   * @group expertParam
   */
  final val maxMemoryInMB: IntParam = new IntParam(this, "maxMemoryInMB",
    "Maximum memory in MB allocated to histogram aggregation.",
    ParamValidators.gtEq(0))

  /**
   * If false, the algorithm will pass trees to executors to match instances with nodes.
   * If true, the algorithm will cache node IDs for each instance.
   * Caching can speed up training of deeper trees. Users can set how often should the
   * cache be checkpointed or disable it by setting checkpointInterval.
   * (default = false)
   * @group expertParam
   */
  final val cacheNodeIds: BooleanParam = new BooleanParam(this, "cacheNodeIds", "If false, the" +
    " algorithm will pass trees to executors to match instances with nodes. If true, the" +
    " algorithm will cache node IDs for each instance. Caching can speed up training of deeper" +
    " trees.")

  // Shared defaults for every decision-tree based learner mixing in this trait.
  setDefault(maxDepth -> 5, maxBins -> 32, minInstancesPerNode -> 1,
    minWeightFractionPerNode -> 0.0, minInfoGain -> 0.0, maxMemoryInMB -> 256,
    cacheNodeIds -> false, checkpointInterval -> 10)

  /** @group getParam */
  final def getMaxDepth: Int = $(maxDepth)

  /** @group getParam */
  final def getMaxBins: Int = $(maxBins)

  /** @group getParam */
  final def getMinInstancesPerNode: Int = $(minInstancesPerNode)

  /** @group getParam */
  final def getMinWeightFractionPerNode: Double = $(minWeightFractionPerNode)

  /** @group getParam */
  final def getMinInfoGain: Double = $(minInfoGain)

  /** @group expertGetParam */
  final def getMaxMemoryInMB: Int = $(maxMemoryInMB)

  /** @group expertGetParam */
  final def getCacheNodeIds: Boolean = $(cacheNodeIds)

  /** (private[ml]) Create a Strategy instance to use with the old API. */
  private[ml] def getOldStrategy(
      categoricalFeatures: Map[Int, Int],
      numClasses: Int,
      oldAlgo: OldAlgo.Algo,
      oldImpurity: OldImpurity,
      subsamplingRate: Double): OldStrategy = {
    // Start from the old API's defaults, then mirror every new-API param onto
    // the mutable legacy Strategy configuration object.
    val strategy = OldStrategy.defaultStrategy(oldAlgo)
    strategy.impurity = oldImpurity
    strategy.checkpointInterval = getCheckpointInterval
    strategy.maxBins = getMaxBins
    strategy.maxDepth = getMaxDepth
    strategy.maxMemoryInMB = getMaxMemoryInMB
    strategy.minInfoGain = getMinInfoGain
    strategy.minInstancesPerNode = getMinInstancesPerNode
    strategy.minWeightFractionPerNode = getMinWeightFractionPerNode
    strategy.useNodeIdCache = getCacheNodeIds
    strategy.numClasses = numClasses
    strategy.categoricalFeaturesInfo = categoricalFeatures
    strategy.subsamplingRate = subsamplingRate
    strategy
  }
}
/**
* Parameters for Decision Tree-based classification algorithms.
*/
private[ml] trait TreeClassifierParams extends Params {

  /**
   * Criterion used for information gain calculation (case-insensitive).
   * Supported: "entropy" and "gini".
   * (default = gini)
   * @group param
   */
  final val impurity: Param[String] = new Param[String](this, "impurity", "Criterion used for" +
    " information gain calculation (case-insensitive). Supported options:" +
    s" ${TreeClassifierParams.supportedImpurities.mkString(", ")}",
    (value: String) =>
      TreeClassifierParams.supportedImpurities.contains(value.toLowerCase(Locale.ROOT)))

  setDefault(impurity -> "gini")

  /** @group getParam */
  final def getImpurity: String = $(impurity).toLowerCase(Locale.ROOT)

  /** Convert new impurity to old impurity. */
  private[ml] def getOldImpurity: OldImpurity = {
    getImpurity match {
      case "entropy" => OldEntropy
      case "gini" => OldGini
      case _ =>
        // Should never happen because of check in setter method.
        // Interpolate the actual (lowercased) value: `$impurity` would print the
        // Param object's identifier, not the offending setting. This also matches
        // the message style of the GBT traits, which interpolate `$getLossType`.
        throw new RuntimeException(
          s"TreeClassifierParams was given unrecognized impurity: $getImpurity.")
    }
  }
}
private[ml] object TreeClassifierParams {
  // Impurity names are matched case-insensitively against this list,
  // so every entry must be lowercase.
  final val supportedImpurities: Array[String] = {
    val names = Array("entropy", "gini")
    names.map(_.toLowerCase(Locale.ROOT))
  }
}
/** Marker union of tree params and classifier-specific params; adds no members. */
private[ml] trait DecisionTreeClassifierParams
  extends DecisionTreeParams with TreeClassifierParams
private[ml] trait HasVarianceImpurity extends Params {
  /**
   * Criterion used for information gain calculation (case-insensitive).
   * Supported: "variance".
   * (default = variance)
   * @group param
   */
  final val impurity: Param[String] = new Param[String](this, "impurity", "Criterion used for" +
    " information gain calculation (case-insensitive). Supported options:" +
    s" ${HasVarianceImpurity.supportedImpurities.mkString(", ")}",
    (value: String) =>
      HasVarianceImpurity.supportedImpurities.contains(value.toLowerCase(Locale.ROOT)))

  setDefault(impurity -> "variance")

  /** @group getParam */
  final def getImpurity: String = $(impurity).toLowerCase(Locale.ROOT)

  /** Convert new impurity to old impurity. */
  private[ml] def getOldImpurity: OldImpurity = {
    getImpurity match {
      case "variance" => OldVariance
      case _ =>
        // Should never happen because of check in setter method.
        // Interpolate the actual (lowercased) value: `$impurity` would print the
        // Param object's identifier rather than the invalid setting, matching the
        // message style used by the GBT traits (`$getLossType`).
        throw new RuntimeException(
          s"TreeRegressorParams was given unrecognized impurity: $getImpurity")
    }
  }
}
private[ml] object HasVarianceImpurity {
  // Impurity names are matched case-insensitively against this list,
  // so every entry must be lowercase.
  final val supportedImpurities: Array[String] = {
    val names = Array("variance")
    names.map(_.toLowerCase(Locale.ROOT))
  }
}
/**
* Parameters for Decision Tree-based regression algorithms.
*/
private[ml] trait TreeRegressorParams extends HasVarianceImpurity // variance is the only supported regression impurity
private[ml] trait DecisionTreeRegressorParams extends DecisionTreeParams
  with TreeRegressorParams with HasVarianceCol {

  override protected def validateAndTransformSchema(
      schema: StructType,
      fitting: Boolean,
      featuresDataType: DataType): StructType = {
    val newSchema = super.validateAndTransformSchema(schema, fitting, featuresDataType)
    // Only append the per-prediction variance output column when the user has
    // explicitly set a non-empty varianceCol.
    if (isDefined(varianceCol) && $(varianceCol).nonEmpty) {
      SchemaUtils.appendColumn(newSchema, $(varianceCol), DoubleType)
    } else {
      newSchema
    }
  }
}
private[spark] object TreeEnsembleParams {
  // Strategy names are matched case-insensitively against this list,
  // so every entry must be lowercase.
  final val supportedFeatureSubsetStrategies: Array[String] = {
    val strategies = Array("auto", "all", "onethird", "sqrt", "log2")
    strategies.map(_.toLowerCase(Locale.ROOT))
  }
}
/**
* Parameters for Decision Tree-based ensemble algorithms.
*
* Note: Marked as private and DeveloperApi since this may be made public in the future.
*/
private[ml] trait TreeEnsembleParams extends DecisionTreeParams {

  /**
   * Fraction of the training data used for learning each decision tree, in range (0, 1].
   * (default = 1.0)
   * @group param
   */
  final val subsamplingRate: DoubleParam = new DoubleParam(this, "subsamplingRate",
    "Fraction of the training data used for learning each decision tree, in range (0, 1].",
    ParamValidators.inRange(0, 1, lowerInclusive = false, upperInclusive = true))

  setDefault(subsamplingRate -> 1.0)

  /** @group getParam */
  final def getSubsamplingRate: Double = $(subsamplingRate)

  /**
   * Create a Strategy instance to use with the old API.
   * NOTE: The caller should set impurity and seed.
   */
  private[ml] def getOldStrategy(
      categoricalFeatures: Map[Int, Int],
      numClasses: Int,
      oldAlgo: OldAlgo.Algo,
      oldImpurity: OldImpurity): OldStrategy = {
    // Delegates to DecisionTreeParams.getOldStrategy, adding the ensemble-level
    // subsampling rate.
    super.getOldStrategy(categoricalFeatures, numClasses, oldAlgo, oldImpurity, getSubsamplingRate)
  }

  /**
   * The number of features to consider for splits at each tree node.
   * Supported options:
   *  - "auto": Choose automatically for task:
   *            If numTrees == 1, set to "all."
   *            If numTrees > 1 (forest), set to "sqrt" for classification and
   *              to "onethird" for regression.
   *  - "all": use all features
   *  - "onethird": use 1/3 of the features
   *  - "sqrt": use sqrt(number of features)
   *  - "log2": use log2(number of features)
   *  - "n": when n is in the range (0, 1.0], use n * number of features. When n
   *         is in the range (1, number of features), use n features.
   * (default = "auto")
   *
   * These various settings are based on the following references:
   *  - log2: tested in Breiman (2001)
   *  - sqrt: recommended by Breiman manual for random forests
   *  - The defaults of sqrt (classification) and onethird (regression) match the R randomForest
   *    package.
   * @see <a href="http://www.stat.berkeley.edu/~breiman/randomforest2001.pdf">Breiman (2001)</a>
   * @see <a href="http://www.stat.berkeley.edu/~breiman/Using_random_forests_V3.1.pdf">
   * Breiman manual for random forests</a>
   *
   * @group param
   */
  final val featureSubsetStrategy: Param[String] = new Param[String](this, "featureSubsetStrategy",
    "The number of features to consider for splits at each tree node." +
      s" Supported options: ${TreeEnsembleParams.supportedFeatureSubsetStrategies.mkString(", ")}" +
      s", (0.0-1.0], [1-n].",
    (value: String) =>
      // Valid values: a known strategy name, a positive integer feature count,
      // or a fraction in (0, 1].
      TreeEnsembleParams.supportedFeatureSubsetStrategies.contains(
        value.toLowerCase(Locale.ROOT))
        || Try(value.toInt).filter(_ > 0).isSuccess
        || Try(value.toDouble).filter(_ > 0).filter(_ <= 1.0).isSuccess)

  setDefault(featureSubsetStrategy -> "auto")

  /** @group getParam */
  final def getFeatureSubsetStrategy: String = $(featureSubsetStrategy).toLowerCase(Locale.ROOT)
}
/**
* Parameters for Random Forest algorithms.
*/
private[ml] trait RandomForestParams extends TreeEnsembleParams {

  /**
   * Number of trees to train (>= 1).
   * If 1, then no bootstrapping is used.  If > 1, then bootstrapping is done.
   * TODO: Change to always do bootstrapping (simpler).  SPARK-7130
   * (default = 20)
   *
   * Note: The reason that we cannot add this to both GBT and RF (i.e. in TreeEnsembleParams)
   * is the param `maxIter` controls how many trees a GBT has. The semantics in the algorithms
   * are a bit different.
   * @group param
   */
  final val numTrees: IntParam = new IntParam(this, "numTrees", "Number of trees to train (>= 1)",
    ParamValidators.gtEq(1))

  setDefault(numTrees -> 20)

  /** @group getParam */
  final def getNumTrees: Int = $(numTrees)
}
/** Marker union of random-forest params and classifier params; adds no members. */
private[ml] trait RandomForestClassifierParams
  extends RandomForestParams with TreeClassifierParams
/** Marker union of random-forest params and regressor params; adds no members. */
private[ml] trait RandomForestRegressorParams
  extends RandomForestParams with TreeRegressorParams
/**
* Parameters for Gradient-Boosted Tree algorithms.
*
* Note: Marked as private and DeveloperApi since this may be made public in the future.
*/
private[ml] trait GBTParams extends TreeEnsembleParams with HasMaxIter with HasStepSize
  with HasValidationIndicatorCol {

  /**
   * Threshold for stopping early when fit with validation is used.
   * (This parameter is ignored when fit without validation is used.)
   * The decision to stop early is decided based on this logic:
   * If the current loss on the validation set is greater than 0.01, the diff
   * of validation error is compared to relative tolerance which is
   * validationTol * (current loss on the validation set).
   * If the current loss on the validation set is less than or equal to 0.01,
   * the diff of validation error is compared to absolute tolerance which is
   * validationTol * 0.01.
   * @group param
   * @see validationIndicatorCol
   */
  @Since("2.4.0")
  final val validationTol: DoubleParam = new DoubleParam(this, "validationTol",
    "Threshold for stopping early when fit with validation is used." +
    "If the error rate on the validation input changes by less than the validationTol," +
    "then learning will stop early (before `maxIter`)." +
    "This parameter is ignored when fit without validation is used.",
    ParamValidators.gtEq(0.0)
  )

  /** @group getParam */
  @Since("2.4.0")
  final def getValidationTol: Double = $(validationTol)

  /**
   * Param for Step size (a.k.a. learning rate) in interval (0, 1] for shrinking
   * the contribution of each estimator.
   * (default = 0.1)
   * @group param
   */
  final override val stepSize: DoubleParam = new DoubleParam(this, "stepSize", "Step size " +
    "(a.k.a. learning rate) in interval (0, 1] for shrinking the contribution of each estimator.",
    ParamValidators.inRange(0, 1, lowerInclusive = false, upperInclusive = true))

  setDefault(maxIter -> 20, stepSize -> 0.1, validationTol -> 0.01)

  setDefault(featureSubsetStrategy -> "all")

  /** (private[ml]) Create a BoostingStrategy instance to use with the old API. */
  private[ml] def getOldBoostingStrategy(
      categoricalFeatures: Map[Int, Int],
      oldAlgo: OldAlgo.Algo): OldBoostingStrategy = {
    // GBT's per-iteration trees are regression trees, hence OldVariance here;
    // numClasses = 2 because GBT classification is binary.
    val strategy = super.getOldStrategy(categoricalFeatures, numClasses = 2, oldAlgo, OldVariance)
    // NOTE: The old API does not support "seed" so we ignore it.
    new OldBoostingStrategy(strategy, getOldLossType, getMaxIter, getStepSize, getValidationTol)
  }

  /** Get old Gradient Boosting Loss type */
  private[ml] def getOldLossType: OldLoss
}
private[ml] object GBTClassifierParams {
  /** Accessor for supported loss settings: logistic */
  // Loss names are matched case-insensitively; keep every entry lowercase.
  final val supportedLossTypes: Array[String] = {
    val losses = Array("logistic")
    losses.map(_.toLowerCase(Locale.ROOT))
  }
}
private[ml] trait GBTClassifierParams extends GBTParams with HasVarianceImpurity {

  /**
   * Loss function which GBT tries to minimize. (case-insensitive)
   * Supported: "logistic"
   * (default = logistic)
   * @group param
   */
  val lossType: Param[String] = new Param[String](this, "lossType", "Loss function which GBT" +
    " tries to minimize (case-insensitive). Supported options:" +
    s" ${GBTClassifierParams.supportedLossTypes.mkString(", ")}",
    (value: String) =>
      GBTClassifierParams.supportedLossTypes.contains(value.toLowerCase(Locale.ROOT)))

  setDefault(lossType -> "logistic")

  /** @group getParam */
  def getLossType: String = $(lossType).toLowerCase(Locale.ROOT)

  /** (private[ml]) Convert new loss to old loss. */
  override private[ml] def getOldLossType: OldClassificationLoss = {
    getLossType match {
      case "logistic" => OldLogLoss
      case _ =>
        // Should never happen because of check in setter method.
        throw new RuntimeException(s"GBTClassifier was given bad loss type: $getLossType")
    }
  }
}
private[ml] object GBTRegressorParams {
  /** Accessor for supported loss settings: squared (L2), absolute (L1) */
  // Loss names are matched case-insensitively; keep every entry lowercase.
  final val supportedLossTypes: Array[String] = {
    val losses = Array("squared", "absolute")
    losses.map(_.toLowerCase(Locale.ROOT))
  }
}
private[ml] trait GBTRegressorParams extends GBTParams with TreeRegressorParams {

  /**
   * Loss function which GBT tries to minimize. (case-insensitive)
   * Supported: "squared" (L2) and "absolute" (L1)
   * (default = squared)
   * @group param
   */
  val lossType: Param[String] = new Param[String](this, "lossType", "Loss function which GBT" +
    " tries to minimize (case-insensitive). Supported options:" +
    s" ${GBTRegressorParams.supportedLossTypes.mkString(", ")}",
    (value: String) =>
      GBTRegressorParams.supportedLossTypes.contains(value.toLowerCase(Locale.ROOT)))

  setDefault(lossType -> "squared")

  /** @group getParam */
  def getLossType: String = $(lossType).toLowerCase(Locale.ROOT)

  /** (private[ml]) Convert new loss to old loss. */
  override private[ml] def getOldLossType: OldLoss = {
    convertToOldLossType(getLossType)
  }

  /** Converts a loss name to the old-API loss object; `loss` must be lowercase. */
  private[ml] def convertToOldLossType(loss: String): OldLoss = {
    loss match {
      case "squared" => OldSquaredError
      case "absolute" => OldAbsoluteError
      case _ =>
        // Should never happen because of check in setter method.
        // Report the `loss` argument itself: this helper can be invoked with a
        // string other than the current param value, so interpolating
        // getLossType here could hide the actual offending input.
        throw new RuntimeException(s"GBTRegressorParams was given bad loss type: $loss")
    }
  }
}
| hhbyyh/spark | mllib/src/main/scala/org/apache/spark/ml/tree/treeParams.scala | Scala | apache-2.0 | 20,539 |
package com.eevolution.context.dictionary.infrastructure.repository
import com.eevolution.context.dictionary.domain.model.UserBusinessPartnerAccess
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/EmerisScala
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 27/10/17.
*/
/**
* User Business Partner Access Mapping
*/
trait UserBusinessPartnerAccessMapping {
  // Quill schema mapping: binds each UserBusinessPartnerAccess case-class field
  // to its column in the AD_UserBPAccess table, so queries can be composed
  // against the entity while generating SQL with the legacy column names.
  val queryUserBusinessPartnerAccess = quote {
    querySchema[UserBusinessPartnerAccess]("AD_UserBPAccess",
      _.userBusinessPartnerAccessId-> "AD_UserBPAccess_ID",
      _.tenantId-> "AD_Client_ID",
      _.organizationId-> "AD_Org_ID",
      _.isActive-> "IsActive",
      _.created-> "Created",
      _.createdBy-> "CreatedBy",
      _.updated-> "Updated",
      _.updatedBy-> "UpdatedBy",
      _.userId-> "AD_User_ID",
      _.businessPartnerAccessType-> "BPAccessType",
      _.requestType-> "R_RequestType_ID",
      _.documentBaseType-> "DocBaseType",
      _.uuid-> "UUID")
  }
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/UserBusinessPartnerAccessMapping.scala | Scala | gpl-3.0 | 1,840 |
package scala.meta.internal.semanticdb
import scala.{meta => m}
trait TypeOps { self: DatabaseOps =>

  // Compiler-plumbing symbol names (constructors, cast helpers, the
  // java.lang.Object protocol) that must never surface as member signatures.
  private[this] lazy val ignoreName: Set[g.Name] = Set(
    g.nme.CONSTRUCTOR,
    g.nme.MIXIN_CONSTRUCTOR,
    g.nme.asInstanceOf_,
    g.nme.asInstanceOf_Ob,
    g.nme.isInstanceOf_,
    g.nme.isInstanceOf_Ob,
    g.nme.hashCode_,
    g.nme.HASHHASH,
    g.nme.ne,
    g.nme.eq,
    g.nme.finalize_,
    g.nme.wait_,
    g.nme.EQ,
    g.nme.NE,
    g.nme.synchronized_,
    g.nme.notify_,
    g.nme.notifyAll_,
    g.nme.clone_,
    g.nme.equals_,
    g.nme.toString_,
    g.nme.getClass_
  )

  implicit class XtensionGTypeMSignatures(tpe: g.Type) {
    /** Members of `tpe` as scala.meta signatures (member order), minus ignored names. */
    def lookupMembers: List[m.Signature] =
      tpe.members.iterator
        .filterNot(sym => ignoreName(sym.name))
        .map { sym =>
          if (sym.name.isTermName) m.Signature.Term(sym.decodedName): m.Signature
          else m.Signature.Type(sym.decodedName)
        }
        .toList
  }
}
| DavidDudson/scalameta | scalameta/semanticdb-scalac-core/src/main/scala/scala/meta/internal/semanticdb/TypeOps.scala | Scala | bsd-3-clause | 1,010 |
package openquant.yahoofinance.impl
import java.time.format.DateTimeFormatter
import java.time.{LocalDate, ZoneId, ZonedDateTime}
import com.github.tototoshi.csv._
import openquant.yahoofinance.Quote
import scala.io.Source
/**
* Parses historical data in CSV format from Yahoo Finance into [[Quote]]
*/
class QuoteParser {
  // Yahoo historical CSV dates look like "2016-01-29"; quotes are stamped at
  // the start of the trading day in the NYSE time zone.
  private[this] val df = DateTimeFormatter.ofPattern("yyyy-MM-dd")
  private[this] val zoneId = ZoneId.of("America/New_York")

  /**
    * Parses a full CSV document (header row included) into quotes.
    *
    * @param content raw CSV text as returned by Yahoo Finance
    * @return one [[Quote]] per data row, in file order
    */
  def parse(content: String): Vector[Quote] = {
    val csvReader = CSVReader.open(Source.fromString(content))
    try {
      // drop(1) skips the header row; toVector forces the lazy stream while
      // the reader is still open.
      csvReader.toStream.drop(1).map { fields =>
        parseCSVLine(fields.toVector)
      }.toVector
    } finally {
      // Fix: the reader (and its underlying Source) was previously leaked.
      csvReader.close()
    }
  }

  // One CSV row: Date, Open, High, Low, Close, Volume, Adj Close.
  // Fields are forwarded in the order 0,1,4,2,3,5,6 — NOTE(review): verify this
  // matches the declared parameter order of the Quote constructor.
  private def parseCSVLine(fields: Vector[String]): Quote = {
    require(fields.length >= 7, s"expected at least 7 CSV fields, got ${fields.length}")
    Quote(
      parseDate(fields(0)),
      BigDecimal(fields(1)),
      BigDecimal(fields(4)),
      BigDecimal(fields(2)),
      BigDecimal(fields(3)),
      BigDecimal(fields(5)),
      BigDecimal(fields(6))
    )
  }

  // Midnight of the quoted date in America/New_York.
  private def parseDate(date: String): ZonedDateTime = {
    LocalDate.parse(date, df).atStartOfDay().atZone(zoneId)
  }
}
object QuoteParser {
  /** Factory for a parser instance; the parser holds no mutable state. */
  def apply() = new QuoteParser
}
| openquant/YahooFinanceScala | src/main/scala/openquant/yahoofinance/impl/QuoteParser.scala | Scala | mit | 1,199 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.io._
import junit.framework.Assert._
import org.junit.Test
import org.scalatest.junit.JUnit3Suite
import kafka.server.{BrokerState, OffsetCheckpoint}
import kafka.common._
import kafka.utils._
class LogManagerTest extends JUnit3Suite {

  val time: MockTime = new MockTime()
  val maxRollInterval = 100
  val maxLogAgeMs = 10*60*60*1000
  val logConfig = LogConfig(segmentSize = 1024, maxIndexSize = 4096, retentionMs = maxLogAgeMs)
  var logDir: File = null
  var logManager: LogManager = null
  val name = "kafka"
  val veryLargeLogFlushInterval = 10000000L
  val cleanerConfig = CleanerConfig(enableCleaner = false)

  override def setUp() {
    super.setUp()
    logDir = TestUtils.tempDir()
    logManager = new LogManager(logDirs = Array(logDir),
                                topicConfigs = Map(),
                                defaultConfig = logConfig,
                                cleanerConfig = cleanerConfig,
                                flushCheckMs = 1000L,
                                flushCheckpointMs = 100000L,
                                retentionCheckMs = 1000L,
                                scheduler = time.scheduler,
                                time = time,
                                brokerState = new BrokerState())
    logManager.startup()
    logDir = logManager.logDirs(0)
  }

  override def tearDown() {
    // Fix: the original only null-guarded shutdown() and then dereferenced
    // logManager unconditionally, which NPEs whenever setUp fails before
    // logManager is assigned. Guard every use of the nullable fields.
    if (logManager != null) {
      logManager.shutdown()
      // foreach (not map): we only want the deletion side effect
      logManager.logDirs.foreach(Utils.rm(_))
    }
    if (logDir != null)
      Utils.rm(logDir)
    super.tearDown()
  }

  /**
   * Test that getOrCreateLog on a non-existent log creates a new log and that we can append to the new log.
   */
  @Test
  def testCreateLog() {
    val log = logManager.createLog(TopicAndPartition(name, 0), logConfig)
    val logFile = new File(logDir, name + "-0")
    assertTrue(logFile.exists)
    log.append(TestUtils.singleMessageSet("test".getBytes()))
  }

  /**
   * Test that get on a non-existent returns None and no log is created.
   */
  @Test
  def testGetNonExistentLog() {
    val log = logManager.getLog(TopicAndPartition(name, 0))
    assertEquals("No log should be found.", None, log)
    val logFile = new File(logDir, name + "-0")
    assertTrue(!logFile.exists)
  }

  /**
   * Test time-based log cleanup. First append messages, then set the time into the future and run cleanup.
   */
  @Test
  def testCleanupExpiredSegments() {
    val log = logManager.createLog(TopicAndPartition(name, 0), logConfig)
    var offset = 0L
    for(i <- 0 until 200) {
      val set = TestUtils.singleMessageSet("test".getBytes())
      val info = log.append(set)
      offset = info.lastOffset
    }
    assertTrue("There should be more than one segment now.", log.numberOfSegments > 1)

    // age every segment past the retention limit, then let the scheduler run cleanup
    log.logSegments.foreach(_.log.file.setLastModified(time.milliseconds))

    time.sleep(maxLogAgeMs + 1)
    assertEquals("Now there should only be only one segment in the index.", 1, log.numberOfSegments)
    time.sleep(log.config.fileDeleteDelayMs + 1)
    assertEquals("Files should have been deleted", log.numberOfSegments * 2, log.dir.list.length)
    assertEquals("Should get empty fetch off new log.", 0, log.read(offset+1, 1024).sizeInBytes)

    try {
      log.read(0, 1024)
      fail("Should get exception from fetching earlier.")
    } catch {
      case _: OffsetOutOfRangeException => // expected: the old data is gone
    }
    // log should still be appendable
    log.append(TestUtils.singleMessageSet("test".getBytes()))
  }

  /**
   * Test size-based cleanup. Append messages, then run cleanup and check that segments are deleted.
   */
  @Test
  def testCleanupSegmentsToMaintainSize() {
    val setSize = TestUtils.singleMessageSet("test".getBytes()).sizeInBytes
    logManager.shutdown()

    // rebuild the manager with a retention size that only keeps ~5 segments
    val config = logConfig.copy(segmentSize = 10 * (setSize - 1), retentionSize = 5L * 10L * setSize + 10L)
    logManager = new LogManager(
      logDirs = Array(logDir),
      topicConfigs = Map(),
      defaultConfig = config,
      cleanerConfig = cleanerConfig,
      flushCheckMs = 1000L,
      flushCheckpointMs = 100000L,
      retentionCheckMs = 1000L,
      scheduler = time.scheduler,
      brokerState = new BrokerState(),
      time = time
    )
    logManager.startup()

    // create a log
    val log = logManager.createLog(TopicAndPartition(name, 0), config)
    var offset = 0L

    // add a bunch of messages that should be larger than the retentionSize
    val numMessages = 200
    for(i <- 0 until numMessages) {
      val set = TestUtils.singleMessageSet("test".getBytes())
      val info = log.append(set)
      offset = info.firstOffset
    }

    assertEquals("Check we have the expected number of segments.", numMessages * setSize / config.segmentSize, log.numberOfSegments)

    // this cleanup shouldn't find any expired segments but should delete some to reduce size
    time.sleep(logManager.InitialTaskDelayMs)
    assertEquals("Now there should be exactly 6 segments", 6, log.numberOfSegments)
    time.sleep(log.config.fileDeleteDelayMs + 1)
    assertEquals("Files should have been deleted", log.numberOfSegments * 2, log.dir.list.length)
    assertEquals("Should get empty fetch off new log.", 0, log.read(offset + 1, 1024).sizeInBytes)
    try {
      log.read(0, 1024)
      fail("Should get exception from fetching earlier.")
    } catch {
      case _: OffsetOutOfRangeException => // expected: the old data is gone
    }
    // log should still be appendable
    log.append(TestUtils.singleMessageSet("test".getBytes()))
  }

  /**
   * Test that flush is invoked by the background scheduler thread.
   */
  @Test
  def testTimeBasedFlush() {
    logManager.shutdown()
    val config = logConfig.copy(flushMs = 1000)
    logManager = new LogManager(
      logDirs = Array(logDir),
      topicConfigs = Map(),
      defaultConfig = config,
      cleanerConfig = cleanerConfig,
      flushCheckMs = 1000L,
      flushCheckpointMs = 10000L,
      retentionCheckMs = 1000L,
      scheduler = time.scheduler,
      brokerState = new BrokerState(),
      time = time
    )
    logManager.startup()
    val log = logManager.createLog(TopicAndPartition(name, 0), config)
    val lastFlush = log.lastFlushTime
    for(i <- 0 until 200) {
      val set = TestUtils.singleMessageSet("test".getBytes())
      log.append(set)
    }
    time.sleep(logManager.InitialTaskDelayMs)
    assertTrue("Time based flush should have been triggered", lastFlush != log.lastFlushTime)
  }

  /**
   * Test that new logs that are created are assigned to the least loaded log directory
   */
  @Test
  def testLeastLoadedAssignment() {
    // create a log manager with multiple data directories
    val dirs = Array(TestUtils.tempDir(),
                     TestUtils.tempDir(),
                     TestUtils.tempDir())
    logManager.shutdown()
    logManager = new LogManager(
      logDirs = dirs,
      topicConfigs = Map(),
      defaultConfig = logConfig,
      cleanerConfig = cleanerConfig,
      flushCheckMs = 1000L,
      flushCheckpointMs = 10000L,
      retentionCheckMs = 1000L,
      scheduler = time.scheduler,
      brokerState = new BrokerState(),
      time = time
    )

    // verify that logs are always assigned to the least loaded partition
    for(partition <- 0 until 20) {
      logManager.createLog(TopicAndPartition("test", partition), logConfig)
      assertEquals("We should have created the right number of logs", partition + 1, logManager.allLogs.size)
      val counts = logManager.allLogs.groupBy(_.dir.getParent).values.map(_.size)
      assertTrue("Load should balance evenly", counts.max <= counts.min + 1)
    }
  }

  /**
   * Test that it is not possible to open two log managers using the same data directory
   */
  @Test
  def testTwoLogManagersUsingSameDirFails() {
    try {
      new LogManager(
        logDirs = Array(logDir),
        topicConfigs = Map(),
        defaultConfig = logConfig,
        cleanerConfig = cleanerConfig,
        flushCheckMs = 1000L,
        flushCheckpointMs = 10000L,
        retentionCheckMs = 1000L,
        scheduler = time.scheduler,
        brokerState = new BrokerState(),
        time = time
      )
      fail("Should not be able to create a second log manager instance with the same data directory")
    } catch {
      case _: KafkaException => // expected: directory lock is already held
    }
  }

  /**
   * Test that recovery points are correctly written out to disk
   */
  @Test
  def testCheckpointRecoveryPoints() {
    verifyCheckpointRecovery(Seq(TopicAndPartition("test-a", 1), TopicAndPartition("test-b", 1)), logManager)
  }

  /**
   * Test that recovery points directory checking works with trailing slash
   */
  @Test
  def testRecoveryDirectoryMappingWithTrailingSlash() {
    logManager.shutdown()
    logDir = TestUtils.tempDir()
    logManager = new LogManager(logDirs = Array(new File(logDir.getAbsolutePath + File.separator)),
                                topicConfigs = Map(),
                                defaultConfig = logConfig,
                                cleanerConfig = cleanerConfig,
                                flushCheckMs = 1000L,
                                flushCheckpointMs = 100000L,
                                retentionCheckMs = 1000L,
                                scheduler = time.scheduler,
                                time = time,
                                brokerState = new BrokerState())
    logManager.startup()
    verifyCheckpointRecovery(Seq(TopicAndPartition("test-a", 1)), logManager)
  }

  /**
   * Test that recovery points directory checking works with relative directory
   */
  @Test
  def testRecoveryDirectoryMappingWithRelativeDirectory() {
    logManager.shutdown()
    logDir = new File("data" + File.separator + logDir.getName)
    logDir.mkdirs()
    logDir.deleteOnExit()
    logManager = new LogManager(logDirs = Array(logDir),
                                topicConfigs = Map(),
                                defaultConfig = logConfig,
                                cleanerConfig = cleanerConfig,
                                flushCheckMs = 1000L,
                                flushCheckpointMs = 100000L,
                                retentionCheckMs = 1000L,
                                scheduler = time.scheduler,
                                time = time,
                                brokerState = new BrokerState())
    logManager.startup()
    verifyCheckpointRecovery(Seq(TopicAndPartition("test-a", 1)), logManager)
  }

  // Appends and flushes 50 messages per log, checkpoints, then asserts that the
  // checkpoint file agrees with each log's in-memory recovery point.
  private def verifyCheckpointRecovery(topicAndPartitions: Seq[TopicAndPartition],
                                       logManager: LogManager) {
    val logs = topicAndPartitions.map(this.logManager.createLog(_, logConfig))
    logs.foreach(log => {
      for(i <- 0 until 50)
        log.append(TestUtils.singleMessageSet("test".getBytes()))

      log.flush()
    })

    logManager.checkpointRecoveryPointOffsets()
    val checkpoints = new OffsetCheckpoint(new File(logDir, logManager.RecoveryPointCheckpointFile)).read()

    topicAndPartitions.zip(logs).foreach {
      case(tp, log) => {
        assertEquals("Recovery point should equal checkpoint", checkpoints(tp), log.recoveryPoint)
      }
    }
  }
}
| stealthly/kafka | core/src/test/scala/unit/kafka/log/LogManagerTest.scala | Scala | apache-2.0 | 11,538 |
/**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rxscala.util
/**
* Provides [[Opening]]s, [[Closing]]s, and [[Timestamped]].
*/
object `package` {

  // Everything here is a thin alias for (or delegate to) the underlying RxJava
  // rx.util classes, so Scala code can use them without importing rx.util.

  /**
   * Tagging interface for objects which can open buffers.
   * @see [[Observable `Observable.buffer(Observable[Opening], Opening => Observable[Closing])`]]
   */
  type Opening = rx.util.Opening

  /**
   * Creates an object which can open buffers.
   * @see [[Observable `Observable.buffer(Observable[Opening], Opening => Observable[Closing])`]]
   */
  def Opening() = rx.util.Openings.create()

  /**
   * Tagging interface for objects which can close buffers.
   * @see [[Observable `Observable.buffer(Observable[Opening], Opening => Observable[Closing])`]]
   */
  type Closing = rx.util.Closing

  /**
   * Creates an object which can close buffers.
   * @see [[Observable `Observable.buffer(Observable[Opening], Opening => Observable[Closing])`]]
   */
  def Closing() = rx.util.Closings.create()

  // rx.util.Range not needed because there's a standard Scala Range
}
| kevinwright/RxScala | src/main/scala/rxscala/util/package.scala | Scala | apache-2.0 | 1,600 |
package controllers
import akka.util.ByteString
import play.twirl.api.Xml
import play.api.libs.json.{JsString, Json}
import play.api.data._
import play.api.data.Forms._
import javax.inject._
import scala.collection.{immutable => imm}
import play.api._
import play.api.i18n.{I18nSupport, Lang, MessagesApi}
import play.api.mvc._
import models._
import java.sql.Connection
import play.api.db.DBApi
import play.api.i18n.{I18nSupport, Messages => msg}
import play.api.http.Writeable
import scala.concurrent.ExecutionContext
/**
 * Serves the blog's Atom feed.
 */
@Singleton
class RssController @Inject()(
  cc: ControllerComponents,
  implicit val ec: ExecutionContext,
  dbApi: DBApi,
  val bloggerRepo: BloggerRepo,
  settings: Settings
) extends AbstractController(cc) with I18nSupport with AuthenticatedSupport with TimeZoneSupport {
  // Default Play database; connections are borrowed per request in atom().
  val db = dbApi.database("default")

  // Renders Twirl Xml as text/xml, prepending the XML declaration (Twirl
  // templates do not emit it themselves).
  val xmlWriteable = new Writeable[Xml](
    xml => ByteString("""<?xml version="1.0" encoding="UTF-8"?>""" + xml.toString, ByteString.UTF_8),
    Some("text/xml")
  )

  /**
   * Returns one page of articles as an Atom feed.
   * `now` is a caller-supplied time value passed to Article.list —
   * NOTE(review): presumably epoch millis used as the publication cutoff; confirm.
   */
  def atom(page: Int, pageSize: Int, orderBySpec: String, now: Long) = Action { implicit req =>
    db.withConnection { implicit conn =>
      val recs: PagedRecords[Article] = Article.list(page, pageSize, OrderBy(orderBySpec), now)
      Ok(
        views.xml.atom(recs, settings)
      )(xmlWriteable)
    }
  }
}
| ruimo/blog | app/controllers/RssController.scala | Scala | apache-2.0 | 1,323 |
package is.hail.expr
import is.hail.HailSuite
import org.testng.annotations.Test
class ParserSuite extends HailSuite {
  /** oneOfLiteral must accept exactly its declared literals and reject everything else. */
  @Test def testOneOfLiteral(): Unit = {
    val literals = Array("A", "B", "AB", "AA", "CAD", "EF")
    val parser = Parser.oneOfLiteral(literals)

    // every declared literal round-trips through the parser
    for (lit <- literals)
      assert(parser.parse(lit) == lit)

    // inputs that are not a single literal are rejected
    assert(parser.parseOpt("hello^&").isEmpty)
    assert(parser.parseOpt("ABhello").isEmpty)

    // repeated application tokenizes a concatenation of literals
    assert(Parser.rep(parser).parse("ABCADEF") == List("AB", "CAD", "EF"))
  }
}
| hail-is/hail | hail/src/test/scala/is/hail/expr/ParserSuite.scala | Scala | mit | 473 |
/*
* Copyright (C) 2014 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.core
import scala.slick.lifted.TableQuery
package object replication {
  // Shared Slick TableQuery for the Replicas table; lazy so it is only built
  // on first use.
  lazy val replicas = TableQuery[Replicas]
}
| ISCPIF/PSEExperiments | openmole-src/openmole/core/org.openmole.core.replication/src/main/scala/org/openmole/core/replication/package.scala | Scala | agpl-3.0 | 841 |
package io.hydrosphere.mist
import java.nio.file.{Files, Path}
import io.hydrosphere.mist.master.JobResult
import io.hydrosphere.mist.master.models.FunctionConfig
import scalaj.http._
/**
 * Small synchronous HTTP client for the Mist v2 REST API, used by tests.
 *
 * @param host    Mist master host
 * @param port    Mist master HTTP port
 * @param timeout request timeout in seconds (applied to both connect and read)
 */
case class MistHttpInterface(
  host: String,
  port: Int,
  timeout: Int = 120
) {

  import io.hydrosphere.mist.master.interfaces.JsonCodecs._
  import spray.json.{enrichString, _}

  /** Runs the function `routeId` with the given parameters and returns its result. */
  def runJob(routeId: String, params: (String, Any)*): JobResult =
    callV2Api(routeId, params.toMap)

  /**
   * Uploads `file` as a named artifact via multipart POST.
   * @throws RuntimeException when the server answers with a non-200 status
   */
  def uploadArtifact(name: String, file: Path): Unit = {
    val bytes = Files.readAllBytes(file)
    val req = Http(s"http://$host:$port/v2/api/artifacts")
      .postMulti(MultiPart("file", name, "application/octet-stream", bytes))

    val resp = req.asBytes
    if (resp.code != 200)
      throw new RuntimeException(s"File $file uploading failed. Code: ${resp.code}, body: ${resp.body}")
  }

  /** Fetches the raw body of the status endpoint. */
  def status: String = {
    val req = Http(s"http://$host:$port/v2/api/status")
    // Fix: decode explicitly as UTF-8; `new String(bytes)` used the platform
    // default charset, making the result environment dependent.
    new String(req.asBytes.body, "UTF-8")
  }

  /**
   * Registers a function and returns the configuration echoed by the server.
   * @throws RuntimeException when the server answers with a non-200 status
   */
  def createFunction(ep: FunctionConfig): FunctionConfig = {
    val req = Http(s"http://$host:$port/v2/api/functions")
      .postData(ep.toJson)

    val resp = req.asString
    if (resp.code == 200)
      resp.body.parseJson.convertTo[FunctionConfig]
    else
      throw new RuntimeException(s"Function creation failed. Code: ${resp.code}, body: ${resp.body}")
  }

  /**
   * POSTs a job for `functionId` with force=true and parses the JobResult.
   * NOTE(review): force=true presumably makes the call synchronous — confirm
   * against the Mist API docs.
   * @throws RuntimeException when the server answers with a non-200 status
   */
  def callV2Api(
    functionId: String,
    params: Map[String, Any]
  ): JobResult = {
    val millis = timeout * 1000
    val url = s"http://$host:$port/v2/api/functions/$functionId/jobs?force=true"
    val req = Http(url)
      .timeout(millis, millis)
      .header("Content-Type", "application/json")
      .postData(params.toJson)

    val resp = req.asString
    if (resp.code == 200)
      resp.body.parseJson.convertTo[JobResult]
    else
      throw new RuntimeException(s"Job failed body ${resp.body}")
  }

  // Kept for source compatibility: external code may reference these.
  sealed trait ActionType
  case object Execute extends ActionType
  case object Serve extends ActionType
}
| Hydrospheredata/mist | mist-tests/scala/io.hydrosphere.mist/MistHttpInterface.scala | Scala | apache-2.0 | 1,994 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.persistence.cassandra
import java.net.InetSocketAddress
import java.net.URI
import scala.collection.immutable
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.Promise
import scala.concurrent.duration._
import scala.util.control.NoStackTrace
import akka.actor.ActorSystem
import akka.persistence.cassandra.ConfigSessionProvider
import com.typesafe.config.Config
import play.api.Logger
/**
* Internal API
*/
private[lagom] final class ServiceLocatorSessionProvider(system: ActorSystem, config: Config)
    extends ConfigSessionProvider(system, config) {

  private val log = Logger(getClass)

  /**
   * Resolves Cassandra contact points for `clusterId` through the service
   * locator held by [[ServiceLocatorHolder]] rather than static configuration.
   * Fails the future with [[NoContactPointsException]] when the locator knows
   * no URIs, and with IllegalArgumentException (via require) when a located
   * URI is missing a host or a port.
   */
  override def lookupContactPoints(
      clusterId: String
  )(implicit ec: ExecutionContext): Future[immutable.Seq[InetSocketAddress]] = {
    ServiceLocatorHolder(system).serviceLocatorEventually.flatMap { serviceLocatorAdapter =>
      serviceLocatorAdapter.locateAll(clusterId).map {
        case Nil => throw new NoContactPointsException(s"No contact points for [$clusterId]")
        case uris =>
          log.debug(s"Found Cassandra contact points: $uris")

          // URIs must be all valid
          uris.foreach { uri =>
            require(uri.getHost != null, s"missing host in $uri for Cassandra contact points $clusterId")
            require(uri.getPort != -1, s"missing port in $uri for Cassandra contact points $clusterId")
          }
          uris.map { uri =>
            new InetSocketAddress(uri.getHost, uri.getPort)
          }
      }
    }
  }
}
/** Signals that the service locator returned no URIs for a Cassandra cluster id; carries no stack trace (NoStackTrace). */
private[lagom] final class NoContactPointsException(msg: String) extends RuntimeException(msg) with NoStackTrace
| lagom/lagom | persistence-cassandra/core/src/main/scala/com/lightbend/lagom/internal/persistence/cassandra/ServiceLocatorSessionProvider.scala | Scala | apache-2.0 | 1,740 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.